/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;

  stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
  body_cost_vec->safe_push (si);

  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  return (unsigned)
    (builtin_vectorization_cost (kind, vectype, misalign) * count);
}

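/* Example (illustrative sketch; COST_VEC and MISALIGN below are
   placeholder names, not identifiers from this file): a caller costing
   two copies of an unaligned load of a gather/scatter access might do

     unsigned cost = record_stmt_cost (&cost_vec, 2, unaligned_load,
				       stmt_info, misalign, vect_body);

   and the remapping above records the entries as vector_gather_load,
   because STMT_VINFO_GATHER_SCATTER_P holds for STMT_INFO.  */
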
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT_INFO and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		   tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT_INFO.  */

static void
write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		    tree vect, tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

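/* Example (illustrative sketch of how these helpers are used for a
   load-lanes group of size 2; the SSA names are placeholders):

     vect_array = LOAD_LANES (<mem>);	// fills the whole array
     vect1 = vect_array[0];		// emitted by read_vector_array
     vect2 = vect_array[1];		// emitted by read_vector_array

   with vect_array created by create_vector_array; stores go the other
   way, through write_vector_array into the array and then a single
   STORE_LANES of the whole array.  */
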
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Add a clobber of variable VAR to the vectorization of STMT_INFO.
   Emit the clobber before *GSI.  */

static void
vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		       tree var)
{
  tree clobber = build_clobber (TREE_TYPE (var));
  gimple *new_stmt = gimple_build_assign (var, clobber);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
		    enum vect_relevant relevant, bool live_p)
{
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern; in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_vec_info old_stmt_info = stmt_info;
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt_info);
}

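/* Note (illustrative): the vect_relevant values are ordered by
   increasing relevance, and the update above only ever raises
   STMT_VINFO_RELEVANT.  For example, a def already marked
   vect_used_by_reduction keeps that marking when a weaker relevance
   arrives later, but is upgraded if some non-reduction use marks it
   vect_used_in_scope.  */
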
/* Function is_simple_and_all_uses_invariant

   Return true if STMT_INFO is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
				  loop_vec_info loop_vinfo)
{
  tree op;
  ssa_op_iter iter;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
   is "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt_info->stmt)
      && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt_info->stmt)
	&& !gimple_clobber_p (stmt_info->stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop-closed SSA form).  */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}

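/* Examples (illustrative):

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;	  // the store has a vdef -> vect_used_in_scope

     for (i = 0; i < n; i++)
       sum += b[i];	  // sum's def is used in a loop exit phi
     ... = sum;		  // -> *live_p = true

   A stmt that is only live but computes a loop invariant is left
   vect_unused_in_scope by the final test above; otherwise it becomes
   vect_used_only_live.  */
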
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT_INFO.  Check if USE is
   used in STMT_INFO for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
{
  tree operand;

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is one of the
     following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign || !gimple_assign_copy_p (assign))
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (call && gimple_call_internal_p (call))
	{
	  internal_fn ifn = gimple_call_internal_fn (call);
	  int mask_index = internal_fn_mask_index (ifn);
	  if (mask_index >= 0
	      && use == gimple_call_arg (call, mask_index))
	    return true;
	  int stored_value_index = internal_fn_stored_value_index (ifn);
	  if (stored_value_index >= 0
	      && use == gimple_call_arg (call, stored_value_index))
	    return true;
	  if (internal_gather_scatter_fn_p (ifn)
	      && use == gimple_call_arg (call, 1))
	    return true;
	}
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (assign);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

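/* Examples (illustrative): for the store "a[i_1] = x_2" the use x_2 is
   a non-indexing operand (the stored value) while i_1 only indexes the
   array.  For an internal call such as MASK_STORE (ptr, align, mask_3,
   x_2), both the mask and the stored value are recognized as
   non-indexing uses via the internal_fn_*_index checks above.  */
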
/*
   Function process_use.

   Inputs:
   - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
       STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
   we skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
   "relevant" will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
	     bool force)
{
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!dstmt_vinfo)
    return true;

  def_bb = gimple_bb (dstmt_vinfo->stmt);

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
     DSTMT_VINFO must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DSTMT_VINFO in the loop.  So we just
     check that everything is as expected, and we are done.  */
  bb = gimple_bb (stmt_vinfo->stmt);
  if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = dstmt_vinfo
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = dstmt_vinfo
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
		      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
			 == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
				      loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
  return true;
}

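/* Example (illustrative sketch of case 3a; d_4 is a placeholder name):

     outer-loop:			// loop being vectorized
	 d_4 = b[i_1];			// outer-loop def
	 inner-loop:
	     ... = ... d_4 ...;		// inner-loop use

   If the inner-loop use stmt was marked vect_used_in_outer, the switch
   in case 3a marks the outer-loop def d_4 as vect_used_in_scope.  */
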
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i;
  basic_block bb;
  bool live_p;
  enum vect_relevant relevant;

  DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");

  auto_vec<stmt_vec_info, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
	    }

	  if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi_info, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt_vec_info stmt_vinfo = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (assign);
	      tree op = gimple_assign_rhs1 (assign);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
				    loop_vinfo, relevant, &worklist, false)
		      || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
				       loop_vinfo, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (assign); i++)
		{
		  op = gimple_op (assign, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (call); i++)
		{
		  tree arg = gimple_call_arg (call, i);
		  if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Compute the prologue cost for invariant or constant operands.  */

static unsigned
vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
			       unsigned opno, enum vect_def_type dt,
			       stmt_vector_for_cost *cost_vec)
{
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
  tree op = gimple_op (stmt, opno);
  unsigned prologue_cost = 0;

  /* Without looking at the actual initializer a vector of
     constants can be implemented as load from the constant pool.
     When all elements are the same we can use a splat.  */
  tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
  unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
  unsigned num_vects_to_check;
  unsigned HOST_WIDE_INT const_nunits;
  unsigned nelt_limit;
  if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
      && ! multiple_p (const_nunits, group_size))
    {
      num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
      nelt_limit = const_nunits;
    }
  else
    {
      /* If either the vector has variable length or the vectors
	 are composed of repeated whole groups we only need to
	 cost construction once.  All vectors will be the same.  */
      num_vects_to_check = 1;
      nelt_limit = group_size;
    }
  tree elt = NULL_TREE;
  unsigned nelt = 0;
  for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
    {
      unsigned si = j % group_size;
      if (nelt == 0)
	elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
      /* ??? We're just tracking whether all operands of a single
	 vector initializer are the same, ideally we'd check if
	 we emitted the same one already.  */
      else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
				 opno))
	elt = NULL_TREE;
      nelt++;
      if (nelt == nelt_limit)
	{
	  /* ??? We need to pass down stmt_info for a vector type
	     even if it points to the wrong stmt.  */
	  prologue_cost += record_stmt_cost
	      (cost_vec, 1,
	       dt == vect_external_def
	       ? (elt ? scalar_to_vec : vec_construct)
	       : vector_load,
	       stmt_info, 0, vect_prologue);
	  nelt = 0;
	}
    }

  return prologue_cost;
}

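/* Worked example (illustrative): with a group of 4 scalar stmts whose
   operand OPNO is external and a constant-length V4SI vectype,
   const_nunits == 4 is a multiple of group_size == 4, so construction
   is costed once with nelt_limit == group_size == 4.  If all four
   operands are identical, ELT stays non-NULL and one scalar_to_vec
   (a splat) is recorded; otherwise one vec_construct.  A constant
   operand is costed as a vector_load from the constant pool
   instead.  */
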
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			slp_tree node,
			stmt_vector_for_cost *cost_vec)
{
  int inside_cost = 0, prologue_cost = 0;

  gcc_assert (cost_vec != NULL);

  /* ??? Somehow we need to fix this at the callers.  */
  if (node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);

  if (node)
    {
      /* Scan operands and account for prologue cost of constants/externals.
	 ??? This over-estimates cost for multiple uses and should be
	 re-engineered.  */
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
      tree lhs = gimple_get_lhs (stmt);
      for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
	{
	  tree op = gimple_op (stmt, i);
	  enum vect_def_type dt;
	  if (!op || op == lhs)
	    continue;
	  if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
	      && (dt == vect_constant_def || dt == vect_external_def))
	    prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
							    i, dt, cost_vec);
	}
    }
  else
    /* Cost the "broadcast" of a scalar operand into a vector operand.
       Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
       cost model.  */
    for (int i = 0; i < ndts; i++)
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
	prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
					   stmt_info, 0, vect_prologue);

  /* Adjust for two-operator SLP nodes.  */
  if (node && SLP_TREE_TWO_OPERATORS (node))
    {
      ncopies *= 2;
      inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
				       stmt_info, 0, vect_body);
    }

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
				   stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

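/* Note (illustrative): for a two-operator SLP node (e.g. alternating
   plus/minus across the group) the code above doubles NCOPIES and adds
   one vec_perm per copy, so two copies are costed as four vector_stmt
   plus four vec_perm entries.  */
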
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr,
				    stmt_vector_for_cost *cost_vec)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
				       vec_promote_demote, stmt_info, 0,
				       vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

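/* Worked example (illustrative): a two-step promotion (PWR == 1),
   e.g. char -> short -> int, is costed as vect_pow2 (1) + vect_pow2 (2)
   = 2 + 4 = 6 vec_promote_demote stmts, since each promotion step
   doubles the number of vectors produced.  A single-step demotion
   (PWR == 0) costs vect_pow2 (0) = 1 stmt.  */
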
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

static void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       enum vect_def_type dt,
		       vect_memory_access_type memory_access_type,
		       vec_load_store_type vls_type, slp_tree slp_node,
		       stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  stmt_vec_info first_stmt_info = stmt_info;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (slp_node)
	prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
							1, dt, cost_vec);
      else
	prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
					   stmt_info, 0, vect_prologue);
    }

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

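/* Worked example (illustrative): a VMAT_CONTIGUOUS_PERMUTE store group
   of size 4 needs ceil_log2 (4) == 2 rounds of interleaving, so with
   NCOPIES == 1 the permute overhead is 1 * 2 * 4 == 8 vec_perm stmts
   on top of the stores themselves.  */
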
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr_info),
					  vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

static void
vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_instance instance,
		      slp_tree slp_node,
		      stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  gcc_assert (cost_vec);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
    {
      /* If the load is permuted then the alignment is determined by
	 the first group element not by the first scalar stmt DR.  */
      stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      /* Record the cost for the permutation.  */
      unsigned n_perms;
      unsigned assumed_nunits
	= vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
      unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
      vect_transform_slp_perm_load (slp_node, vNULL, NULL,
				    slp_vf, instance, true,
				    &n_perms);
      inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
				       first_stmt_info, 0, vect_body);
      /* And adjust the number of loads performed.  This handles
	 redundancies as well as loads that are later dead.  */
      auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
      bitmap_clear (perm);
      for (unsigned i = 0;
	   i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
	bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
      ncopies = 0;
      bool load_seen = false;
      for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
	{
	  if (i % assumed_nunits == 0)
	    {
	      if (load_seen)
		ncopies++;
	      load_seen = false;
	    }
	  if (bitmap_bit_p (perm, i))
	    load_seen = true;
	}
      if (load_seen)
	ncopies++;
      gcc_assert (ncopies
		  <= (DR_GROUP_SIZE (first_stmt_info)
		      - DR_GROUP_GAP (first_stmt_info)
		      + assumed_nunits - 1) / assumed_nunits);
    }

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  stmt_vec_info first_stmt_info = stmt_info;
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
				       stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			cost_vec, cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

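/* Worked example (illustrative): for an SLP load permutation
   { 0, 2, 4, 6 } from a group of size 8 with assumed_nunits == 4, the
   bitmap has bits 0, 2, 4 and 6 set.  Both four-element windows [0, 4)
   and [4, 8) contain a needed element, so the loop above recomputes
   NCOPIES as 2: two vector loads feed the permuted vectors.  */
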
/* Calculate the cost of the memory access described by STMT_INFO's
   data reference.  */
void
vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr_info),
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

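/* Illustrative example for vect_get_load_cost (the numbers are
   hypothetical, not from any particular cost model): if an aligned
   vector load costs 1 and the target reports unaligned loads as costing
   2, dr_unaligned_supported records the extra cost through the
   DR_MISALIGNMENT hint, whereas dr_explicit_realign instead records two
   vector loads plus a permute for each copy.  */
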
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT_VINFO.  */

static void
vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
		    gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt_vinfo))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   a vector type, a vector with all elements equal to VAL is created first.
   Place the initialization at GSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT_INFO.  */

tree
vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
		  gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We also abuse this function to copy an arbitrary value VAL into an
     SSA name of type TYPE; cope with VAL having a different type here.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt_info, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt_info, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt_info, init_stmt, gsi);
  return new_temp;
}

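/* Illustrative example for vect_init_vector: asked for a V4SI vector
   with scalar VAL 5, it builds

       cst_1 = { 5, 5, 5, 5 };

   inserts the statement at GSI (or on the loop preheader edge when GSI
   is null) and returns cst_1 for use as a vectorized operand.  */
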
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
   with type DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
				enum vect_def_type dt)
{
  tree vec_oprnd;
  stmt_vec_info vec_stmt_info;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt_info
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt_info = (STMT_VINFO_VEC_STMT
			   (STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt_info);
	if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
	  vec_oprnd = PHI_RESULT (phi);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
	  vec_oprnd = PHI_RESULT (phi);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT_VINFO.  This function returns a (vector) def
   that will be used in the vectorized stmt for STMT_VINFO.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   the vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  stmt_vec_info def_stmt_info;
  is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
				      &def_stmt_info, &def_stmt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.  VINFO describes the vectorization.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
{
  stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
  if (!def_stmt_info)
    /* Do nothing; can reuse same def.  */
    return vec_oprnd;

  def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (def_stmt_info);
  if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
    vec_oprnd = PHI_RESULT (phi);
  else
    vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}

/* Helper function called by vect_finish_replace_stmt and
   vect_finish_stmt_generation.  Set the location of the new
   statement and create and return a stmt_vec_info for it.  */

static stmt_vec_info
vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
{
  vec_info *vinfo = stmt_info->vinfo;

  stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);

  return vec_stmt_info;
}

/* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
   which sets the same scalar result as STMT_INFO did.  Create and return a
   stmt_vec_info for VEC_STMT.  */

stmt_vec_info
vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
{
  gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));

  gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
  gsi_replace (&gsi, vec_stmt, false);

  return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
}

/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
   before *GSI.  Create and return a stmt_vec_info for VEC_STMT.  */

stmt_vec_info
vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
  return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
}

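/* Illustrative example for the virtual SSA update in
   vect_finish_stmt_generation: if the insertion point is a scalar store
   with vuse .MEM_3 and a vectorized store is inserted before it, the
   new store receives vuse .MEM_3 and a fresh vdef .MEM_5, and the
   scalar store's vuse is rewired from .MEM_3 to .MEM_5, keeping virtual
   SSA form valid without rerunning the renamer.  */
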
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}

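/* Illustrative example for vectorizable_internal_function: a call to
   sqrtf maps to the combined function CFN_BUILT_IN_SQRTF, whose
   associated internal function is IFN_SQRT; if the target provides the
   direct optab for IFN_SQRT on the given vector types, IFN_SQRT is
   returned and the call can be vectorized as an internal-function
   call.  */
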
static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
				  gimple_stmt_iterator *);

/* Check whether a load or store statement in the loop described by
   LOOP_VINFO is possible in a fully-masked loop.  This is testing
   whether the vectorizer pass has the appropriate support, as well as
   whether the target does.

   VLS_TYPE says whether the statement is a load or store and VECTYPE
   is the type of the vector being loaded or stored.  MEMORY_ACCESS_TYPE
   says how the load or store is going to be implemented and GROUP_SIZE
   is the number of load or store statements in the containing group.
   If the access is a gather load or scatter store, GS_INFO describes
   its arguments.

   Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
   supported, otherwise record the required mask types.  */

static void
check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
			  vec_load_store_type vls_type, int group_size,
			  vect_memory_access_type memory_access_type,
			  gather_scatter_info *gs_info)
{
  /* Invariant loads need no special support.  */
  if (memory_access_type == VMAT_INVARIANT)
    return;

  vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
  machine_mode vecmode = TYPE_MODE (vectype);
  bool is_load = (vls_type == VLS_LOAD);
  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    {
      if (is_load
	  ? !vect_load_lanes_supported (vectype, group_size, true)
	  : !vect_store_lanes_supported (vectype, group_size, true))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "can't use a fully-masked loop because the"
			     " target doesn't have an appropriate masked"
			     " load/store-lanes instruction.\n");
	  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	  return;
	}
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      internal_fn ifn = (is_load
			 ? IFN_MASK_GATHER_LOAD
			 : IFN_MASK_SCATTER_STORE);
      tree offset_type = TREE_TYPE (gs_info->offset);
      if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
						   gs_info->memory_type,
						   TYPE_SIGN (offset_type),
						   gs_info->scale))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "can't use a fully-masked loop because the"
			     " target doesn't have an appropriate masked"
			     " gather load or scatter store instruction.\n");
	  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	  return;
	}
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type != VMAT_CONTIGUOUS
      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Element X of the data must come from iteration i * VF + X of the
	 scalar loop.  We need more work to support other mappings.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't use a fully-masked loop because an access"
			 " isn't contiguous.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }

  machine_mode mask_mode;
  if (!(targetm.vectorize.get_mask_mode
	(GET_MODE_NUNITS (vecmode),
	 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
      || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't use a fully-masked loop because the target"
			 " doesn't have the appropriate masked load or"
			 " store.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }
  /* We might load more scalars than we need for permuting SLP loads.
     We checked in get_group_load_store_type that the extra elements
     don't leak into a new vector.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int nvectors;
  if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
    vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
  else
    gcc_unreachable ();
}

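/* Illustrative example for the mask bookkeeping above: with a group of
   two contiguous loads, a vectorization factor of 8 and 4-element
   vectors, group_size * vf = 16 elements span nvectors = 4 vectors, so
   four masks of the appropriate type are recorded for the fully-masked
   loop.  */
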
/* Return the mask input to a masked load or store.  VEC_MASK is the vectorized
   form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
   that needs to be applied to all loads and stores in a vectorized loop.
   Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.

   MASK_TYPE is the type of both masks.  If new statements are needed,
   insert them before GSI.  */

static tree
prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
			 gimple_stmt_iterator *gsi)
{
  gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
  if (!loop_mask)
    return vec_mask;

  gcc_assert (TREE_TYPE (loop_mask) == mask_type);
  tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
  gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
					  vec_mask, loop_mask);
  gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
  return and_res;
}

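/* Illustrative example for prepare_load_store_mask: when vectorizing
   "if (c[i]) x[i] = y[i]" in a fully-masked loop, the vectorized
   condition mask is ANDed with the loop mask:

       vec_mask_and_1 = vec_mask_2 & loop_mask_3;

   so that lanes beyond the live iterations and lanes whose condition is
   false are both suppressed.  */
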
/* Determine whether we can use a gather load or scatter store to vectorize
   strided load or store STMT_INFO by truncating the current offset to a
   smaller width.  We need to be able to construct an offset vector:

     { 0, X, X*2, X*3, ... }

   without loss of precision, where X is STMT_INFO's DR_STEP.

   Return true if this is possible, describing the gather load or scatter
   store in GS_INFO.  MASKED_P is true if the load or store is conditional.  */

static bool
vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
				     loop_vec_info loop_vinfo, bool masked_p,
				     gather_scatter_info *gs_info)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  data_reference *dr = dr_info->dr;
  tree step = DR_STEP (dr);
  if (TREE_CODE (step) != INTEGER_CST)
    {
      /* ??? Perhaps we could use range information here?  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cannot truncate variable step.\n");
      return false;
    }

  /* Get the number of bits in an element.  */
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);

  /* Set COUNT to the upper limit on the number of elements - 1.
     Start with the maximum vectorization factor.  */
  unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;

  /* Try lowering COUNT to the number of scalar latch iterations.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  widest_int max_iters;
  if (max_loop_iterations (loop, &max_iters)
      && max_iters < count)
    count = max_iters.to_shwi ();

  /* Try scales of 1 and the element size.  */
  int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
  wi::overflow_type overflow = wi::OVF_NONE;
  for (int i = 0; i < 2; ++i)
    {
      int scale = scales[i];
      widest_int factor;
      if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
	continue;

      /* See whether we can calculate (COUNT - 1) * STEP / SCALE
	 in OFFSET_BITS bits.  */
      widest_int range = wi::mul (count, factor, SIGNED, &overflow);
      if (overflow)
	continue;
      signop sign = range >= 0 ? UNSIGNED : SIGNED;
      if (wi::min_precision (range, sign) > element_bits)
	{
	  overflow = wi::OVF_UNKNOWN;
	  continue;
	}

      /* See whether the target supports the operation.  */
      tree memory_type = TREE_TYPE (DR_REF (dr));
      if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
				     memory_type, element_bits, sign, scale,
				     &gs_info->ifn, &gs_info->element_type))
	continue;

      tree offset_type = build_nonstandard_integer_type (element_bits,
							 sign == UNSIGNED);

      gs_info->decl = NULL_TREE;
      /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
	 but we don't need to store that here.  */
      gs_info->base = NULL_TREE;
      gs_info->offset = fold_convert (offset_type, step);
      gs_info->offset_dt = vect_constant_def;
      gs_info->offset_vectype = NULL_TREE;
      gs_info->scale = scale;
      gs_info->memory_type = memory_type;
      return true;
    }

  if (overflow && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "truncating gather/scatter offset to %d bits"
		     " might change its value.\n", element_bits);

  return false;
}

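/* Illustrative (hypothetical) numbers for the truncation test above:
   for a strided access with DR_STEP 4, 32-bit elements (element_bits
   == 32) and at most 100 latch iterations, scale 1 gives range
   = 100 * 4 = 400, which needs only 9 bits, so the offset vector
   { 0, 4, 8, ... } fits in a 32-bit offset type without loss of
   precision.  */
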
/* Return true if we can use gather/scatter internal functions to
   vectorize STMT_INFO, which is a grouped or strided load or store.
   MASKED_P is true if the load or store is conditional.  When returning
   true, fill in GS_INFO with the information required to perform the
   operation.  */

static bool
vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
				    loop_vec_info loop_vinfo, bool masked_p,
				    gather_scatter_info *gs_info)
{
  if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
      || gs_info->decl)
    return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
						masked_p, gs_info);

  scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
  tree offset_type = TREE_TYPE (gs_info->offset);
  unsigned int offset_bits = TYPE_PRECISION (offset_type);

  /* Enforced by vect_check_gather_scatter.  */
  gcc_assert (element_bits >= offset_bits);

  /* If the elements are wider than the offset, convert the offset to the
     same width, without changing its sign.  */
  if (element_bits > offset_bits)
    {
      bool unsigned_p = TYPE_UNSIGNED (offset_type);
      offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
      gs_info->offset = fold_convert (offset_type, gs_info->offset);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "using gather/scatter for strided/grouped access,"
		     " scale = %d\n", gs_info->scale);

  return true;
}

/* STMT_INFO is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (stmt_vec_info stmt_info)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
			       size_zero_node);
}

/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}

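/* Illustrative example for perm_mask_for_reverse: for an 8-element
   vector the series built above is { 7, 6, 5 }; since the encoding uses
   a single stepped pattern, the vec_perm_indices machinery extends it
   to { 7, 6, 5, 4, 3, 2, 1, 0 }, i.e. a full reversal.  */
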
/* STMT_INFO is either a masked or unconditional store.  Return the value
   being stored.  */

tree
vect_get_store_rhs (stmt_vec_info stmt_info)
{
  if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      gcc_assert (gimple_assign_single_p (assign));
      return gimple_assign_rhs1 (assign);
    }
  if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
    {
      internal_fn ifn = gimple_call_internal_fn (call);
      int index = internal_fn_stored_value_index (ifn);
      gcc_assert (index >= 0);
      return gimple_call_arg (call, index);
    }
  gcc_unreachable ();
}

/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT_INFO is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
			   bool masked_p, vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type,
			   gather_scatter_info *gs_info)
{
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
  dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
  unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
  bool single_element_p = (stmt_info == first_stmt_info
			   && !DR_GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (!masked_p
			&& vls_type == VLS_LOAD
			&& loop_vinfo
			&& !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (multiple_p (nunits, group_size))
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr_info)
			/ vect_get_scalar_dr_size (first_dr_info)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && !masked_p
	  && gap < (vect_known_alignment_in_bytes (first_dr_info)
		    / vect_get_scalar_dr_size (first_dr_info)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt_info) > 0)
	{
	  /* First cope with the degenerate case of a single-element
	     vector.  */
	  if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
	    *memory_access_type = VMAT_CONTIGUOUS;

	  /* Otherwise try using LOAD/STORE_LANES.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_load_lanes_supported (vectype, group_size, masked_p)
		  : vect_store_lanes_supported (vectype, group_size,
						masked_p)))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}

      /* As a last resort, try using a gather load or scatter store.

	 ??? Although the code can handle all group sizes correctly,
	 it probably isn't a win to use separate strided accesses based
	 on nearby locations.  Or, even if it's a win over scalar code,
	 it might not be a win over vectorizing at a lower VF, if that
	 allows us to use contiguous accesses.  */
      if (*memory_access_type == VMAT_ELEMENTWISE
	  && single_element_p
	  && loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
    }

  if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
    {
      /* STMT_INFO is the leader of the group.  Check the operands of all
	 the stmts of the group.  */
      stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt_info)
	{
	  tree op = vect_get_store_rhs (next_stmt_info);
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}

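/* Illustrative (hypothetical) numbers for the overrun test in
   get_group_load_store_type: with a trailing gap of one 4-byte element
   and a first access known to be 16-byte aligned, the gap (1) is less
   than 16 / 4 = 4, so every vector access stays within an aligned
   16-byte block that contains at least one real group element, and the
   overrun is harmless.  */
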
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT_INFO is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}

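/* Illustrative example for get_negative_load_store_type: a load from
   a[n - i] has a constant negative step; with a single copy, V4SI
   vectors and reversal support it becomes VMAT_CONTIGUOUS_REVERSE,
   i.e. a contiguous vector load followed by a reversing permute.  */
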
/* Analyze load or store statement STMT_INFO of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   MASKED_P is true if the statement is conditional on a vectorized mask.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
		     bool masked_p, vec_load_store_type vls_type,
		     unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
				      vls_type, memory_access_type, gs_info))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      if (loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
      else
	*memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt_info);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt_info, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info)
      && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
	   && !DR_GROUP_NEXT_ELEMENT (stmt_info)
	   && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }
  return true;
}

/* Return true if boolean argument MASK is suitable for vectorizing
   conditional load or store STMT_INFO.  When returning true, store the type
   of the definition in *MASK_DT_OUT and the type of the vectorized mask
   in *MASK_VECTYPE_OUT.  */

static bool
vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
			    vect_def_type *mask_dt_out,
			    tree *mask_vectype_out)
{
  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not a boolean.\n");
      return false;
    }

  if (TREE_CODE (mask) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not an SSA name.\n");
      return false;
    }

  enum vect_def_type mask_dt;
  tree mask_vectype;
  if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "could not find an appropriate vector mask type.\n");
      return false;
    }

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vector mask type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION,
		       " does not match vector data type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
	}
      return false;
    }

  *mask_dt_out = mask_dt;
  *mask_vectype_out = mask_vectype;
  return true;
}

3133c3b6 | 2552 | /* Return true if stored value RHS is suitable for vectorizing store |
32e8e429 | 2553 | statement STMT_INFO. When returning true, store the type of the |
929b4411 RS |
2554 | definition in *RHS_DT_OUT, the type of the vectorized store value in |
2555 | *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */ | |
3133c3b6 RS |
2556 | |
2557 | static bool | |
32e8e429 RS |
2558 | vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, |
2559 | vect_def_type *rhs_dt_out, tree *rhs_vectype_out, | |
2560 | vec_load_store_type *vls_type_out) | |
3133c3b6 RS |
2561 | { |
2562 | /* If this is a store from a constant, make sure
2563 | native_encode_expr can handle it. */
2564 | if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0) | |
2565 | { | |
2566 | if (dump_enabled_p ()) | |
2567 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2568 | "cannot encode constant as a byte sequence.\n"); | |
2569 | return false; | |
2570 | } | |
2571 | ||
929b4411 | 2572 | enum vect_def_type rhs_dt; |
3133c3b6 | 2573 | tree rhs_vectype; |
894dd753 | 2574 | if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype)) |
3133c3b6 RS |
2575 | { |
2576 | if (dump_enabled_p ()) | |
2577 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2578 | "use not simple.\n"); | |
2579 | return false; | |
2580 | } | |
2581 | ||
2582 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2583 | if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype)) | |
2584 | { | |
2585 | if (dump_enabled_p ()) | |
2586 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2587 | "incompatible vector types.\n"); | |
2588 | return false; | |
2589 | } | |
2590 | ||
929b4411 | 2591 | *rhs_dt_out = rhs_dt; |
3133c3b6 | 2592 | *rhs_vectype_out = rhs_vectype; |
929b4411 | 2593 | if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def) |
3133c3b6 RS |
2594 | *vls_type_out = VLS_STORE_INVARIANT; |
2595 | else | |
2596 | *vls_type_out = VLS_STORE; | |
2597 | return true; | |
2598 | } | |
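/* Illustrative example (not part of the original source): for

     for (i = 0; i < n; i++)
       a[i] = 42;

   the stored value 42 has definition type vect_constant_def, so
   *VLS_TYPE_OUT becomes VLS_STORE_INVARIANT and the vectorizer can
   build the vector { 42, 42, ... } once outside the loop.  */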
2599 | ||
82570274 | 2600 | /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO. |
bc9587eb RS |
2601 | Note that we support masks with floating-point type, in which case the |
2602 | floats are interpreted as a bitmask. */ | |
2603 | ||
2604 | static tree | |
82570274 | 2605 | vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) |
bc9587eb RS |
2606 | { |
2607 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
2608 | return build_int_cst (masktype, -1); | |
2609 | else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE) | |
2610 | { | |
2611 | tree mask = build_int_cst (TREE_TYPE (masktype), -1); | |
2612 | mask = build_vector_from_val (masktype, mask); | |
82570274 | 2613 | return vect_init_vector (stmt_info, mask, masktype, NULL); |
bc9587eb RS |
2614 | } |
2615 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype))) | |
2616 | { | |
2617 | REAL_VALUE_TYPE r; | |
2618 | long tmp[6]; | |
2619 | for (int j = 0; j < 6; ++j) | |
2620 | tmp[j] = -1; | |
2621 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype))); | |
2622 | tree mask = build_real (TREE_TYPE (masktype), r); | |
2623 | mask = build_vector_from_val (masktype, mask); | |
82570274 | 2624 | return vect_init_vector (stmt_info, mask, masktype, NULL); |
bc9587eb RS |
2625 | } |
2626 | gcc_unreachable (); | |
2627 | } | |
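/* Example (illustrative): some gather built-ins take their mask in a
   floating-point vector, e.g. a V4DF mask for a V4DF gather.  The
   SCALAR_FLOAT_TYPE_P branch above then materializes doubles whose bit
   patterns are all ones, which the target interprets as "every lane
   enabled".  */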
2628 | ||
2629 | /* Build an all-zero merge value of type VECTYPE while vectorizing | |
82570274 | 2630 | STMT_INFO as a gather load. */ |
bc9587eb RS |
2631 | |
2632 | static tree | |
82570274 | 2633 | vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) |
bc9587eb RS |
2634 | { |
2635 | tree merge; | |
2636 | if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE) | |
2637 | merge = build_int_cst (TREE_TYPE (vectype), 0); | |
2638 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype))) | |
2639 | { | |
2640 | REAL_VALUE_TYPE r; | |
2641 | long tmp[6]; | |
2642 | for (int j = 0; j < 6; ++j) | |
2643 | tmp[j] = 0; | |
2644 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype))); | |
2645 | merge = build_real (TREE_TYPE (vectype), r); | |
2646 | } | |
2647 | else | |
2648 | gcc_unreachable (); | |
2649 | merge = build_vector_from_val (vectype, merge); | |
82570274 | 2650 | return vect_init_vector (stmt_info, merge, vectype, NULL); |
bc9587eb RS |
2651 | } |
2652 | ||
32e8e429 RS |
2653 | /* Build a gather load call while vectorizing STMT_INFO. Insert new |
2654 | instructions before GSI and add them to VEC_STMT. GS_INFO describes | |
2655 | the gather load operation. If the load is conditional, MASK is the
2656 | unvectorized condition; otherwise MASK is null. */
c48d2d35 RS |
2658 | |
2659 | static void | |
32e8e429 RS |
2660 | vect_build_gather_load_calls (stmt_vec_info stmt_info, |
2661 | gimple_stmt_iterator *gsi, | |
1eede195 | 2662 | stmt_vec_info *vec_stmt, |
32e8e429 | 2663 | gather_scatter_info *gs_info, |
e4057a39 | 2664 | tree mask) |
c48d2d35 | 2665 | { |
c48d2d35 RS |
2666 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
2667 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2668 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2669 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
2670 | int ncopies = vect_get_num_copies (loop_vinfo, vectype); | |
2671 | edge pe = loop_preheader_edge (loop); | |
2672 | enum { NARROW, NONE, WIDEN } modifier; | |
2673 | poly_uint64 gather_off_nunits | |
2674 | = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype); | |
2675 | ||
2676 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl)); | |
2677 | tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl)); | |
2678 | tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2679 | tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2680 | tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2681 | tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2682 | tree scaletype = TREE_VALUE (arglist); | |
2683 | gcc_checking_assert (types_compatible_p (srctype, rettype) | |
2684 | && (!mask || types_compatible_p (srctype, masktype))); | |
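/* For instance (illustrative and target-specific): on x86 the AVX2
   gather built-in for V4DF data has the prototype

     v4df (v4df src, const double *base, v4si index, v4df mask, int scale);

   which matches the five argument types unpacked from ARGLIST above.  */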
2685 | ||
2686 | tree perm_mask = NULL_TREE; | |
2687 | tree mask_perm_mask = NULL_TREE; | |
2688 | if (known_eq (nunits, gather_off_nunits)) | |
2689 | modifier = NONE; | |
2690 | else if (known_eq (nunits * 2, gather_off_nunits)) | |
2691 | { | |
2692 | modifier = WIDEN; | |
2693 | ||
2694 | /* Currently widening gathers and scatters are only supported for | |
2695 | fixed-length vectors. */ | |
2696 | int count = gather_off_nunits.to_constant (); | |
2697 | vec_perm_builder sel (count, count, 1); | |
2698 | for (int i = 0; i < count; ++i) | |
2699 | sel.quick_push (i | (count / 2)); | |
2700 | ||
2701 | vec_perm_indices indices (sel, 1, count); | |
2702 | perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype, | |
2703 | indices); | |
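/* Worked example (illustrative): with 4-element data vectors and an
   8-element offset vector, COUNT == 8 and SEL becomes
   { 4, 5, 6, 7, 4, 5, 6, 7 }, so odd-numbered copies of the statement
   see the high half of the offset vector while even-numbered copies
   use the unpermuted low half.  */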
2704 | } | |
2705 | else if (known_eq (nunits, gather_off_nunits * 2)) | |
2706 | { | |
2707 | modifier = NARROW; | |
2708 | ||
2709 | /* Currently narrowing gathers and scatters are only supported for | |
2710 | fixed-length vectors. */ | |
2711 | int count = nunits.to_constant (); | |
2712 | vec_perm_builder sel (count, count, 1); | |
2713 | sel.quick_grow (count); | |
2714 | for (int i = 0; i < count; ++i) | |
2715 | sel[i] = i < count / 2 ? i : i + count / 2; | |
2716 | vec_perm_indices indices (sel, 2, count); | |
2717 | perm_mask = vect_gen_perm_mask_checked (vectype, indices); | |
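/* Worked example (illustrative): with COUNT == 8 the selector is
   { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. the low half of the first input
   followed by the low half of the second input.  Two consecutive gather
   results are thus concatenated into one full data vector, which is why
   NCOPIES is doubled just below.  */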
2718 | ||
2719 | ncopies *= 2; | |
2720 | ||
2721 | if (mask) | |
2722 | { | |
2723 | for (int i = 0; i < count; ++i) | |
2724 | sel[i] = i | (count / 2); | |
2725 | indices.new_vector (sel, 2, count); | |
2726 | mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices); | |
2727 | } | |
2728 | } | |
2729 | else | |
2730 | gcc_unreachable (); | |
2731 | ||
86a91c0a RS |
2732 | tree scalar_dest = gimple_get_lhs (stmt_info->stmt); |
2733 | tree vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
c48d2d35 RS |
2734 | |
2735 | tree ptr = fold_convert (ptrtype, gs_info->base); | |
2736 | if (!is_gimple_min_invariant (ptr)) | |
2737 | { | |
2738 | gimple_seq seq; | |
2739 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
2740 | basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
2741 | gcc_assert (!new_bb); | |
2742 | } | |
2743 | ||
2744 | tree scale = build_int_cst (scaletype, gs_info->scale); | |
2745 | ||
2746 | tree vec_oprnd0 = NULL_TREE; | |
2747 | tree vec_mask = NULL_TREE; | |
2748 | tree src_op = NULL_TREE; | |
2749 | tree mask_op = NULL_TREE; | |
2750 | tree prev_res = NULL_TREE; | |
2751 | stmt_vec_info prev_stmt_info = NULL; | |
2752 | ||
2753 | if (!mask) | |
2754 | { | |
86a91c0a RS |
2755 | src_op = vect_build_zero_merge_argument (stmt_info, rettype); |
2756 | mask_op = vect_build_all_ones_mask (stmt_info, masktype); | |
c48d2d35 RS |
2757 | } |
2758 | ||
2759 | for (int j = 0; j < ncopies; ++j) | |
2760 | { | |
2761 | tree op, var; | |
c48d2d35 RS |
2762 | if (modifier == WIDEN && (j & 1)) |
2763 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, | |
86a91c0a | 2764 | perm_mask, stmt_info, gsi); |
c48d2d35 RS |
2765 | else if (j == 0) |
2766 | op = vec_oprnd0 | |
86a91c0a | 2767 | = vect_get_vec_def_for_operand (gs_info->offset, stmt_info); |
c48d2d35 | 2768 | else |
e4057a39 RS |
2769 | op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo, |
2770 | vec_oprnd0); | |
c48d2d35 RS |
2771 | |
2772 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
2773 | { | |
2774 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), | |
2775 | TYPE_VECTOR_SUBPARTS (idxtype))); | |
2776 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); | |
2777 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); | |
e1bd7296 | 2778 | gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
86a91c0a | 2779 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c48d2d35 RS |
2780 | op = var; |
2781 | } | |
2782 | ||
2783 | if (mask) | |
2784 | { | |
2785 | if (mask_perm_mask && (j & 1)) | |
2786 | mask_op = permute_vec_elements (mask_op, mask_op, | |
86a91c0a | 2787 | mask_perm_mask, stmt_info, gsi); |
c48d2d35 RS |
2788 | else |
2789 | { | |
2790 | if (j == 0) | |
86a91c0a | 2791 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info); |
c48d2d35 | 2792 | else |
e4057a39 RS |
2793 | vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo, |
2794 | vec_mask); | |
c48d2d35 RS |
2795 | |
2796 | mask_op = vec_mask; | |
2797 | if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask))) | |
2798 | { | |
2799 | gcc_assert | |
2800 | (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)), | |
2801 | TYPE_VECTOR_SUBPARTS (masktype))); | |
2802 | var = vect_get_new_ssa_name (masktype, vect_simple_var); | |
2803 | mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); | |
e1bd7296 RS |
2804 | gassign *new_stmt |
2805 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); | |
86a91c0a | 2806 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c48d2d35 RS |
2807 | mask_op = var; |
2808 | } | |
2809 | } | |
2810 | src_op = mask_op; | |
2811 | } | |
2812 | ||
e1bd7296 RS |
2813 | gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op, |
2814 | mask_op, scale); | |
c48d2d35 | 2815 | |
e1bd7296 | 2816 | stmt_vec_info new_stmt_info; |
c48d2d35 RS |
2817 | if (!useless_type_conversion_p (vectype, rettype)) |
2818 | { | |
2819 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype), | |
2820 | TYPE_VECTOR_SUBPARTS (rettype))); | |
2821 | op = vect_get_new_ssa_name (rettype, vect_simple_var); | |
e1bd7296 | 2822 | gimple_call_set_lhs (new_call, op); |
86a91c0a | 2823 | vect_finish_stmt_generation (stmt_info, new_call, gsi); |
c48d2d35 RS |
2824 | var = make_ssa_name (vec_dest); |
2825 | op = build1 (VIEW_CONVERT_EXPR, vectype, op); | |
e1bd7296 | 2826 | gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
86a91c0a RS |
2827 | new_stmt_info |
2828 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
c48d2d35 RS |
2829 | } |
2830 | else | |
2831 | { | |
e1bd7296 RS |
2832 | var = make_ssa_name (vec_dest, new_call); |
2833 | gimple_call_set_lhs (new_call, var); | |
86a91c0a RS |
2834 | new_stmt_info |
2835 | = vect_finish_stmt_generation (stmt_info, new_call, gsi); | |
c48d2d35 RS |
2836 | } |
2837 | ||
c48d2d35 RS |
2838 | if (modifier == NARROW) |
2839 | { | |
2840 | if ((j & 1) == 0) | |
2841 | { | |
2842 | prev_res = var; | |
2843 | continue; | |
2844 | } | |
86a91c0a RS |
2845 | var = permute_vec_elements (prev_res, var, perm_mask, |
2846 | stmt_info, gsi); | |
e1bd7296 | 2847 | new_stmt_info = loop_vinfo->lookup_def (var); |
c48d2d35 RS |
2848 | } |
2849 | ||
ddf98a96 | 2850 | if (prev_stmt_info == NULL) |
e1bd7296 | 2851 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
c48d2d35 | 2852 | else |
e1bd7296 RS |
2853 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
2854 | prev_stmt_info = new_stmt_info; | |
c48d2d35 RS |
2855 | } |
2856 | } | |
2857 | ||
bfaa08b7 RS |
2858 | /* Prepare the base and offset in GS_INFO for vectorization. |
2859 | Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET | |
82570274 RS |
2860 | to the vectorized offset argument for the first copy of STMT_INFO. |
2861 | STMT_INFO is the statement described by GS_INFO and LOOP is the | |
2862 | containing loop. */ | |
bfaa08b7 RS |
2863 | |
2864 | static void | |
82570274 | 2865 | vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info, |
bfaa08b7 RS |
2866 | gather_scatter_info *gs_info, |
2867 | tree *dataref_ptr, tree *vec_offset) | |
2868 | { | |
2869 | gimple_seq stmts = NULL; | |
2870 | *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE); | |
2871 | if (stmts != NULL) | |
2872 | { | |
2873 | basic_block new_bb; | |
2874 | edge pe = loop_preheader_edge (loop); | |
2875 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
2876 | gcc_assert (!new_bb); | |
2877 | } | |
2878 | tree offset_type = TREE_TYPE (gs_info->offset); | |
2879 | tree offset_vectype = get_vectype_for_scalar_type (offset_type); | |
82570274 | 2880 | *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info, |
bfaa08b7 RS |
2881 | offset_vectype); |
2882 | } | |
2883 | ||
ab2fc782 RS |
2884 | /* Prepare to implement a grouped or strided load or store using |
2885 | the gather load or scatter store operation described by GS_INFO. | |
32e8e429 | 2886 | STMT_INFO is the load or store statement. |
ab2fc782 RS |
2887 | |
2888 | Set *DATAREF_BUMP to the amount that should be added to the base | |
2889 | address after each copy of the vectorized statement. Set *VEC_OFFSET | |
2890 | to an invariant offset vector in which element I has the value | |
2891 | I * DR_STEP / SCALE. */ | |
2892 | ||
2893 | static void | |
32e8e429 RS |
2894 | vect_get_strided_load_store_ops (stmt_vec_info stmt_info, |
2895 | loop_vec_info loop_vinfo, | |
ab2fc782 RS |
2896 | gather_scatter_info *gs_info, |
2897 | tree *dataref_bump, tree *vec_offset) | |
2898 | { | |
ab2fc782 RS |
2899 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
2900 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2901 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2902 | gimple_seq stmts; | |
2903 | ||
2904 | tree bump = size_binop (MULT_EXPR, | |
2905 | fold_convert (sizetype, DR_STEP (dr)), | |
2906 | size_int (TYPE_VECTOR_SUBPARTS (vectype))); | |
2907 | *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE); | |
2908 | if (stmts) | |
2909 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
2910 | ||
2911 | /* The offset given in GS_INFO can have pointer type, so use the element | |
2912 | type of the vector instead. */ | |
2913 | tree offset_type = TREE_TYPE (gs_info->offset); | |
2914 | tree offset_vectype = get_vectype_for_scalar_type (offset_type); | |
2915 | offset_type = TREE_TYPE (offset_vectype); | |
2916 | ||
2917 | /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */ | |
2918 | tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr), | |
2919 | ssize_int (gs_info->scale)); | |
2920 | step = fold_convert (offset_type, step); | |
2921 | step = force_gimple_operand (step, &stmts, true, NULL_TREE); | |
2922 | ||
2923 | /* Create {0, X, X*2, X*3, ...}. */ | |
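/* Numeric sketch (illustrative): a stride of DR_STEP == 32 bytes with
   SCALE == 8 gives X == 4, so the series built below is {0, 4, 8, 12, ...};
   when the gather/scatter hardware multiplies each offset element by
   SCALE, it recovers the original byte stride of 32 per scalar element.  */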
2924 | *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype, | |
2925 | build_zero_cst (offset_type), step); | |
2926 | if (stmts) | |
2927 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
2928 | } | |
2929 | ||
2930 | /* Return the amount that should be added to a vector pointer to move | |
89fa689a | 2931 | to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference |
ab2fc782 RS |
2932 | being vectorized and MEMORY_ACCESS_TYPE describes the type of |
2933 | vectorization. */ | |
2934 | ||
2935 | static tree | |
89fa689a | 2936 | vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type, |
ab2fc782 RS |
2937 | vect_memory_access_type memory_access_type) |
2938 | { | |
2939 | if (memory_access_type == VMAT_INVARIANT) | |
2940 | return size_zero_node; | |
2941 | ||
2942 | tree iv_step = TYPE_SIZE_UNIT (aggr_type); | |
89fa689a | 2943 | tree step = vect_dr_behavior (dr_info)->step; |
ab2fc782 RS |
2944 | if (tree_int_cst_sgn (step) == -1) |
2945 | iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step); | |
2946 | return iv_step; | |
2947 | } | |
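/* Illustrative example (not part of the original source): for a
   contiguous access vectorized with AGGR_TYPE V4SI, TYPE_SIZE_UNIT is 16,
   so the pointer advances by 16 bytes per copy; if DR_STEP is negative
   (a backwards access) the increment becomes -16, and VMAT_INVARIANT
   accesses do not advance at all.  */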
2948 | ||
37b14185 RB |
2949 | /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */ |
2950 | ||
2951 | static bool | |
32e8e429 | 2952 | vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 2953 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
e4057a39 | 2954 | tree vectype_in, stmt_vector_for_cost *cost_vec) |
37b14185 RB |
2955 | { |
2956 | tree op, vectype; | |
32e8e429 | 2957 | gcall *stmt = as_a <gcall *> (stmt_info->stmt); |
e4057a39 | 2958 | vec_info *vinfo = stmt_info->vinfo; |
37b14185 | 2959 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
928686b1 RS |
2960 | unsigned ncopies; |
2961 | unsigned HOST_WIDE_INT nunits, num_bytes; | |
37b14185 RB |
2962 | |
2963 | op = gimple_call_arg (stmt, 0); | |
2964 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
928686b1 RS |
2965 | |
2966 | if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)) | |
2967 | return false; | |
37b14185 RB |
2968 | |
2969 | /* Multiple types in SLP are handled by creating the appropriate number of | |
2970 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
2971 | case of SLP. */ | |
2972 | if (slp_node) | |
2973 | ncopies = 1; | |
2974 | else | |
e8f142e2 | 2975 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
37b14185 RB |
2976 | |
2977 | gcc_assert (ncopies >= 1); | |
2978 | ||
2979 | tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in); | |
2980 | if (! char_vectype) | |
2981 | return false; | |
2982 | ||
928686b1 RS |
2983 | if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes)) |
2984 | return false; | |
2985 | ||
794e3180 | 2986 | unsigned word_bytes = num_bytes / nunits; |
908a1a16 | 2987 | |
d980067b RS |
2988 | /* The encoding uses one stepped pattern for each byte in the word. */ |
2989 | vec_perm_builder elts (num_bytes, word_bytes, 3); | |
2990 | for (unsigned i = 0; i < 3; ++i) | |
37b14185 | 2991 | for (unsigned j = 0; j < word_bytes; ++j) |
908a1a16 | 2992 | elts.quick_push ((i + 1) * word_bytes - j - 1); |
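/* Worked example (illustrative): for __builtin_bswap32 on V4SI,
   WORD_BYTES == 4 and NUM_BYTES == 16, so the stepped encoding expands
   to the byte selector { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 },
   i.e. a byte reversal within each 32-bit word.  */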
37b14185 | 2993 | |
e3342de4 RS |
2994 | vec_perm_indices indices (elts, 1, num_bytes); |
2995 | if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices)) | |
37b14185 RB |
2996 | return false; |
2997 | ||
2998 | if (! vec_stmt) | |
2999 | { | |
3000 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
adac3a68 | 3001 | DUMP_VECT_SCOPE ("vectorizable_bswap"); |
78604de0 | 3002 | if (! slp_node) |
37b14185 | 3003 | { |
68435eb2 RB |
3004 | record_stmt_cost (cost_vec, |
3005 | 1, vector_stmt, stmt_info, 0, vect_prologue); | |
3006 | record_stmt_cost (cost_vec, | |
3007 | ncopies, vec_perm, stmt_info, 0, vect_body); | |
37b14185 RB |
3008 | } |
3009 | return true; | |
3010 | } | |
3011 | ||
736d0f28 | 3012 | tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices); |
37b14185 RB |
3013 | |
3014 | /* Transform. */ | |
3015 | vec<tree> vec_oprnds = vNULL; | |
e1bd7296 | 3016 | stmt_vec_info new_stmt_info = NULL; |
37b14185 RB |
3017 | stmt_vec_info prev_stmt_info = NULL; |
3018 | for (unsigned j = 0; j < ncopies; j++) | |
3019 | { | |
3020 | /* Handle uses. */ | |
3021 | if (j == 0) | |
86a91c0a | 3022 | vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); |
37b14185 | 3023 | else |
e4057a39 | 3024 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); |
37b14185 RB |
3025 | |
3026 | /* Arguments are ready. Create the new vector stmt. */
3027 | unsigned i; | |
3028 | tree vop; | |
3029 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) | |
3030 | { | |
e1bd7296 | 3031 | gimple *new_stmt; |
37b14185 RB |
3032 | tree tem = make_ssa_name (char_vectype); |
3033 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
3034 | char_vectype, vop)); | |
86a91c0a | 3035 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
37b14185 RB |
3036 | tree tem2 = make_ssa_name (char_vectype); |
3037 | new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR, | |
3038 | tem, tem, bswap_vconst); | |
86a91c0a | 3039 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
37b14185 RB |
3040 | tem = make_ssa_name (vectype); |
3041 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
3042 | vectype, tem2)); | |
86a91c0a RS |
3043 | new_stmt_info |
3044 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
37b14185 | 3045 | if (slp_node) |
e1bd7296 | 3046 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
37b14185 RB |
3047 | } |
3048 | ||
3049 | if (slp_node) | |
3050 | continue; | |
3051 | ||
3052 | if (j == 0) | |
e1bd7296 | 3053 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
37b14185 | 3054 | else |
e1bd7296 | 3055 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
37b14185 | 3056 | |
e1bd7296 | 3057 | prev_stmt_info = new_stmt_info; |
37b14185 RB |
3058 | } |
3059 | ||
3060 | vec_oprnds.release (); | |
3061 | return true; | |
3062 | } | |
3063 | ||
b1b6836e RS |
3064 | /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have |
3065 | integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT | |
3066 | in a single step. On success, store the binary pack code in | |
3067 | *CONVERT_CODE. */ | |
3068 | ||
3069 | static bool | |
3070 | simple_integer_narrowing (tree vectype_out, tree vectype_in, | |
3071 | tree_code *convert_code) | |
3072 | { | |
3073 | if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out)) | |
3074 | || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in))) | |
3075 | return false; | |
3076 | ||
3077 | tree_code code; | |
3078 | int multi_step_cvt = 0; | |
3079 | auto_vec <tree, 8> interm_types; | |
3080 | if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in, | |
3081 | &code, &multi_step_cvt, | |
3082 | &interm_types) | |
3083 | || multi_step_cvt) | |
3084 | return false; | |
3085 | ||
3086 | *convert_code = code; | |
3087 | return true; | |
3088 | } | |
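/* Example (illustrative): narrowing two V4SI call results into one V8HI
   output needs only a single VEC_PACK_TRUNC_EXPR, so *CONVERT_CODE is
   set to that code; a narrowing that would require an intermediate type
   sets MULTI_STEP_CVT and is rejected above.  */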
5ce9450f | 3089 | |
ebfd146a IR |
3090 | /* Function vectorizable_call. |
3091 | ||
32e8e429 RS |
3092 | Check if STMT_INFO performs a function call that can be vectorized. |
3093 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
3094 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
3095 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
3096 | |
3097 | static bool | |
32e8e429 | 3098 | vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
3099 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
3100 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 3101 | { |
538dd0b7 | 3102 | gcall *stmt; |
ebfd146a IR |
3103 | tree vec_dest; |
3104 | tree scalar_dest; | |
0267732b | 3105 | tree op; |
ebfd146a | 3106 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; |
32e8e429 | 3107 | stmt_vec_info prev_stmt_info; |
ebfd146a | 3108 | tree vectype_out, vectype_in; |
c7bda0f4 RS |
3109 | poly_uint64 nunits_in; |
3110 | poly_uint64 nunits_out; | |
ebfd146a | 3111 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
190c2236 | 3112 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 3113 | vec_info *vinfo = stmt_info->vinfo; |
81c40241 | 3114 | tree fndecl, new_temp, rhs_type; |
2c58d42c RS |
3115 | enum vect_def_type dt[4] |
3116 | = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type, | |
3117 | vect_unknown_def_type }; | |
3118 | int ndts = ARRAY_SIZE (dt); | |
ebfd146a | 3119 | int ncopies, j; |
2c58d42c RS |
3120 | auto_vec<tree, 8> vargs; |
3121 | auto_vec<tree, 8> orig_vargs; | |
ebfd146a IR |
3122 | enum { NARROW, NONE, WIDEN } modifier; |
3123 | size_t i, nargs; | |
9d5e7640 | 3124 | tree lhs; |
ebfd146a | 3125 | |
190c2236 | 3126 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
3127 | return false; |
3128 | ||
66c16fd9 RB |
3129 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3130 | && ! vec_stmt) | |
ebfd146a IR |
3131 | return false; |
3132 | ||
86a91c0a RS |
3133 | /* Is STMT_INFO a vectorizable call? */ |
3134 | stmt = dyn_cast <gcall *> (stmt_info->stmt); | |
538dd0b7 | 3135 | if (!stmt) |
ebfd146a IR |
3136 | return false; |
3137 | ||
5ce9450f | 3138 | if (gimple_call_internal_p (stmt) |
bfaa08b7 | 3139 | && (internal_load_fn_p (gimple_call_internal_fn (stmt)) |
f307441a | 3140 | || internal_store_fn_p (gimple_call_internal_fn (stmt)))) |
c3a8f964 RS |
3141 | /* Handled by vectorizable_load and vectorizable_store. */ |
3142 | return false; | |
5ce9450f | 3143 | |
0136f8f0 AH |
3144 | if (gimple_call_lhs (stmt) == NULL_TREE |
3145 | || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
ebfd146a IR |
3146 | return false; |
3147 | ||
0136f8f0 | 3148 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); |
5a2c1986 | 3149 | |
b690cc0f RG |
3150 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
3151 | ||
ebfd146a IR |
3152 | /* Process function arguments. */ |
3153 | rhs_type = NULL_TREE; | |
b690cc0f | 3154 | vectype_in = NULL_TREE; |
ebfd146a IR |
3155 | nargs = gimple_call_num_args (stmt); |
3156 | ||
1b1562a5 MM |
3157 | /* Bail out if the function has more than four arguments; we do not have
3158 | interesting builtin functions to vectorize with more than two arguments,
3159 | except for fma and masked internal calls. No arguments is also not good. */
2c58d42c | 3160 | if (nargs == 0 || nargs > 4) |
ebfd146a IR |
3161 | return false; |
3162 | ||
74bf76ed | 3163 | /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */ |
2c58d42c RS |
3164 | combined_fn cfn = gimple_call_combined_fn (stmt); |
3165 | if (cfn == CFN_GOMP_SIMD_LANE) | |
74bf76ed JJ |
3166 | { |
3167 | nargs = 0; | |
3168 | rhs_type = unsigned_type_node; | |
3169 | } | |
3170 | ||
2c58d42c RS |
3171 | int mask_opno = -1; |
3172 | if (internal_fn_p (cfn)) | |
3173 | mask_opno = internal_fn_mask_index (as_internal_fn (cfn)); | |
3174 | ||
ebfd146a IR |
3175 | for (i = 0; i < nargs; i++) |
3176 | { | |
b690cc0f RG |
3177 | tree opvectype; |
3178 | ||
ebfd146a | 3179 | op = gimple_call_arg (stmt, i); |
2c58d42c RS |
3180 | if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype)) |
3181 | { | |
3182 | if (dump_enabled_p ()) | |
3183 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3184 | "use not simple.\n"); | |
3185 | return false; | |
3186 | } | |
3187 | ||
3188 | /* Skip the mask argument to an internal function. This operand | |
3189 | has been converted via a pattern if necessary. */ | |
3190 | if ((int) i == mask_opno) | |
3191 | continue; | |
ebfd146a IR |
3192 | |
3193 | /* We can only handle calls with arguments of the same type. */ | |
3194 | if (rhs_type | |
8533c9d8 | 3195 | && !types_compatible_p (rhs_type, TREE_TYPE (op))) |
ebfd146a | 3196 | { |
73fbfcad | 3197 | if (dump_enabled_p ()) |
78c60e3d | 3198 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3199 | "argument types differ.\n"); |
ebfd146a IR |
3200 | return false; |
3201 | } | |
b690cc0f RG |
3202 | if (!rhs_type) |
3203 | rhs_type = TREE_TYPE (op); | |
ebfd146a | 3204 | |
b690cc0f RG |
3205 | if (!vectype_in) |
3206 | vectype_in = opvectype; | |
3207 | else if (opvectype | |
3208 | && opvectype != vectype_in) | |
3209 | { | |
73fbfcad | 3210 | if (dump_enabled_p ()) |
78c60e3d | 3211 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3212 | "argument vector types differ.\n"); |
b690cc0f RG |
3213 | return false; |
3214 | } | |
3215 | } | |
3216 | /* If all arguments are external or constant defs use a vector type with | |
3217 | the same size as the output vector type. */ | |
ebfd146a | 3218 | if (!vectype_in) |
b690cc0f | 3219 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
3220 | if (vec_stmt) |
3221 | gcc_assert (vectype_in); | |
3222 | if (!vectype_in) | |
3223 | { | |
73fbfcad | 3224 | if (dump_enabled_p ()) |
7d8930a0 | 3225 | { |
78c60e3d SS |
3226 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3227 | "no vectype for scalar type "); | |
3228 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 3229 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
3230 | } |
3231 | ||
3232 | return false; | |
3233 | } | |
ebfd146a IR |
3234 | |
3235 | /* FORNOW */ | |
b690cc0f RG |
3236 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
3237 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
c7bda0f4 | 3238 | if (known_eq (nunits_in * 2, nunits_out)) |
ebfd146a | 3239 | modifier = NARROW; |
c7bda0f4 | 3240 | else if (known_eq (nunits_out, nunits_in)) |
ebfd146a | 3241 | modifier = NONE; |
c7bda0f4 | 3242 | else if (known_eq (nunits_out * 2, nunits_in)) |
ebfd146a IR |
3243 | modifier = WIDEN; |
3244 | else | |
3245 | return false; | |
3246 | ||
70439f0d RS |
3247 | /* We only handle functions that do not read or clobber memory. */ |
3248 | if (gimple_vuse (stmt)) | |
3249 | { | |
3250 | if (dump_enabled_p ()) | |
3251 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3252 | "function reads from or writes to memory.\n"); | |
3253 | return false; | |
3254 | } | |
3255 | ||
ebfd146a IR |
3256 | /* For now, we only vectorize functions if a target-specific builtin
3257 | is available. TODO -- in some cases, it might be profitable to | |
3258 | insert the calls for pieces of the vector, in order to be able | |
3259 | to vectorize other operations in the loop. */ | |
70439f0d RS |
3260 | fndecl = NULL_TREE; |
3261 | internal_fn ifn = IFN_LAST; | |
70439f0d RS |
3262 | tree callee = gimple_call_fndecl (stmt); |
3263 | ||
3264 | /* First try using an internal function. */ | |
b1b6836e RS |
3265 | tree_code convert_code = ERROR_MARK; |
3266 | if (cfn != CFN_LAST | |
3267 | && (modifier == NONE | |
3268 | || (modifier == NARROW | |
3269 | && simple_integer_narrowing (vectype_out, vectype_in, | |
3270 | &convert_code)))) | |
70439f0d RS |
3271 | ifn = vectorizable_internal_function (cfn, callee, vectype_out, |
3272 | vectype_in); | |
3273 | ||
3274 | /* If that fails, try asking for a target-specific built-in function. */ | |
3275 | if (ifn == IFN_LAST) | |
3276 | { | |
3277 | if (cfn != CFN_LAST) | |
3278 | fndecl = targetm.vectorize.builtin_vectorized_function | |
3279 | (cfn, vectype_out, vectype_in); | |
7672aa9b | 3280 | else if (callee) |
70439f0d RS |
3281 | fndecl = targetm.vectorize.builtin_md_vectorized_function |
3282 | (callee, vectype_out, vectype_in); | |
3283 | } | |
3284 | ||
3285 | if (ifn == IFN_LAST && !fndecl) | |
ebfd146a | 3286 | { |
70439f0d | 3287 | if (cfn == CFN_GOMP_SIMD_LANE |
74bf76ed JJ |
3288 | && !slp_node |
3289 | && loop_vinfo | |
3290 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
3291 | && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME | |
3292 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
3293 | == SSA_NAME_VAR (gimple_call_arg (stmt, 0))) | |
3294 | { | |
3295 | /* We can handle IFN_GOMP_SIMD_LANE by returning a | |
3296 | { 0, 1, 2, ... vf - 1 } vector. */ | |
3297 | gcc_assert (nargs == 0); | |
3298 | } | |
37b14185 RB |
3299 | else if (modifier == NONE |
3300 | && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16) | |
3301 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32) | |
3302 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64))) | |
86a91c0a | 3303 | return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node, |
e4057a39 | 3304 | vectype_in, cost_vec); |
74bf76ed JJ |
3305 | else |
3306 | { | |
3307 | if (dump_enabled_p ()) | |
3308 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
e645e942 | 3309 | "function is not vectorizable.\n"); |
74bf76ed JJ |
3310 | return false; |
3311 | } | |
ebfd146a IR |
3312 | } |
3313 | ||
fce57248 | 3314 | if (slp_node) |
190c2236 | 3315 | ncopies = 1; |
b1b6836e | 3316 | else if (modifier == NARROW && ifn == IFN_LAST) |
e8f142e2 | 3317 | ncopies = vect_get_num_copies (loop_vinfo, vectype_out); |
ebfd146a | 3318 | else |
e8f142e2 | 3319 | ncopies = vect_get_num_copies (loop_vinfo, vectype_in); |
ebfd146a IR |
3320 | |
3321 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
3322 | needs to be generated. */ | |
3323 | gcc_assert (ncopies >= 1); | |
3324 | ||
ed623edb | 3325 | vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL); |
ebfd146a IR |
3326 | if (!vec_stmt) /* transformation not required. */ |
3327 | { | |
3328 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
adac3a68 | 3329 | DUMP_VECT_SCOPE ("vectorizable_call"); |
68435eb2 RB |
3330 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
3331 | if (ifn != IFN_LAST && modifier == NARROW && !slp_node) | |
3332 | record_stmt_cost (cost_vec, ncopies / 2, | |
3333 | vec_promote_demote, stmt_info, 0, vect_body); | |
b1b6836e | 3334 | |
2c58d42c RS |
3335 | if (loop_vinfo && mask_opno >= 0) |
3336 | { | |
3337 | unsigned int nvectors = (slp_node | |
3338 | ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) | |
3339 | : ncopies); | |
3340 | vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out); | |
3341 | } | |
ebfd146a IR |
3342 | return true; |
3343 | } | |
3344 | ||
67b8dbac | 3345 | /* Transform. */ |
ebfd146a | 3346 | |
73fbfcad | 3347 | if (dump_enabled_p ()) |
e645e942 | 3348 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); |
ebfd146a IR |
3349 | |
3350 | /* Handle def. */ | |
3351 | scalar_dest = gimple_call_lhs (stmt); | |
3352 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
3353 | ||
2c58d42c RS |
3354 | bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); |
3355 | ||
e1bd7296 | 3356 | stmt_vec_info new_stmt_info = NULL; |
ebfd146a | 3357 | prev_stmt_info = NULL; |
b1b6836e | 3358 | if (modifier == NONE || ifn != IFN_LAST) |
ebfd146a | 3359 | { |
b1b6836e | 3360 | tree prev_res = NULL_TREE; |
2c58d42c RS |
3361 | vargs.safe_grow (nargs); |
3362 | orig_vargs.safe_grow (nargs); | |
ebfd146a IR |
3363 | for (j = 0; j < ncopies; ++j) |
3364 | { | |
3365 | /* Build argument list for the vectorized call. */ | |
190c2236 JJ |
3366 | if (slp_node) |
3367 | { | |
ef062b13 | 3368 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 3369 | vec<tree> vec_oprnds0; |
190c2236 JJ |
3370 | |
3371 | for (i = 0; i < nargs; i++) | |
2c58d42c | 3372 | vargs[i] = gimple_call_arg (stmt, i); |
306b0c92 | 3373 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 3374 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
3375 | |
3376 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 3377 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0) |
190c2236 JJ |
3378 | { |
3379 | size_t k; | |
3380 | for (k = 0; k < nargs; k++) | |
3381 | { | |
37b5ec8f | 3382 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 | 3383 | vargs[k] = vec_oprndsk[i]; |
190c2236 | 3384 | } |
b1b6836e RS |
3385 | if (modifier == NARROW) |
3386 | { | |
2c58d42c RS |
3387 | /* We don't define any narrowing conditional functions |
3388 | at present. */ | |
3389 | gcc_assert (mask_opno < 0); | |
b1b6836e | 3390 | tree half_res = make_ssa_name (vectype_in); |
a844293d RS |
3391 | gcall *call |
3392 | = gimple_build_call_internal_vec (ifn, vargs); | |
3393 | gimple_call_set_lhs (call, half_res); | |
3394 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3395 | new_stmt_info |
86a91c0a | 3396 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
b1b6836e RS |
3397 | if ((i & 1) == 0) |
3398 | { | |
3399 | prev_res = half_res; | |
3400 | continue; | |
3401 | } | |
3402 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
3403 | gimple *new_stmt |
3404 | = gimple_build_assign (new_temp, convert_code, | |
3405 | prev_res, half_res); | |
3406 | new_stmt_info | |
86a91c0a RS |
3407 | = vect_finish_stmt_generation (stmt_info, new_stmt, |
3408 | gsi); | |
b1b6836e | 3409 | } |
70439f0d | 3410 | else |
b1b6836e | 3411 | { |
2c58d42c RS |
3412 | if (mask_opno >= 0 && masked_loop_p) |
3413 | { | |
3414 | unsigned int vec_num = vec_oprnds0.length (); | |
3415 | /* Always true for SLP. */ | |
3416 | gcc_assert (ncopies == 1); | |
3417 | tree mask = vect_get_loop_mask (gsi, masks, vec_num, | |
3418 | vectype_out, i); | |
3419 | vargs[mask_opno] = prepare_load_store_mask | |
3420 | (TREE_TYPE (mask), mask, vargs[mask_opno], gsi); | |
3421 | } | |
3422 | ||
a844293d | 3423 | gcall *call; |
b1b6836e | 3424 | if (ifn != IFN_LAST) |
a844293d | 3425 | call = gimple_build_call_internal_vec (ifn, vargs); |
b1b6836e | 3426 | else |
a844293d RS |
3427 | call = gimple_build_call_vec (fndecl, vargs); |
3428 | new_temp = make_ssa_name (vec_dest, call); | |
3429 | gimple_call_set_lhs (call, new_temp); | |
3430 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3431 | new_stmt_info |
86a91c0a | 3432 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
b1b6836e | 3433 | } |
e1bd7296 | 3434 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
190c2236 JJ |
3435 | } |
3436 | ||
3437 | for (i = 0; i < nargs; i++) | |
3438 | { | |
37b5ec8f | 3439 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3440 | vec_oprndsi.release (); |
190c2236 | 3441 | } |
190c2236 JJ |
3442 | continue; |
3443 | } | |
3444 | ||
ebfd146a IR |
3445 | for (i = 0; i < nargs; i++) |
3446 | { | |
3447 | op = gimple_call_arg (stmt, i); | |
3448 | if (j == 0) | |
3449 | vec_oprnd0 | |
86a91c0a | 3450 | = vect_get_vec_def_for_operand (op, stmt_info); |
ebfd146a | 3451 | else |
2c58d42c | 3452 | vec_oprnd0 |
e4057a39 | 3453 | = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]); |
2c58d42c RS |
3454 | |
3455 | orig_vargs[i] = vargs[i] = vec_oprnd0; | |
3456 | } | |
ebfd146a | 3457 | |
2c58d42c RS |
3458 | if (mask_opno >= 0 && masked_loop_p) |
3459 | { | |
3460 | tree mask = vect_get_loop_mask (gsi, masks, ncopies, | |
3461 | vectype_out, j); | |
3462 | vargs[mask_opno] | |
3463 | = prepare_load_store_mask (TREE_TYPE (mask), mask, | |
3464 | vargs[mask_opno], gsi); | |
ebfd146a IR |
3465 | } |
3466 | ||
2c58d42c | 3467 | if (cfn == CFN_GOMP_SIMD_LANE) |
74bf76ed | 3468 | { |
c7bda0f4 | 3469 | tree cst = build_index_vector (vectype_out, j * nunits_out, 1); |
74bf76ed | 3470 | tree new_var |
0e22bb5a | 3471 | = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_"); |
355fe088 | 3472 | gimple *init_stmt = gimple_build_assign (new_var, cst); |
86a91c0a | 3473 | vect_init_vector_1 (stmt_info, init_stmt, NULL); |
b731b390 | 3474 | new_temp = make_ssa_name (vec_dest); |
e1bd7296 RS |
3475 | gimple *new_stmt = gimple_build_assign (new_temp, new_var); |
3476 | new_stmt_info | |
86a91c0a | 3477 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
74bf76ed | 3478 | } |
b1b6836e RS |
3479 | else if (modifier == NARROW) |
3480 | { | |
2c58d42c RS |
3481 | /* We don't define any narrowing conditional functions at |
3482 | present. */ | |
3483 | gcc_assert (mask_opno < 0); | |
b1b6836e | 3484 | tree half_res = make_ssa_name (vectype_in); |
a844293d RS |
3485 | gcall *call = gimple_build_call_internal_vec (ifn, vargs); |
3486 | gimple_call_set_lhs (call, half_res); | |
3487 | gimple_call_set_nothrow (call, true); | |
86a91c0a RS |
3488 | new_stmt_info |
3489 | = vect_finish_stmt_generation (stmt_info, call, gsi); | |
b1b6836e RS |
3490 | if ((j & 1) == 0) |
3491 | { | |
3492 | prev_res = half_res; | |
3493 | continue; | |
3494 | } | |
3495 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
3496 | gassign *new_stmt = gimple_build_assign (new_temp, convert_code, |
3497 | prev_res, half_res); | |
3498 | new_stmt_info | |
86a91c0a | 3499 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
b1b6836e | 3500 | } |
74bf76ed JJ |
3501 | else |
3502 | { | |
a844293d | 3503 | gcall *call; |
70439f0d | 3504 | if (ifn != IFN_LAST) |
a844293d | 3505 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 3506 | else |
a844293d | 3507 | call = gimple_build_call_vec (fndecl, vargs); |
e1bd7296 | 3508 | new_temp = make_ssa_name (vec_dest, call); |
a844293d RS |
3509 | gimple_call_set_lhs (call, new_temp); |
3510 | gimple_call_set_nothrow (call, true); | |
86a91c0a RS |
3511 | new_stmt_info |
3512 | = vect_finish_stmt_generation (stmt_info, call, gsi); | |
74bf76ed | 3513 | } |
ebfd146a | 3514 | |
b1b6836e | 3515 | if (j == (modifier == NARROW ? 1 : 0)) |
e1bd7296 | 3516 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
ebfd146a | 3517 | else |
e1bd7296 | 3518 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
ebfd146a | 3519 | |
e1bd7296 | 3520 | prev_stmt_info = new_stmt_info; |
ebfd146a | 3521 | } |
b1b6836e RS |
3522 | } |
3523 | else if (modifier == NARROW) | |
3524 | { | |
2c58d42c RS |
3525 | /* We don't define any narrowing conditional functions at present. */ |
3526 | gcc_assert (mask_opno < 0); | |
ebfd146a IR |
3527 | for (j = 0; j < ncopies; ++j) |
3528 | { | |
3529 | /* Build argument list for the vectorized call. */ | |
3530 | if (j == 0) | |
9771b263 | 3531 | vargs.create (nargs * 2); |
ebfd146a | 3532 | else |
9771b263 | 3533 | vargs.truncate (0); |
ebfd146a | 3534 | |
190c2236 JJ |
3535 | if (slp_node) |
3536 | { | |
ef062b13 | 3537 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 3538 | vec<tree> vec_oprnds0; |
190c2236 JJ |
3539 | |
3540 | for (i = 0; i < nargs; i++) | |
9771b263 | 3541 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 3542 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 3543 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
3544 | |
3545 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 3546 | for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2) |
190c2236 JJ |
3547 | { |
3548 | size_t k; | |
9771b263 | 3549 | vargs.truncate (0); |
190c2236 JJ |
3550 | for (k = 0; k < nargs; k++) |
3551 | { | |
37b5ec8f | 3552 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 DN |
3553 | vargs.quick_push (vec_oprndsk[i]); |
3554 | vargs.quick_push (vec_oprndsk[i + 1]); | |
190c2236 | 3555 | } |
a844293d | 3556 | gcall *call; |
70439f0d | 3557 | if (ifn != IFN_LAST) |
a844293d | 3558 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 3559 | else |
a844293d RS |
3560 | call = gimple_build_call_vec (fndecl, vargs); |
3561 | new_temp = make_ssa_name (vec_dest, call); | |
3562 | gimple_call_set_lhs (call, new_temp); | |
3563 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3564 | new_stmt_info |
86a91c0a | 3565 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
e1bd7296 | 3566 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
190c2236 JJ |
3567 | } |
3568 | ||
3569 | for (i = 0; i < nargs; i++) | |
3570 | { | |
37b5ec8f | 3571 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3572 | vec_oprndsi.release (); |
190c2236 | 3573 | } |
190c2236 JJ |
3574 | continue; |
3575 | } | |
3576 | ||
ebfd146a IR |
3577 | for (i = 0; i < nargs; i++) |
3578 | { | |
3579 | op = gimple_call_arg (stmt, i); | |
3580 | if (j == 0) | |
3581 | { | |
3582 | vec_oprnd0 | |
86a91c0a | 3583 | = vect_get_vec_def_for_operand (op, stmt_info); |
ebfd146a | 3584 | vec_oprnd1 |
e4057a39 | 3585 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
ebfd146a IR |
3586 | } |
3587 | else | |
3588 | { | |
e1bd7296 RS |
3589 | vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt, |
3590 | 2 * i + 1); | |
ebfd146a | 3591 | vec_oprnd0 |
e4057a39 | 3592 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
ebfd146a | 3593 | vec_oprnd1 |
e4057a39 | 3594 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
ebfd146a IR |
3595 | } |
3596 | ||
9771b263 DN |
3597 | vargs.quick_push (vec_oprnd0); |
3598 | vargs.quick_push (vec_oprnd1); | |
ebfd146a IR |
3599 | } |
3600 | ||
e1bd7296 | 3601 | gcall *new_stmt = gimple_build_call_vec (fndecl, vargs); |
ebfd146a IR |
3602 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3603 | gimple_call_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
3604 | new_stmt_info |
3605 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
ebfd146a IR |
3606 | |
3607 | if (j == 0) | |
e1bd7296 | 3608 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info; |
ebfd146a | 3609 | else |
e1bd7296 | 3610 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
ebfd146a | 3611 | |
e1bd7296 | 3612 | prev_stmt_info = new_stmt_info; |
ebfd146a IR |
3613 | } |
3614 | ||
3615 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a | 3616 | } |
b1b6836e RS |
3617 | else |
3618 | /* No current target implements this case. */ | |
3619 | return false; | |
ebfd146a | 3620 | |
9771b263 | 3621 | vargs.release (); |
ebfd146a | 3622 | |
ebfd146a IR |
3623 | /* The call in STMT might prevent it from being removed in DCE.
3624 | We however cannot remove it here, due to the way the SSA name
3625 | it defines is mapped to the new definition. So just replace
3626 | the rhs of the statement with something harmless. */
3627 | ||
dd34c087 JJ |
3628 | if (slp_node) |
3629 | return true; | |
3630 | ||
9d5e7640 | 3631 | if (is_pattern_stmt_p (stmt_info)) |
10681ce8 | 3632 | stmt_info = STMT_VINFO_RELATED_STMT (stmt_info); |
ed7b8123 | 3633 | lhs = gimple_get_lhs (stmt_info->stmt); |
3cc2fa2a | 3634 | |
e1bd7296 RS |
3635 | gassign *new_stmt |
3636 | = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
9d97912b | 3637 | vinfo->replace_stmt (gsi, stmt_info, new_stmt); |
ebfd146a IR |
3638 | |
3639 | return true; | |
3640 | } | |
3641 | ||
3642 | ||
0136f8f0 AH |
3643 | struct simd_call_arg_info |
3644 | { | |
3645 | tree vectype; | |
3646 | tree op; | |
0136f8f0 | 3647 | HOST_WIDE_INT linear_step; |
34e82342 | 3648 | enum vect_def_type dt; |
0136f8f0 | 3649 | unsigned int align; |
17b658af | 3650 | bool simd_lane_linear; |
0136f8f0 AH |
3651 | }; |
3652 | ||
17b658af JJ |
3653 | /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME, |
3654 | is linear within simd lane (but not within whole loop), note it in | |
3655 | *ARGINFO. */ | |
3656 | ||
3657 | static void | |
3658 | vect_simd_lane_linear (tree op, struct loop *loop, | |
3659 | struct simd_call_arg_info *arginfo) | |
3660 | { | |
355fe088 | 3661 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
17b658af JJ |
3662 | |
3663 | if (!is_gimple_assign (def_stmt) | |
3664 | || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR | |
3665 | || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))) | |
3666 | return; | |
3667 | ||
3668 | tree base = gimple_assign_rhs1 (def_stmt); | |
3669 | HOST_WIDE_INT linear_step = 0; | |
3670 | tree v = gimple_assign_rhs2 (def_stmt); | |
3671 | while (TREE_CODE (v) == SSA_NAME) | |
3672 | { | |
3673 | tree t; | |
3674 | def_stmt = SSA_NAME_DEF_STMT (v); | |
3675 | if (is_gimple_assign (def_stmt)) | |
3676 | switch (gimple_assign_rhs_code (def_stmt)) | |
3677 | { | |
3678 | case PLUS_EXPR: | |
3679 | t = gimple_assign_rhs2 (def_stmt); | |
3680 | if (linear_step || TREE_CODE (t) != INTEGER_CST) | |
3681 | return; | |
3682 | base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t); | |
3683 | v = gimple_assign_rhs1 (def_stmt); | |
3684 | continue; | |
3685 | case MULT_EXPR: | |
3686 | t = gimple_assign_rhs2 (def_stmt); | |
3687 | if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t)) | |
3688 | return; | |
3689 | linear_step = tree_to_shwi (t); | |
3690 | v = gimple_assign_rhs1 (def_stmt); | |
3691 | continue; | |
3692 | CASE_CONVERT: | |
3693 | t = gimple_assign_rhs1 (def_stmt); | |
3694 | if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE | |
3695 | || (TYPE_PRECISION (TREE_TYPE (v)) | |
3696 | < TYPE_PRECISION (TREE_TYPE (t)))) | |
3697 | return; | |
3698 | if (!linear_step) | |
3699 | linear_step = 1; | |
3700 | v = t; | |
3701 | continue; | |
3702 | default: | |
3703 | return; | |
3704 | } | |
8e4284d0 | 3705 | else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE) |
17b658af JJ |
3706 | && loop->simduid |
3707 | && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME | |
3708 | && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0)) | |
3709 | == loop->simduid)) | |
3710 | { | |
3711 | if (!linear_step) | |
3712 | linear_step = 1; | |
3713 | arginfo->linear_step = linear_step; | |
3714 | arginfo->op = base; | |
3715 | arginfo->simd_lane_linear = true; | |
3716 | return; | |
3717 | } | |
3718 | } | |
3719 | } | |
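/* Example of the pattern the walk above recognizes (illustrative):

     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = _1 * 8;
     op_3 = &array + _2;

   Here the MULT_EXPR case records LINEAR_STEP == 8, and reaching the
   IFN_GOMP_SIMD_LANE definition confirms that OP is linear within the
   simd lane with base &array.  */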
3720 | ||
cf1b2ba4 RS |
3721 | /* Return the number of elements in vector type VECTYPE, which is associated |
3722 | with a SIMD clone. At present these vectors always have a constant | |
3723 | length. */ | |
3724 | ||
3725 | static unsigned HOST_WIDE_INT | |
3726 | simd_clone_subparts (tree vectype) | |
3727 | { | |
928686b1 | 3728 | return TYPE_VECTOR_SUBPARTS (vectype).to_constant (); |
cf1b2ba4 RS |
3729 | } |
3730 | ||
0136f8f0 AH |
3731 | /* Function vectorizable_simd_clone_call. |
3732 | ||
32e8e429 | 3733 | Check if STMT_INFO performs a function call that can be vectorized |
0136f8f0 | 3734 | by calling a simd clone of the function. |
32e8e429 RS |
3735 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized |
3736 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
3737 | Return true if STMT_INFO is vectorizable in this way. */ | |
0136f8f0 AH |
3738 | |
3739 | static bool | |
32e8e429 RS |
3740 | vectorizable_simd_clone_call (stmt_vec_info stmt_info, |
3741 | gimple_stmt_iterator *gsi, | |
1eede195 | 3742 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 3743 | stmt_vector_for_cost *) |
0136f8f0 AH |
3744 | { |
3745 | tree vec_dest; | |
3746 | tree scalar_dest; | |
3747 | tree op, type; | |
3748 | tree vec_oprnd0 = NULL_TREE; | |
32e8e429 | 3749 | stmt_vec_info prev_stmt_info; |
0136f8f0 AH |
3750 | tree vectype; |
3751 | unsigned int nunits; | |
3752 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
3753 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
310213d4 | 3754 | vec_info *vinfo = stmt_info->vinfo; |
0136f8f0 | 3755 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; |
81c40241 | 3756 | tree fndecl, new_temp; |
0136f8f0 | 3757 | int ncopies, j; |
00426f9a | 3758 | auto_vec<simd_call_arg_info> arginfo; |
0136f8f0 AH |
3759 | vec<tree> vargs = vNULL; |
3760 | size_t i, nargs; | |
3761 | tree lhs, rtype, ratype; | |
e7a74006 | 3762 | vec<constructor_elt, va_gc> *ret_ctor_elts = NULL; |
0136f8f0 AH |
3763 | |
3764 | /* Is STMT a vectorizable call? */ | |
32e8e429 RS |
3765 | gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt); |
3766 | if (!stmt) | |
0136f8f0 AH |
3767 | return false; |
3768 | ||
3769 | fndecl = gimple_call_fndecl (stmt); | |
3770 | if (fndecl == NULL_TREE) | |
3771 | return false; | |
3772 | ||
d52f5295 | 3773 | struct cgraph_node *node = cgraph_node::get (fndecl); |
0136f8f0 AH |
3774 | if (node == NULL || node->simd_clones == NULL) |
3775 | return false; | |
3776 | ||
3777 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
3778 | return false; | |
3779 | ||
66c16fd9 RB |
3780 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3781 | && ! vec_stmt) | |
0136f8f0 AH |
3782 | return false; |
3783 | ||
3784 | if (gimple_call_lhs (stmt) | |
3785 | && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
3786 | return false; | |
3787 | ||
3788 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); | |
3789 | ||
3790 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
3791 | ||
86a91c0a | 3792 | if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info)) |
0136f8f0 AH |
3793 | return false; |
3794 | ||
3795 | /* FORNOW */ | |
fce57248 | 3796 | if (slp_node) |
0136f8f0 AH |
3797 | return false; |
3798 | ||
3799 | /* Process function arguments. */ | |
3800 | nargs = gimple_call_num_args (stmt); | |
3801 | ||
3802 | /* Bail out if the function has zero arguments. */ | |
3803 | if (nargs == 0) | |
3804 | return false; | |
3805 | ||
00426f9a | 3806 | arginfo.reserve (nargs, true); |
0136f8f0 AH |
3807 | |
3808 | for (i = 0; i < nargs; i++) | |
3809 | { | |
3810 | simd_call_arg_info thisarginfo; | |
3811 | affine_iv iv; | |
3812 | ||
3813 | thisarginfo.linear_step = 0; | |
3814 | thisarginfo.align = 0; | |
3815 | thisarginfo.op = NULL_TREE; | |
17b658af | 3816 | thisarginfo.simd_lane_linear = false; |
0136f8f0 AH |
3817 | |
3818 | op = gimple_call_arg (stmt, i); | |
894dd753 | 3819 | if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt, |
81c40241 | 3820 | &thisarginfo.vectype) |
0136f8f0 AH |
3821 | || thisarginfo.dt == vect_uninitialized_def) |
3822 | { | |
3823 | if (dump_enabled_p ()) | |
3824 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3825 | "use not simple.\n"); | |
0136f8f0 AH |
3826 | return false; |
3827 | } | |
3828 | ||
3829 | if (thisarginfo.dt == vect_constant_def | |
3830 | || thisarginfo.dt == vect_external_def) | |
3831 | gcc_assert (thisarginfo.vectype == NULL_TREE); | |
3832 | else | |
3833 | gcc_assert (thisarginfo.vectype != NULL_TREE); | |
3834 | ||
6c9e85fb JJ |
3835 | /* For linear arguments, the analyze phase should have saved |
3836 | the base and step in STMT_VINFO_SIMD_CLONE_INFO. */ | |
17b658af JJ |
3837 | if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length () |
3838 | && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]) | |
6c9e85fb JJ |
3839 | { |
3840 | gcc_assert (vec_stmt); | |
3841 | thisarginfo.linear_step | |
17b658af | 3842 | = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]); |
6c9e85fb | 3843 | thisarginfo.op |
17b658af JJ |
3844 | = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1]; |
3845 | thisarginfo.simd_lane_linear | |
3846 | = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3] | |
3847 | == boolean_true_node); | |
6c9e85fb JJ |
3848 | /* If the loop has been peeled for alignment, we need to adjust it. */
          tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
          tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
          if (n1 != n2 && !thisarginfo.simd_lane_linear)
            {
              tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
              tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
              tree opt = TREE_TYPE (thisarginfo.op);
              bias = fold_convert (TREE_TYPE (step), bias);
              bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
              thisarginfo.op
                = fold_build2 (POINTER_TYPE_P (opt)
                               ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
                               thisarginfo.op, bias);
            }
        }
      else if (!vec_stmt
               && thisarginfo.dt != vect_constant_def
               && thisarginfo.dt != vect_external_def
               && loop_vinfo
               && TREE_CODE (op) == SSA_NAME
               && simple_iv (loop, loop_containing_stmt (stmt), op,
                             &iv, false)
               && tree_fits_shwi_p (iv.step))
        {
          thisarginfo.linear_step = tree_to_shwi (iv.step);
          thisarginfo.op = iv.base;
        }
      else if ((thisarginfo.dt == vect_constant_def
                || thisarginfo.dt == vect_external_def)
               && POINTER_TYPE_P (TREE_TYPE (op)))
        thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
         linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
          && !thisarginfo.linear_step
          && !vec_stmt
          && thisarginfo.dt != vect_constant_def
          && thisarginfo.dt != vect_external_def
          && loop_vinfo
          && !slp_node
          && TREE_CODE (op) == SSA_NAME)
        vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned HOST_WIDE_INT vf;
  if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not considering SIMD clones; not yet supported"
                         " for variable-width vectors.\n");
      return NULL;
    }

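  /* Pick the SIMD clone to use.  Candidates are scored by a "badness"
     heuristic: a simdlen smaller than VF, an inbranch clone, any
     target-specific penalty from targetm.simd_clone.usable, and vector
     arguments that are really uniform or linear all make a candidate
     less attractive.  The usable candidate with the lowest score is
     chosen.  */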
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
         n = n->simdclone->next_clone)
      {
        unsigned int this_badness = 0;
        if (n->simdclone->simdlen > vf
            || n->simdclone->nargs != nargs)
          continue;
        if (n->simdclone->simdlen < vf)
          this_badness += (exact_log2 (vf)
                           - exact_log2 (n->simdclone->simdlen)) * 1024;
        if (n->simdclone->inbranch)
          this_badness += 2048;
        int target_badness = targetm.simd_clone.usable (n);
        if (target_badness < 0)
          continue;
        this_badness += target_badness * 512;
        /* FORNOW: Have to add code to add the mask argument.  */
        if (n->simdclone->inbranch)
          continue;
        for (i = 0; i < nargs; i++)
          {
            switch (n->simdclone->args[i].arg_type)
              {
              case SIMD_CLONE_ARG_TYPE_VECTOR:
                if (!useless_type_conversion_p
                       (n->simdclone->args[i].orig_type,
                        TREE_TYPE (gimple_call_arg (stmt, i))))
                  i = -1;
                else if (arginfo[i].dt == vect_constant_def
                         || arginfo[i].dt == vect_external_def
                         || arginfo[i].linear_step)
                  this_badness += 64;
                break;
              case SIMD_CLONE_ARG_TYPE_UNIFORM:
                if (arginfo[i].dt != vect_constant_def
                    && arginfo[i].dt != vect_external_def)
                  i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
                if (arginfo[i].dt == vect_constant_def
                    || arginfo[i].dt == vect_external_def
                    || (arginfo[i].linear_step
                        != n->simdclone->args[i].linear_step))
                  i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
                /* FORNOW */
                i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_MASK:
                gcc_unreachable ();
              }
            if (i == (size_t) -1)
              break;
            if (n->simdclone->args[i].alignment > arginfo[i].align)
              {
                i = -1;
                break;
              }
            if (arginfo[i].align)
              this_badness += (exact_log2 (arginfo[i].align)
                               - exact_log2 (n->simdclone->args[i].alignment));
          }
        if (i == (size_t) -1)
          continue;
        if (bestn == NULL || this_badness < badness)
          {
            bestn = n;
            badness = this_badness;
          }
      }

  if (bestn == NULL)
    return false;

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
         || arginfo[i].dt == vect_external_def)
        && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
        arginfo[i].vectype
          = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
                                                                     i)));
        if (arginfo[i].vectype == NULL
            || (simd_clone_subparts (arginfo[i].vectype)
                > bestn->simdclone->simdlen))
          return false;
      }

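  /* The chosen clone handles NUNITS (its simdlen) lanes per call, so
     VF / NUNITS calls cover one iteration of the vector loop; e.g. a
     simdlen-4 clone in a loop with VF 8 is called twice per iteration.  */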
  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;

  /* If the function isn't const, only allow it in simd loops where the
     user has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
        if ((bestn->simdclone->args[i].arg_type
             == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
            || (bestn->simdclone->args[i].arg_type
                == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
          {
            STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
                                                                      + 1);
            STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
            tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
                       ? size_type_node : TREE_TYPE (arginfo[i].op);
            tree ls = build_int_cst (lst, arginfo[i].linear_step);
            STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
            tree sll = arginfo[i].simd_lane_linear
                       ? boolean_true_node : boolean_false_node;
            STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
          }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
        {
          ratype = rtype;
          rtype = TREE_TYPE (ratype);
        }
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build the argument list for the vectorized call.  */
      if (j == 0)
        vargs.create (nargs);
      else
        vargs.truncate (0);

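      /* Each argument is marshalled according to the clone's notion of
         its type: vector arguments may have to be split into, or glued
         together from, several vectors to match the clone's vector mode;
         uniform arguments are passed through unchanged; and linear
         arguments are materialized as a start value plus the
         compile-time step for this copy.  */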
      for (i = 0; i < nargs; i++)
        {
          unsigned int k, l, m, o;
          tree atype;
          op = gimple_call_arg (stmt, i);
          switch (bestn->simdclone->args[i].arg_type)
            {
            case SIMD_CLONE_ARG_TYPE_VECTOR:
              atype = bestn->simdclone->args[i].vector_type;
              o = nunits / simd_clone_subparts (atype);
              for (m = j * o; m < (j + 1) * o; m++)
                {
                  if (simd_clone_subparts (atype)
                      < simd_clone_subparts (arginfo[i].vectype))
                    {
                      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
                      k = (simd_clone_subparts (arginfo[i].vectype)
                           / simd_clone_subparts (atype));
                      gcc_assert ((k & (k - 1)) == 0);
                      if (m == 0)
                        vec_oprnd0
                          = vect_get_vec_def_for_operand (op, stmt_info);
                      else
                        {
                          vec_oprnd0 = arginfo[i].op;
                          if ((m & (k - 1)) == 0)
                            vec_oprnd0
                              = vect_get_vec_def_for_stmt_copy (vinfo,
                                                                vec_oprnd0);
                        }
                      arginfo[i].op = vec_oprnd0;
                      vec_oprnd0
                        = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
                                  bitsize_int (prec),
                                  bitsize_int ((m & (k - 1)) * prec));
                      gassign *new_stmt
                        = gimple_build_assign (make_ssa_name (atype),
                                               vec_oprnd0);
                      vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                      vargs.safe_push (gimple_assign_lhs (new_stmt));
                    }
                  else
                    {
                      k = (simd_clone_subparts (atype)
                           / simd_clone_subparts (arginfo[i].vectype));
                      gcc_assert ((k & (k - 1)) == 0);
                      vec<constructor_elt, va_gc> *ctor_elts;
                      if (k != 1)
                        vec_alloc (ctor_elts, k);
                      else
                        ctor_elts = NULL;
                      for (l = 0; l < k; l++)
                        {
                          if (m == 0 && l == 0)
                            vec_oprnd0
                              = vect_get_vec_def_for_operand (op, stmt_info);
                          else
                            vec_oprnd0
                              = vect_get_vec_def_for_stmt_copy (vinfo,
                                                                arginfo[i].op);
                          arginfo[i].op = vec_oprnd0;
                          if (k == 1)
                            break;
                          CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
                                                  vec_oprnd0);
                        }
                      if (k == 1)
                        vargs.safe_push (vec_oprnd0);
                      else
                        {
                          vec_oprnd0 = build_constructor (atype, ctor_elts);
                          gassign *new_stmt
                            = gimple_build_assign (make_ssa_name (atype),
                                                   vec_oprnd0);
                          vect_finish_stmt_generation (stmt_info, new_stmt,
                                                       gsi);
                          vargs.safe_push (gimple_assign_lhs (new_stmt));
                        }
                    }
                }
              break;
            case SIMD_CLONE_ARG_TYPE_UNIFORM:
              vargs.safe_push (op);
              break;
            case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
              if (j == 0)
                {
                  gimple_seq stmts;
                  arginfo[i].op
                    = force_gimple_operand (arginfo[i].op, &stmts, true,
                                            NULL_TREE);
                  if (stmts != NULL)
                    {
                      basic_block new_bb;
                      edge pe = loop_preheader_edge (loop);
                      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
                      gcc_assert (!new_bb);
                    }
                  if (arginfo[i].simd_lane_linear)
                    {
                      vargs.safe_push (arginfo[i].op);
                      break;
                    }
                  tree phi_res = copy_ssa_name (op);
                  gphi *new_phi = create_phi_node (phi_res, loop->header);
                  loop_vinfo->add_stmt (new_phi);
                  add_phi_arg (new_phi, arginfo[i].op,
                               loop_preheader_edge (loop), UNKNOWN_LOCATION);
                  enum tree_code code
                    = POINTER_TYPE_P (TREE_TYPE (op))
                      ? POINTER_PLUS_EXPR : PLUS_EXPR;
                  tree type = POINTER_TYPE_P (TREE_TYPE (op))
                              ? sizetype : TREE_TYPE (op);
                  widest_int cst
                    = wi::mul (bestn->simdclone->args[i].linear_step,
                               ncopies * nunits);
                  tree tcst = wide_int_to_tree (type, cst);
                  tree phi_arg = copy_ssa_name (op);
                  gassign *new_stmt
                    = gimple_build_assign (phi_arg, code, phi_res, tcst);
                  gimple_stmt_iterator si = gsi_after_labels (loop->header);
                  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
                  loop_vinfo->add_stmt (new_stmt);
                  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
                               UNKNOWN_LOCATION);
                  arginfo[i].op = phi_res;
                  vargs.safe_push (phi_res);
                }
              else
                {
                  enum tree_code code
                    = POINTER_TYPE_P (TREE_TYPE (op))
                      ? POINTER_PLUS_EXPR : PLUS_EXPR;
                  tree type = POINTER_TYPE_P (TREE_TYPE (op))
                              ? sizetype : TREE_TYPE (op);
                  widest_int cst
                    = wi::mul (bestn->simdclone->args[i].linear_step,
                               j * nunits);
                  tree tcst = wide_int_to_tree (type, cst);
                  new_temp = make_ssa_name (TREE_TYPE (op));
                  gassign *new_stmt
                    = gimple_build_assign (new_temp, code,
                                           arginfo[i].op, tcst);
                  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  vargs.safe_push (new_temp);
                }
              break;
            case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
            case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
            default:
              gcc_unreachable ();
            }
        }

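      /* All arguments are in place; emit the call.  If the clone returns
         its value in an array (RATYPE) or in vectors narrower or wider
         than VECTYPE, the code below repacks the pieces into vectors of
         the type the scalar statement expects.  */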
      gcall *new_call = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
        {
          gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
          if (ratype)
            new_temp = create_tmp_var (ratype);
          else if (simd_clone_subparts (vectype)
                   == simd_clone_subparts (rtype))
            new_temp = make_ssa_name (vec_dest, new_call);
          else
            new_temp = make_ssa_name (rtype, new_call);
          gimple_call_set_lhs (new_call, new_temp);
        }
      stmt_vec_info new_stmt_info
        = vect_finish_stmt_generation (stmt_info, new_call, gsi);

      if (vec_dest)
        {
          if (simd_clone_subparts (vectype) < nunits)
            {
              unsigned int k, l;
              poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
              poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
              k = nunits / simd_clone_subparts (vectype);
              gcc_assert ((k & (k - 1)) == 0);
              for (l = 0; l < k; l++)
                {
                  tree t;
                  if (ratype)
                    {
                      t = build_fold_addr_expr (new_temp);
                      t = build2 (MEM_REF, vectype, t,
                                  build_int_cst (TREE_TYPE (t), l * bytes));
                    }
                  else
                    t = build3 (BIT_FIELD_REF, vectype, new_temp,
                                bitsize_int (prec), bitsize_int (l * prec));
                  gimple *new_stmt
                    = gimple_build_assign (make_ssa_name (vectype), t);
                  new_stmt_info
                    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

                  if (j == 0 && l == 0)
                    STMT_VINFO_VEC_STMT (stmt_info)
                      = *vec_stmt = new_stmt_info;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

                  prev_stmt_info = new_stmt_info;
                }

              if (ratype)
                vect_clobber_variable (stmt_info, gsi, new_temp);
              continue;
            }
          else if (simd_clone_subparts (vectype) > nunits)
            {
              unsigned int k = (simd_clone_subparts (vectype)
                                / simd_clone_subparts (rtype));
              gcc_assert ((k & (k - 1)) == 0);
              if ((j & (k - 1)) == 0)
                vec_alloc (ret_ctor_elts, k);
              if (ratype)
                {
                  unsigned int m, o = nunits / simd_clone_subparts (rtype);
                  for (m = 0; m < o; m++)
                    {
                      tree tem = build4 (ARRAY_REF, rtype, new_temp,
                                         size_int (m), NULL_TREE, NULL_TREE);
                      gimple *new_stmt
                        = gimple_build_assign (make_ssa_name (rtype), tem);
                      new_stmt_info
                        = vect_finish_stmt_generation (stmt_info, new_stmt,
                                                       gsi);
                      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
                                              gimple_assign_lhs (new_stmt));
                    }
                  vect_clobber_variable (stmt_info, gsi, new_temp);
                }
              else
                CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
              if ((j & (k - 1)) != k - 1)
                continue;
              vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
              gimple *new_stmt
                = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
              new_stmt_info
                = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

              if ((unsigned) j == k - 1)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

              prev_stmt_info = new_stmt_info;
              continue;
            }
          else if (ratype)
            {
              tree t = build_fold_addr_expr (new_temp);
              t = build2 (MEM_REF, vectype, t,
                          build_int_cst (TREE_TYPE (t), 0));
              gimple *new_stmt
                = gimple_build_assign (make_ssa_name (vec_dest), t);
              new_stmt_info
                = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
              vect_clobber_variable (stmt_info, gsi, new_temp);
            }
        }

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

      prev_stmt_info = new_stmt_info;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in DCE.
     We however cannot remove it here, due to the way the SSA name
     it defines is mapped to the new definition.  So just replace the
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  gimple *new_stmt;
  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
        lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)->stmt);
      else
        lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  vinfo->replace_stmt (gsi, stmt_info, new_stmt);
  unlink_stmt_vdef (stmt);

  return true;
}


/* Function vect_gen_widened_results_half

   Create a vector stmt whose code is CODE, whose number of operands is
   given by OP_TYPE and whose result variable is VEC_DEST; its arguments
   are VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted
   at GSI.  In the case that CODE is a CALL_EXPR, this means that a call
   to DECL needs to be created (DECL is a function-decl of a target
   builtin).  STMT_INFO is the original scalar stmt that we are
   vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               stmt_vec_info stmt_info)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target-specific support.  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support.  */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return new_stmt;
}
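
/* A widening conversion doubles the element width, so each input vector
   produces two result vectors (a low and a high half); on most targets
   these come from a pair of tree codes such as VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR, e.g. one V8HI unpacking into two V4SIs.  The
   helper above generates one such half.  */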


/* Get vectorized definitions for loop-based vectorization of STMT_INFO.
   For the first operand we call vect_get_vec_def_for_operand (with OPRND
   containing the scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy () using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy () for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
                          vec<tree> *vec_oprnds, int multi_step_cvt)
{
  vec_info *vinfo = stmt_info->vinfo;
  tree vec_oprnd;

  /* Get the first vector operand.  All the vector operands except the
     very first one (the scalar OPRND) are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get the second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
                              multi_step_cvt - 1);
}
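
/* Each level of recursion above pushes two defs, so a call with
   MULTI_STEP_CVT == N collects 2 * (N + 1) vector operands.  The NARROW
   path of vectorizable_conversion below passes
   vect_pow2 (multi_step_cvt) - 1, so a two-step narrowing collects the
   four source vectors that pack down into a single result.  */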


/* Create vectorized demotion statements for the vector operands from
   VEC_OPRNDS.  For multi-step conversions store the resulting vectors and
   call the function recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
                                       int multi_step_cvt,
                                       stmt_vec_info stmt_info,
                                       vec<tree> vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create the demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      stmt_vec_info new_stmt_info
        = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

      if (multi_step_cvt)
        /* Store the resulting vector for the next recursive call.  */
        (*vec_oprnds)[i / 2] = new_tmp;
      else
        {
          /* This is the last step of the conversion sequence.  Store the
             vectors in SLP_NODE or in the vector info of the scalar
             statement (or in the STMT_VINFO_RELATED_STMT chain).  */
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;

              *prev_stmt_info = new_stmt_info;
            }
        }
    }

  /* For multi-step demotion operations we first generate demotion
     operations from the source type to the intermediate types, and then
     combine the results (stored in VEC_OPRNDS) in a demotion operation
     to the destination type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at
         the previous level.  */
      vec_oprnds->truncate ((i + 1) / 2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
                                             stmt_info, vec_dsts, gsi,
                                             slp_node, VEC_PACK_TRUNC_EXPR,
                                             prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}


/* Create vectorized promotion statements for the vector operands from
   VEC_OPRNDS0 and VEC_OPRNDS1, for a binary operation associated with
   scalar statement STMT_INFO.  For multi-step conversions store the
   resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
                                        vec<tree> *vec_oprnds1,
                                        stmt_vec_info stmt_info, tree vec_dest,
                                        gimple_stmt_iterator *gsi,
                                        enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = (*vec_oprnds1)[i];
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of the promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi,
                                                 stmt_info);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi,
                                                 stmt_info);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
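
/* Promotion mirrors demotion: each input vector yields two result
   vectors, so VEC_OPRNDS0 doubles in length per step, where the demotion
   helper above instead halves its operand list at each recursion
   level.  */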


/* Check if STMT_INFO performs a conversion operation that can be
   vectorized.  If VEC_STMT is also passed, vectorize STMT_INFO: create a
   vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                         stmt_vec_info *vec_stmt, slp_tree slp_node,
                         stmt_vector_for_cost *cost_vec)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check the types of the lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
            && INTEGRAL_TYPE_P (rhs_type))
           || (SCALAR_FLOAT_TYPE_P (lhs_type)
               && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
           && !type_has_mode_precision_p (lhs_type))
          || (INTEGRAL_TYPE_P (rhs_type)
              && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision unsupported."
                         "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
         OP1.  */
      if (CONSTANT_CLASS_P (op0))
        ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in);
      else
        ok = vect_is_simple_use (op1, vinfo, &dt[1]);

      if (!ok)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  /* If op0 is an external or constant def, use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't convert between boolean and non-"
                           "boolean vectors ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (multiple_p (nunits_out, nunits_in))
    modifier = NARROW;
  else
    {
      gcc_checking_assert (multiple_p (nunits_in, nunits_out));
      modifier = WIDEN;
    }
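
  /* For example, int <-> float on the same number of lanes is a NONE
     conversion, int -> short produces more (narrower) output lanes per
     input vector and is a NARROW conversion, and short -> int is a
     WIDEN one.  */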

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
        return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
                                         &decl1, &code1))
        break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt_info, vectype_out,
                                          vectype_in, &code1, &code2,
                                          &multi_step_cvt, &interm_types))
        {
          /* Binary widening operations can only be supported directly by
             the architecture.  */
          gcc_assert (!(multi_step_cvt && op_type == binary_op));
          break;
        }

      if (code != FLOAT_EXPR
          || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

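      /* A FLOAT_EXPR to a wider type that is not directly supported may
         still be done via a wider integer mode; e.g. short -> double may
         be split into a short -> int widening followed by an int ->
         double conversion.  Search the 2x-wider integer modes for one
         where both steps are supported.  */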
      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
        {
          rhs_mode = rhs_mode_iter.require ();
          if (GET_MODE_SIZE (rhs_mode) > fltsz)
            break;

          cvt_type
            = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
          cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
          if (cvt_type == NULL_TREE)
            goto unsupported;

          if (GET_MODE_SIZE (rhs_mode) == fltsz)
            {
              if (!supportable_convert_operation (code, vectype_out,
                                                  cvt_type, &decl1, &codecvt1))
                goto unsupported;
            }
          else if (!supportable_widening_operation (code, stmt_info,
                                                    vectype_out, cvt_type,
                                                    &codecvt1, &codecvt2,
                                                    &multi_step_cvt,
                                                    &interm_types))
            continue;
          else
            gcc_assert (multi_step_cvt == 0);

          if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
                                              vectype_in, &code1, &code2,
                                              &multi_step_cvt, &interm_types))
            {
              found_mode = true;
              break;
            }
        }

      if (!found_mode)
        goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
        codecvt2 = ERROR_MARK;
      else
        {
          multi_step_cvt++;
          interm_types.safe_push (cvt_type);
          cvt_type = NULL_TREE;
        }
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;

      if (code != FIX_TRUNC_EXPR
          || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

      cvt_type
        = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
        goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
                                          &decl1, &codecvt1))
        goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      DUMP_VECT_SCOPE ("vectorizable_conversion");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
        {
          STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
          vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
                                  cost_vec);
        }
      else if (modifier == NARROW)
        {
          STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
                                              cost_vec);
        }
      else
        {
          STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
                                              cost_vec);
        }
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
        op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
        op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion
     operations to the intermediate types, and then from those types to
     the final one.  We create vector destinations for the intermediate
     types (received in INTERM_TYPES from supportable_*_operation) and
     store them in the correct order for future use in
     vect_create_vectorized_*_stmts ().  */
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
                                          (cvt_type && modifier == WIDEN)
                                          ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
           interm_types.iterate (i, &intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          vec_dsts.quick_push (vec_dest);
        }
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
                                            modifier == WIDEN
                                            ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
        {
          vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
          if (op_type == binary_op)
            vec_oprnds1.create (1);
        }
      else if (modifier == NARROW)
        vec_oprnds0.create (2 * (multi_step_cvt
                                 ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
                               NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              stmt_vec_info new_stmt_info;
              /* Arguments are ready, create the new vector stmt.  */
              if (code1 == CALL_EXPR)
                {
                  gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  new_stmt_info
                    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }
              else
                {
                  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
                  gassign *new_stmt
                    = gimple_build_assign (vec_dest, code1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  new_stmt_info
                    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }

              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info)
                      = *vec_stmt = new_stmt_info;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
                  prev_stmt_info = new_stmt_info;
                }
            }
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e., we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              if (slp_node)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    {
                      unsigned int k;

                      vec_oprnd1 = op1;
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);

                      vect_get_vec_defs (op0, NULL_TREE, stmt_info,
                                         &vec_oprnds0, NULL, slp_node);
                    }
                  else
                    vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
                                       &vec_oprnds1, slp_node);
                }
              else
                {
                  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
                  vec_oprnds0.quick_push (vec_oprnd0);
                  if (op_type == binary_op)
                    {
                      if (code == WIDEN_LSHIFT_EXPR)
                        vec_oprnd1 = op1;
                      else
                        vec_oprnd1
                          = vect_get_vec_def_for_operand (op1, stmt_info);
                      vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
              vec_oprnds0.truncate (0);
              vec_oprnds0.quick_push (vec_oprnd0);
              if (op_type == binary_op)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    vec_oprnd1 = op1;
                  else
                    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
                                                                 vec_oprnd1);
                  vec_oprnds1.truncate (0);
                  vec_oprnds1.quick_push (vec_oprnd1);
                }
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          for (i = multi_step_cvt; i >= 0; i--)
            {
              tree this_dest = vec_dsts[i];
              enum tree_code c1 = code1, c2 = code2;
              if (i == 0 && codecvt2 != ERROR_MARK)
                {
                  c1 = codecvt1;
                  c2 = codecvt2;
                }
              vect_create_vectorized_promotion_stmts (&vec_oprnds0,
                                                      &vec_oprnds1, stmt_info,
                                                      this_dest, gsi,
                                                      c1, c2, decl1, decl2,
                                                      op_type);
            }

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              stmt_vec_info new_stmt_info;
              if (cvt_type)
                {
                  if (codecvt1 == CALL_EXPR)
                    {
                      gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_call_set_lhs (new_stmt, new_temp);
                      new_stmt_info
                        = vect_finish_stmt_generation (stmt_info, new_stmt,
                                                       gsi);
                    }
                  else
                    {
                      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                      new_temp = make_ssa_name (vec_dest);
                      gassign *new_stmt
                        = gimple_build_assign (new_temp, codecvt1, vop0);
                      new_stmt_info
                        = vect_finish_stmt_generation (stmt_info, new_stmt,
                                                       gsi);
                    }
                }
              else
                new_stmt_info = vinfo->lookup_def (vop0);

              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
                  prev_stmt_info = new_stmt_info;
                }
            }
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e., we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (slp_node)
            vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
                               slp_node);
          else
            {
              vec_oprnds0.truncate (0);
              vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
                                        vect_pow2 (multi_step_cvt) - 1);
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          if (cvt_type)
            FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
              {
                if (codecvt1 == CALL_EXPR)
                  {
                    gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_call_set_lhs (new_stmt, new_temp);
                    vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  }
                else
                  {
                    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                    new_temp = make_ssa_name (vec_dest);
                    gassign *new_stmt
                      = gimple_build_assign (new_temp, codecvt1, vop0);
                    vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  }

                vec_oprnds0[i] = new_temp;
              }

          vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
                                                 stmt_info, vec_dsts, gsi,
                                                 slp_node, code1,
                                                 &prev_stmt_info);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}


/* Function vectorizable_assignment.

   Check if STMT_INFO performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                         stmt_vec_info *vec_stmt, slp_tree slp_node,
                         stmt_vector_for_cost *cost_vec)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  int ndts = 1;
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is this a vectorizable assignment?  */
  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
          || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
          || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
                       GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;
5288 | ||
7b7b1813 RG |
5289 | /* We do not handle bit-precision changes. */ |
5290 | if ((CONVERT_EXPR_CODE_P (code) | |
5291 | || code == VIEW_CONVERT_EXPR) | |
5292 | && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) | |
2be65d9e RS |
5293 | && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
5294 | || !type_has_mode_precision_p (TREE_TYPE (op))) | |
7b7b1813 RG |
5295 | /* But a conversion that does not change the bit-pattern is ok. */ |
5296 | && !((TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
5297 | > TYPE_PRECISION (TREE_TYPE (op))) | |
2dab46d5 IE |
5298 | && TYPE_UNSIGNED (TREE_TYPE (op))) |
5299 | /* Conversion between boolean types of different sizes is | |
5300 | a simple assignment in case their vectypes are the same |
5301 | boolean vectors. */ | |
5302 | && (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
5303 | || !VECTOR_BOOLEAN_TYPE_P (vectype_in))) | |
7b7b1813 | 5304 | { |
73fbfcad | 5305 | if (dump_enabled_p ()) |
78c60e3d SS |
5306 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5307 | "type conversion to/from bit-precision " | |
e645e942 | 5308 | "unsupported.\n"); |
7b7b1813 RG |
5309 | return false; |
5310 | } | |
5311 | ||
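/* A hedged illustration of the check above (not from the source): a
   conversion between two 12-bit bit-field types is rejected, because
   each vector lane would need extra masking to re-establish the
   12-bit precision. Widening the same unsigned 12-bit value to a
   full "unsigned int" is a plain zero-extension that keeps the
   bit-pattern, so it takes the exception path and is still
   vectorized. */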
ebfd146a IR |
5312 | if (!vec_stmt) /* transformation not required. */ |
5313 | { | |
5314 | STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; | |
adac3a68 | 5315 | DUMP_VECT_SCOPE ("vectorizable_assignment"); |
68435eb2 | 5316 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
ebfd146a IR |
5317 | return true; |
5318 | } | |
5319 | ||
67b8dbac | 5320 | /* Transform. */ |
73fbfcad | 5321 | if (dump_enabled_p ()) |
e645e942 | 5322 | dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n"); |
ebfd146a IR |
5323 | |
5324 | /* Handle def. */ | |
5325 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5326 | ||
5327 | /* Handle use. */ | |
f18b55bd | 5328 | for (j = 0; j < ncopies; j++) |
ebfd146a | 5329 | { |
f18b55bd IR |
5330 | /* Handle uses. */ |
5331 | if (j == 0) | |
86a91c0a | 5332 | vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); |
f18b55bd | 5333 | else |
e4057a39 | 5334 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); |
f18b55bd IR |
5335 | |
5336 | /* Arguments are ready. Create the new vector stmt. */ |
e1bd7296 | 5337 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 5338 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) |
f18b55bd | 5339 | { |
7b7ec6c5 RG |
5340 | if (CONVERT_EXPR_CODE_P (code) |
5341 | || code == VIEW_CONVERT_EXPR) | |
4a73490d | 5342 | vop = build1 (VIEW_CONVERT_EXPR, vectype, vop); |
e1bd7296 | 5343 | gassign *new_stmt = gimple_build_assign (vec_dest, vop); |
f18b55bd IR |
5344 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5345 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
5346 | new_stmt_info |
5347 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
f18b55bd | 5348 | if (slp_node) |
e1bd7296 | 5349 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
f18b55bd | 5350 | } |
ebfd146a IR |
5351 | |
5352 | if (slp_node) | |
f18b55bd IR |
5353 | continue; |
5354 | ||
5355 | if (j == 0) | |
e1bd7296 | 5356 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
f18b55bd | 5357 | else |
e1bd7296 | 5358 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
f18b55bd | 5359 | |
e1bd7296 | 5360 | prev_stmt_info = new_stmt_info; |
f18b55bd | 5361 | } |
b8698a0f | 5362 | |
9771b263 | 5363 | vec_oprnds.release (); |
ebfd146a IR |
5364 | return true; |
5365 | } | |
5366 | ||
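/* A worked sketch of the transform above (invented SSA names, not
   compiler output): for a same-width conversion

     u_5 = (unsigned int) s_4;

   with vectype "vector(4) unsigned int" and vectype_in
   "vector(4) int", each copy becomes

     vect_u_5.8 = VIEW_CONVERT_EXPR<vector(4) unsigned int>(vect_s_4.7);

   while a plain SSA copy "u_5 = s_4" becomes a vector-to-vector copy
   without the VIEW_CONVERT_EXPR wrapper. */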
9dc3f7de | 5367 | |
1107f3ae IR |
5368 | /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE |
5369 | either as shift by a scalar or by a vector. */ | |
5370 | ||
5371 | bool | |
5372 | vect_supportable_shift (enum tree_code code, tree scalar_type) | |
5373 | { | |
5374 | ||
ef4bddc2 | 5375 | machine_mode vec_mode; |
1107f3ae IR |
5376 | optab optab; |
5377 | int icode; | |
5378 | tree vectype; | |
5379 | ||
5380 | vectype = get_vectype_for_scalar_type (scalar_type); | |
5381 | if (!vectype) | |
5382 | return false; | |
5383 | ||
5384 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5385 | if (!optab | |
5386 | || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing) | |
5387 | { | |
5388 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5389 | if (!optab | |
5390 | || (optab_handler (optab, TYPE_MODE (vectype)) | |
5391 | == CODE_FOR_nothing)) | |
5392 | return false; | |
5393 | } | |
5394 | ||
5395 | vec_mode = TYPE_MODE (vectype); | |
5396 | icode = (int) optab_handler (optab, vec_mode); | |
5397 | if (icode == CODE_FOR_nothing) | |
5398 | return false; | |
5399 | ||
5400 | return true; | |
5401 | } | |
5402 | ||
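/* A minimal caller sketch (the call site below is hypothetical):
   pattern recognizers use this predicate to check that a shift they
   are about to synthesize will survive vectorization, e.g.

     if (!vect_supportable_shift (RSHIFT_EXPR, itype))
       return NULL;   /* Give up on the pattern.  */

   Note the probe order in the function above: the vector-by-scalar
   optab is tried first, the vector-by-vector optab only as a
   fallback. */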
5403 | ||
9dc3f7de IR |
5404 | /* Function vectorizable_shift. |
5405 | ||
32e8e429 RS |
5406 | Check if STMT_INFO performs a shift operation that can be vectorized. |
5407 | If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized | |
5408 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5409 | Return true if STMT_INFO is vectorizable in this way. */ | |
9dc3f7de IR |
5410 | |
5411 | static bool | |
32e8e429 | 5412 | vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 5413 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 5414 | stmt_vector_for_cost *cost_vec) |
9dc3f7de IR |
5415 | { |
5416 | tree vec_dest; | |
5417 | tree scalar_dest; | |
5418 | tree op0, op1 = NULL; | |
5419 | tree vec_oprnd1 = NULL_TREE; | |
9dc3f7de IR |
5420 | tree vectype; |
5421 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
5422 | enum tree_code code; | |
ef4bddc2 | 5423 | machine_mode vec_mode; |
9dc3f7de IR |
5424 | tree new_temp; |
5425 | optab optab; | |
5426 | int icode; | |
ef4bddc2 | 5427 | machine_mode optab_op2_mode; |
9dc3f7de | 5428 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 5429 | int ndts = 2; |
9dc3f7de | 5430 | stmt_vec_info prev_stmt_info; |
928686b1 RS |
5431 | poly_uint64 nunits_in; |
5432 | poly_uint64 nunits_out; | |
9dc3f7de | 5433 | tree vectype_out; |
cede2577 | 5434 | tree op1_vectype; |
9dc3f7de IR |
5435 | int ncopies; |
5436 | int j, i; | |
6e1aa848 DN |
5437 | vec<tree> vec_oprnds0 = vNULL; |
5438 | vec<tree> vec_oprnds1 = vNULL; | |
9dc3f7de IR |
5439 | tree vop0, vop1; |
5440 | unsigned int k; | |
49eab32e | 5441 | bool scalar_shift_arg = true; |
9dc3f7de | 5442 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5443 | vec_info *vinfo = stmt_info->vinfo; |
9dc3f7de IR |
5444 | |
5445 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
5446 | return false; | |
5447 | ||
66c16fd9 RB |
5448 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5449 | && ! vec_stmt) | |
9dc3f7de IR |
5450 | return false; |
5451 | ||
5452 | /* Is STMT_INFO a vectorizable shift operation? */ |
32e8e429 RS |
5453 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
5454 | if (!stmt) | |
9dc3f7de IR |
5455 | return false; |
5456 | ||
5457 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5458 | return false; | |
5459 | ||
5460 | code = gimple_assign_rhs_code (stmt); | |
5461 | ||
5462 | if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR | |
5463 | || code == RROTATE_EXPR)) | |
5464 | return false; | |
5465 | ||
5466 | scalar_dest = gimple_assign_lhs (stmt); | |
5467 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
2be65d9e | 5468 | if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))) |
7b7b1813 | 5469 | { |
73fbfcad | 5470 | if (dump_enabled_p ()) |
78c60e3d | 5471 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5472 | "bit-precision shifts not supported.\n"); |
7b7b1813 RG |
5473 | return false; |
5474 | } | |
9dc3f7de IR |
5475 | |
5476 | op0 = gimple_assign_rhs1 (stmt); | |
894dd753 | 5477 | if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype)) |
9dc3f7de | 5478 | { |
73fbfcad | 5479 | if (dump_enabled_p ()) |
78c60e3d | 5480 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5481 | "use not simple.\n"); |
9dc3f7de IR |
5482 | return false; |
5483 | } | |
5484 | /* If op0 is an external or constant def use a vector type with | |
5485 | the same size as the output vector type. */ | |
5486 | if (!vectype) | |
5487 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5488 | if (vec_stmt) | |
5489 | gcc_assert (vectype); | |
5490 | if (!vectype) | |
5491 | { | |
73fbfcad | 5492 | if (dump_enabled_p ()) |
78c60e3d | 5493 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5494 | "no vectype for scalar type\n"); |
9dc3f7de IR |
5495 | return false; |
5496 | } | |
5497 | ||
5498 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5499 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
928686b1 | 5500 | if (maybe_ne (nunits_out, nunits_in)) |
9dc3f7de IR |
5501 | return false; |
5502 | ||
5503 | op1 = gimple_assign_rhs2 (stmt); | |
fef96d8e RS |
5504 | stmt_vec_info op1_def_stmt_info; |
5505 | if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype, | |
5506 | &op1_def_stmt_info)) | |
9dc3f7de | 5507 | { |
73fbfcad | 5508 | if (dump_enabled_p ()) |
78c60e3d | 5509 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5510 | "use not simple.\n"); |
9dc3f7de IR |
5511 | return false; |
5512 | } | |
5513 | ||
9dc3f7de IR |
5514 | /* Multiple types in SLP are handled by creating the appropriate number of |
5515 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
5516 | case of SLP. */ | |
fce57248 | 5517 | if (slp_node) |
9dc3f7de IR |
5518 | ncopies = 1; |
5519 | else | |
e8f142e2 | 5520 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
9dc3f7de IR |
5521 | |
5522 | gcc_assert (ncopies >= 1); | |
5523 | ||
5524 | /* Determine whether the shift amount is a vector or a scalar. If the |
5525 | shift/rotate amount is a vector, use the vector/vector shift optabs. */ | |
5526 | ||
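/* Hedged examples of the classification below: in "y[i] << s[i]" the
   amount has dt[1] == vect_internal_def, so scalar_shift_arg becomes
   false and a vector/vector shift is required; in "y[i] << 3" or
   "y[i] << n" with loop-invariant n, dt[1] is vect_constant_def or
   vect_external_def and the scalar amount can be used directly. */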
dbfa87aa YR |
5527 | if ((dt[1] == vect_internal_def |
5528 | || dt[1] == vect_induction_def) | |
5529 | && !slp_node) | |
49eab32e JJ |
5530 | scalar_shift_arg = false; |
5531 | else if (dt[1] == vect_constant_def | |
5532 | || dt[1] == vect_external_def | |
5533 | || dt[1] == vect_internal_def) | |
5534 | { | |
5535 | /* In SLP, we need to check whether the shift count is the same |
5536 | in all the stmts; in loops, if it is a constant or invariant, |
5537 | it is always a scalar shift. */ |
5538 | if (slp_node) | |
5539 | { | |
b9787581 RS |
5540 | vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node); |
5541 | stmt_vec_info slpstmt_info; | |
49eab32e | 5542 | |
b9787581 RS |
5543 | FOR_EACH_VEC_ELT (stmts, k, slpstmt_info) |
5544 | { | |
5545 | gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt); | |
5546 | if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0)) | |
5547 | scalar_shift_arg = false; | |
5548 | } | |
49eab32e | 5549 | } |
60d393e8 RB |
5550 | |
5551 | /* If the shift amount is computed by a pattern stmt we cannot | |
5552 | use the scalar amount directly thus give up and use a vector | |
5553 | shift. */ | |
fef96d8e RS |
5554 | if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info)) |
5555 | scalar_shift_arg = false; | |
49eab32e JJ |
5556 | } |
5557 | else | |
5558 | { | |
73fbfcad | 5559 | if (dump_enabled_p ()) |
78c60e3d | 5560 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5561 | "operand mode requires invariant argument.\n"); |
49eab32e JJ |
5562 | return false; |
5563 | } | |
5564 | ||
9dc3f7de | 5565 | /* Vector shifted by vector. */ |
49eab32e | 5566 | if (!scalar_shift_arg) |
9dc3f7de IR |
5567 | { |
5568 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
73fbfcad | 5569 | if (dump_enabled_p ()) |
78c60e3d | 5570 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5571 | "vector/vector shift/rotate found.\n"); |
78c60e3d | 5572 | |
aa948027 JJ |
5573 | if (!op1_vectype) |
5574 | op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out); | |
5575 | if (op1_vectype == NULL_TREE | |
5576 | || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype)) | |
cede2577 | 5577 | { |
73fbfcad | 5578 | if (dump_enabled_p ()) |
78c60e3d SS |
5579 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5580 | "unusable type for last operand in" | |
e645e942 | 5581 | " vector/vector shift/rotate.\n"); |
cede2577 JJ |
5582 | return false; |
5583 | } | |
9dc3f7de IR |
5584 | } |
5585 | /* See if the machine has a vector shifted by scalar insn and if not | |
5586 | then see if it has a vector shifted by vector insn. */ | |
49eab32e | 5587 | else |
9dc3f7de IR |
5588 | { |
5589 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5590 | if (optab | |
5591 | && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing) | |
5592 | { | |
73fbfcad | 5593 | if (dump_enabled_p ()) |
78c60e3d | 5594 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5595 | "vector/scalar shift/rotate found.\n"); |
9dc3f7de IR |
5596 | } |
5597 | else | |
5598 | { | |
5599 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5600 | if (optab | |
5601 | && (optab_handler (optab, TYPE_MODE (vectype)) | |
5602 | != CODE_FOR_nothing)) | |
5603 | { | |
49eab32e JJ |
5604 | scalar_shift_arg = false; |
5605 | ||
73fbfcad | 5606 | if (dump_enabled_p ()) |
78c60e3d | 5607 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5608 | "vector/vector shift/rotate found.\n"); |
9dc3f7de IR |
5609 | |
5610 | /* Unlike the other binary operators, shifts/rotates have | |
5611 | an rhs of type int instead of the same type as the lhs, |
5612 | so make sure the scalar is the right type if we are | |
aa948027 | 5613 | dealing with vectors of long long/long/short/char. */ |
9dc3f7de IR |
5614 | if (dt[1] == vect_constant_def) |
5615 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
aa948027 JJ |
5616 | else if (!useless_type_conversion_p (TREE_TYPE (vectype), |
5617 | TREE_TYPE (op1))) | |
5618 | { | |
5619 | if (slp_node | |
5620 | && TYPE_MODE (TREE_TYPE (vectype)) | |
5621 | != TYPE_MODE (TREE_TYPE (op1))) | |
5622 | { | |
73fbfcad | 5623 | if (dump_enabled_p ()) |
78c60e3d SS |
5624 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5625 | "unusable type for last operand in" | |
e645e942 | 5626 | " vector/vector shift/rotate.\n"); |
21c0a521 | 5627 | return false; |
aa948027 JJ |
5628 | } |
5629 | if (vec_stmt && !slp_node) | |
5630 | { | |
5631 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
86a91c0a | 5632 | op1 = vect_init_vector (stmt_info, op1, |
aa948027 JJ |
5633 | TREE_TYPE (vectype), NULL); |
5634 | } | |
5635 | } | |
9dc3f7de IR |
5636 | } |
5637 | } | |
5638 | } | |
9dc3f7de IR |
5639 | |
5640 | /* Supportable by target? */ | |
5641 | if (!optab) | |
5642 | { | |
73fbfcad | 5643 | if (dump_enabled_p ()) |
78c60e3d | 5644 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5645 | "no optab.\n"); |
9dc3f7de IR |
5646 | return false; |
5647 | } | |
5648 | vec_mode = TYPE_MODE (vectype); | |
5649 | icode = (int) optab_handler (optab, vec_mode); | |
5650 | if (icode == CODE_FOR_nothing) | |
5651 | { | |
73fbfcad | 5652 | if (dump_enabled_p ()) |
78c60e3d | 5653 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5654 | "op not supported by target.\n"); |
9dc3f7de | 5655 | /* Check only during analysis. */ |
cf098191 | 5656 | if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) |
ca09abcb RS |
5657 | || (!vec_stmt |
5658 | && !vect_worthwhile_without_simd_p (vinfo, code))) | |
9dc3f7de | 5659 | return false; |
73fbfcad | 5660 | if (dump_enabled_p ()) |
e645e942 TJ |
5661 | dump_printf_loc (MSG_NOTE, vect_location, |
5662 | "proceeding using word mode.\n"); | |
9dc3f7de IR |
5663 | } |
5664 | ||
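/* Hedged note on the word-mode fallback above: the whole vector is
   then kept in one integer register of UNITS_PER_WORD bytes and the
   operation is emulated there, which is why
   vect_worthwhile_without_simd_p restricts it to operations that
   stay cheap in that form (bitwise ops, shifts, plus/minus and the
   like). */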
5665 | /* Worthwhile without SIMD support? Check only during analysis. */ | |
ca09abcb RS |
5666 | if (!vec_stmt |
5667 | && !VECTOR_MODE_P (TYPE_MODE (vectype)) | |
5668 | && !vect_worthwhile_without_simd_p (vinfo, code)) | |
9dc3f7de | 5669 | { |
73fbfcad | 5670 | if (dump_enabled_p ()) |
78c60e3d | 5671 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5672 | "not worthwhile without SIMD support.\n"); |
9dc3f7de IR |
5673 | return false; |
5674 | } | |
5675 | ||
5676 | if (!vec_stmt) /* transformation not required. */ | |
5677 | { | |
5678 | STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; | |
adac3a68 | 5679 | DUMP_VECT_SCOPE ("vectorizable_shift"); |
68435eb2 | 5680 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
9dc3f7de IR |
5681 | return true; |
5682 | } | |
5683 | ||
67b8dbac | 5684 | /* Transform. */ |
9dc3f7de | 5685 | |
73fbfcad | 5686 | if (dump_enabled_p ()) |
78c60e3d | 5687 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5688 | "transform binary/unary operation.\n"); |
9dc3f7de IR |
5689 | |
5690 | /* Handle def. */ | |
5691 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5692 | ||
9dc3f7de IR |
5693 | prev_stmt_info = NULL; |
5694 | for (j = 0; j < ncopies; j++) | |
5695 | { | |
5696 | /* Handle uses. */ | |
5697 | if (j == 0) | |
5698 | { | |
5699 | if (scalar_shift_arg) | |
5700 | { | |
5701 | /* Vector shl and shr insn patterns can be defined with scalar | |
5702 | operand 2 (shift operand). In this case, use constant or loop | |
5703 | invariant op1 directly, without extending it to vector mode | |
5704 | first. */ | |
5705 | optab_op2_mode = insn_data[icode].operand[2].mode; | |
5706 | if (!VECTOR_MODE_P (optab_op2_mode)) | |
5707 | { | |
73fbfcad | 5708 | if (dump_enabled_p ()) |
78c60e3d | 5709 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5710 | "operand 1 using scalar mode.\n"); |
9dc3f7de | 5711 | vec_oprnd1 = op1; |
8930f723 | 5712 | vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1); |
9771b263 | 5713 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5714 | if (slp_node) |
5715 | { | |
5716 | /* Store vec_oprnd1 for every vector stmt to be created | |
5717 | for SLP_NODE. We check during the analysis that all | |
5718 | the shift arguments are the same. | |
5719 | TODO: Allow different constants for different vector | |
5720 | stmts generated for an SLP instance. */ | |
5721 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 5722 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5723 | } |
5724 | } | |
5725 | } | |
5726 | ||
5727 | /* vec_oprnd1 is available if operand 1 should be of a scalar-type | |
5728 | (a special case for certain kinds of vector shifts); otherwise, |
5729 | operand 1 should be of a vector type (the usual case). */ | |
5730 | if (vec_oprnd1) | |
86a91c0a RS |
5731 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, |
5732 | slp_node); | |
9dc3f7de | 5733 | else |
86a91c0a RS |
5734 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, |
5735 | slp_node); | |
9dc3f7de IR |
5736 | } |
5737 | else | |
e4057a39 | 5738 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1); |
9dc3f7de IR |
5739 | |
5740 | /* Arguments are ready. Create the new vector stmt. */ | |
e1bd7296 | 5741 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 5742 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
9dc3f7de | 5743 | { |
9771b263 | 5744 | vop1 = vec_oprnds1[i]; |
e1bd7296 | 5745 | gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
9dc3f7de IR |
5746 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5747 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
5748 | new_stmt_info |
5749 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
9dc3f7de | 5750 | if (slp_node) |
e1bd7296 | 5751 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
9dc3f7de IR |
5752 | } |
5753 | ||
5754 | if (slp_node) | |
5755 | continue; | |
5756 | ||
5757 | if (j == 0) | |
e1bd7296 | 5758 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
9dc3f7de | 5759 | else |
e1bd7296 RS |
5760 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
5761 | prev_stmt_info = new_stmt_info; | |
9dc3f7de IR |
5762 | } |
5763 | ||
9771b263 DN |
5764 | vec_oprnds0.release (); |
5765 | vec_oprnds1.release (); | |
9dc3f7de IR |
5766 | |
5767 | return true; | |
5768 | } | |
5769 | ||
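/* Sketch of the optab selection above (hedged): for LSHIFT_EXPR on a
   V4SI vectype, optab_for_tree_code with optab_scalar maps to the
   target's shift-by-scalar pattern and with optab_vector to its
   per-lane shift-by-vector pattern. When only the latter exists,
   scalar_shift_arg is flipped to false and an invariant amount is
   broadcast into a vector via vect_init_vector. */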
5770 | ||
ebfd146a IR |
5771 | /* Function vectorizable_operation. |
5772 | ||
32e8e429 | 5773 | Check if STMT_INFO performs a binary, unary or ternary operation that can |
16949072 | 5774 | be vectorized. |
32e8e429 RS |
5775 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized |
5776 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5777 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
5778 | |
5779 | static bool | |
32e8e429 | 5780 | vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 5781 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 5782 | stmt_vector_for_cost *cost_vec) |
ebfd146a | 5783 | { |
00f07b86 | 5784 | tree vec_dest; |
ebfd146a | 5785 | tree scalar_dest; |
16949072 | 5786 | tree op0, op1 = NULL_TREE, op2 = NULL_TREE; |
00f07b86 | 5787 | tree vectype; |
ebfd146a | 5788 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
0eb952ea | 5789 | enum tree_code code, orig_code; |
ef4bddc2 | 5790 | machine_mode vec_mode; |
ebfd146a IR |
5791 | tree new_temp; |
5792 | int op_type; | |
00f07b86 | 5793 | optab optab; |
523ba738 | 5794 | bool target_support_p; |
16949072 RG |
5795 | enum vect_def_type dt[3] |
5796 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 5797 | int ndts = 3; |
ebfd146a | 5798 | stmt_vec_info prev_stmt_info; |
928686b1 RS |
5799 | poly_uint64 nunits_in; |
5800 | poly_uint64 nunits_out; | |
ebfd146a IR |
5801 | tree vectype_out; |
5802 | int ncopies; | |
5803 | int j, i; | |
6e1aa848 DN |
5804 | vec<tree> vec_oprnds0 = vNULL; |
5805 | vec<tree> vec_oprnds1 = vNULL; | |
5806 | vec<tree> vec_oprnds2 = vNULL; | |
16949072 | 5807 | tree vop0, vop1, vop2; |
a70d6342 | 5808 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5809 | vec_info *vinfo = stmt_info->vinfo; |
a70d6342 | 5810 | |
a70d6342 | 5811 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5812 | return false; |
5813 | ||
66c16fd9 RB |
5814 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5815 | && ! vec_stmt) | |
ebfd146a IR |
5816 | return false; |
5817 | ||
5818 | /* Is STMT a vectorizable binary/unary operation? */ | |
32e8e429 RS |
5819 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
5820 | if (!stmt) | |
ebfd146a IR |
5821 | return false; |
5822 | ||
5823 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5824 | return false; | |
5825 | ||
0eb952ea | 5826 | orig_code = code = gimple_assign_rhs_code (stmt); |
ebfd146a | 5827 | |
1af4ebf5 MG |
5828 | /* For pointer addition and subtraction, we should use the normal |
5829 | plus and minus for the vector operation. */ | |
ebfd146a IR |
5830 | if (code == POINTER_PLUS_EXPR) |
5831 | code = PLUS_EXPR; | |
1af4ebf5 MG |
5832 | if (code == POINTER_DIFF_EXPR) |
5833 | code = MINUS_EXPR; | |
ebfd146a IR |
5834 | |
5835 | /* Support only unary or binary operations. */ | |
5836 | op_type = TREE_CODE_LENGTH (code); | |
16949072 | 5837 | if (op_type != unary_op && op_type != binary_op && op_type != ternary_op) |
ebfd146a | 5838 | { |
73fbfcad | 5839 | if (dump_enabled_p ()) |
78c60e3d | 5840 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5841 | "num. args = %d (not unary/binary/ternary op).\n", |
78c60e3d | 5842 | op_type); |
ebfd146a IR |
5843 | return false; |
5844 | } | |
5845 | ||
b690cc0f RG |
5846 | scalar_dest = gimple_assign_lhs (stmt); |
5847 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
5848 | ||
7b7b1813 RG |
5849 | /* Most operations cannot handle bit-precision types without extra |
5850 | truncations. */ | |
045c1278 | 5851 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
2be65d9e | 5852 | && !type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
7b7b1813 RG |
5853 | /* Exception are bitwise binary operations. */ |
5854 | && code != BIT_IOR_EXPR | |
5855 | && code != BIT_XOR_EXPR | |
5856 | && code != BIT_AND_EXPR) | |
5857 | { | |
73fbfcad | 5858 | if (dump_enabled_p ()) |
78c60e3d | 5859 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5860 | "bit-precision arithmetic not supported.\n"); |
7b7b1813 RG |
5861 | return false; |
5862 | } | |
5863 | ||
ebfd146a | 5864 | op0 = gimple_assign_rhs1 (stmt); |
894dd753 | 5865 | if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype)) |
ebfd146a | 5866 | { |
73fbfcad | 5867 | if (dump_enabled_p ()) |
78c60e3d | 5868 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5869 | "use not simple.\n"); |
ebfd146a IR |
5870 | return false; |
5871 | } | |
b690cc0f RG |
5872 | /* If op0 is an external or constant def use a vector type with |
5873 | the same size as the output vector type. */ | |
5874 | if (!vectype) | |
b036c6c5 IE |
5875 | { |
5876 | /* For a boolean type we cannot determine the vectype from an |
5877 | invariant value (we don't know whether it is a vector |
5878 | of booleans or a vector of integers). We use the output |
5879 | vectype because operations on booleans don't change |
5880 | the type. */ |
2568d8a1 | 5881 | if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0))) |
b036c6c5 | 5882 | { |
2568d8a1 | 5883 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest))) |
b036c6c5 IE |
5884 | { |
5885 | if (dump_enabled_p ()) | |
5886 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5887 | "not supported operation on bool value.\n"); | |
5888 | return false; | |
5889 | } | |
5890 | vectype = vectype_out; | |
5891 | } | |
5892 | else | |
5893 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5894 | } | |
7d8930a0 IR |
5895 | if (vec_stmt) |
5896 | gcc_assert (vectype); | |
5897 | if (!vectype) | |
5898 | { | |
73fbfcad | 5899 | if (dump_enabled_p ()) |
7d8930a0 | 5900 | { |
78c60e3d SS |
5901 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5902 | "no vectype for scalar type "); | |
5903 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
5904 | TREE_TYPE (op0)); | |
e645e942 | 5905 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
5906 | } |
5907 | ||
5908 | return false; | |
5909 | } | |
b690cc0f RG |
5910 | |
5911 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5912 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
928686b1 | 5913 | if (maybe_ne (nunits_out, nunits_in)) |
b690cc0f | 5914 | return false; |
ebfd146a | 5915 | |
16949072 | 5916 | if (op_type == binary_op || op_type == ternary_op) |
ebfd146a IR |
5917 | { |
5918 | op1 = gimple_assign_rhs2 (stmt); | |
894dd753 | 5919 | if (!vect_is_simple_use (op1, vinfo, &dt[1])) |
ebfd146a | 5920 | { |
73fbfcad | 5921 | if (dump_enabled_p ()) |
78c60e3d | 5922 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5923 | "use not simple.\n"); |
ebfd146a IR |
5924 | return false; |
5925 | } | |
5926 | } | |
16949072 RG |
5927 | if (op_type == ternary_op) |
5928 | { | |
5929 | op2 = gimple_assign_rhs3 (stmt); | |
894dd753 | 5930 | if (!vect_is_simple_use (op2, vinfo, &dt[2])) |
16949072 | 5931 | { |
73fbfcad | 5932 | if (dump_enabled_p ()) |
78c60e3d | 5933 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5934 | "use not simple.\n"); |
16949072 RG |
5935 | return false; |
5936 | } | |
5937 | } | |
ebfd146a | 5938 | |
b690cc0f | 5939 | /* Multiple types in SLP are handled by creating the appropriate number of |
ff802fa1 | 5940 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
b690cc0f | 5941 | case of SLP. */ |
fce57248 | 5942 | if (slp_node) |
b690cc0f RG |
5943 | ncopies = 1; |
5944 | else | |
e8f142e2 | 5945 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
b690cc0f RG |
5946 | |
5947 | gcc_assert (ncopies >= 1); | |
5948 | ||
9dc3f7de | 5949 | /* Shifts are handled in vectorizable_shift (). */ |
ebfd146a IR |
5950 | if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR |
5951 | || code == RROTATE_EXPR) | |
9dc3f7de | 5952 | return false; |
ebfd146a | 5953 | |
ebfd146a | 5954 | /* Supportable by target? */ |
00f07b86 RH |
5955 | |
5956 | vec_mode = TYPE_MODE (vectype); | |
5957 | if (code == MULT_HIGHPART_EXPR) | |
523ba738 | 5958 | target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)); |
00f07b86 RH |
5959 | else |
5960 | { | |
5961 | optab = optab_for_tree_code (code, vectype, optab_default); | |
5962 | if (!optab) | |
5deb57cb | 5963 | { |
73fbfcad | 5964 | if (dump_enabled_p ()) |
78c60e3d | 5965 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5966 | "no optab.\n"); |
00f07b86 | 5967 | return false; |
5deb57cb | 5968 | } |
523ba738 RS |
5969 | target_support_p = (optab_handler (optab, vec_mode) |
5970 | != CODE_FOR_nothing); | |
5deb57cb JJ |
5971 | } |
5972 | ||
523ba738 | 5973 | if (!target_support_p) |
ebfd146a | 5974 | { |
73fbfcad | 5975 | if (dump_enabled_p ()) |
78c60e3d | 5976 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5977 | "op not supported by target.\n"); |
ebfd146a | 5978 | /* Check only during analysis. */ |
cf098191 | 5979 | if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) |
ca09abcb | 5980 | || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code))) |
ebfd146a | 5981 | return false; |
73fbfcad | 5982 | if (dump_enabled_p ()) |
e645e942 TJ |
5983 | dump_printf_loc (MSG_NOTE, vect_location, |
5984 | "proceeding using word mode.\n"); | |
383d9c83 IR |
5985 | } |
5986 | ||
4a00c761 | 5987 | /* Worthwhile without SIMD support? Check only during analysis. */ |
5deb57cb JJ |
5988 | if (!VECTOR_MODE_P (vec_mode) |
5989 | && !vec_stmt | |
ca09abcb | 5990 | && !vect_worthwhile_without_simd_p (vinfo, code)) |
7d8930a0 | 5991 | { |
73fbfcad | 5992 | if (dump_enabled_p ()) |
78c60e3d | 5993 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5994 | "not worthwhile without SIMD support.\n"); |
e34842c6 | 5995 | return false; |
7d8930a0 | 5996 | } |
ebfd146a | 5997 | |
ebfd146a IR |
5998 | if (!vec_stmt) /* transformation not required. */ |
5999 | { | |
4a00c761 | 6000 | STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; |
adac3a68 | 6001 | DUMP_VECT_SCOPE ("vectorizable_operation"); |
68435eb2 | 6002 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
ebfd146a IR |
6003 | return true; |
6004 | } | |
6005 | ||
67b8dbac | 6006 | /* Transform. */ |
ebfd146a | 6007 | |
73fbfcad | 6008 | if (dump_enabled_p ()) |
78c60e3d | 6009 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6010 | "transform binary/unary operation.\n"); |
383d9c83 | 6011 | |
0eb952ea JJ |
6012 | /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as |
6013 | vectors with unsigned elements, but the result is signed. So, we | |
6014 | need to compute the MINUS_EXPR into a vectype temporary and |
6015 | VIEW_CONVERT_EXPR it into the final vectype_out result. */ | |
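/* A hedged sketch with invented SSA names: for "d_3 = p_1 - q_2"
   (POINTER_DIFF_EXPR), vectype has unsigned lanes and vectype_out
   signed lanes, so the transform emits per copy

     vect_t = MINUS_EXPR <vect_p_1, vect_q_2>;       (in vectype)
     vect_d_3 = VIEW_CONVERT_EXPR<vectype_out>(vect_t);

   using the vec_cvt_dest temporary declared just below. */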
6016 | tree vec_cvt_dest = NULL_TREE; | |
6017 | if (orig_code == POINTER_DIFF_EXPR) | |
7b76867b RB |
6018 | { |
6019 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
6020 | vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
6021 | } | |
6022 | /* Handle def. */ | |
6023 | else | |
6024 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
0eb952ea | 6025 | |
ebfd146a IR |
6026 | /* In case the vectorization factor (VF) is bigger than the number |
6027 | of elements that we can fit in a vectype (nunits), we have to generate | |
6028 | more than one vector stmt, i.e., we need to "unroll" the |
4a00c761 JJ |
6029 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
6030 | from one copy of the vector stmt to the next, in the field | |
6031 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following | |
6032 | stages to find the correct vector defs to be used when vectorizing | |
6033 | stmts that use the defs of the current stmt. The example below | |
6034 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., | |
6035 | we need to create 4 vectorized stmts): | |
6036 | ||
6037 | before vectorization: | |
6038 | RELATED_STMT VEC_STMT | |
6039 | S1: x = memref - - | |
6040 | S2: z = x + 1 - - | |
6041 | ||
6042 | step 1: vectorize stmt S1 (done in vectorizable_load. See more details | |
6043 | there): | |
6044 | RELATED_STMT VEC_STMT | |
6045 | VS1_0: vx0 = memref0 VS1_1 - | |
6046 | VS1_1: vx1 = memref1 VS1_2 - | |
6047 | VS1_2: vx2 = memref2 VS1_3 - | |
6048 | VS1_3: vx3 = memref3 - - | |
6049 | S1: x = load - VS1_0 | |
6050 | S2: z = x + 1 - - | |
6051 | ||
6052 | step2: vectorize stmt S2 (done here): | |
6053 | To vectorize stmt S2 we first need to find the relevant vector | |
6054 | def for the first operand 'x'. This is, as usual, obtained from | |
6055 | the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt | |
6056 | that defines 'x' (S1). This way we find the stmt VS1_0, and the | |
6057 | relevant vector def 'vx0'. Having found 'vx0' we can generate | |
6058 | the vector stmt VS2_0, and as usual, record it in the | |
6059 | STMT_VINFO_VEC_STMT of stmt S2. | |
6060 | When creating the second copy (VS2_1), we obtain the relevant vector | |
6061 | def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of | |
6062 | stmt VS1_0. This way we find the stmt VS1_1 and the relevant | |
6063 | vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a | |
6064 | pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0. | |
6065 | Similarly when creating stmts VS2_2 and VS2_3. This is the resulting | |
6066 | chain of stmts and pointers: | |
6067 | RELATED_STMT VEC_STMT | |
6068 | VS1_0: vx0 = memref0 VS1_1 - | |
6069 | VS1_1: vx1 = memref1 VS1_2 - | |
6070 | VS1_2: vx2 = memref2 VS1_3 - | |
6071 | VS1_3: vx3 = memref3 - - | |
6072 | S1: x = load - VS1_0 | |
6073 | VS2_0: vz0 = vx0 + v1 VS2_1 - | |
6074 | VS2_1: vz1 = vx1 + v1 VS2_2 - | |
6075 | VS2_2: vz2 = vx2 + v1 VS2_3 - | |
6076 | VS2_3: vz3 = vx3 + v1 - - | |
6077 | S2: z = x + 1 - VS2_0 */ | |
ebfd146a IR |
6078 | |
6079 | prev_stmt_info = NULL; | |
6080 | for (j = 0; j < ncopies; j++) | |
6081 | { | |
6082 | /* Handle uses. */ | |
6083 | if (j == 0) | |
4a00c761 | 6084 | { |
d6476f90 | 6085 | if (op_type == binary_op) |
86a91c0a | 6086 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, |
306b0c92 | 6087 | slp_node); |
d6476f90 RB |
6088 | else if (op_type == ternary_op) |
6089 | { | |
6090 | if (slp_node) | |
6091 | { | |
6092 | auto_vec<tree> ops(3); | |
6093 | ops.quick_push (op0); | |
6094 | ops.quick_push (op1); | |
6095 | ops.quick_push (op2); | |
6096 | auto_vec<vec<tree> > vec_defs(3); | |
6097 | vect_get_slp_defs (ops, slp_node, &vec_defs); | |
6098 | vec_oprnds0 = vec_defs[0]; | |
6099 | vec_oprnds1 = vec_defs[1]; | |
6100 | vec_oprnds2 = vec_defs[2]; | |
6101 | } | |
6102 | else | |
6103 | { | |
86a91c0a RS |
6104 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, |
6105 | &vec_oprnds1, NULL); | |
6106 | vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2, | |
6107 | NULL, NULL); | |
d6476f90 RB |
6108 | } |
6109 | } | |
4a00c761 | 6110 | else |
86a91c0a | 6111 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, |
306b0c92 | 6112 | slp_node); |
4a00c761 | 6113 | } |
ebfd146a | 6114 | else |
4a00c761 | 6115 | { |
e4057a39 | 6116 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1); |
4a00c761 JJ |
6117 | if (op_type == ternary_op) |
6118 | { | |
9771b263 | 6119 | tree vec_oprnd = vec_oprnds2.pop (); |
e4057a39 | 6120 | vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 6121 | vec_oprnd)); |
4a00c761 JJ |
6122 | } |
6123 | } | |
6124 | ||
6125 | /* Arguments are ready. Create the new vector stmt. */ | |
e1bd7296 | 6126 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 6127 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
ebfd146a | 6128 | { |
4a00c761 | 6129 | vop1 = ((op_type == binary_op || op_type == ternary_op) |
9771b263 | 6130 | ? vec_oprnds1[i] : NULL_TREE); |
4a00c761 | 6131 | vop2 = ((op_type == ternary_op) |
9771b263 | 6132 | ? vec_oprnds2[i] : NULL_TREE); |
e1bd7296 RS |
6133 | gassign *new_stmt = gimple_build_assign (vec_dest, code, |
6134 | vop0, vop1, vop2); | |
4a00c761 JJ |
6135 | new_temp = make_ssa_name (vec_dest, new_stmt); |
6136 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
6137 | new_stmt_info |
6138 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
0eb952ea JJ |
6139 | if (vec_cvt_dest) |
6140 | { | |
6141 | new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp); | |
e1bd7296 RS |
6142 | gassign *new_stmt |
6143 | = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR, | |
6144 | new_temp); | |
0eb952ea JJ |
6145 | new_temp = make_ssa_name (vec_cvt_dest, new_stmt); |
6146 | gimple_assign_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 6147 | new_stmt_info |
86a91c0a | 6148 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
0eb952ea | 6149 | } |
4a00c761 | 6150 | if (slp_node) |
e1bd7296 | 6151 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
ebfd146a IR |
6152 | } |
6153 | ||
4a00c761 JJ |
6154 | if (slp_node) |
6155 | continue; | |
6156 | ||
6157 | if (j == 0) | |
e1bd7296 | 6158 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
4a00c761 | 6159 | else |
e1bd7296 RS |
6160 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
6161 | prev_stmt_info = new_stmt_info; | |
ebfd146a IR |
6162 | } |
6163 | ||
9771b263 DN |
6164 | vec_oprnds0.release (); |
6165 | vec_oprnds1.release (); | |
6166 | vec_oprnds2.release (); | |
ebfd146a | 6167 | |
ebfd146a IR |
6168 | return true; |
6169 | } | |
6170 | ||
89fa689a | 6171 | /* A helper function to ensure data reference DR_INFO's base alignment. */ |
c716e67f XDL |
6172 | |
6173 | static void | |
89fa689a | 6174 | ensure_base_align (dr_vec_info *dr_info) |
c716e67f | 6175 | { |
89fa689a | 6176 | if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED) |
c716e67f XDL |
6177 | return; |
6178 | ||
89fa689a | 6179 | if (dr_info->base_misaligned) |
c716e67f | 6180 | { |
89fa689a | 6181 | tree base_decl = dr_info->base_decl; |
c716e67f | 6182 | |
89fa689a RS |
6183 | unsigned int align_base_to |
6184 | = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT; | |
f702e7d4 | 6185 | |
428f0c67 | 6186 | if (decl_in_symtab_p (base_decl)) |
f702e7d4 | 6187 | symtab_node::get (base_decl)->increase_alignment (align_base_to); |
428f0c67 JH |
6188 | else |
6189 | { | |
f702e7d4 | 6190 | SET_DECL_ALIGN (base_decl, align_base_to); |
428f0c67 JH |
6191 | DECL_USER_ALIGN (base_decl) = 1; |
6192 | } | |
89fa689a | 6193 | dr_info->base_misaligned = false; |
c716e67f XDL |
6194 | } |
6195 | } | |
6196 | ||
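/* Hedged example: if the vectorizer assumed a 32-byte target
   alignment for accesses to "static double a[N];" whose decl is only
   8-byte aligned, the code above raises the decl's alignment to
   32 * BITS_PER_UNIT bits, going through
   symtab_node::increase_alignment for symtab-visible decls and
   through SET_DECL_ALIGN plus DECL_USER_ALIGN otherwise, so that
   later passes keep the promised alignment. */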
ebfd146a | 6197 | |
44fc7854 BE |
6198 | /* Function get_group_alias_ptr_type. |
6199 | ||
32e8e429 | 6200 | Return the alias type for the group starting at FIRST_STMT_INFO. */ |
44fc7854 BE |
6201 | |
6202 | static tree | |
32e8e429 | 6203 | get_group_alias_ptr_type (stmt_vec_info first_stmt_info) |
44fc7854 BE |
6204 | { |
6205 | struct data_reference *first_dr, *next_dr; | |
44fc7854 | 6206 | |
91987857 RS |
6207 | first_dr = STMT_VINFO_DATA_REF (first_stmt_info); |
6208 | stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info); | |
bffb8014 | 6209 | while (next_stmt_info) |
44fc7854 | 6210 | { |
bffb8014 | 6211 | next_dr = STMT_VINFO_DATA_REF (next_stmt_info); |
44fc7854 BE |
6212 | if (get_alias_set (DR_REF (first_dr)) |
6213 | != get_alias_set (DR_REF (next_dr))) | |
6214 | { | |
6215 | if (dump_enabled_p ()) | |
6216 | dump_printf_loc (MSG_NOTE, vect_location, | |
6217 | "conflicting alias set types.\n"); | |
6218 | return ptr_type_node; | |
6219 | } | |
bffb8014 | 6220 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
44fc7854 BE |
6221 | } |
6222 | return reference_alias_ptr_type (DR_REF (first_dr)); | |
6223 | } | |
6224 | ||
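/* Hedged example: for an interleaved group storing both fields of
   "struct { int a; int b; } *p", all DR_REFs share an alias set and
   the precise reference alias type is returned; a group mixing
   accesses with conflicting alias sets instead degrades to
   ptr_type_node, whose conservative alias set keeps the alias oracle
   correct at the cost of more dependences. */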
6225 | ||
ebfd146a IR |
6226 | /* Function vectorizable_store. |
6227 | ||
32e8e429 RS |
6228 | Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure) |
6229 | that can be vectorized. | |
6230 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
6231 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
6232 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
6233 | |
6234 | static bool | |
32e8e429 | 6235 | vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
6236 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
6237 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 6238 | { |
ebfd146a IR |
6239 | tree data_ref; |
6240 | tree op; | |
6241 | tree vec_oprnd = NULL_TREE; | |
272c6793 | 6242 | tree elem_type; |
ebfd146a | 6243 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 6244 | struct loop *loop = NULL; |
ef4bddc2 | 6245 | machine_mode vec_mode; |
ebfd146a IR |
6246 | tree dummy; |
6247 | enum dr_alignment_support alignment_support_scheme; | |
929b4411 RS |
6248 | enum vect_def_type rhs_dt = vect_unknown_def_type; |
6249 | enum vect_def_type mask_dt = vect_unknown_def_type; | |
ebfd146a IR |
6250 | stmt_vec_info prev_stmt_info = NULL; |
6251 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 6252 | tree dataref_offset = NULL_TREE; |
355fe088 | 6253 | gimple *ptr_incr = NULL; |
ebfd146a IR |
6254 | int ncopies; |
6255 | int j; | |
bffb8014 | 6256 | stmt_vec_info first_stmt_info; |
2de001ee | 6257 | bool grouped_store; |
ebfd146a | 6258 | unsigned int group_size, i; |
6e1aa848 DN |
6259 | vec<tree> oprnds = vNULL; |
6260 | vec<tree> result_chain = vNULL; | |
ebfd146a | 6261 | bool inv_p; |
09dfa495 | 6262 | tree offset = NULL_TREE; |
6e1aa848 | 6263 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 6264 | bool slp = (slp_node != NULL); |
ebfd146a | 6265 | unsigned int vec_num; |
a70d6342 | 6266 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 6267 | vec_info *vinfo = stmt_info->vinfo; |
272c6793 | 6268 | tree aggr_type; |
134c85ca | 6269 | gather_scatter_info gs_info; |
d9f21f6a | 6270 | poly_uint64 vf; |
2de001ee | 6271 | vec_load_store_type vls_type; |
44fc7854 | 6272 | tree ref_type; |
a70d6342 | 6273 | |
a70d6342 | 6274 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
6275 | return false; |
6276 | ||
66c16fd9 RB |
6277 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
6278 | && ! vec_stmt) | |
ebfd146a IR |
6279 | return false; |
6280 | ||
6281 | /* Is vectorizable store? */ | |
6282 | ||
c3a8f964 | 6283 | tree mask = NULL_TREE, mask_vectype = NULL_TREE; |
86a91c0a | 6284 | if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt)) |
c3a8f964 | 6285 | { |
beb456c3 | 6286 | tree scalar_dest = gimple_assign_lhs (assign); |
c3a8f964 RS |
6287 | if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR |
6288 | && is_pattern_stmt_p (stmt_info)) | |
6289 | scalar_dest = TREE_OPERAND (scalar_dest, 0); | |
6290 | if (TREE_CODE (scalar_dest) != ARRAY_REF | |
6291 | && TREE_CODE (scalar_dest) != BIT_FIELD_REF | |
6292 | && TREE_CODE (scalar_dest) != INDIRECT_REF | |
6293 | && TREE_CODE (scalar_dest) != COMPONENT_REF | |
6294 | && TREE_CODE (scalar_dest) != IMAGPART_EXPR | |
6295 | && TREE_CODE (scalar_dest) != REALPART_EXPR | |
6296 | && TREE_CODE (scalar_dest) != MEM_REF) | |
6297 | return false; | |
6298 | } | |
6299 | else | |
6300 | { | |
86a91c0a | 6301 | gcall *call = dyn_cast <gcall *> (stmt_info->stmt); |
f307441a RS |
6302 | if (!call || !gimple_call_internal_p (call)) |
6303 | return false; | |
6304 | ||
6305 | internal_fn ifn = gimple_call_internal_fn (call); | |
6306 | if (!internal_store_fn_p (ifn)) | |
c3a8f964 | 6307 | return false; |
ebfd146a | 6308 | |
c3a8f964 RS |
6309 | if (slp_node != NULL) |
6310 | { | |
6311 | if (dump_enabled_p ()) | |
6312 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6313 | "SLP of masked stores not supported.\n"); | |
6314 | return false; | |
6315 | } | |
6316 | ||
f307441a RS |
6317 | int mask_index = internal_fn_mask_index (ifn); |
6318 | if (mask_index >= 0) | |
6319 | { | |
6320 | mask = gimple_call_arg (call, mask_index); | |
86a91c0a | 6321 | if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt, |
929b4411 | 6322 | &mask_vectype)) |
f307441a RS |
6323 | return false; |
6324 | } | |
c3a8f964 RS |
6325 | } |
6326 | ||
86a91c0a | 6327 | op = vect_get_store_rhs (stmt_info); |
ebfd146a | 6328 | |
fce57248 RS |
6329 | /* Cannot have hybrid store SLP -- that would mean storing to the |
6330 | same location twice. */ | |
6331 | gcc_assert (slp == PURE_SLP_STMT (stmt_info)); | |
6332 | ||
f4d09712 | 6333 | tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; |
4d694b27 | 6334 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
465c8c19 JJ |
6335 | |
6336 | if (loop_vinfo) | |
b17dc4d4 RB |
6337 | { |
6338 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
6339 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
6340 | } | |
6341 | else | |
6342 | vf = 1; | |
465c8c19 JJ |
6343 | |
6344 | /* Multiple types in SLP are handled by creating the appropriate number of | |
6345 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
6346 | case of SLP. */ | |
fce57248 | 6347 | if (slp) |
465c8c19 JJ |
6348 | ncopies = 1; |
6349 | else | |
e8f142e2 | 6350 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
465c8c19 JJ |
6351 | |
6352 | gcc_assert (ncopies >= 1); | |
6353 | ||
6354 | /* FORNOW. This restriction should be relaxed. */ | |
86a91c0a | 6355 | if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1) |
465c8c19 JJ |
6356 | { |
6357 | if (dump_enabled_p ()) | |
6358 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6359 | "multiple types in nested loop.\n"); | |
6360 | return false; | |
6361 | } | |
6362 | ||
86a91c0a | 6363 | if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type)) |
f4d09712 KY |
6364 | return false; |
6365 | ||
272c6793 | 6366 | elem_type = TREE_TYPE (vectype); |
ebfd146a | 6367 | vec_mode = TYPE_MODE (vectype); |
7b7b1813 | 6368 | |
ebfd146a IR |
6369 | if (!STMT_VINFO_DATA_REF (stmt_info)) |
6370 | return false; | |
6371 | ||
2de001ee | 6372 | vect_memory_access_type memory_access_type; |
86a91c0a | 6373 | if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies, |
2de001ee RS |
6374 | &memory_access_type, &gs_info)) |
6375 | return false; | |
3bab6342 | 6376 | |
c3a8f964 RS |
6377 | if (mask) |
6378 | { | |
7e11fc7f RS |
6379 | if (memory_access_type == VMAT_CONTIGUOUS) |
6380 | { | |
6381 | if (!VECTOR_MODE_P (vec_mode) | |
6382 | || !can_vec_mask_load_store_p (vec_mode, | |
6383 | TYPE_MODE (mask_vectype), false)) | |
6384 | return false; | |
6385 | } | |
f307441a RS |
6386 | else if (memory_access_type != VMAT_LOAD_STORE_LANES |
6387 | && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl)) | |
c3a8f964 RS |
6388 | { |
6389 | if (dump_enabled_p ()) | |
6390 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6391 | "unsupported access type for masked store.\n"); | |
6392 | return false; | |
6393 | } | |
c3a8f964 RS |
6394 | } |
6395 | else | |
6396 | { | |
6397 | /* FORNOW. In some cases we can vectorize even if the data type is |
6398 | not supported (e.g. array initialization with 0). */ |
6399 | if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing) | |
6400 | return false; | |
6401 | } | |
6402 | ||
89fa689a | 6403 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL; |
f307441a | 6404 | grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
b5ec4de7 RS |
6405 | && memory_access_type != VMAT_GATHER_SCATTER |
6406 | && (slp || memory_access_type != VMAT_CONTIGUOUS)); | |
7cfb4d93 RS |
6407 | if (grouped_store) |
6408 | { | |
bffb8014 | 6409 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
89fa689a | 6410 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
bffb8014 | 6411 | group_size = DR_GROUP_SIZE (first_stmt_info); |
7cfb4d93 RS |
6412 | } |
6413 | else | |
6414 | { | |
bffb8014 | 6415 | first_stmt_info = stmt_info; |
89fa689a | 6416 | first_dr_info = dr_info; |
7cfb4d93 RS |
6417 | group_size = vec_num = 1; |
6418 | } | |
6419 | ||
ebfd146a IR |
6420 | if (!vec_stmt) /* transformation not required. */ |
6421 | { | |
2de001ee | 6422 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
7cfb4d93 RS |
6423 | |
6424 | if (loop_vinfo | |
6425 | && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) | |
6426 | check_load_store_masking (loop_vinfo, vectype, vls_type, group_size, | |
bfaa08b7 | 6427 | memory_access_type, &gs_info); |
7cfb4d93 | 6428 | |
ebfd146a | 6429 | STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; |
68435eb2 RB |
6430 | vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type, |
6431 | vls_type, slp_node, cost_vec); | |
ebfd146a IR |
6432 | return true; |
6433 | } | |
2de001ee | 6434 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
ebfd146a | 6435 | |
67b8dbac | 6436 | /* Transform. */ |
ebfd146a | 6437 | |
89fa689a | 6438 | ensure_base_align (dr_info); |
c716e67f | 6439 | |
f307441a | 6440 | if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
3bab6342 | 6441 | { |
c3a8f964 | 6442 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src; |
134c85ca | 6443 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
6444 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
6445 | tree ptr, mask, var, scale, perm_mask = NULL_TREE; | |
6446 | edge pe = loop_preheader_edge (loop); | |
6447 | gimple_seq seq; | |
6448 | basic_block new_bb; | |
6449 | enum { NARROW, NONE, WIDEN } modifier; | |
4d694b27 RS |
6450 | poly_uint64 scatter_off_nunits |
6451 | = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); | |
3bab6342 | 6452 | |
4d694b27 | 6453 | if (known_eq (nunits, scatter_off_nunits)) |
3bab6342 | 6454 | modifier = NONE; |
4d694b27 | 6455 | else if (known_eq (nunits * 2, scatter_off_nunits)) |
3bab6342 | 6456 | { |
3bab6342 AT |
6457 | modifier = WIDEN; |
6458 | ||
4d694b27 RS |
6459 | /* Currently gathers and scatters are only supported for |
6460 | fixed-length vectors. */ | |
6461 | unsigned int count = scatter_off_nunits.to_constant (); | |
6462 | vec_perm_builder sel (count, count, 1); | |
6463 | for (i = 0; i < (unsigned int) count; ++i) | |
6464 | sel.quick_push (i | (count / 2)); | |
3bab6342 | 6465 | |
4d694b27 | 6466 | vec_perm_indices indices (sel, 1, count); |
e3342de4 RS |
6467 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, |
6468 | indices); | |
3bab6342 AT |
6469 | gcc_assert (perm_mask != NULL_TREE); |
6470 | } | |
4d694b27 | 6471 | else if (known_eq (nunits, scatter_off_nunits * 2)) |
3bab6342 | 6472 | { |
3bab6342 AT |
6473 | modifier = NARROW; |
6474 | ||
4d694b27 RS |
6475 | /* Currently gathers and scatters are only supported for |
6476 | fixed-length vectors. */ | |
6477 | unsigned int count = nunits.to_constant (); | |
6478 | vec_perm_builder sel (count, count, 1); | |
6479 | for (i = 0; i < (unsigned int) count; ++i) | |
6480 | sel.quick_push (i | (count / 2)); | |
3bab6342 | 6481 | |
4d694b27 | 6482 | vec_perm_indices indices (sel, 2, count); |
e3342de4 | 6483 | perm_mask = vect_gen_perm_mask_checked (vectype, indices); |
3bab6342 AT |
6484 | gcc_assert (perm_mask != NULL_TREE); |
6485 | ncopies *= 2; | |
6486 | } | |
6487 | else | |
6488 | gcc_unreachable (); | |
6489 | ||
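/* Hedged recap of the three cases above: equal lane counts need no
   massaging (NONE); twice as many offset lanes as data lanes selects
   WIDEN; twice as many data lanes as offset lanes selects NARROW,
   which doubles ncopies and lets the odd (j & 1) phases below use
   PERM_MASK to move the upper half of the vectors into position. */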
134c85ca | 6490 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
6491 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
6492 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6493 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6494 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6495 | scaletype = TREE_VALUE (arglist); | |
6496 | ||
6497 | gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE | |
6498 | && TREE_CODE (rettype) == VOID_TYPE); | |
6499 | ||
134c85ca | 6500 | ptr = fold_convert (ptrtype, gs_info.base); |
3bab6342 AT |
6501 | if (!is_gimple_min_invariant (ptr)) |
6502 | { | |
6503 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
6504 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
6505 | gcc_assert (!new_bb); | |
6506 | } | |
6507 | ||
6508 | /* Currently we support only unconditional scatter stores, | |
6509 | so mask should be all ones. */ | |
6510 | mask = build_int_cst (masktype, -1); | |
86a91c0a | 6511 | mask = vect_init_vector (stmt_info, mask, masktype, NULL); |
3bab6342 | 6512 | |
134c85ca | 6513 | scale = build_int_cst (scaletype, gs_info.scale); |
3bab6342 AT |
6514 | |
6515 | prev_stmt_info = NULL; | |
6516 | for (j = 0; j < ncopies; ++j) | |
6517 | { | |
6518 | if (j == 0) | |
6519 | { | |
6520 | src = vec_oprnd1 | |
86a91c0a | 6521 | = vect_get_vec_def_for_operand (op, stmt_info); |
3bab6342 | 6522 | op = vec_oprnd0 |
86a91c0a | 6523 | = vect_get_vec_def_for_operand (gs_info.offset, stmt_info); |
3bab6342 AT |
6524 | } |
6525 | else if (modifier != NONE && (j & 1)) | |
6526 | { | |
6527 | if (modifier == WIDEN) | |
6528 | { | |
6529 | src = vec_oprnd1 | |
e4057a39 | 6530 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
3bab6342 | 6531 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask, |
86a91c0a | 6532 | stmt_info, gsi); |
3bab6342 AT |
6533 | } |
6534 | else if (modifier == NARROW) | |
6535 | { | |
6536 | src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask, | |
86a91c0a | 6537 | stmt_info, gsi); |
3bab6342 | 6538 | op = vec_oprnd0 |
e4057a39 | 6539 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
3bab6342 AT |
6540 | } |
6541 | else | |
6542 | gcc_unreachable (); | |
6543 | } | |
6544 | else | |
6545 | { | |
6546 | src = vec_oprnd1 | |
e4057a39 | 6547 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
3bab6342 | 6548 | op = vec_oprnd0 |
e4057a39 | 6549 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
3bab6342 AT |
6550 | } |
6551 | ||
6552 | if (!useless_type_conversion_p (srctype, TREE_TYPE (src))) | |
6553 | { | |
928686b1 RS |
6554 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)), |
6555 | TYPE_VECTOR_SUBPARTS (srctype))); | |
0e22bb5a | 6556 | var = vect_get_new_ssa_name (srctype, vect_simple_var); |
3bab6342 | 6557 | src = build1 (VIEW_CONVERT_EXPR, srctype, src); |
e1bd7296 RS |
6558 | gassign *new_stmt |
6559 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, src); | |
86a91c0a | 6560 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 AT |
6561 | src = var; |
6562 | } | |
6563 | ||
6564 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
6565 | { | |
928686b1 RS |
6566 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), |
6567 | TYPE_VECTOR_SUBPARTS (idxtype))); | |
0e22bb5a | 6568 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
3bab6342 | 6569 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
e1bd7296 RS |
6570 | gassign *new_stmt |
6571 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); | |
86a91c0a | 6572 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 AT |
6573 | op = var; |
6574 | } | |
6575 | ||
e1bd7296 | 6576 | gcall *new_stmt |
134c85ca | 6577 | = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale); |
e1bd7296 | 6578 | stmt_vec_info new_stmt_info |
86a91c0a | 6579 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 | 6580 | |
ddf98a96 | 6581 | if (prev_stmt_info == NULL) |
e1bd7296 | 6582 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
3bab6342 | 6583 | else |
e1bd7296 RS |
6584 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
6585 | prev_stmt_info = new_stmt_info; | |
3bab6342 AT |
6586 | } |
6587 | return true; | |
6588 | } | |
6589 | ||
f307441a | 6590 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
bffb8014 | 6591 | DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++; |
ebfd146a | 6592 | |
f307441a RS |
6593 | if (grouped_store) |
6594 | { | |
ebfd146a | 6595 | /* FORNOW */ |
86a91c0a | 6596 | gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info)); |
ebfd146a IR |
6597 | |
6598 | /* We vectorize all the stmts of the interleaving group when we | |
6599 | reach the last stmt in the group. */ | |
bffb8014 RS |
6600 | if (DR_GROUP_STORE_COUNT (first_stmt_info) |
6601 | < DR_GROUP_SIZE (first_stmt_info) | |
ebfd146a IR |
6602 | && !slp) |
6603 | { | |
6604 | *vec_stmt = NULL; | |
6605 | return true; | |
6606 | } | |
6607 | ||
6608 | if (slp) | |
4b5caab7 | 6609 | { |
0d0293ac | 6610 | grouped_store = false; |
4b5caab7 IR |
6611 | /* VEC_NUM is the number of vect stmts to be created for this |
6612 | group. */ | |
6613 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
bffb8014 RS |
6614 | first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
6615 | gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info) | |
6616 | == first_stmt_info); | |
89fa689a | 6617 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
bffb8014 | 6618 | op = vect_get_store_rhs (first_stmt_info); |
4b5caab7 | 6619 | } |
ebfd146a | 6620 | else |
4b5caab7 IR |
6621 | /* VEC_NUM is the number of vect stmts to be created for this |
6622 | group. */ | |
ebfd146a | 6623 | vec_num = group_size; |
44fc7854 | 6624 | |
bffb8014 | 6625 | ref_type = get_group_alias_ptr_type (first_stmt_info); |
ebfd146a | 6626 | } |
b8698a0f | 6627 | else |
89fa689a | 6628 | ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); |
b8698a0f | 6629 | |
73fbfcad | 6630 | if (dump_enabled_p ()) |
78c60e3d | 6631 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6632 | "transform store. ncopies = %d\n", ncopies); |
ebfd146a | 6633 | |
2de001ee RS |
6634 | if (memory_access_type == VMAT_ELEMENTWISE |
6635 | || memory_access_type == VMAT_STRIDED_SLP) | |
f2e2a985 MM |
6636 | { |
6637 | gimple_stmt_iterator incr_gsi; | |
6638 | bool insert_after; | |
355fe088 | 6639 | gimple *incr; |
f2e2a985 MM |
6640 | tree offvar; |
6641 | tree ivstep; | |
6642 | tree running_off; | |
f2e2a985 MM |
6643 | tree stride_base, stride_step, alias_off; |
6644 | tree vec_oprnd; | |
f502d50e | 6645 | unsigned int g; |
4d694b27 RS |
6646 | /* Checked by get_load_store_type. */ |
6647 | unsigned int const_nunits = nunits.to_constant (); | |
f2e2a985 | 6648 | |
7cfb4d93 | 6649 | gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); |
86a91c0a | 6650 | gcc_assert (!nested_in_vect_loop_p (loop, stmt_info)); |
f2e2a985 MM |
6651 | |
6652 | stride_base | |
6653 | = fold_build_pointer_plus | |
89fa689a | 6654 | (DR_BASE_ADDRESS (first_dr_info->dr), |
f2e2a985 | 6655 | size_binop (PLUS_EXPR, |
89fa689a RS |
6656 | convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)), |
6657 | convert_to_ptrofftype (DR_INIT (first_dr_info->dr)))); | |
6658 | stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr)); | |
f2e2a985 MM |
6659 | |
6660 | /* For a store with loop-invariant (but other than power-of-2) | |
6661 | stride (i.e. not a grouped access) like so: | |
6662 | ||
6663 | for (i = 0; i < n; i += stride) | |
6664 | array[i] = ...; | |
6665 | ||
6666 | we generate a new induction variable and new stores from | |
6667 | the components of the (vectorized) rhs: | |
6668 | ||
6669 | for (j = 0; ; j += VF*stride) | |
6670 | vectemp = ...; | |
6671 | tmp1 = vectemp[0]; | |
6672 | array[j] = tmp1; | |
6673 | tmp2 = vectemp[1]; | |
6674 | array[j + stride] = tmp2; | |
6675 | ... | |
6676 | */ | |
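/* Editorial illustration of the comment above (hypothetical names,
   assuming an int array A, stride 3 and four ints per vector):

     for (i = 0; i < n; i += 3)
       A[i] = f (i);

   is laid out per vector statement as

     for (j = 0; ; j += 4 * 3)
       vectemp = ...;
       A[j]     = vectemp[0];
       A[j + 3] = vectemp[1];
       A[j + 6] = vectemp[2];
       A[j + 9] = vectemp[3];

   with the running pointer maintained by the induction variable
   created below.  */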
6677 | ||
4d694b27 | 6678 | unsigned nstores = const_nunits; |
b17dc4d4 | 6679 | unsigned lnel = 1; |
cee62fee | 6680 | tree ltype = elem_type; |
04199738 | 6681 | tree lvectype = vectype; |
cee62fee MM |
6682 | if (slp) |
6683 | { | |
4d694b27 RS |
6684 | if (group_size < const_nunits |
6685 | && const_nunits % group_size == 0) | |
b17dc4d4 | 6686 | { |
4d694b27 | 6687 | nstores = const_nunits / group_size; |
b17dc4d4 RB |
6688 | lnel = group_size; |
6689 | ltype = build_vector_type (elem_type, group_size); | |
04199738 RB |
6690 | lvectype = vectype; |
6691 | ||
6692 | /* First check whether the vec_extract optab supports extraction
6693 | of vector elts directly; if it doesn't, try an integer mode. */
b397965c | 6694 | scalar_mode elmode = SCALAR_TYPE_MODE (elem_type); |
9da15d40 RS |
6695 | machine_mode vmode; |
6696 | if (!mode_for_vector (elmode, group_size).exists (&vmode) | |
6697 | || !VECTOR_MODE_P (vmode) | |
414fef4e | 6698 | || !targetm.vector_mode_supported_p (vmode) |
04199738 RB |
6699 | || (convert_optab_handler (vec_extract_optab, |
6700 | TYPE_MODE (vectype), vmode) | |
6701 | == CODE_FOR_nothing)) | |
6702 | { | |
6703 | /* Try to avoid emitting an extract of vector elements | |
6704 | by performing the extracts using an integer type of the | |
6705 | same size, extracting from a vector of those and then | |
6706 | re-interpreting it as the original vector type if | |
6707 | supported. */ | |
6708 | unsigned lsize | |
6709 | = group_size * GET_MODE_BITSIZE (elmode); | |
fffbab82 | 6710 | elmode = int_mode_for_size (lsize, 0).require (); |
4d694b27 | 6711 | unsigned int lnunits = const_nunits / group_size; |
04199738 RB |
6712 | /* If we can't construct such a vector fall back to |
6713 | element extracts from the original vector type and | |
6714 | element size stores. */ | |
4d694b27 | 6715 | if (mode_for_vector (elmode, lnunits).exists (&vmode) |
9da15d40 | 6716 | && VECTOR_MODE_P (vmode) |
414fef4e | 6717 | && targetm.vector_mode_supported_p (vmode) |
04199738 RB |
6718 | && (convert_optab_handler (vec_extract_optab, |
6719 | vmode, elmode) | |
6720 | != CODE_FOR_nothing)) | |
6721 | { | |
4d694b27 | 6722 | nstores = lnunits; |
04199738 RB |
6723 | lnel = group_size; |
6724 | ltype = build_nonstandard_integer_type (lsize, 1); | |
6725 | lvectype = build_vector_type (ltype, nstores); | |
6726 | } | |
6727 | /* Else fall back to vector extraction anyway. | |
6728 | Fewer stores are more important than avoiding spilling | |
6729 | of the vector we extract from. Compared to the | |
6730 | construction case in vectorizable_load no store-forwarding | |
6731 | issue exists here for reasonable archs. */ | |
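/* Editorial illustration of the integer-mode fallback above, assuming
   a vector of eight shorts (V8HI) and group_size 2: LSIZE is 32, so
   ELMODE becomes SImode and LVECTYPE a four-element integer vector;
   the eight shorts are then viewed as four ints, and each extracted
   int stores one two-short group with a single scalar store.  */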
6732 | } | |
b17dc4d4 | 6733 | } |
4d694b27 RS |
6734 | else if (group_size >= const_nunits |
6735 | && group_size % const_nunits == 0) | |
b17dc4d4 RB |
6736 | { |
6737 | nstores = 1; | |
4d694b27 | 6738 | lnel = const_nunits; |
b17dc4d4 | 6739 | ltype = vectype; |
04199738 | 6740 | lvectype = vectype; |
b17dc4d4 | 6741 | } |
cee62fee MM |
6742 | ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type)); |
6743 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
6744 | } | |
6745 | ||
f2e2a985 MM |
6746 | ivstep = stride_step; |
6747 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep, | |
b17dc4d4 | 6748 | build_int_cst (TREE_TYPE (ivstep), vf)); |
f2e2a985 MM |
6749 | |
6750 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
6751 | ||
b210f45f RB |
6752 | stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base); |
6753 | ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep); | |
f2e2a985 MM |
6754 | create_iv (stride_base, ivstep, NULL, |
6755 | loop, &incr_gsi, insert_after, | |
6756 | &offvar, NULL); | |
6757 | incr = gsi_stmt (incr_gsi); | |
4fbeb363 | 6758 | loop_vinfo->add_stmt (incr); |
f2e2a985 | 6759 | |
b210f45f | 6760 | stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step); |
f2e2a985 MM |
6761 | |
6762 | prev_stmt_info = NULL; | |
44fc7854 | 6763 | alias_off = build_int_cst (ref_type, 0); |
bffb8014 | 6764 | stmt_vec_info next_stmt_info = first_stmt_info; |
f502d50e | 6765 | for (g = 0; g < group_size; g++) |
f2e2a985 | 6766 | { |
f502d50e MM |
6767 | running_off = offvar; |
6768 | if (g) | |
f2e2a985 | 6769 | { |
f502d50e MM |
6770 | tree size = TYPE_SIZE_UNIT (ltype); |
6771 | tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g), | |
f2e2a985 | 6772 | size); |
f502d50e | 6773 | tree newoff = copy_ssa_name (running_off, NULL); |
f2e2a985 | 6774 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, |
f502d50e | 6775 | running_off, pos); |
86a91c0a | 6776 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
f2e2a985 | 6777 | running_off = newoff; |
f502d50e | 6778 | } |
b17dc4d4 RB |
6779 | unsigned int group_el = 0; |
6780 | unsigned HOST_WIDE_INT | |
6781 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
f502d50e MM |
6782 | for (j = 0; j < ncopies; j++) |
6783 | { | |
c3a8f964 | 6784 | /* We've set op and dt above, from vect_get_store_rhs, |
bffb8014 | 6785 | and first_stmt_info == stmt_info. */ |
f502d50e MM |
6786 | if (j == 0) |
6787 | { | |
6788 | if (slp) | |
6789 | { | |
86a91c0a RS |
6790 | vect_get_vec_defs (op, NULL_TREE, stmt_info, |
6791 | &vec_oprnds, NULL, slp_node); | |
f502d50e MM |
6792 | vec_oprnd = vec_oprnds[0]; |
6793 | } | |
6794 | else | |
6795 | { | |
bffb8014 RS |
6796 | op = vect_get_store_rhs (next_stmt_info); |
6797 | vec_oprnd = vect_get_vec_def_for_operand | |
6798 | (op, next_stmt_info); | |
f502d50e MM |
6799 | } |
6800 | } | |
f2e2a985 | 6801 | else |
f502d50e MM |
6802 | { |
6803 | if (slp) | |
6804 | vec_oprnd = vec_oprnds[j]; | |
6805 | else | |
e4057a39 RS |
6806 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, |
6807 | vec_oprnd); | |
f502d50e | 6808 | } |
04199738 RB |
6809 | /* Pun the vector to extract from if necessary. */ |
6810 | if (lvectype != vectype) | |
6811 | { | |
6812 | tree tem = make_ssa_name (lvectype); | |
6813 | gimple *pun | |
6814 | = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
6815 | lvectype, vec_oprnd)); | |
86a91c0a | 6816 | vect_finish_stmt_generation (stmt_info, pun, gsi); |
04199738 RB |
6817 | vec_oprnd = tem; |
6818 | } | |
f502d50e MM |
6819 | for (i = 0; i < nstores; i++) |
6820 | { | |
6821 | tree newref, newoff; | |
355fe088 | 6822 | gimple *incr, *assign; |
f502d50e MM |
6823 | tree size = TYPE_SIZE (ltype); |
6824 | /* Extract the i'th component. */ | |
6825 | tree pos = fold_build2 (MULT_EXPR, bitsizetype, | |
6826 | bitsize_int (i), size); | |
6827 | tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, | |
6828 | size, pos); | |
6829 | ||
6830 | elem = force_gimple_operand_gsi (gsi, elem, true, | |
6831 | NULL_TREE, true, | |
6832 | GSI_SAME_STMT); | |
6833 | ||
b17dc4d4 RB |
6834 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
6835 | group_el * elsz); | |
f502d50e | 6836 | newref = build2 (MEM_REF, ltype, |
b17dc4d4 | 6837 | running_off, this_off); |
89fa689a | 6838 | vect_copy_ref_info (newref, DR_REF (first_dr_info->dr)); |
f502d50e MM |
6839 | |
6840 | /* And store it to *running_off. */ | |
6841 | assign = gimple_build_assign (newref, elem); | |
e1bd7296 | 6842 | stmt_vec_info assign_info |
86a91c0a | 6843 | = vect_finish_stmt_generation (stmt_info, assign, gsi); |
f502d50e | 6844 | |
b17dc4d4 RB |
6845 | group_el += lnel; |
6846 | if (! slp | |
6847 | || group_el == group_size) | |
6848 | { | |
6849 | newoff = copy_ssa_name (running_off, NULL); | |
6850 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
6851 | running_off, stride_step); | |
86a91c0a | 6852 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
f502d50e | 6853 | |
b17dc4d4 RB |
6854 | running_off = newoff; |
6855 | group_el = 0; | |
6856 | } | |
225ce44b RB |
6857 | if (g == group_size - 1 |
6858 | && !slp) | |
f502d50e MM |
6859 | { |
6860 | if (j == 0 && i == 0) | |
225ce44b | 6861 | STMT_VINFO_VEC_STMT (stmt_info) |
e1bd7296 | 6862 | = *vec_stmt = assign_info; |
f502d50e | 6863 | else |
e1bd7296 RS |
6864 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info; |
6865 | prev_stmt_info = assign_info; | |
f502d50e MM |
6866 | } |
6867 | } | |
f2e2a985 | 6868 | } |
bffb8014 | 6869 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
b17dc4d4 RB |
6870 | if (slp) |
6871 | break; | |
f2e2a985 | 6872 | } |
778dd3b6 RB |
6873 | |
6874 | vec_oprnds.release (); | |
f2e2a985 MM |
6875 | return true; |
6876 | } | |
6877 | ||
8c681247 | 6878 | auto_vec<tree> dr_chain (group_size); |
9771b263 | 6879 | oprnds.create (group_size); |
ebfd146a | 6880 | |
89fa689a RS |
6881 | alignment_support_scheme |
6882 | = vect_supportable_dr_alignment (first_dr_info, false); | |
ebfd146a | 6883 | gcc_assert (alignment_support_scheme); |
70088b95 RS |
6884 | vec_loop_masks *loop_masks |
6885 | = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) | |
6886 | ? &LOOP_VINFO_MASKS (loop_vinfo) | |
6887 | : NULL); | |
272c6793 | 6888 | /* Targets with store-lane instructions must not require explicit |
c3a8f964 RS |
6889 | realignment. vect_supportable_dr_alignment always returns either |
6890 | dr_aligned or dr_unaligned_supported for masked operations. */ | |
7cfb4d93 RS |
6891 | gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES |
6892 | && !mask | |
70088b95 | 6893 | && !loop_masks) |
272c6793 RS |
6894 | || alignment_support_scheme == dr_aligned |
6895 | || alignment_support_scheme == dr_unaligned_supported); | |
6896 | ||
62da9e14 RS |
6897 | if (memory_access_type == VMAT_CONTIGUOUS_DOWN |
6898 | || memory_access_type == VMAT_CONTIGUOUS_REVERSE) | |
09dfa495 BM |
6899 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
6900 | ||
f307441a RS |
6901 | tree bump; |
6902 | tree vec_offset = NULL_TREE; | |
6903 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
6904 | { | |
6905 | aggr_type = NULL_TREE; | |
6906 | bump = NULL_TREE; | |
6907 | } | |
6908 | else if (memory_access_type == VMAT_GATHER_SCATTER) | |
6909 | { | |
6910 | aggr_type = elem_type; | |
86a91c0a | 6911 | vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info, |
f307441a RS |
6912 | &bump, &vec_offset); |
6913 | } | |
272c6793 | 6914 | else |
f307441a RS |
6915 | { |
6916 | if (memory_access_type == VMAT_LOAD_STORE_LANES) | |
6917 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); | |
6918 | else | |
6919 | aggr_type = vectype; | |
89fa689a RS |
6920 | bump = vect_get_data_ptr_increment (dr_info, aggr_type, |
6921 | memory_access_type); | |
f307441a | 6922 | } |
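/* Editorial note: the increment chosen here is TYPE_SIZE_UNIT of
   AGGR_TYPE, e.g. 16 bytes for a single V4SI vector, or the whole
   array size for store-lanes groups; vect_get_data_ptr_increment
   negates it when the scalar step is negative, as for
   VMAT_CONTIGUOUS_REVERSE.  */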
ebfd146a | 6923 | |
c3a8f964 RS |
6924 | if (mask) |
6925 | LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true; | |
6926 | ||
ebfd146a IR |
6927 | /* In case the vectorization factor (VF) is bigger than the number |
6928 | of elements that we can fit in a vectype (nunits), we have to generate | |
6929 | more than one vector stmt, i.e. we need to "unroll" the
6930 | vector stmt by a factor VF/nunits. For more details see documentation in
6931 | vect_get_vec_def_for_stmt_copy. */
6931 | vect_get_vec_def_for_copy_stmt. */ |
6932 | ||
0d0293ac | 6933 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
6934 | |
6935 | S1: &base + 2 = x2 | |
6936 | S2: &base = x0 | |
6937 | S3: &base + 1 = x1 | |
6938 | S4: &base + 3 = x3 | |
6939 | ||
6940 | We create vectorized stores starting from base address (the access of the | |
6941 | first stmt in the chain (S2 in the above example), when the last store stmt | |
6942 | of the chain (S4) is reached: | |
6943 | ||
6944 | VS1: &base = vx2 | |
6945 | VS2: &base + vec_size*1 = vx0 | |
6946 | VS3: &base + vec_size*2 = vx1 | |
6947 | VS4: &base + vec_size*3 = vx3 | |
6948 | ||
6949 | Then permutation statements are generated: | |
6950 | ||
3fcc1b55 JJ |
6951 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} > |
6952 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} > | |
ebfd146a | 6953 | ... |
b8698a0f | 6954 | |
ebfd146a IR |
6955 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts |
6956 | (the order of the data-refs in the output of vect_permute_store_chain | |
6957 | corresponds to the order of scalar stmts in the interleaving chain - see | |
6958 | the documentation of vect_permute_store_chain()). | |
6959 | ||
6960 | In case of both multiple types and interleaving, the above vector stores and | |
ff802fa1 | 6961 | permutation stmts are created for every copy. The result vector stmts are |
ebfd146a | 6962 | put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding |
b8698a0f | 6963 | STMT_VINFO_RELATED_STMT for the next copies. |
ebfd146a IR |
6964 | */ |
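/* Editorial note on the selectors above: in a VEC_PERM_EXPR over
   8-element vectors, indices 0..7 pick elements of the first operand
   and 8..15 elements of the second, so {0, 8, 1, 9, 2, 10, 3, 11}
   interleaves the low halves of vx0 and vx3, and the second mask
   interleaves their high halves.  */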
6965 | ||
6966 | prev_stmt_info = NULL; | |
c3a8f964 | 6967 | tree vec_mask = NULL_TREE; |
ebfd146a IR |
6968 | for (j = 0; j < ncopies; j++) |
6969 | { | |
e1bd7296 | 6970 | stmt_vec_info new_stmt_info; |
ebfd146a IR |
6971 | if (j == 0) |
6972 | { | |
6973 | if (slp) | |
6974 | { | |
6975 | /* Get vectorized arguments for SLP_NODE. */ | |
86a91c0a RS |
6976 | vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds, |
6977 | NULL, slp_node); | |
ebfd146a | 6978 | |
9771b263 | 6979 | vec_oprnd = vec_oprnds[0]; |
ebfd146a IR |
6980 | } |
6981 | else | |
6982 | { | |
b8698a0f L |
6983 | /* For interleaved stores we collect vectorized defs for all the |
6984 | stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then | |
6985 | used as an input to vect_permute_store_chain(), and OPRNDS as | |
ebfd146a IR |
6986 | an input to vect_get_vec_def_for_stmt_copy() for the next copy. |
6987 | ||
2c53b149 | 6988 | If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a | 6989 | OPRNDS are of size 1. */ |
bffb8014 | 6990 | stmt_vec_info next_stmt_info = first_stmt_info; |
ebfd146a IR |
6991 | for (i = 0; i < group_size; i++) |
6992 | { | |
b8698a0f | 6993 | /* Since gaps are not supported for interleaved stores, |
2c53b149 | 6994 | DR_GROUP_SIZE is the exact number of stmts in the chain. |
bffb8014 RS |
6995 | Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case |
6996 | that there is no interleaving, DR_GROUP_SIZE is 1, | |
6997 | and only one iteration of the loop will be executed. */ | |
6998 | op = vect_get_store_rhs (next_stmt_info); | |
6999 | vec_oprnd = vect_get_vec_def_for_operand | |
7000 | (op, next_stmt_info); | |
9771b263 DN |
7001 | dr_chain.quick_push (vec_oprnd); |
7002 | oprnds.quick_push (vec_oprnd); | |
bffb8014 | 7003 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
ebfd146a | 7004 | } |
c3a8f964 | 7005 | if (mask) |
86a91c0a | 7006 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, |
c3a8f964 | 7007 | mask_vectype); |
ebfd146a IR |
7008 | } |
7009 | ||
7010 | /* We should have caught mismatched types earlier. */
7011 | gcc_assert (useless_type_conversion_p (vectype, | |
7012 | TREE_TYPE (vec_oprnd))); | |
74bf76ed JJ |
7013 | bool simd_lane_access_p |
7014 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
7015 | if (simd_lane_access_p | |
89fa689a RS |
7016 | && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR |
7017 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) | |
7018 | && integer_zerop (DR_OFFSET (first_dr_info->dr)) | |
7019 | && integer_zerop (DR_INIT (first_dr_info->dr)) | |
74bf76ed | 7020 | && alias_sets_conflict_p (get_alias_set (aggr_type), |
44fc7854 | 7021 | get_alias_set (TREE_TYPE (ref_type)))) |
74bf76ed | 7022 | { |
89fa689a | 7023 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); |
44fc7854 | 7024 | dataref_offset = build_int_cst (ref_type, 0); |
8928eff3 | 7025 | inv_p = false; |
74bf76ed | 7026 | } |
f307441a RS |
7027 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
7028 | { | |
86a91c0a | 7029 | vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, |
f307441a RS |
7030 | &dataref_ptr, &vec_offset); |
7031 | inv_p = false; | |
7032 | } | |
74bf76ed JJ |
7033 | else |
7034 | dataref_ptr | |
bffb8014 | 7035 | = vect_create_data_ref_ptr (first_stmt_info, aggr_type, |
74bf76ed | 7036 | simd_lane_access_p ? loop : NULL, |
09dfa495 | 7037 | offset, &dummy, gsi, &ptr_incr, |
f307441a RS |
7038 | simd_lane_access_p, &inv_p, |
7039 | NULL_TREE, bump); | |
a70d6342 | 7040 | gcc_assert (bb_vinfo || !inv_p); |
ebfd146a | 7041 | } |
b8698a0f | 7042 | else |
ebfd146a | 7043 | { |
b8698a0f L |
7044 | /* For interleaved stores we created vectorized defs for all the |
7045 | defs stored in OPRNDS in the previous iteration (previous copy). | |
7046 | DR_CHAIN is then used as an input to vect_permute_store_chain(), | |
ebfd146a IR |
7047 | and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the |
7048 | next copy. | |
2c53b149 | 7049 | If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a IR |
7050 | OPRNDS are of size 1. */ |
7051 | for (i = 0; i < group_size; i++) | |
7052 | { | |
9771b263 | 7053 | op = oprnds[i]; |
e4057a39 | 7054 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op); |
9771b263 DN |
7055 | dr_chain[i] = vec_oprnd; |
7056 | oprnds[i] = vec_oprnd; | |
ebfd146a | 7057 | } |
c3a8f964 | 7058 | if (mask) |
e4057a39 | 7059 | vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask); |
74bf76ed JJ |
7060 | if (dataref_offset) |
7061 | dataref_offset | |
f307441a RS |
7062 | = int_const_binop (PLUS_EXPR, dataref_offset, bump); |
7063 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
e4057a39 | 7064 | vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); |
74bf76ed | 7065 | else |
86a91c0a RS |
7066 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
7067 | stmt_info, bump); | |
ebfd146a IR |
7068 | } |
7069 | ||
2de001ee | 7070 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 7071 | { |
272c6793 | 7072 | tree vec_array; |
267d3070 | 7073 | |
3ba4ff41 | 7074 | /* Get an array into which we can store the individual vectors. */ |
272c6793 | 7075 | vec_array = create_vector_array (vectype, vec_num); |
3ba4ff41 RS |
7076 | |
7077 | /* Invalidate the current contents of VEC_ARRAY. This should | |
7078 | become an RTL clobber too, which prevents the vector registers | |
7079 | from being upward-exposed. */ | |
86a91c0a | 7080 | vect_clobber_variable (stmt_info, gsi, vec_array); |
3ba4ff41 RS |
7081 | |
7082 | /* Store the individual vectors into the array. */ | |
272c6793 | 7083 | for (i = 0; i < vec_num; i++) |
c2d7ab2a | 7084 | { |
9771b263 | 7085 | vec_oprnd = dr_chain[i]; |
86a91c0a | 7086 | write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i); |
267d3070 | 7087 | } |
b8698a0f | 7088 | |
7cfb4d93 | 7089 | tree final_mask = NULL; |
70088b95 RS |
7090 | if (loop_masks) |
7091 | final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies, | |
7092 | vectype, j); | |
7cfb4d93 RS |
7093 | if (vec_mask) |
7094 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
7095 | vec_mask, gsi); | |
7096 | ||
7e11fc7f | 7097 | gcall *call; |
7cfb4d93 | 7098 | if (final_mask) |
7e11fc7f RS |
7099 | { |
7100 | /* Emit: | |
7101 | MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK, | |
7102 | VEC_ARRAY). */ | |
7103 | unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype)); | |
7104 | tree alias_ptr = build_int_cst (ref_type, align); | |
7105 | call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4, | |
7106 | dataref_ptr, alias_ptr, | |
7cfb4d93 | 7107 | final_mask, vec_array); |
7e11fc7f RS |
7108 | } |
7109 | else | |
7110 | { | |
7111 | /* Emit: | |
7112 | MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */ | |
7113 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); | |
7114 | call = gimple_build_call_internal (IFN_STORE_LANES, 1, | |
7115 | vec_array); | |
7116 | gimple_call_set_lhs (call, data_ref); | |
7117 | } | |
a844293d | 7118 | gimple_call_set_nothrow (call, true); |
86a91c0a | 7119 | new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); |
3ba4ff41 RS |
7120 | |
7121 | /* Record that VEC_ARRAY is now dead. */ | |
86a91c0a | 7122 | vect_clobber_variable (stmt_info, gsi, vec_array); |
272c6793 RS |
7123 | } |
7124 | else | |
7125 | { | |
e1bd7296 | 7126 | new_stmt_info = NULL; |
0d0293ac | 7127 | if (grouped_store) |
272c6793 | 7128 | { |
b6b9227d JJ |
7129 | if (j == 0) |
7130 | result_chain.create (group_size); | |
272c6793 | 7131 | /* Permute. */ |
86a91c0a | 7132 | vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi, |
272c6793 RS |
7133 | &result_chain); |
7134 | } | |
c2d7ab2a | 7135 | |
bffb8014 | 7136 | stmt_vec_info next_stmt_info = first_stmt_info; |
272c6793 RS |
7137 | for (i = 0; i < vec_num; i++) |
7138 | { | |
644ffefd | 7139 | unsigned align, misalign; |
272c6793 | 7140 | |
7cfb4d93 | 7141 | tree final_mask = NULL_TREE; |
70088b95 RS |
7142 | if (loop_masks) |
7143 | final_mask = vect_get_loop_mask (gsi, loop_masks, | |
7144 | vec_num * ncopies, | |
7cfb4d93 RS |
7145 | vectype, vec_num * j + i); |
7146 | if (vec_mask) | |
7147 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
7148 | vec_mask, gsi); | |
7149 | ||
f307441a RS |
7150 | if (memory_access_type == VMAT_GATHER_SCATTER) |
7151 | { | |
7152 | tree scale = size_int (gs_info.scale); | |
7153 | gcall *call; | |
70088b95 | 7154 | if (loop_masks) |
f307441a RS |
7155 | call = gimple_build_call_internal |
7156 | (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset, | |
7157 | scale, vec_oprnd, final_mask); | |
7158 | else | |
7159 | call = gimple_build_call_internal | |
7160 | (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset, | |
7161 | scale, vec_oprnd); | |
7162 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 7163 | new_stmt_info |
86a91c0a | 7164 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
f307441a RS |
7165 | break; |
7166 | } | |
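/* Editorial sketch (GIMPLE dump form of the two calls built above):

     .MASK_SCATTER_STORE (dataref_ptr, vec_offset, scale, vec_oprnd,
			  final_mask);
     .SCATTER_STORE (dataref_ptr, vec_offset, scale, vec_oprnd);

   A single call stores all lanes, which is why the per-vector loop is
   exited with the break above.  */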
7167 | ||
272c6793 RS |
7168 | if (i > 0) |
7169 | /* Bump the vector pointer. */ | |
7170 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
86a91c0a | 7171 | stmt_info, bump); |
272c6793 RS |
7172 | |
7173 | if (slp) | |
9771b263 | 7174 | vec_oprnd = vec_oprnds[i]; |
0d0293ac MM |
7175 | else if (grouped_store) |
7176 | /* For grouped stores vectorized defs are interleaved in | |
272c6793 | 7177 | vect_permute_store_chain(). */ |
9771b263 | 7178 | vec_oprnd = result_chain[i]; |
272c6793 | 7179 | |
89fa689a RS |
7180 | align = DR_TARGET_ALIGNMENT (first_dr_info); |
7181 | if (aligned_access_p (first_dr_info)) | |
644ffefd | 7182 | misalign = 0; |
89fa689a | 7183 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
272c6793 | 7184 | { |
89fa689a | 7185 | align = dr_alignment (vect_dr_behavior (first_dr_info)); |
52639a61 | 7186 | misalign = 0; |
272c6793 RS |
7187 | } |
7188 | else | |
89fa689a | 7189 | misalign = DR_MISALIGNMENT (first_dr_info); |
aed93b23 RB |
7190 | if (dataref_offset == NULL_TREE |
7191 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
7192 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, |
7193 | misalign); | |
c2d7ab2a | 7194 | |
62da9e14 | 7195 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
09dfa495 BM |
7196 | { |
7197 | tree perm_mask = perm_mask_for_reverse (vectype); | |
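/* Editorial note: for e.g. a four-element vector this selector is
   {3, 2, 1, 0}, so the VEC_PERM_EXPR below turns {a, b, c, d} into
   {d, c, b, a} ahead of the negatively strided store.  */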
86a91c0a RS |
7198 | tree perm_dest = vect_create_destination_var |
7199 | (vect_get_store_rhs (stmt_info), vectype); | |
b731b390 | 7200 | tree new_temp = make_ssa_name (perm_dest); |
09dfa495 BM |
7201 | |
7202 | /* Generate the permute statement. */ | |
355fe088 | 7203 | gimple *perm_stmt |
0d0e4a03 JJ |
7204 | = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, |
7205 | vec_oprnd, perm_mask); | |
86a91c0a | 7206 | vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); |
09dfa495 BM |
7207 | |
7208 | perm_stmt = SSA_NAME_DEF_STMT (new_temp); | |
7209 | vec_oprnd = new_temp; | |
7210 | } | |
7211 | ||
272c6793 | 7212 | /* Arguments are ready. Create the new vector stmt. */ |
7cfb4d93 | 7213 | if (final_mask) |
c3a8f964 RS |
7214 | { |
7215 | align = least_bit_hwi (misalign | align); | |
7216 | tree ptr = build_int_cst (ref_type, align); | |
7217 | gcall *call | |
7218 | = gimple_build_call_internal (IFN_MASK_STORE, 4, | |
7219 | dataref_ptr, ptr, | |
7cfb4d93 | 7220 | final_mask, vec_oprnd); |
c3a8f964 | 7221 | gimple_call_set_nothrow (call, true); |
e1bd7296 | 7222 | new_stmt_info |
86a91c0a | 7223 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
c3a8f964 RS |
7224 | } |
7225 | else | |
7226 | { | |
7227 | data_ref = fold_build2 (MEM_REF, vectype, | |
7228 | dataref_ptr, | |
7229 | dataref_offset | |
7230 | ? dataref_offset | |
7231 | : build_int_cst (ref_type, 0)); | |
89fa689a | 7232 | if (aligned_access_p (first_dr_info)) |
c3a8f964 | 7233 | ; |
89fa689a | 7234 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
c3a8f964 RS |
7235 | TREE_TYPE (data_ref) |
7236 | = build_aligned_type (TREE_TYPE (data_ref), | |
7237 | align * BITS_PER_UNIT); | |
7238 | else | |
7239 | TREE_TYPE (data_ref) | |
7240 | = build_aligned_type (TREE_TYPE (data_ref), | |
7241 | TYPE_ALIGN (elem_type)); | |
89fa689a | 7242 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
e1bd7296 RS |
7243 | gassign *new_stmt |
7244 | = gimple_build_assign (data_ref, vec_oprnd); | |
7245 | new_stmt_info | |
86a91c0a | 7246 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c3a8f964 | 7247 | } |
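/* Editorial sketch of the two store forms emitted above, roughly as
   they appear in a GIMPLE dump:

     .MASK_STORE (dataref_ptr, align_ptr, final_mask, vec_oprnd);
     MEM[(vectype *)dataref_ptr] = vec_oprnd;

   where the MEM_REF's type is given a lowered alignment when the
   access is known or suspected to be misaligned.  */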
272c6793 RS |
7248 | |
7249 | if (slp) | |
7250 | continue; | |
7251 | ||
bffb8014 RS |
7252 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
7253 | if (!next_stmt_info) | |
272c6793 RS |
7254 | break; |
7255 | } | |
ebfd146a | 7256 | } |
1da0876c RS |
7257 | if (!slp) |
7258 | { | |
7259 | if (j == 0) | |
e1bd7296 | 7260 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
1da0876c | 7261 | else |
e1bd7296 RS |
7262 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
7263 | prev_stmt_info = new_stmt_info; | |
1da0876c | 7264 | } |
ebfd146a IR |
7265 | } |
7266 | ||
9771b263 DN |
7267 | oprnds.release (); |
7268 | result_chain.release (); | |
7269 | vec_oprnds.release (); | |
ebfd146a IR |
7270 | |
7271 | return true; | |
7272 | } | |
7273 | ||
557be5a8 AL |
7274 | /* Given a vector type VECTYPE, turns permutation SEL into the equivalent |
7275 | VECTOR_CST mask. No checks are made that the target platform supports the | |
7ac7e286 | 7276 | mask, so callers may wish to test can_vec_perm_const_p separately, or use |
557be5a8 | 7277 | vect_gen_perm_mask_checked. */ |
a1e53f3f | 7278 | |
3fcc1b55 | 7279 | tree |
4aae3cb3 | 7280 | vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel) |
a1e53f3f | 7281 | { |
b00cb3bf | 7282 | tree mask_type; |
a1e53f3f | 7283 | |
0ecc2b7d RS |
7284 | poly_uint64 nunits = sel.length (); |
7285 | gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype))); | |
b00cb3bf RS |
7286 | |
7287 | mask_type = build_vector_type (ssizetype, nunits); | |
736d0f28 | 7288 | return vec_perm_indices_to_tree (mask_type, sel); |
a1e53f3f L |
7289 | } |
7290 | ||
7ac7e286 | 7291 | /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p, |
cf7aa6a3 | 7292 | i.e. that the target supports the pattern _for arbitrary input vectors_. */ |
557be5a8 AL |
7293 | |
7294 | tree | |
4aae3cb3 | 7295 | vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel) |
557be5a8 | 7296 | { |
7ac7e286 | 7297 | gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel)); |
557be5a8 AL |
7298 | return vect_gen_perm_mask_any (vectype, sel); |
7299 | } | |
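/* Editorial usage sketch, mirroring perm_mask_for_reverse as used by
   vectorizable_store above: a lane-reversal mask for a four-element
   VECTYPE could be built as

     vec_perm_builder sel (4, 4, 1);
     for (unsigned int i = 0; i < 4; ++i)
       sel.quick_push (3 - i);
     vec_perm_indices indices (sel, 1, 4);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   with the _checked variant asserting can_vec_perm_const_p rather
   than leaving that test to the caller.  */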
7300 | ||
aec7ae7d | 7301 | /* Given a vector variable X and Y, that was generated for the scalar |
82570274 | 7302 | STMT_INFO, generate instructions to permute the vector elements of X and Y |
aec7ae7d JJ |
7303 | using permutation mask MASK_VEC, insert them at *GSI and return the |
7304 | permuted vector variable. */ | |
a1e53f3f L |
7305 | |
7306 | static tree | |
82570274 | 7307 | permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, |
aec7ae7d | 7308 | gimple_stmt_iterator *gsi) |
a1e53f3f L |
7309 | { |
7310 | tree vectype = TREE_TYPE (x); | |
aec7ae7d | 7311 | tree perm_dest, data_ref; |
355fe088 | 7312 | gimple *perm_stmt; |
a1e53f3f | 7313 | |
82570274 | 7314 | tree scalar_dest = gimple_get_lhs (stmt_info->stmt); |
7ad429a4 RS |
7315 | if (TREE_CODE (scalar_dest) == SSA_NAME) |
7316 | perm_dest = vect_create_destination_var (scalar_dest, vectype); | |
7317 | else | |
7318 | perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL); | |
b731b390 | 7319 | data_ref = make_ssa_name (perm_dest); |
a1e53f3f L |
7320 | |
7321 | /* Generate the permute statement. */ | |
0d0e4a03 | 7322 | perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec); |
82570274 | 7323 | vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); |
a1e53f3f L |
7324 | |
7325 | return data_ref; | |
7326 | } | |
7327 | ||
32e8e429 | 7328 | /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP, |
6b916b36 | 7329 | inserting them on the loop's preheader edge. Returns true if we
32e8e429 | 7330 | were successful in doing so (and thus STMT_INFO can be moved then), |
6b916b36 RB |
7331 | otherwise returns false. */ |
7332 | ||
7333 | static bool | |
32e8e429 | 7334 | hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop) |
6b916b36 RB |
7335 | { |
7336 | ssa_op_iter i; | |
7337 | tree op; | |
7338 | bool any = false; | |
7339 | ||
32e8e429 | 7340 | FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) |
6b916b36 | 7341 | { |
355fe088 | 7342 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
7343 | if (!gimple_nop_p (def_stmt) |
7344 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
7345 | { | |
7346 | /* Make sure we don't need to recurse. While we could do | |
7347 | so in simple cases when there are more complex use webs | |
7348 | we don't have an easy way to preserve stmt order to fulfil | |
7349 | dependencies within them. */ | |
7350 | tree op2; | |
7351 | ssa_op_iter i2; | |
d1417442 JJ |
7352 | if (gimple_code (def_stmt) == GIMPLE_PHI) |
7353 | return false; | |
6b916b36 RB |
7354 | FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE) |
7355 | { | |
355fe088 | 7356 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2); |
6b916b36 RB |
7357 | if (!gimple_nop_p (def_stmt2) |
7358 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2))) | |
7359 | return false; | |
7360 | } | |
7361 | any = true; | |
7362 | } | |
7363 | } | |
7364 | ||
7365 | if (!any) | |
7366 | return true; | |
7367 | ||
32e8e429 | 7368 | FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) |
6b916b36 | 7369 | { |
355fe088 | 7370 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
7371 | if (!gimple_nop_p (def_stmt) |
7372 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
7373 | { | |
7374 | gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); | |
7375 | gsi_remove (&gsi, false); | |
7376 | gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt); | |
7377 | } | |
7378 | } | |
7379 | ||
7380 | return true; | |
7381 | } | |
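/* Editorial illustration (hypothetical SSA names): given a loop body

     off_1 = base_2 + 16;
     x_3 = MEM[off_1];      <-- STMT_INFO

   the first walk checks that the in-loop def off_1 depends only on
   values defined outside LOOP (here base_2); the second walk then
   moves off_1's definition to the preheader edge, after which the
   caller is free to hoist STMT_INFO itself.  */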
7382 | ||
ebfd146a IR |
7383 | /* vectorizable_load. |
7384 | ||
32e8e429 RS |
7385 | Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
7386 | that can be vectorized. | |
7387 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
7388 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
7389 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
7390 | |
7391 | static bool | |
32e8e429 | 7392 | vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
7393 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
7394 | slp_instance slp_node_instance, | |
68435eb2 | 7395 | stmt_vector_for_cost *cost_vec) |
ebfd146a IR |
7396 | { |
7397 | tree scalar_dest; | |
7398 | tree vec_dest = NULL; | |
7399 | tree data_ref = NULL; | |
b8698a0f | 7400 | stmt_vec_info prev_stmt_info; |
ebfd146a | 7401 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 7402 | struct loop *loop = NULL; |
32e8e429 | 7403 | struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father; |
a70d6342 | 7404 | bool nested_in_vect_loop = false; |
272c6793 | 7405 | tree elem_type; |
ebfd146a | 7406 | tree new_temp; |
ef4bddc2 | 7407 | machine_mode mode; |
ebfd146a IR |
7408 | tree dummy; |
7409 | enum dr_alignment_support alignment_support_scheme; | |
7410 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 7411 | tree dataref_offset = NULL_TREE; |
355fe088 | 7412 | gimple *ptr_incr = NULL; |
ebfd146a | 7413 | int ncopies; |
4d694b27 RS |
7414 | int i, j; |
7415 | unsigned int group_size; | |
7416 | poly_uint64 group_gap_adj; | |
ebfd146a IR |
7417 | tree msq = NULL_TREE, lsq; |
7418 | tree offset = NULL_TREE; | |
356bbc4c | 7419 | tree byte_offset = NULL_TREE; |
ebfd146a | 7420 | tree realignment_token = NULL_TREE; |
538dd0b7 | 7421 | gphi *phi = NULL; |
6e1aa848 | 7422 | vec<tree> dr_chain = vNULL; |
0d0293ac | 7423 | bool grouped_load = false; |
bffb8014 | 7424 | stmt_vec_info first_stmt_info; |
b9787581 | 7425 | stmt_vec_info first_stmt_info_for_drptr = NULL; |
ebfd146a IR |
7426 | bool inv_p; |
7427 | bool compute_in_loop = false; | |
7428 | struct loop *at_loop; | |
7429 | int vec_num; | |
7430 | bool slp = (slp_node != NULL); | |
7431 | bool slp_perm = false; | |
a70d6342 | 7432 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
d9f21f6a | 7433 | poly_uint64 vf; |
272c6793 | 7434 | tree aggr_type; |
134c85ca | 7435 | gather_scatter_info gs_info; |
310213d4 | 7436 | vec_info *vinfo = stmt_info->vinfo; |
44fc7854 | 7437 | tree ref_type; |
929b4411 | 7438 | enum vect_def_type mask_dt = vect_unknown_def_type; |
a70d6342 | 7439 | |
465c8c19 JJ |
7440 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
7441 | return false; | |
7442 | ||
66c16fd9 RB |
7443 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
7444 | && ! vec_stmt) | |
465c8c19 JJ |
7445 | return false; |
7446 | ||
c3a8f964 | 7447 | tree mask = NULL_TREE, mask_vectype = NULL_TREE; |
86a91c0a | 7448 | if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt)) |
c3a8f964 | 7449 | { |
beb456c3 | 7450 | scalar_dest = gimple_assign_lhs (assign); |
c3a8f964 RS |
7451 | if (TREE_CODE (scalar_dest) != SSA_NAME) |
7452 | return false; | |
465c8c19 | 7453 | |
beb456c3 | 7454 | tree_code code = gimple_assign_rhs_code (assign); |
c3a8f964 RS |
7455 | if (code != ARRAY_REF |
7456 | && code != BIT_FIELD_REF | |
7457 | && code != INDIRECT_REF | |
7458 | && code != COMPONENT_REF | |
7459 | && code != IMAGPART_EXPR | |
7460 | && code != REALPART_EXPR | |
7461 | && code != MEM_REF | |
7462 | && TREE_CODE_CLASS (code) != tcc_declaration) | |
7463 | return false; | |
7464 | } | |
7465 | else | |
7466 | { | |
86a91c0a | 7467 | gcall *call = dyn_cast <gcall *> (stmt_info->stmt); |
bfaa08b7 RS |
7468 | if (!call || !gimple_call_internal_p (call)) |
7469 | return false; | |
7470 | ||
7471 | internal_fn ifn = gimple_call_internal_fn (call); | |
7472 | if (!internal_load_fn_p (ifn)) | |
c3a8f964 | 7473 | return false; |
465c8c19 | 7474 | |
c3a8f964 RS |
7475 | scalar_dest = gimple_call_lhs (call); |
7476 | if (!scalar_dest) | |
7477 | return false; | |
7478 | ||
7479 | if (slp_node != NULL) | |
7480 | { | |
7481 | if (dump_enabled_p ()) | |
7482 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7483 | "SLP of masked loads not supported.\n"); | |
7484 | return false; | |
7485 | } | |
7486 | ||
bfaa08b7 RS |
7487 | int mask_index = internal_fn_mask_index (ifn); |
7488 | if (mask_index >= 0) | |
7489 | { | |
7490 | mask = gimple_call_arg (call, mask_index); | |
86a91c0a | 7491 | if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt, |
929b4411 | 7492 | &mask_vectype)) |
bfaa08b7 RS |
7493 | return false; |
7494 | } | |
c3a8f964 | 7495 | } |
465c8c19 JJ |
7496 | |
7497 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
7498 | return false; | |
7499 | ||
7500 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
4d694b27 | 7501 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
465c8c19 | 7502 | |
a70d6342 IR |
7503 | if (loop_vinfo) |
7504 | { | |
7505 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
86a91c0a | 7506 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info); |
a70d6342 IR |
7507 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
7508 | } | |
7509 | else | |
3533e503 | 7510 | vf = 1; |
ebfd146a IR |
7511 | |
7512 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 7513 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
ebfd146a | 7514 | case of SLP. */ |
fce57248 | 7515 | if (slp) |
ebfd146a IR |
7516 | ncopies = 1; |
7517 | else | |
e8f142e2 | 7518 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
ebfd146a IR |
7519 | |
7520 | gcc_assert (ncopies >= 1); | |
7521 | ||
7522 | /* FORNOW. This restriction should be relaxed. */ | |
7523 | if (nested_in_vect_loop && ncopies > 1) | |
7524 | { | |
73fbfcad | 7525 | if (dump_enabled_p ()) |
78c60e3d | 7526 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 7527 | "multiple types in nested loop.\n"); |
ebfd146a IR |
7528 | return false; |
7529 | } | |
7530 | ||
f2556b68 RB |
7531 | /* Invalidate assumptions made by dependence analysis when vectorization |
7532 | on the unrolled body effectively re-orders stmts. */ | |
7533 | if (ncopies > 1 | |
7534 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
d9f21f6a RS |
7535 | && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
7536 | STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
f2556b68 RB |
7537 | { |
7538 | if (dump_enabled_p ()) | |
7539 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7540 | "cannot perform implicit CSE when unrolling " | |
7541 | "with negative dependence distance\n"); | |
7542 | return false; | |
7543 | } | |
7544 | ||
7b7b1813 | 7545 | elem_type = TREE_TYPE (vectype); |
947131ba | 7546 | mode = TYPE_MODE (vectype); |
ebfd146a IR |
7547 | |
7548 | /* FORNOW. In some cases can vectorize even if data-type not supported | |
7549 | (e.g. - data copies). */ | |
947131ba | 7550 | if (optab_handler (mov_optab, mode) == CODE_FOR_nothing) |
ebfd146a | 7551 | { |
73fbfcad | 7552 | if (dump_enabled_p ()) |
78c60e3d | 7553 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 7554 | "Aligned load, but unsupported type.\n"); |
ebfd146a IR |
7555 | return false; |
7556 | } | |
7557 | ||
ebfd146a | 7558 | /* Check if the load is a part of an interleaving chain. */ |
0d0293ac | 7559 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
ebfd146a | 7560 | { |
0d0293ac | 7561 | grouped_load = true; |
ebfd146a | 7562 | /* FORNOW */ |
2de001ee RS |
7563 | gcc_assert (!nested_in_vect_loop); |
7564 | gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info)); | |
ebfd146a | 7565 | |
bffb8014 RS |
7566 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
7567 | group_size = DR_GROUP_SIZE (first_stmt_info); | |
d5f035ea | 7568 | |
b1af7da6 RB |
7569 | if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) |
7570 | slp_perm = true; | |
7571 | ||
f2556b68 RB |
7572 | /* Invalidate assumptions made by dependence analysis when vectorization |
7573 | on the unrolled body effectively re-orders stmts. */ | |
7574 | if (!PURE_SLP_STMT (stmt_info) | |
7575 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
d9f21f6a RS |
7576 | && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
7577 | STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
f2556b68 RB |
7578 | { |
7579 | if (dump_enabled_p ()) | |
7580 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7581 | "cannot perform implicit CSE when performing " | |
7582 | "group loads with negative dependence distance\n"); | |
7583 | return false; | |
7584 | } | |
96bb56b2 RB |
7585 | |
7586 | /* Similarly when the stmt is a load that is both part of a SLP | |
7587 | instance and a loop vectorized stmt via the same-dr mechanism | |
7588 | we have to give up. */ | |
2c53b149 | 7589 | if (DR_GROUP_SAME_DR_STMT (stmt_info) |
96bb56b2 | 7590 | && (STMT_SLP_TYPE (stmt_info) |
c26228d4 | 7591 | != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info)))) |
96bb56b2 RB |
7592 | { |
7593 | if (dump_enabled_p ()) | |
7594 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7595 | "conflicting SLP types for CSEd load\n"); | |
7596 | return false; | |
7597 | } | |
ebfd146a | 7598 | } |
7cfb4d93 RS |
7599 | else |
7600 | group_size = 1; | |
ebfd146a | 7601 | |
2de001ee | 7602 | vect_memory_access_type memory_access_type; |
86a91c0a | 7603 | if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies, |
2de001ee RS |
7604 | &memory_access_type, &gs_info)) |
7605 | return false; | |
a1e53f3f | 7606 | |
c3a8f964 RS |
7607 | if (mask) |
7608 | { | |
7609 | if (memory_access_type == VMAT_CONTIGUOUS) | |
7610 | { | |
7e11fc7f RS |
7611 | machine_mode vec_mode = TYPE_MODE (vectype); |
7612 | if (!VECTOR_MODE_P (vec_mode) | |
7613 | || !can_vec_mask_load_store_p (vec_mode, | |
c3a8f964 RS |
7614 | TYPE_MODE (mask_vectype), true)) |
7615 | return false; | |
7616 | } | |
bfaa08b7 | 7617 | else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
c3a8f964 RS |
7618 | { |
7619 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); | |
7620 | tree masktype | |
7621 | = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); | |
7622 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
7623 | { | |
7624 | if (dump_enabled_p ()) | |
7625 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7626 | "masked gather with integer mask not" | |
7627 | " supported."); | |
7628 | return false; | |
7629 | } | |
7630 | } | |
bfaa08b7 RS |
7631 | else if (memory_access_type != VMAT_LOAD_STORE_LANES |
7632 | && memory_access_type != VMAT_GATHER_SCATTER) | |
c3a8f964 RS |
7633 | { |
7634 | if (dump_enabled_p ()) | |
7635 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7636 | "unsupported access type for masked load.\n"); | |
7637 | return false; | |
7638 | } | |
7639 | } | |
7640 | ||
ebfd146a IR |
7641 | if (!vec_stmt) /* transformation not required. */ |
7642 | { | |
2de001ee RS |
7643 | if (!slp) |
7644 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; | |
7cfb4d93 RS |
7645 | |
7646 | if (loop_vinfo | |
7647 | && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) | |
7648 | check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size, | |
bfaa08b7 | 7649 | memory_access_type, &gs_info); |
7cfb4d93 | 7650 | |
ebfd146a | 7651 | STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; |
68435eb2 RB |
7652 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, |
7653 | slp_node_instance, slp_node, cost_vec); | |
ebfd146a IR |
7654 | return true; |
7655 | } | |
7656 | ||
2de001ee RS |
7657 | if (!slp) |
7658 | gcc_assert (memory_access_type | |
7659 | == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); | |
7660 | ||
73fbfcad | 7661 | if (dump_enabled_p ()) |
78c60e3d | 7662 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 7663 | "transform load. ncopies = %d\n", ncopies); |
ebfd146a | 7664 | |
67b8dbac | 7665 | /* Transform. */ |
ebfd146a | 7666 | |
89fa689a RS |
7667 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL; |
7668 | ensure_base_align (dr_info); | |
c716e67f | 7669 | |
bfaa08b7 | 7670 | if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
aec7ae7d | 7671 | { |
e4057a39 | 7672 | vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask); |
aec7ae7d JJ |
7673 | return true; |
7674 | } | |
2de001ee RS |
7675 | |
7676 | if (memory_access_type == VMAT_ELEMENTWISE | |
7677 | || memory_access_type == VMAT_STRIDED_SLP) | |
7d75abc8 MM |
7678 | { |
7679 | gimple_stmt_iterator incr_gsi; | |
7680 | bool insert_after; | |
355fe088 | 7681 | gimple *incr; |
7d75abc8 | 7682 | tree offvar; |
7d75abc8 MM |
7683 | tree ivstep; |
7684 | tree running_off; | |
9771b263 | 7685 | vec<constructor_elt, va_gc> *v = NULL; |
14ac6aa2 | 7686 | tree stride_base, stride_step, alias_off; |
4d694b27 RS |
7687 | /* Checked by get_load_store_type. */ |
7688 | unsigned int const_nunits = nunits.to_constant (); | |
b210f45f | 7689 | unsigned HOST_WIDE_INT cst_offset = 0; |
14ac6aa2 | 7690 | |
7cfb4d93 | 7691 | gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); |
14ac6aa2 | 7692 | gcc_assert (!nested_in_vect_loop); |
7d75abc8 | 7693 | |
b210f45f | 7694 | if (grouped_load) |
44fc7854 | 7695 | { |
bffb8014 | 7696 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
89fa689a | 7697 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
44fc7854 | 7698 | } |
ab313a8c | 7699 | else |
44fc7854 | 7700 | { |
bffb8014 | 7701 | first_stmt_info = stmt_info; |
89fa689a | 7702 | first_dr_info = dr_info; |
b210f45f RB |
7703 | } |
7704 | if (slp && grouped_load) | |
7705 | { | |
bffb8014 RS |
7706 | group_size = DR_GROUP_SIZE (first_stmt_info); |
7707 | ref_type = get_group_alias_ptr_type (first_stmt_info); | |
b210f45f RB |
7708 | } |
7709 | else | |
7710 | { | |
7711 | if (grouped_load) | |
7712 | cst_offset | |
7713 | = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))) | |
86a91c0a | 7714 | * vect_get_place_in_interleaving_chain (stmt_info, |
bffb8014 | 7715 | first_stmt_info)); |
44fc7854 | 7716 | group_size = 1; |
89fa689a | 7717 | ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)); |
44fc7854 | 7718 | } |
ab313a8c | 7719 | |
14ac6aa2 RB |
7720 | stride_base |
7721 | = fold_build_pointer_plus | |
89fa689a | 7722 | (DR_BASE_ADDRESS (first_dr_info->dr), |
14ac6aa2 | 7723 | size_binop (PLUS_EXPR, |
89fa689a RS |
7724 | convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)), |
7725 | convert_to_ptrofftype (DR_INIT (first_dr_info->dr)))); | |
7726 | stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr)); | |
7d75abc8 MM |
7727 | |
7728 | /* For a load with loop-invariant (but other than power-of-2) | |
7729 | stride (i.e. not a grouped access) like so: | |
7730 | ||
7731 | for (i = 0; i < n; i += stride) | |
7732 | ... = array[i]; | |
7733 | ||
7734 | we generate a new induction variable and new accesses to | |
7735 | form a new vector (or vectors, depending on ncopies): | |
7736 | ||
7737 | for (j = 0; ; j += VF*stride) | |
7738 | tmp1 = array[j]; | |
7739 | tmp2 = array[j + stride]; | |
7740 | ... | |
7741 | vectemp = {tmp1, tmp2, ...} | |
7742 | */ | |
7743 | ||
ab313a8c RB |
7744 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step, |
7745 | build_int_cst (TREE_TYPE (stride_step), vf)); | |
7d75abc8 MM |
7746 | |
7747 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
7748 | ||
b210f45f RB |
7749 | stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base); |
7750 | ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep); | |
7751 | create_iv (stride_base, ivstep, NULL, | |
7d75abc8 MM |
7752 | loop, &incr_gsi, insert_after, |
7753 | &offvar, NULL); | |
7754 | incr = gsi_stmt (incr_gsi); | |
4fbeb363 | 7755 | loop_vinfo->add_stmt (incr); |
7d75abc8 | 7756 | |
b210f45f | 7757 | stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step); |
7d75abc8 MM |
7758 | |
7759 | prev_stmt_info = NULL; | |
7760 | running_off = offvar; | |
44fc7854 | 7761 | alias_off = build_int_cst (ref_type, 0); |
4d694b27 | 7762 | int nloads = const_nunits; |
e09b4c37 | 7763 | int lnel = 1; |
7b5fc413 | 7764 | tree ltype = TREE_TYPE (vectype); |
ea60dd34 | 7765 | tree lvectype = vectype; |
b266b968 | 7766 | auto_vec<tree> dr_chain; |
2de001ee | 7767 | if (memory_access_type == VMAT_STRIDED_SLP) |
7b5fc413 | 7768 | { |
4d694b27 | 7769 | if (group_size < const_nunits) |
e09b4c37 | 7770 | { |
ff03930a JJ |
7771 | /* First check if vec_init optab supports construction from |
7772 | vector elts directly. */ | |
b397965c | 7773 | scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype)); |
9da15d40 RS |
7774 | machine_mode vmode; |
7775 | if (mode_for_vector (elmode, group_size).exists (&vmode) | |
7776 | && VECTOR_MODE_P (vmode) | |
414fef4e | 7777 | && targetm.vector_mode_supported_p (vmode) |
ff03930a JJ |
7778 | && (convert_optab_handler (vec_init_optab, |
7779 | TYPE_MODE (vectype), vmode) | |
7780 | != CODE_FOR_nothing)) | |
ea60dd34 | 7781 | { |
4d694b27 | 7782 | nloads = const_nunits / group_size; |
ea60dd34 | 7783 | lnel = group_size; |
ff03930a JJ |
7784 | ltype = build_vector_type (TREE_TYPE (vectype), group_size); |
7785 | } | |
7786 | else | |
7787 | { | |
7788 | /* Otherwise avoid emitting a constructor of vector elements | |
7789 | by performing the loads using an integer type of the same | |
7790 | size, constructing a vector of those and then | |
7791 | re-interpreting it as the original vector type. | |
7792 | This avoids a huge runtime penalty due to the general | |
7793 | inability to perform store forwarding from smaller stores | |
7794 | to a larger load. */ | |
7795 | unsigned lsize | |
7796 | = group_size * TYPE_PRECISION (TREE_TYPE (vectype)); | |
fffbab82 | 7797 | elmode = int_mode_for_size (lsize, 0).require (); |
4d694b27 | 7798 | unsigned int lnunits = const_nunits / group_size; |
ff03930a JJ |
7799 | /* If we can't construct such a vector fall back to |
7800 | element loads of the original vector type. */ | |
4d694b27 | 7801 | if (mode_for_vector (elmode, lnunits).exists (&vmode) |
9da15d40 | 7802 | && VECTOR_MODE_P (vmode) |
414fef4e | 7803 | && targetm.vector_mode_supported_p (vmode) |
ff03930a JJ |
7804 | && (convert_optab_handler (vec_init_optab, vmode, elmode) |
7805 | != CODE_FOR_nothing)) | |
7806 | { | |
4d694b27 | 7807 | nloads = lnunits; |
ff03930a JJ |
7808 | lnel = group_size; |
7809 | ltype = build_nonstandard_integer_type (lsize, 1); | |
7810 | lvectype = build_vector_type (ltype, nloads); | |
7811 | } | |
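/* Editorial illustration of this integer-mode fallback, assuming
   group_size 2 and a vector of eight shorts: LSIZE is 32, so each
   SImode load fetches one two-short group, four such loads feed a
   four-element integer CONSTRUCTOR, and a single VIEW_CONVERT_EXPR
   later re-interprets the result as the original V8HI vectype.  */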
ea60dd34 | 7812 | } |
e09b4c37 | 7813 | } |
2de001ee | 7814 | else |
e09b4c37 | 7815 | { |
ea60dd34 | 7816 | nloads = 1; |
4d694b27 | 7817 | lnel = const_nunits; |
e09b4c37 | 7818 | ltype = vectype; |
e09b4c37 | 7819 | } |
2de001ee RS |
7820 | ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype))); |
7821 | } | |
bb4e4747 BC |
7822 | /* Load vector(1) scalar_type if the vectype is a single-element vector. */
7823 | else if (nloads == 1) | |
7824 | ltype = vectype; | |
7825 | ||
2de001ee RS |
7826 | if (slp) |
7827 | { | |
66c16fd9 RB |
7828 | /* For SLP permutation support we need to load the whole group, |
7829 | not only the number of vector stmts the permutation result | |
7830 | fits in. */ | |
b266b968 | 7831 | if (slp_perm) |
66c16fd9 | 7832 | { |
d9f21f6a RS |
7833 | /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for |
7834 | variable VF. */ | |
7835 | unsigned int const_vf = vf.to_constant (); | |
4d694b27 | 7836 | ncopies = CEIL (group_size * const_vf, const_nunits); |
66c16fd9 RB |
7837 | dr_chain.create (ncopies); |
7838 | } | |
7839 | else | |
7840 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
7b5fc413 | 7841 | } |
4d694b27 | 7842 | unsigned int group_el = 0; |
e09b4c37 RB |
7843 | unsigned HOST_WIDE_INT |
7844 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
7d75abc8 MM |
7845 | for (j = 0; j < ncopies; j++) |
7846 | { | |
7b5fc413 | 7847 | if (nloads > 1) |
e09b4c37 | 7848 | vec_alloc (v, nloads); |
e1bd7296 | 7849 | stmt_vec_info new_stmt_info = NULL; |
e09b4c37 | 7850 | for (i = 0; i < nloads; i++) |
7b5fc413 | 7851 | { |
e09b4c37 | 7852 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
b210f45f | 7853 | group_el * elsz + cst_offset); |
19986382 | 7854 | tree data_ref = build2 (MEM_REF, ltype, running_off, this_off); |
89fa689a | 7855 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
e1bd7296 RS |
7856 | gassign *new_stmt |
7857 | = gimple_build_assign (make_ssa_name (ltype), data_ref); | |
7858 | new_stmt_info | |
86a91c0a | 7859 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
e09b4c37 RB |
7860 | if (nloads > 1) |
7861 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, | |
7862 | gimple_assign_lhs (new_stmt)); | |
7863 | ||
7864 | group_el += lnel; | |
7865 | if (! slp | |
7866 | || group_el == group_size) | |
7b5fc413 | 7867 | { |
e09b4c37 RB |
7868 | tree newoff = copy_ssa_name (running_off); |
7869 | gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
7870 | running_off, stride_step); | |
86a91c0a | 7871 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
7b5fc413 RB |
7872 | |
7873 | running_off = newoff; | |
e09b4c37 | 7874 | group_el = 0; |
7b5fc413 | 7875 | } |
7b5fc413 | 7876 | } |
e09b4c37 | 7877 | if (nloads > 1) |
7d75abc8 | 7878 | { |
ea60dd34 | 7879 | tree vec_inv = build_constructor (lvectype, v); |
86a91c0a | 7880 | new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi); |
e1bd7296 | 7881 | new_stmt_info = vinfo->lookup_def (new_temp); |
ea60dd34 RB |
7882 | if (lvectype != vectype) |
7883 | { | |
e1bd7296 RS |
7884 | gassign *new_stmt |
7885 | = gimple_build_assign (make_ssa_name (vectype), | |
7886 | VIEW_CONVERT_EXPR, | |
7887 | build1 (VIEW_CONVERT_EXPR, | |
7888 | vectype, new_temp)); | |
7889 | new_stmt_info | |
86a91c0a | 7890 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
ea60dd34 | 7891 | } |
7d75abc8 MM |
7892 | } |
7893 | ||
7b5fc413 | 7894 | if (slp) |
b266b968 | 7895 | { |
b266b968 | 7896 | if (slp_perm) |
e1bd7296 | 7897 | dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt)); |
66c16fd9 | 7898 | else |
e1bd7296 | 7899 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
b266b968 | 7900 | } |
7d75abc8 | 7901 | else |
225ce44b RB |
7902 | { |
7903 | if (j == 0) | |
e1bd7296 | 7904 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
225ce44b | 7905 | else |
e1bd7296 RS |
7906 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
7907 | prev_stmt_info = new_stmt_info; | |
225ce44b | 7908 | } |
7d75abc8 | 7909 | } |
b266b968 | 7910 | if (slp_perm) |
29afecdf RB |
7911 | { |
7912 | unsigned n_perms; | |
7913 | vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, | |
7914 | slp_node_instance, false, &n_perms); | |
7915 | } | |
7d75abc8 MM |
7916 | return true; |
7917 | } | |
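      /* A sketch (illustrative, not actual compiler output) of the gimple
	 the elementwise/strided path above emits for nloads == 4, lnel == 1
	 and a V4SI vectype:

	   _1 = MEM[(int *)running_off_1];
	   running_off_2 = running_off_1 + stride_step;
	   _2 = MEM[(int *)running_off_2];
	   ...
	   vect_3 = {_1, _2, _3, _4};

	 i.e. NLOADS scalar (or narrow-vector) loads chained through pointer
	 increments, combined with a CONSTRUCTOR and view-converted to the
	 real vectype when LVECTYPE differs from it.  */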
aec7ae7d | 7918 | |
b5ec4de7 RS |
7919 | if (memory_access_type == VMAT_GATHER_SCATTER |
7920 | || (!slp && memory_access_type == VMAT_CONTIGUOUS)) | |
ab2fc782 RS |
7921 | grouped_load = false; |
7922 | ||
0d0293ac | 7923 | if (grouped_load) |
ebfd146a | 7924 | { |
bffb8014 RS |
7925 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
7926 | group_size = DR_GROUP_SIZE (first_stmt_info); | |
4f0a0218 | 7927 | /* For SLP vectorization we directly vectorize a subchain |
52eab378 RB |
7928 | without permutation. */ |
7929 | if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) | |
bffb8014 | 7930 | first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
4f0a0218 RB |
7931 | /* For BB vectorization always use the first stmt to base |
7932 | the data ref pointer on. */ | |
7933 | if (bb_vinfo) | |
b9787581 | 7934 | first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
6aa904c4 | 7935 | |
ebfd146a | 7936 | /* Check if the chain of loads is already vectorized. */ |
bffb8014 | 7937 | if (STMT_VINFO_VEC_STMT (first_stmt_info) |
01d8bf07 RB |
7938 | /* For SLP we would need to copy over SLP_TREE_VEC_STMTS. |
7939 | ??? But we can only do so if there is exactly one | |
7940 | as we have no way to get at the rest. Leave the CSE | |
7941 | opportunity alone. | |
7942 | ??? With the group load eventually participating | |
7943 | in multiple different permutations (having multiple | |
7944 | slp nodes which refer to the same group) the CSE | |
7945 | is even wrong code. See PR56270. */ | |
7946 | && !slp) | |
ebfd146a IR |
7947 | { |
7948 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
7949 | return true; | |
7950 | } | |
89fa689a | 7951 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
9b999e8c | 7952 | group_gap_adj = 0; |
ebfd146a IR |
7953 | |
7954 | /* VEC_NUM is the number of vect stmts to be created for this group. */ | |
7955 | if (slp) | |
7956 | { | |
0d0293ac | 7957 | grouped_load = false; |
91ff1504 RB |
7958 | /* For SLP permutation support we need to load the whole group, |
7959 | not only the number of vector stmts the permutation result | |
7960 | fits in. */ | |
7961 | if (slp_perm) | |
b267968e | 7962 | { |
d9f21f6a RS |
7963 | /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for |
7964 | variable VF. */ | |
7965 | unsigned int const_vf = vf.to_constant (); | |
4d694b27 RS |
7966 | unsigned int const_nunits = nunits.to_constant (); |
7967 | vec_num = CEIL (group_size * const_vf, const_nunits); | |
b267968e RB |
7968 | group_gap_adj = vf * group_size - nunits * vec_num; |
7969 | } | |
91ff1504 | 7970 | else |
b267968e RB |
7971 | { |
7972 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
796bd467 RB |
7973 | group_gap_adj |
7974 | = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance); | |
b267968e | 7975 | } |
a70d6342 | 7976 | } |
ebfd146a | 7977 | else |
9b999e8c | 7978 | vec_num = group_size; |
44fc7854 | 7979 | |
bffb8014 | 7980 | ref_type = get_group_alias_ptr_type (first_stmt_info); |
ebfd146a IR |
7981 | } |
7982 | else | |
7983 | { | |
bffb8014 | 7984 | first_stmt_info = stmt_info; |
89fa689a | 7985 | first_dr_info = dr_info; |
ebfd146a | 7986 | group_size = vec_num = 1; |
9b999e8c | 7987 | group_gap_adj = 0; |
89fa689a | 7988 | ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); |
ebfd146a IR |
7989 | } |
7990 | ||
89fa689a RS |
7991 | alignment_support_scheme |
7992 | = vect_supportable_dr_alignment (first_dr_info, false); | |
ebfd146a | 7993 | gcc_assert (alignment_support_scheme); |
70088b95 RS |
7994 | vec_loop_masks *loop_masks |
7995 | = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) | |
7996 | ? &LOOP_VINFO_MASKS (loop_vinfo) | |
7997 | : NULL); | |
7cfb4d93 RS |
7998 | /* Targets with load-lane instructions must not require explicit |
7999 | realignment. vect_supportable_dr_alignment always returns either | |
8000 | dr_aligned or dr_unaligned_supported for masked operations. */ | |
8001 | gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES | |
8002 | && !mask | |
70088b95 | 8003 | && !loop_masks) |
272c6793 RS |
8004 | || alignment_support_scheme == dr_aligned |
8005 | || alignment_support_scheme == dr_unaligned_supported); | |
ebfd146a IR |
8006 | |
8007 | /* In case the vectorization factor (VF) is bigger than the number | |
8008 | of elements that we can fit in a vectype (nunits), we have to generate | |
8009 | more than one vector stmt - i.e., we need to "unroll" the |
ff802fa1 | 8010 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
ebfd146a | 8011 | from one copy of the vector stmt to the next, in the field |
ff802fa1 | 8012 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following |
ebfd146a | 8013 | stages to find the correct vector defs to be used when vectorizing |
ff802fa1 IR |
8014 | stmts that use the defs of the current stmt. The example below |
8015 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., we | |
8016 | need to create 4 vectorized stmts): | |
ebfd146a IR |
8017 | |
8018 | before vectorization: | |
8019 | RELATED_STMT VEC_STMT | |
8020 | S1: x = memref - - | |
8021 | S2: z = x + 1 - - | |
8022 | ||
8023 | step 1: vectorize stmt S1: | |
8024 | We first create the vector stmt VS1_0, and, as usual, record a | |
8025 | pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1. | |
8026 | Next, we create the vector stmt VS1_1, and record a pointer to | |
8027 | it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0. | |
ff802fa1 | 8028 | Similarly, for VS1_2 and VS1_3. This is the resulting chain of |
ebfd146a IR |
8029 | stmts and pointers: |
8030 | RELATED_STMT VEC_STMT | |
8031 | VS1_0: vx0 = memref0 VS1_1 - | |
8032 | VS1_1: vx1 = memref1 VS1_2 - | |
8033 | VS1_2: vx2 = memref2 VS1_3 - | |
8034 | VS1_3: vx3 = memref3 - - | |
8035 | S1: x = load - VS1_0 | |
8036 | S2: z = x + 1 - - | |
8037 | ||
b8698a0f L |
8038 | See the documentation of vect_get_vec_def_for_stmt_copy for how the |
8039 | information we recorded in the RELATED_STMT field is used to vectorize |
ebfd146a IR |
8040 | stmt S2. */ |
8041 | ||
0d0293ac | 8042 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
8043 | |
8044 | S1: x2 = &base + 2 | |
8045 | S2: x0 = &base | |
8046 | S3: x1 = &base + 1 | |
8047 | S4: x3 = &base + 3 | |
8048 | ||
b8698a0f | 8049 | Vectorized loads are created in the order of memory accesses |
ebfd146a IR |
8050 | starting from the access of the first stmt of the chain: |
8051 | ||
8052 | VS1: vx0 = &base | |
8053 | VS2: vx1 = &base + vec_size*1 | |
8054 | VS3: vx3 = &base + vec_size*2 | |
8055 | VS4: vx4 = &base + vec_size*3 | |
8056 | ||
8057 | Then permutation statements are generated: | |
8058 | ||
e2c83630 RH |
8059 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } > |
8060 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } > | |
ebfd146a IR |
8061 | ... |
8062 | ||
8063 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts | |
8064 | (the order of the data-refs in the output of vect_permute_load_chain | |
8065 | corresponds to the order of scalar stmts in the interleaving chain - see | |
8066 | the documentation of vect_permute_load_chain()). | |
8067 | The generation of permutation stmts and recording them in | |
0d0293ac | 8068 | STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load(). |
ebfd146a | 8069 | |
b8698a0f | 8070 | In case of both multiple types and interleaving, the vector loads and |
ff802fa1 IR |
8071 | permutation stmts above are created for every copy. The result vector |
8072 | stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the | |
8073 | corresponding STMT_VINFO_RELATED_STMT for the next copies. */ | |
ebfd146a IR |
8074 | |
8075 | /* If the data reference is aligned (dr_aligned) or potentially unaligned | |
8076 | on a target that supports unaligned accesses (dr_unaligned_supported) | |
8077 | we generate the following code: | |
8078 | p = initial_addr; | |
8079 | indx = 0; | |
8080 | loop { | |
8081 | p = p + indx * vectype_size; | |
8082 | vec_dest = *(p); | |
8083 | indx = indx + 1; | |
8084 | } | |
8085 | ||
8086 | Otherwise, the data reference is potentially unaligned on a target that | |
b8698a0f | 8087 | does not support unaligned accesses (dr_explicit_realign_optimized) - |
ebfd146a IR |
8088 | then generate the following code, in which the data in each iteration is |
8089 | obtained by two vector loads, one from the previous iteration, and one | |
8090 | from the current iteration: | |
8091 | p1 = initial_addr; | |
8092 | msq_init = *(floor(p1)) | |
8093 | p2 = initial_addr + VS - 1; | |
8094 | realignment_token = call target_builtin; | |
8095 | indx = 0; | |
8096 | loop { | |
8097 | p2 = p2 + indx * vectype_size | |
8098 | lsq = *(floor(p2)) | |
8099 | vec_dest = realign_load (msq, lsq, realignment_token) | |
8100 | indx = indx + 1; | |
8101 | msq = lsq; | |
8102 | } */ | |
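   /* Illustration of the realignment scheme (hypothetical values): with
      V4SI vectors and a pointer 8 bytes past a 16-byte boundary,
      msq = *(floor(p1)) is the aligned vector {a0,a1,a2,a3} and lsq the
      next aligned vector {a4,a5,a6,a7}; realign_load (msq, lsq,
      realignment_token) then extracts the wanted misaligned window
      {a2,a3,a4,a5} from their concatenation.  */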
8103 | ||
8104 | /* If the misalignment remains the same throughout the execution of the | |
8105 | loop, we can create the init_addr and permutation mask at the loop | |
ff802fa1 | 8106 | preheader. Otherwise, it needs to be created inside the loop. |
ebfd146a IR |
8107 | This can only occur when vectorizing memory accesses in the inner-loop |
8108 | nested within an outer-loop that is being vectorized. */ | |
8109 | ||
d1e4b493 | 8110 | if (nested_in_vect_loop |
89fa689a | 8111 | && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr), |
cf098191 | 8112 | GET_MODE_SIZE (TYPE_MODE (vectype)))) |
ebfd146a IR |
8113 | { |
8114 | gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); | |
8115 | compute_in_loop = true; | |
8116 | } | |
8117 | ||
8118 | if ((alignment_support_scheme == dr_explicit_realign_optimized | |
8119 | || alignment_support_scheme == dr_explicit_realign) | |
59fd17e3 | 8120 | && !compute_in_loop) |
ebfd146a | 8121 | { |
bffb8014 | 8122 | msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token, |
ebfd146a IR |
8123 | alignment_support_scheme, NULL_TREE, |
8124 | &at_loop); | |
8125 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
8126 | { | |
538dd0b7 | 8127 | phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq)); |
356bbc4c JJ |
8128 | byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype), |
8129 | size_one_node); | |
ebfd146a IR |
8130 | } |
8131 | } | |
8132 | else | |
8133 | at_loop = loop; | |
8134 | ||
62da9e14 | 8135 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
a1e53f3f L |
8136 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
8137 | ||
ab2fc782 RS |
8138 | tree bump; |
8139 | tree vec_offset = NULL_TREE; | |
8140 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
8141 | { | |
8142 | aggr_type = NULL_TREE; | |
8143 | bump = NULL_TREE; | |
8144 | } | |
8145 | else if (memory_access_type == VMAT_GATHER_SCATTER) | |
8146 | { | |
8147 | aggr_type = elem_type; | |
86a91c0a | 8148 | vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info, |
ab2fc782 RS |
8149 | &bump, &vec_offset); |
8150 | } | |
272c6793 | 8151 | else |
ab2fc782 RS |
8152 | { |
8153 | if (memory_access_type == VMAT_LOAD_STORE_LANES) | |
8154 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); | |
8155 | else | |
8156 | aggr_type = vectype; | |
89fa689a RS |
8157 | bump = vect_get_data_ptr_increment (dr_info, aggr_type, |
8158 | memory_access_type); | |
ab2fc782 | 8159 | } |
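  /* E.g. (illustrative values): for VMAT_LOAD_STORE_LANES with
     vec_num == 3 and V4SI vectors, AGGR_TYPE is a 12-element int array,
     so one IFN_LOAD_LANES call covers all 48 bytes and BUMP advances the
     pointer past the whole interleaved structure per copy; for plain
     contiguous accesses AGGR_TYPE is simply the vectype.  */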
272c6793 | 8160 | |
c3a8f964 | 8161 | tree vec_mask = NULL_TREE; |
ebfd146a | 8162 | prev_stmt_info = NULL; |
4d694b27 | 8163 | poly_uint64 group_elt = 0; |
ebfd146a | 8164 | for (j = 0; j < ncopies; j++) |
b8698a0f | 8165 | { |
e1bd7296 | 8166 | stmt_vec_info new_stmt_info = NULL; |
272c6793 | 8167 | /* 1. Create the vector or array pointer update chain. */ |
ebfd146a | 8168 | if (j == 0) |
74bf76ed JJ |
8169 | { |
8170 | bool simd_lane_access_p | |
8171 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
8172 | if (simd_lane_access_p | |
89fa689a RS |
8173 | && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR |
8174 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) | |
8175 | && integer_zerop (DR_OFFSET (first_dr_info->dr)) | |
8176 | && integer_zerop (DR_INIT (first_dr_info->dr)) | |
74bf76ed | 8177 | && alias_sets_conflict_p (get_alias_set (aggr_type), |
44fc7854 | 8178 | get_alias_set (TREE_TYPE (ref_type))) |
74bf76ed JJ |
8179 | && (alignment_support_scheme == dr_aligned |
8180 | || alignment_support_scheme == dr_unaligned_supported)) | |
8181 | { | |
89fa689a | 8182 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); |
44fc7854 | 8183 | dataref_offset = build_int_cst (ref_type, 0); |
8928eff3 | 8184 | inv_p = false; |
74bf76ed | 8185 | } |
b9787581 | 8186 | else if (first_stmt_info_for_drptr |
bffb8014 | 8187 | && first_stmt_info != first_stmt_info_for_drptr) |
4f0a0218 RB |
8188 | { |
8189 | dataref_ptr | |
b9787581 RS |
8190 | = vect_create_data_ref_ptr (first_stmt_info_for_drptr, |
8191 | aggr_type, at_loop, offset, &dummy, | |
8192 | gsi, &ptr_incr, simd_lane_access_p, | |
ab2fc782 | 8193 | &inv_p, byte_offset, bump); |
4f0a0218 RB |
8194 | /* Adjust the pointer by the difference to first_stmt. */ |
8195 | data_reference_p ptrdr | |
b9787581 | 8196 | = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr); |
89fa689a RS |
8197 | tree diff |
8198 | = fold_convert (sizetype, | |
8199 | size_binop (MINUS_EXPR, | |
8200 | DR_INIT (first_dr_info->dr), | |
8201 | DR_INIT (ptrdr))); | |
4f0a0218 | 8202 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8203 | stmt_info, diff); |
4f0a0218 | 8204 | } |
bfaa08b7 RS |
8205 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
8206 | { | |
86a91c0a | 8207 | vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, |
bfaa08b7 RS |
8208 | &dataref_ptr, &vec_offset); |
8209 | inv_p = false; | |
8210 | } | |
74bf76ed JJ |
8211 | else |
8212 | dataref_ptr | |
bffb8014 | 8213 | = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop, |
74bf76ed | 8214 | offset, &dummy, gsi, &ptr_incr, |
356bbc4c | 8215 | simd_lane_access_p, &inv_p, |
ab2fc782 | 8216 | byte_offset, bump); |
c3a8f964 | 8217 | if (mask) |
86a91c0a | 8218 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, |
c3a8f964 | 8219 | mask_vectype); |
74bf76ed | 8220 | } |
ebfd146a | 8221 | else |
c3a8f964 RS |
8222 | { |
8223 | if (dataref_offset) | |
8224 | dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, | |
ab2fc782 | 8225 | bump); |
bfaa08b7 | 8226 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
e4057a39 | 8227 | vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); |
c3a8f964 | 8228 | else |
ab2fc782 | 8229 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8230 | stmt_info, bump); |
c3a8f964 | 8231 | if (mask) |
e4057a39 | 8232 | vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask); |
c3a8f964 | 8233 | } |
ebfd146a | 8234 | |
0d0293ac | 8235 | if (grouped_load || slp_perm) |
9771b263 | 8236 | dr_chain.create (vec_num); |
5ce1ee7f | 8237 | |
2de001ee | 8238 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 8239 | { |
272c6793 RS |
8240 | tree vec_array; |
8241 | ||
8242 | vec_array = create_vector_array (vectype, vec_num); | |
8243 | ||
7cfb4d93 | 8244 | tree final_mask = NULL_TREE; |
70088b95 RS |
8245 | if (loop_masks) |
8246 | final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies, | |
8247 | vectype, j); | |
7cfb4d93 RS |
8248 | if (vec_mask) |
8249 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
8250 | vec_mask, gsi); | |
8251 | ||
7e11fc7f | 8252 | gcall *call; |
7cfb4d93 | 8253 | if (final_mask) |
7e11fc7f RS |
8254 | { |
8255 | /* Emit: | |
8256 | VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR, | |
8257 | VEC_MASK). */ | |
8258 | unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype)); | |
8259 | tree alias_ptr = build_int_cst (ref_type, align); | |
8260 | call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3, | |
8261 | dataref_ptr, alias_ptr, | |
7cfb4d93 | 8262 | final_mask); |
7e11fc7f RS |
8263 | } |
8264 | else | |
8265 | { | |
8266 | /* Emit: | |
8267 | VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */ | |
8268 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); | |
8269 | call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref); | |
8270 | } | |
a844293d RS |
8271 | gimple_call_set_lhs (call, vec_array); |
8272 | gimple_call_set_nothrow (call, true); | |
86a91c0a | 8273 | new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); |
ebfd146a | 8274 | |
272c6793 RS |
8275 | /* Extract each vector into an SSA_NAME. */ |
8276 | for (i = 0; i < vec_num; i++) | |
ebfd146a | 8277 | { |
86a91c0a | 8278 | new_temp = read_vector_array (stmt_info, gsi, scalar_dest, |
272c6793 | 8279 | vec_array, i); |
9771b263 | 8280 | dr_chain.quick_push (new_temp); |
272c6793 RS |
8281 | } |
8282 | ||
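	  /* E.g. (illustrative, group_size == 2, V4SI): the lanes load
	     de-interleaves memory a0 a1 a2 a3 a4 a5 a6 a7 so that
	       dr_chain[0] = {a0, a2, a4, a6}   (lane 0)
	       dr_chain[1] = {a1, a3, a5, a7}   (lane 1)
	     and no separate permutation statements are needed.  */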
8283 | /* Record the mapping between SSA_NAMEs and statements. */ | |
86a91c0a | 8284 | vect_record_grouped_load_vectors (stmt_info, dr_chain); |
3ba4ff41 RS |
8285 | |
8286 | /* Record that VEC_ARRAY is now dead. */ | |
86a91c0a | 8287 | vect_clobber_variable (stmt_info, gsi, vec_array); |
272c6793 RS |
8288 | } |
8289 | else | |
8290 | { | |
8291 | for (i = 0; i < vec_num; i++) | |
8292 | { | |
7cfb4d93 | 8293 | tree final_mask = NULL_TREE; |
70088b95 | 8294 | if (loop_masks |
7cfb4d93 | 8295 | && memory_access_type != VMAT_INVARIANT) |
70088b95 RS |
8296 | final_mask = vect_get_loop_mask (gsi, loop_masks, |
8297 | vec_num * ncopies, | |
7cfb4d93 RS |
8298 | vectype, vec_num * j + i); |
8299 | if (vec_mask) | |
8300 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
8301 | vec_mask, gsi); | |
8302 | ||
272c6793 RS |
8303 | if (i > 0) |
8304 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
86a91c0a | 8305 | stmt_info, bump); |
272c6793 RS |
8306 | |
8307 | /* 2. Create the vector-load in the loop. */ | |
e1bd7296 | 8308 | gimple *new_stmt = NULL; |
272c6793 RS |
8309 | switch (alignment_support_scheme) |
8310 | { | |
8311 | case dr_aligned: | |
8312 | case dr_unaligned_supported: | |
be1ac4ec | 8313 | { |
644ffefd MJ |
8314 | unsigned int align, misalign; |
8315 | ||
bfaa08b7 RS |
8316 | if (memory_access_type == VMAT_GATHER_SCATTER) |
8317 | { | |
8318 | tree scale = size_int (gs_info.scale); | |
8319 | gcall *call; | |
70088b95 | 8320 | if (loop_masks) |
bfaa08b7 RS |
8321 | call = gimple_build_call_internal |
8322 | (IFN_MASK_GATHER_LOAD, 4, dataref_ptr, | |
8323 | vec_offset, scale, final_mask); | |
8324 | else | |
8325 | call = gimple_build_call_internal | |
8326 | (IFN_GATHER_LOAD, 3, dataref_ptr, | |
8327 | vec_offset, scale); | |
8328 | gimple_call_set_nothrow (call, true); | |
8329 | new_stmt = call; | |
8330 | data_ref = NULL_TREE; | |
8331 | break; | |
8332 | } | |
8333 | ||
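		      /* Sketch of the gather built above (hypothetical
			 values): with a V4SI offset vector and scale 4 it
			 corresponds to
			   vect = { *(base + off0*4), *(base + off1*4),
				    *(base + off2*4), *(base + off3*4) }
			 where the masked variant leaves inactive lanes
			 unaccessed.  */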
89fa689a | 8334 | align = DR_TARGET_ALIGNMENT (dr_info); |
272c6793 RS |
8335 | if (alignment_support_scheme == dr_aligned) |
8336 | { | |
89fa689a | 8337 | gcc_assert (aligned_access_p (first_dr_info)); |
644ffefd | 8338 | misalign = 0; |
272c6793 | 8339 | } |
89fa689a | 8340 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
272c6793 | 8341 | { |
89fa689a RS |
8342 | align = dr_alignment |
8343 | (vect_dr_behavior (first_dr_info)); | |
52639a61 | 8344 | misalign = 0; |
272c6793 RS |
8345 | } |
8346 | else | |
89fa689a | 8347 | misalign = DR_MISALIGNMENT (first_dr_info); |
aed93b23 RB |
8348 | if (dataref_offset == NULL_TREE |
8349 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
8350 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), |
8351 | align, misalign); | |
c3a8f964 | 8352 | |
7cfb4d93 | 8353 | if (final_mask) |
c3a8f964 RS |
8354 | { |
8355 | align = least_bit_hwi (misalign | align); | |
8356 | tree ptr = build_int_cst (ref_type, align); | |
8357 | gcall *call | |
8358 | = gimple_build_call_internal (IFN_MASK_LOAD, 3, | |
8359 | dataref_ptr, ptr, | |
7cfb4d93 | 8360 | final_mask); |
c3a8f964 RS |
8361 | gimple_call_set_nothrow (call, true); |
8362 | new_stmt = call; | |
8363 | data_ref = NULL_TREE; | |
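			    /* The call built above appears in dumps roughly
			       as (illustrative)
				 vect_1 = .MASK_LOAD (dataref_ptr, 4B, final_mask);
			       and reads only the active lanes from
			       memory.  */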
8364 | } | |
8365 | else | |
8366 | { | |
8367 | data_ref | |
8368 | = fold_build2 (MEM_REF, vectype, dataref_ptr, | |
8369 | dataref_offset | |
8370 | ? dataref_offset | |
8371 | : build_int_cst (ref_type, 0)); | |
8372 | if (alignment_support_scheme == dr_aligned) | |
8373 | ; | |
89fa689a | 8374 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
c3a8f964 RS |
8375 | TREE_TYPE (data_ref) |
8376 | = build_aligned_type (TREE_TYPE (data_ref), | |
8377 | align * BITS_PER_UNIT); | |
8378 | else | |
8379 | TREE_TYPE (data_ref) | |
8380 | = build_aligned_type (TREE_TYPE (data_ref), | |
8381 | TYPE_ALIGN (elem_type)); | |
8382 | } | |
272c6793 | 8383 | break; |
be1ac4ec | 8384 | } |
272c6793 | 8385 | case dr_explicit_realign: |
267d3070 | 8386 | { |
272c6793 | 8387 | tree ptr, bump; |
272c6793 | 8388 | |
d88981fc | 8389 | tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); |
272c6793 RS |
8390 | |
8391 | if (compute_in_loop) | |
bffb8014 | 8392 | msq = vect_setup_realignment (first_stmt_info, gsi, |
272c6793 RS |
8393 | &realignment_token, |
8394 | dr_explicit_realign, | |
8395 | dataref_ptr, NULL); | |
8396 | ||
aed93b23 RB |
8397 | if (TREE_CODE (dataref_ptr) == SSA_NAME) |
8398 | ptr = copy_ssa_name (dataref_ptr); | |
8399 | else | |
8400 | ptr = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
89fa689a | 8401 | unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info); |
0d0e4a03 JJ |
8402 | new_stmt = gimple_build_assign |
8403 | (ptr, BIT_AND_EXPR, dataref_ptr, | |
272c6793 RS |
8404 | build_int_cst |
8405 | (TREE_TYPE (dataref_ptr), | |
f702e7d4 | 8406 | -(HOST_WIDE_INT) align)); |
86a91c0a | 8407 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8408 | data_ref |
8409 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 8410 | build_int_cst (ref_type, 0)); |
89fa689a | 8411 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
272c6793 RS |
8412 | vec_dest = vect_create_destination_var (scalar_dest, |
8413 | vectype); | |
8414 | new_stmt = gimple_build_assign (vec_dest, data_ref); | |
8415 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
8416 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
8417 | gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt)); |
8418 | gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt)); | |
8419 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
272c6793 RS |
8420 | msq = new_temp; |
8421 | ||
d88981fc | 8422 | bump = size_binop (MULT_EXPR, vs, |
7b7b1813 | 8423 | TYPE_SIZE_UNIT (elem_type)); |
d88981fc | 8424 | bump = size_binop (MINUS_EXPR, bump, size_one_node); |
86a91c0a RS |
8425 | ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, |
8426 | stmt_info, bump); | |
0d0e4a03 JJ |
8427 | new_stmt = gimple_build_assign |
8428 | (NULL_TREE, BIT_AND_EXPR, ptr, | |
272c6793 | 8429 | build_int_cst |
f702e7d4 | 8430 | (TREE_TYPE (ptr), -(HOST_WIDE_INT) align)); |
aed93b23 | 8431 | ptr = copy_ssa_name (ptr, new_stmt); |
272c6793 | 8432 | gimple_assign_set_lhs (new_stmt, ptr); |
86a91c0a | 8433 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8434 | data_ref |
8435 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 8436 | build_int_cst (ref_type, 0)); |
272c6793 | 8437 | break; |
267d3070 | 8438 | } |
272c6793 | 8439 | case dr_explicit_realign_optimized: |
f702e7d4 RS |
8440 | { |
8441 | if (TREE_CODE (dataref_ptr) == SSA_NAME) | |
8442 | new_temp = copy_ssa_name (dataref_ptr); | |
8443 | else | |
8444 | new_temp = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
89fa689a | 8445 | unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info); |
f702e7d4 RS |
8446 | new_stmt = gimple_build_assign |
8447 | (new_temp, BIT_AND_EXPR, dataref_ptr, | |
8448 | build_int_cst (TREE_TYPE (dataref_ptr), | |
8449 | -(HOST_WIDE_INT) align)); | |
86a91c0a | 8450 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
f702e7d4 RS |
8451 | data_ref |
8452 | = build2 (MEM_REF, vectype, new_temp, | |
8453 | build_int_cst (ref_type, 0)); | |
8454 | break; | |
8455 | } | |
272c6793 RS |
8456 | default: |
8457 | gcc_unreachable (); | |
8458 | } | |
ebfd146a | 8459 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
c3a8f964 RS |
8460 | /* DATA_REF is null if we've already built the statement. */ |
8461 | if (data_ref) | |
19986382 | 8462 | { |
89fa689a | 8463 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
19986382 RB |
8464 | new_stmt = gimple_build_assign (vec_dest, data_ref); |
8465 | } | |
ebfd146a | 8466 | new_temp = make_ssa_name (vec_dest, new_stmt); |
c3a8f964 | 8467 | gimple_set_lhs (new_stmt, new_temp); |
e1bd7296 | 8468 | new_stmt_info |
86a91c0a | 8469 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
ebfd146a | 8470 | |
272c6793 RS |
8471 | /* 3. Handle explicit realignment if necessary/supported. |
8472 | Create in loop: | |
8473 | vec_dest = realign_load (msq, lsq, realignment_token) */ | |
8474 | if (alignment_support_scheme == dr_explicit_realign_optimized | |
8475 | || alignment_support_scheme == dr_explicit_realign) | |
ebfd146a | 8476 | { |
272c6793 RS |
8477 | lsq = gimple_assign_lhs (new_stmt); |
8478 | if (!realignment_token) | |
8479 | realignment_token = dataref_ptr; | |
8480 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
0d0e4a03 JJ |
8481 | new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR, |
8482 | msq, lsq, realignment_token); | |
272c6793 RS |
8483 | new_temp = make_ssa_name (vec_dest, new_stmt); |
8484 | gimple_assign_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 8485 | new_stmt_info |
86a91c0a | 8486 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8487 | |
8488 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
8489 | { | |
8490 | gcc_assert (phi); | |
8491 | if (i == vec_num - 1 && j == ncopies - 1) | |
8492 | add_phi_arg (phi, lsq, | |
8493 | loop_latch_edge (containing_loop), | |
9e227d60 | 8494 | UNKNOWN_LOCATION); |
272c6793 RS |
8495 | msq = lsq; |
8496 | } | |
ebfd146a | 8497 | } |
ebfd146a | 8498 | |
59fd17e3 RB |
8499 | /* 4. Handle invariant-load. */ |
8500 | if (inv_p && !bb_vinfo) | |
8501 | { | |
59fd17e3 | 8502 | gcc_assert (!grouped_load); |
d1417442 JJ |
8503 | /* If we have versioned for aliasing or the loop doesn't |
8504 | have any data dependencies that would preclude this, | |
8505 | then we are sure this is a loop invariant load and | |
8506 | thus we can insert it on the preheader edge. */ | |
8507 | if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) | |
8508 | && !nested_in_vect_loop | |
86a91c0a | 8509 | && hoist_defs_of_uses (stmt_info, loop)) |
a0e35eb0 | 8510 | { |
32e8e429 | 8511 | gassign *stmt = as_a <gassign *> (stmt_info->stmt); |
a0e35eb0 RB |
8512 | if (dump_enabled_p ()) |
8513 | { | |
8514 | dump_printf_loc (MSG_NOTE, vect_location, | |
8515 | "hoisting out of the vectorized " | |
8516 | "loop: "); | |
8517 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
a0e35eb0 | 8518 | } |
b731b390 | 8519 | tree tem = copy_ssa_name (scalar_dest); |
a0e35eb0 RB |
8520 | gsi_insert_on_edge_immediate |
8521 | (loop_preheader_edge (loop), | |
8522 | gimple_build_assign (tem, | |
8523 | unshare_expr | |
8524 | (gimple_assign_rhs1 (stmt)))); | |
86a91c0a RS |
8525 | new_temp = vect_init_vector (stmt_info, tem, |
8526 | vectype, NULL); | |
34cd48e5 | 8527 | new_stmt = SSA_NAME_DEF_STMT (new_temp); |
e1bd7296 | 8528 | new_stmt_info = vinfo->add_stmt (new_stmt); |
a0e35eb0 RB |
8529 | } |
8530 | else | |
8531 | { | |
8532 | gimple_stmt_iterator gsi2 = *gsi; | |
8533 | gsi_next (&gsi2); | |
86a91c0a | 8534 | new_temp = vect_init_vector (stmt_info, scalar_dest, |
a0e35eb0 | 8535 | vectype, &gsi2); |
e1bd7296 | 8536 | new_stmt_info = vinfo->lookup_def (new_temp); |
a0e35eb0 | 8537 | } |
59fd17e3 RB |
8538 | } |
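	      /* Sketch of the hoisting case above for a hypothetical loop
		   for (i = 0; i < n; ++i) a[i] = *p + 1;
		 with *p invariant and no blocking dependences: the scalar
		 load and its splat
		   tem_1 = *p;
		   vect_cst_2 = {tem_1, tem_1, tem_1, tem_1};
		 are emitted once on the preheader edge instead of being
		 re-executed inside the vectorized loop.  */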
8539 | ||
62da9e14 | 8540 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
272c6793 | 8541 | { |
aec7ae7d JJ |
8542 | tree perm_mask = perm_mask_for_reverse (vectype); |
8543 | new_temp = permute_vec_elements (new_temp, new_temp, | |
86a91c0a | 8544 | perm_mask, stmt_info, gsi); |
e1bd7296 | 8545 | new_stmt_info = vinfo->lookup_def (new_temp); |
ebfd146a | 8546 | } |
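	  /* E.g. for V4SI the reverse mask is {3, 2, 1, 0}, turning the
	     loaded {a0, a1, a2, a3} into {a3, a2, a1, a0} to implement the
	     negative-stride access.  */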
267d3070 | 8547 | |
272c6793 | 8548 | /* Collect vector loads and later create their permutation in |
0d0293ac MM |
8549 | vect_transform_grouped_load (). */ |
8550 | if (grouped_load || slp_perm) | |
9771b263 | 8551 | dr_chain.quick_push (new_temp); |
267d3070 | 8552 | |
272c6793 RS |
8553 | /* Store vector loads in the corresponding SLP_NODE. */ |
8554 | if (slp && !slp_perm) | |
e1bd7296 | 8555 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
b267968e RB |
8556 | |
8557 | /* With SLP permutation we load the gaps as well; without it |
8558 | we need to skip the gaps after we manage to fully load | |
2c53b149 | 8559 | all elements. group_gap_adj is DR_GROUP_SIZE here. */ |
b267968e | 8560 | group_elt += nunits; |
d9f21f6a RS |
8561 | if (maybe_ne (group_gap_adj, 0U) |
8562 | && !slp_perm | |
8563 | && known_eq (group_elt, group_size - group_gap_adj)) | |
b267968e | 8564 | { |
d9f21f6a RS |
8565 | poly_wide_int bump_val |
8566 | = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) | |
8567 | * group_gap_adj); | |
8e6cdc90 | 8568 | tree bump = wide_int_to_tree (sizetype, bump_val); |
b267968e | 8569 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8570 | stmt_info, bump); |
b267968e RB |
8571 | group_elt = 0; |
8572 | } | |
272c6793 | 8573 | } |
9b999e8c RB |
8574 | /* Bump the vector pointer to account for a gap or for excess |
8575 | elements loaded for a permuted SLP load. */ | |
d9f21f6a | 8576 | if (maybe_ne (group_gap_adj, 0U) && slp_perm) |
a64b9c26 | 8577 | { |
d9f21f6a RS |
8578 | poly_wide_int bump_val |
8579 | = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) | |
8580 | * group_gap_adj); | |
8e6cdc90 | 8581 | tree bump = wide_int_to_tree (sizetype, bump_val); |
a64b9c26 | 8582 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8583 | stmt_info, bump); |
a64b9c26 | 8584 | } |
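	  /* E.g. (hypothetical): a permuted SLP load of a 3-element group
	     with VF == 2 and V4SI vectors reads CEIL (6, 4) == 2 vectors,
	     i.e. 8 elements for the 6 that are wanted; GROUP_GAP_ADJ
	     records the excess and the bump above moves the pointer so
	     the next access starts in the right place.  */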
ebfd146a IR |
8585 | } |
8586 | ||
8587 | if (slp && !slp_perm) | |
8588 | continue; | |
8589 | ||
8590 | if (slp_perm) | |
8591 | { | |
29afecdf | 8592 | unsigned n_perms; |
01d8bf07 | 8593 | if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, |
29afecdf RB |
8594 | slp_node_instance, false, |
8595 | &n_perms)) | |
ebfd146a | 8596 | { |
9771b263 | 8597 | dr_chain.release (); |
ebfd146a IR |
8598 | return false; |
8599 | } | |
8600 | } | |
8601 | else | |
8602 | { | |
0d0293ac | 8603 | if (grouped_load) |
ebfd146a | 8604 | { |
2de001ee | 8605 | if (memory_access_type != VMAT_LOAD_STORE_LANES) |
86a91c0a RS |
8606 | vect_transform_grouped_load (stmt_info, dr_chain, |
8607 | group_size, gsi); | |
ebfd146a | 8608 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); |
ebfd146a IR |
8609 | } |
8610 | else | |
8611 | { | |
8612 | if (j == 0) | |
e1bd7296 | 8613 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
ebfd146a | 8614 | else |
e1bd7296 RS |
8615 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
8616 | prev_stmt_info = new_stmt_info; | |
ebfd146a IR |
8617 | } |
8618 | } | |
9771b263 | 8619 | dr_chain.release (); |
ebfd146a IR |
8620 | } |
8621 | ||
ebfd146a IR |
8622 | return true; |
8623 | } | |
8624 | ||
8625 | /* Function vect_is_simple_cond. | |
b8698a0f | 8626 | |
ebfd146a IR |
8627 | Input: |
8628 | LOOP - the loop that is being vectorized. | |
8629 | COND - Condition that is checked for simple use. | |
8630 | ||
e9e1d143 RG |
8631 | Output: |
8632 | *COMP_VECTYPE - the vector type for the comparison. | |
4fc5ebf1 | 8633 | *DTS - The def types for the arguments of the comparison |
e9e1d143 | 8634 | |
ebfd146a IR |
8635 | Returns whether a COND can be vectorized. Checks whether |
8636 | condition operands are supportable using vect_is_simple_use. */ |
8637 | ||
87aab9b2 | 8638 | static bool |
4fc5ebf1 | 8639 | vect_is_simple_cond (tree cond, vec_info *vinfo, |
8da4c8d8 RB |
8640 | tree *comp_vectype, enum vect_def_type *dts, |
8641 | tree vectype) | |
ebfd146a IR |
8642 | { |
8643 | tree lhs, rhs; | |
e9e1d143 | 8644 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
ebfd146a | 8645 | |
a414c77f IE |
8646 | /* Mask case. */ |
8647 | if (TREE_CODE (cond) == SSA_NAME | |
2568d8a1 | 8648 | && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond))) |
a414c77f | 8649 | { |
894dd753 | 8650 | if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype) |
a414c77f IE |
8651 | || !*comp_vectype |
8652 | || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype)) | |
8653 | return false; | |
8654 | return true; | |
8655 | } | |
8656 | ||
ebfd146a IR |
8657 | if (!COMPARISON_CLASS_P (cond)) |
8658 | return false; | |
8659 | ||
8660 | lhs = TREE_OPERAND (cond, 0); | |
8661 | rhs = TREE_OPERAND (cond, 1); | |
8662 | ||
8663 | if (TREE_CODE (lhs) == SSA_NAME) | |
8664 | { | |
894dd753 | 8665 | if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1)) |
ebfd146a IR |
8666 | return false; |
8667 | } | |
4fc5ebf1 JG |
8668 | else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST |
8669 | || TREE_CODE (lhs) == FIXED_CST) | |
8670 | dts[0] = vect_constant_def; | |
8671 | else | |
ebfd146a IR |
8672 | return false; |
8673 | ||
8674 | if (TREE_CODE (rhs) == SSA_NAME) | |
8675 | { | |
894dd753 | 8676 | if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2)) |
ebfd146a IR |
8677 | return false; |
8678 | } | |
4fc5ebf1 JG |
8679 | else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST |
8680 | || TREE_CODE (rhs) == FIXED_CST) | |
8681 | dts[1] = vect_constant_def; | |
8682 | else | |
ebfd146a IR |
8683 | return false; |
8684 | ||
28b33016 | 8685 | if (vectype1 && vectype2 |
928686b1 RS |
8686 | && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1), |
8687 | TYPE_VECTOR_SUBPARTS (vectype2))) | |
28b33016 IE |
8688 | return false; |
8689 | ||
e9e1d143 | 8690 | *comp_vectype = vectype1 ? vectype1 : vectype2; |
8da4c8d8 | 8691 | /* Invariant comparison. */ |
4515e413 | 8692 | if (! *comp_vectype && vectype) |
8da4c8d8 RB |
8693 | { |
8694 | tree scalar_type = TREE_TYPE (lhs); | |
8695 | /* If we can widen the comparison to match vectype do so. */ | |
8696 | if (INTEGRAL_TYPE_P (scalar_type) | |
8697 | && tree_int_cst_lt (TYPE_SIZE (scalar_type), | |
8698 | TYPE_SIZE (TREE_TYPE (vectype)))) | |
8699 | scalar_type = build_nonstandard_integer_type | |
8700 | (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))), | |
8701 | TYPE_UNSIGNED (scalar_type)); | |
8702 | *comp_vectype = get_vectype_for_scalar_type (scalar_type); | |
8703 | } | |
8704 | ||
ebfd146a IR |
8705 | return true; |
8706 | } | |
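/* Example of the invariant-comparison widening above (illustrative): for
   a COND_EXPR comparing 16-bit values but selecting between 64-bit ones
   with VECTYPE vector(2) long, the comparison type is widened to a
   64-bit integer so that *COMP_VECTYPE has the same number of lanes as
   the result vector.  */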
8707 | ||
8708 | /* vectorizable_condition. | |
8709 | ||
32e8e429 RS |
8710 | Check if STMT_INFO is a conditional modify expression that can be vectorized. |
8711 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
b8698a0f | 8712 | stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it |
4bbe8262 IR |
8713 | at GSI. |
8714 | ||
32e8e429 RS |
8715 | When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector |
8716 | variable to be used at REDUC_INDEX (in the then clause if REDUC_INDEX |
8717 | is 1, and in the else clause if it is 2). |
ebfd146a | 8718 | |
32e8e429 | 8719 | Return true if STMT_INFO is vectorizable in this way. */ |
ebfd146a | 8720 | |
4bbe8262 | 8721 | bool |
32e8e429 | 8722 | vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
8723 | stmt_vec_info *vec_stmt, tree reduc_def, |
8724 | int reduc_index, slp_tree slp_node, | |
8725 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 8726 | { |
e4057a39 | 8727 | vec_info *vinfo = stmt_info->vinfo; |
ebfd146a IR |
8728 | tree scalar_dest = NULL_TREE; |
8729 | tree vec_dest = NULL_TREE; | |
01216d27 JJ |
8730 | tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE; |
8731 | tree then_clause, else_clause; | |
df11cc78 | 8732 | tree comp_vectype = NULL_TREE; |
ff802fa1 IR |
8733 | tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE; |
8734 | tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE; | |
5958f9e2 | 8735 | tree vec_compare; |
ebfd146a IR |
8736 | tree new_temp; |
8737 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4fc5ebf1 JG |
8738 | enum vect_def_type dts[4] |
8739 | = {vect_unknown_def_type, vect_unknown_def_type, | |
8740 | vect_unknown_def_type, vect_unknown_def_type}; | |
8741 | int ndts = 4; | |
f7e531cf | 8742 | int ncopies; |
01216d27 | 8743 | enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
a855b1b1 | 8744 | stmt_vec_info prev_stmt_info = NULL; |
f7e531cf IR |
8745 | int i, j; |
8746 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
6e1aa848 DN |
8747 | vec<tree> vec_oprnds0 = vNULL; |
8748 | vec<tree> vec_oprnds1 = vNULL; | |
8749 | vec<tree> vec_oprnds2 = vNULL; | |
8750 | vec<tree> vec_oprnds3 = vNULL; | |
74946978 | 8751 | tree vec_cmp_type; |
a414c77f | 8752 | bool masked = false; |
b8698a0f | 8753 | |
f7e531cf IR |
8754 | if (reduc_index && STMT_SLP_TYPE (stmt_info)) |
8755 | return false; | |
8756 | ||
bb6c2b68 RS |
8757 | vect_reduction_type reduction_type |
8758 | = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info); | |
8759 | if (reduction_type == TREE_CODE_REDUCTION) | |
af29617a AH |
8760 | { |
8761 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
8762 | return false; | |
ebfd146a | 8763 | |
af29617a AH |
8764 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
8765 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
8766 | && reduc_def)) | |
8767 | return false; | |
ebfd146a | 8768 | |
af29617a AH |
8769 | /* FORNOW: not yet supported. */ |
8770 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
8771 | { | |
8772 | if (dump_enabled_p ()) | |
8773 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
8774 | "value used after loop.\n"); | |
8775 | return false; | |
8776 | } | |
ebfd146a IR |
8777 | } |
8778 | ||
8779 | /* Is vectorizable conditional operation? */ | |
32e8e429 RS |
8780 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
8781 | if (!stmt) | |
ebfd146a IR |
8782 | return false; |
8783 | ||
8784 | code = gimple_assign_rhs_code (stmt); | |
8785 | ||
8786 | if (code != COND_EXPR) | |
8787 | return false; | |
8788 | ||
465c8c19 | 8789 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
2947d3b2 | 8790 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
465c8c19 | 8791 | |
fce57248 | 8792 | if (slp_node) |
465c8c19 JJ |
8793 | ncopies = 1; |
8794 | else | |
e8f142e2 | 8795 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
465c8c19 JJ |
8796 | |
8797 | gcc_assert (ncopies >= 1); | |
8798 | if (reduc_index && ncopies > 1) | |
8799 | return false; /* FORNOW */ | |
8800 | ||
4e71066d RG |
8801 | cond_expr = gimple_assign_rhs1 (stmt); |
8802 | then_clause = gimple_assign_rhs2 (stmt); | |
8803 | else_clause = gimple_assign_rhs3 (stmt); | |
ebfd146a | 8804 | |
4fc5ebf1 | 8805 | if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, |
4515e413 | 8806 | &comp_vectype, &dts[0], slp_node ? NULL : vectype) |
e9e1d143 | 8807 | || !comp_vectype) |
ebfd146a IR |
8808 | return false; |
8809 | ||
894dd753 | 8810 | if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1)) |
2947d3b2 | 8811 | return false; |
894dd753 | 8812 | if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2)) |
ebfd146a | 8813 | return false; |
2947d3b2 IE |
8814 | |
8815 | if (vectype1 && !useless_type_conversion_p (vectype, vectype1)) | |
8816 | return false; | |
8817 | ||
8818 | if (vectype2 && !useless_type_conversion_p (vectype, vectype2)) | |
ebfd146a IR |
8819 | return false; |
8820 | ||
28b33016 IE |
8821 | masked = !COMPARISON_CLASS_P (cond_expr); |
8822 | vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype); | |
8823 | ||
74946978 MP |
8824 | if (vec_cmp_type == NULL_TREE) |
8825 | return false; | |
784fb9b3 | 8826 | |
01216d27 JJ |
8827 | cond_code = TREE_CODE (cond_expr); |
8828 | if (!masked) | |
8829 | { | |
8830 | cond_expr0 = TREE_OPERAND (cond_expr, 0); | |
8831 | cond_expr1 = TREE_OPERAND (cond_expr, 1); | |
8832 | } | |
8833 | ||
8834 | if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype)) | |
8835 | { | |
8836 | /* Boolean values may have another representation in vectors | |
8837 | and therefore we prefer bit operations over comparison for | |
8838 | them (which also works for scalar masks). We store opcodes | |
8839 | to use in bitop1 and bitop2. Statement is vectorized as | |
8840 | BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2) | |
8841 | depending on bitop1 and bitop2 arity. */ | |
8842 | switch (cond_code) | |
8843 | { | |
8844 | case GT_EXPR: | |
8845 | bitop1 = BIT_NOT_EXPR; | |
8846 | bitop2 = BIT_AND_EXPR; | |
8847 | break; | |
8848 | case GE_EXPR: | |
8849 | bitop1 = BIT_NOT_EXPR; | |
8850 | bitop2 = BIT_IOR_EXPR; | |
8851 | break; | |
8852 | case LT_EXPR: | |
8853 | bitop1 = BIT_NOT_EXPR; | |
8854 | bitop2 = BIT_AND_EXPR; | |
8855 | std::swap (cond_expr0, cond_expr1); | |
8856 | break; | |
8857 | case LE_EXPR: | |
8858 | bitop1 = BIT_NOT_EXPR; | |
8859 | bitop2 = BIT_IOR_EXPR; | |
8860 | std::swap (cond_expr0, cond_expr1); | |
8861 | break; | |
8862 | case NE_EXPR: | |
8863 | bitop1 = BIT_XOR_EXPR; | |
8864 | break; | |
8865 | case EQ_EXPR: | |
8866 | bitop1 = BIT_XOR_EXPR; | |
8867 | bitop2 = BIT_NOT_EXPR; | |
8868 | break; | |
8869 | default: | |
8870 | return false; | |
8871 | } | |
8872 | cond_code = SSA_NAME; | |
8873 | } | |
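  /* Worked identities behind the mapping above (booleans are 0/1,
     applied lane-wise):
       a >  b  <->  a & ~b	  a >= b  <->  a | ~b
       a <  b  <->  b & ~a	  a <= b  <->  b | ~a	(operands swapped)
       a != b  <->  a ^ b	  a == b  <->  ~(a ^ b)
     matching the BITOP2 (rhs1 BITOP1 rhs2) / rhs1 BITOP2 (BITOP1 rhs2)
     forms for the chosen bitop1/bitop2.  */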
8874 | ||
b8698a0f | 8875 | if (!vec_stmt) |
ebfd146a | 8876 | { |
01216d27 JJ |
8877 | if (bitop1 != NOP_EXPR) |
8878 | { | |
8879 | machine_mode mode = TYPE_MODE (comp_vectype); | |
8880 | optab optab; | |
8881 | ||
8882 | optab = optab_for_tree_code (bitop1, comp_vectype, optab_default); | |
8883 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8884 | return false; | |
8885 | ||
8886 | if (bitop2 != NOP_EXPR) | |
8887 | { | |
8888 | optab = optab_for_tree_code (bitop2, comp_vectype, | |
8889 | optab_default); | |
8890 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8891 | return false; | |
8892 | } | |
8893 | } | |
4fc5ebf1 JG |
8894 | if (expand_vec_cond_expr_p (vectype, comp_vectype, |
8895 | cond_code)) | |
8896 | { | |
68435eb2 RB |
8897 | STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type; |
8898 | vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node, | |
8899 | cost_vec); | |
4fc5ebf1 JG |
8900 | return true; |
8901 | } | |
8902 | return false; | |
ebfd146a IR |
8903 | } |
8904 | ||
f7e531cf IR |
8905 | /* Transform. */ |
8906 | ||
8907 | if (!slp_node) | |
8908 | { | |
9771b263 DN |
8909 | vec_oprnds0.create (1); |
8910 | vec_oprnds1.create (1); | |
8911 | vec_oprnds2.create (1); | |
8912 | vec_oprnds3.create (1); | |
f7e531cf | 8913 | } |
ebfd146a IR |
8914 | |
8915 | /* Handle def. */ | |
8916 | scalar_dest = gimple_assign_lhs (stmt); | |
bb6c2b68 RS |
8917 | if (reduction_type != EXTRACT_LAST_REDUCTION) |
8918 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
ebfd146a IR |
8919 | |
8920 | /* Handle cond expr. */ | |
a855b1b1 MM |
8921 | for (j = 0; j < ncopies; j++) |
8922 | { | |
e1bd7296 | 8923 | stmt_vec_info new_stmt_info = NULL; |
a855b1b1 MM |
8924 | if (j == 0) |
8925 | { | |
f7e531cf IR |
8926 | if (slp_node) |
8927 | { | |
00f96dc9 TS |
8928 | auto_vec<tree, 4> ops; |
8929 | auto_vec<vec<tree>, 4> vec_defs; | |
9771b263 | 8930 | |
a414c77f | 8931 | if (masked) |
01216d27 | 8932 | ops.safe_push (cond_expr); |
a414c77f IE |
8933 | else |
8934 | { | |
01216d27 JJ |
8935 | ops.safe_push (cond_expr0); |
8936 | ops.safe_push (cond_expr1); | |
a414c77f | 8937 | } |
9771b263 DN |
8938 | ops.safe_push (then_clause); |
8939 | ops.safe_push (else_clause); | |
306b0c92 | 8940 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
37b5ec8f JJ |
8941 | vec_oprnds3 = vec_defs.pop (); |
8942 | vec_oprnds2 = vec_defs.pop (); | |
a414c77f IE |
8943 | if (!masked) |
8944 | vec_oprnds1 = vec_defs.pop (); | |
37b5ec8f | 8945 | vec_oprnds0 = vec_defs.pop (); |
f7e531cf IR |
8946 | } |
8947 | else | |
8948 | { | |
a414c77f IE |
8949 | if (masked) |
8950 | { | |
8951 | vec_cond_lhs | |
86a91c0a | 8952 | = vect_get_vec_def_for_operand (cond_expr, stmt_info, |
a414c77f | 8953 | comp_vectype); |
894dd753 | 8954 | vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]); |
a414c77f IE |
8955 | } |
8956 | else | |
8957 | { | |
01216d27 JJ |
8958 | vec_cond_lhs |
8959 | = vect_get_vec_def_for_operand (cond_expr0, | |
86a91c0a | 8960 | stmt_info, comp_vectype); |
894dd753 | 8961 | vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]); |
01216d27 JJ |
8962 | |
8963 | vec_cond_rhs | |
8964 | = vect_get_vec_def_for_operand (cond_expr1, | |
86a91c0a | 8965 | stmt_info, comp_vectype); |
894dd753 | 8966 | vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]); |
a414c77f | 8967 | } |
f7e531cf IR |
8968 | if (reduc_index == 1) |
8969 | vec_then_clause = reduc_def; | |
8970 | else | |
8971 | { | |
8972 | vec_then_clause = vect_get_vec_def_for_operand (then_clause, | |
86a91c0a | 8973 | stmt_info); |
894dd753 | 8974 | vect_is_simple_use (then_clause, loop_vinfo, &dts[2]); |
f7e531cf IR |
8975 | } |
8976 | if (reduc_index == 2) | |
8977 | vec_else_clause = reduc_def; | |
8978 | else | |
8979 | { | |
8980 | vec_else_clause = vect_get_vec_def_for_operand (else_clause, | |
86a91c0a | 8981 | stmt_info); |
894dd753 | 8982 | vect_is_simple_use (else_clause, loop_vinfo, &dts[3]); |
f7e531cf | 8983 | } |
a855b1b1 MM |
8984 | } |
8985 | } | |
8986 | else | |
8987 | { | |
a414c77f | 8988 | vec_cond_lhs |
e4057a39 | 8989 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ()); |
a414c77f IE |
8990 | if (!masked) |
8991 | vec_cond_rhs | |
e4057a39 | 8992 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ()); |
a414c77f | 8993 | |
e4057a39 | 8994 | vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 8995 | vec_oprnds2.pop ()); |
e4057a39 | 8996 | vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 8997 | vec_oprnds3.pop ()); |
f7e531cf IR |
8998 | } |
8999 | ||
9000 | if (!slp_node) | |
9001 | { | |
9771b263 | 9002 | vec_oprnds0.quick_push (vec_cond_lhs); |
a414c77f IE |
9003 | if (!masked) |
9004 | vec_oprnds1.quick_push (vec_cond_rhs); | |
9771b263 DN |
9005 | vec_oprnds2.quick_push (vec_then_clause); |
9006 | vec_oprnds3.quick_push (vec_else_clause); | |
a855b1b1 MM |
9007 | } |
9008 | ||
9dc3f7de | 9009 | /* Arguments are ready. Create the new vector stmt. */ |
9771b263 | 9010 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs) |
f7e531cf | 9011 | { |
9771b263 DN |
9012 | vec_then_clause = vec_oprnds2[i]; |
9013 | vec_else_clause = vec_oprnds3[i]; | |
a855b1b1 | 9014 | |
a414c77f IE |
9015 | if (masked) |
9016 | vec_compare = vec_cond_lhs; | |
9017 | else | |
9018 | { | |
9019 | vec_cond_rhs = vec_oprnds1[i]; | |
01216d27 JJ |
9020 | if (bitop1 == NOP_EXPR) |
9021 | vec_compare = build2 (cond_code, vec_cmp_type, | |
9022 | vec_cond_lhs, vec_cond_rhs); | |
9023 | else | |
9024 | { | |
9025 | new_temp = make_ssa_name (vec_cmp_type); | |
e1bd7296 | 9026 | gassign *new_stmt; |
01216d27 JJ |
9027 | if (bitop1 == BIT_NOT_EXPR) |
9028 | new_stmt = gimple_build_assign (new_temp, bitop1, | |
9029 | vec_cond_rhs); | |
9030 | else | |
9031 | new_stmt | |
9032 | = gimple_build_assign (new_temp, bitop1, vec_cond_lhs, | |
9033 | vec_cond_rhs); | |
86a91c0a | 9034 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
01216d27 JJ |
9035 | if (bitop2 == NOP_EXPR) |
9036 | vec_compare = new_temp; | |
9037 | else if (bitop2 == BIT_NOT_EXPR) | |
9038 | { | |
9039 | /* Instead of doing ~x ? y : z do x ? z : y. */ | |
9040 | vec_compare = new_temp; | |
9041 | std::swap (vec_then_clause, vec_else_clause); | |
9042 | } | |
9043 | else | |
9044 | { | |
9045 | vec_compare = make_ssa_name (vec_cmp_type); | |
9046 | new_stmt | |
9047 | = gimple_build_assign (vec_compare, bitop2, | |
9048 | vec_cond_lhs, new_temp); | |
86a91c0a | 9049 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
01216d27 JJ |
9050 | } |
9051 | } | |
a414c77f | 9052 | } |
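	  /* For the EXTRACT_LAST_REDUCTION case below, the internal call
	       lhs = .FOLD_EXTRACT_LAST (else_val, mask, vec);
	     yields the last element of VEC whose MASK bit is set, or
	     ELSE_VAL when none is, implementing a conditional reduction
	     such as "res = cond_i ? val_i : res" in a single call
	     (a sketch of the semantics; the actual definition lives in
	     internal-fn.def).  */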
bb6c2b68 RS |
9053 | if (reduction_type == EXTRACT_LAST_REDUCTION) |
9054 | { | |
9055 | if (!is_gimple_val (vec_compare)) | |
9056 | { | |
9057 | tree vec_compare_name = make_ssa_name (vec_cmp_type); | |
e1bd7296 RS |
9058 | gassign *new_stmt = gimple_build_assign (vec_compare_name, |
9059 | vec_compare); | |
86a91c0a | 9060 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 RS |
9061 | vec_compare = vec_compare_name; |
9062 | } | |
9063 | gcc_assert (reduc_index == 2); | |
e1bd7296 | 9064 | gcall *new_stmt = gimple_build_call_internal |
bb6c2b68 RS |
9065 | (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare, |
9066 | vec_then_clause); | |
9067 | gimple_call_set_lhs (new_stmt, scalar_dest); | |
9068 | SSA_NAME_DEF_STMT (scalar_dest) = new_stmt; | |
86a91c0a RS |
9069 | if (stmt_info->stmt == gsi_stmt (*gsi)) |
9070 | new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt); | |
bb6c2b68 RS |
9071 | else |
9072 | { | |
9073 | /* In this case we're moving the definition to later in the | |
9074 | block. That doesn't matter because the only uses of the | |
9075 | lhs are in phi statements. */ | |
86a91c0a RS |
9076 | gimple_stmt_iterator old_gsi |
9077 | = gsi_for_stmt (stmt_info->stmt); | |
bb6c2b68 | 9078 | gsi_remove (&old_gsi, true); |
e1bd7296 | 9079 | new_stmt_info |
86a91c0a | 9080 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 RS |
9081 | } |
9082 | } | |
9083 | else | |
9084 | { | |
9085 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
9086 | gassign *new_stmt |
9087 | = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare, | |
9088 | vec_then_clause, vec_else_clause); | |
9089 | new_stmt_info | |
86a91c0a | 9090 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 | 9091 | } |
f7e531cf | 9092 | if (slp_node) |
e1bd7296 | 9093 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
f7e531cf IR |
9094 | } |
9095 | ||
9096 | if (slp_node) | |
9097 | continue; | |
9098 | ||
e1bd7296 RS |
9099 | if (j == 0) |
9100 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; | |
9101 | else | |
9102 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; | |
f7e531cf | 9103 | |
e1bd7296 | 9104 | prev_stmt_info = new_stmt_info; |
a855b1b1 | 9105 | } |
b8698a0f | 9106 | |
9771b263 DN |
9107 | vec_oprnds0.release (); |
9108 | vec_oprnds1.release (); | |
9109 | vec_oprnds2.release (); | |
9110 | vec_oprnds3.release (); | |
f7e531cf | 9111 | |
ebfd146a IR |
9112 | return true; |
9113 | } | |
9114 | ||
42fd8198 IE |
9115 | /* vectorizable_comparison. |
9116 | ||
32e8e429 RS |
9117 | Check if STMT_INFO is a comparison expression that can be vectorized. |
9118 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
42fd8198 IE |
9119 | comparison, put it in VEC_STMT, and insert it at GSI. |
9120 | ||
32e8e429 | 9121 | Return true if STMT_INFO is vectorizable in this way. */ |
42fd8198 | 9122 | |
fce57248 | 9123 | static bool |
32e8e429 | 9124 | vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 9125 | stmt_vec_info *vec_stmt, tree reduc_def, |
68435eb2 | 9126 | slp_tree slp_node, stmt_vector_for_cost *cost_vec) |
42fd8198 | 9127 | { |
e4057a39 | 9128 | vec_info *vinfo = stmt_info->vinfo; |
42fd8198 | 9129 | tree lhs, rhs1, rhs2; |
42fd8198 IE |
9130 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
9131 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
9132 | tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE; | |
9133 | tree new_temp; | |
9134 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
9135 | enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 9136 | int ndts = 2; |
928686b1 | 9137 | poly_uint64 nunits; |
42fd8198 | 9138 | int ncopies; |
49e76ff1 | 9139 | enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
42fd8198 IE |
9140 | stmt_vec_info prev_stmt_info = NULL; |
9141 | int i, j; | |
9142 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
9143 | vec<tree> vec_oprnds0 = vNULL; | |
9144 | vec<tree> vec_oprnds1 = vNULL; | |
42fd8198 IE |
9145 | tree mask_type; |
9146 | tree mask; | |
9147 | ||
c245362b IE |
9148 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
9149 | return false; | |
9150 | ||
30480bcd | 9151 | if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype)) |
42fd8198 IE |
9152 | return false; |
9153 | ||
9154 | mask_type = vectype; | |
9155 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
9156 | ||
fce57248 | 9157 | if (slp_node) |
42fd8198 IE |
9158 | ncopies = 1; |
9159 | else | |
e8f142e2 | 9160 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
42fd8198 IE |
9161 | |
9162 | gcc_assert (ncopies >= 1); | |
42fd8198 IE |
9163 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
9164 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
9165 | && reduc_def)) | |
9166 | return false; | |
9167 | ||
9168 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
9169 | { | |
9170 | if (dump_enabled_p ()) | |
9171 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
9172 | "value used after loop.\n"); | |
9173 | return false; | |
9174 | } | |
9175 | ||
32e8e429 RS |
9176 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
9177 | if (!stmt) | |
42fd8198 IE |
9178 | return false; |
9179 | ||
9180 | code = gimple_assign_rhs_code (stmt); | |
9181 | ||
9182 | if (TREE_CODE_CLASS (code) != tcc_comparison) | |
9183 | return false; | |
9184 | ||
9185 | rhs1 = gimple_assign_rhs1 (stmt); | |
9186 | rhs2 = gimple_assign_rhs2 (stmt); | |
9187 | ||
894dd753 | 9188 | if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1)) |
42fd8198 IE |
9189 | return false; |
9190 | ||
894dd753 | 9191 | if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2)) |
42fd8198 IE |
9192 | return false; |
9193 | ||
9194 | if (vectype1 && vectype2 | |
928686b1 RS |
9195 | && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1), |
9196 | TYPE_VECTOR_SUBPARTS (vectype2))) | |
42fd8198 IE |
9197 | return false; |
9198 | ||
9199 | vectype = vectype1 ? vectype1 : vectype2; | |
9200 | ||
9201 | /* Invariant comparison. */ | |
9202 | if (!vectype) | |
9203 | { | |
69a9a66f | 9204 | vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1)); |
928686b1 | 9205 | if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits)) |
42fd8198 IE |
9206 | return false; |
9207 | } | |
928686b1 | 9208 | else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype))) |
42fd8198 IE |
9209 | return false; |
9210 | ||
49e76ff1 IE |
9211 | /* Can't compare mask and non-mask types. */ |
9212 | if (vectype1 && vectype2 | |
9213 | && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2))) | |
9214 | return false; | |
9215 | ||
9216 | /* Boolean values may have another representation in vectors | |
9217 | and therefore we prefer bit operations over comparison for | |
9218 | them (which also works for scalar masks). We store opcodes | |
9219 | to use in bitop1 and bitop2. Statement is vectorized as | |
9220 | BITOP2 (rhs1 BITOP1 rhs2) or | |
9221 | rhs1 BITOP2 (BITOP1 rhs2) | |
9222 | depending on bitop1 and bitop2 arity. */ | |
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    {
      if (code == GT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	}
      else if (code == GE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	}
      else if (code == LT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else if (code == LE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else
	{
	  bitop1 = BIT_XOR_EXPR;
	  if (code == EQ_EXPR)
	    bitop2 = BIT_NOT_EXPR;
	}
    }

  if (!vec_stmt)
    {
      if (bitop1 == NOP_EXPR)
	{
	  if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
	    return false;
	}
      else
	{
	  machine_mode mode = TYPE_MODE (vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	}

      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, slp_node, cost_vec);
      return true;
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      stmt_vec_info new_stmt_info = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
						       vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
						       vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo,
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo,
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	    {
	      gassign *new_stmt = gimple_build_assign (new_temp, code,
						       vec_rhs1, vec_rhs2);
	      new_stmt_info
		= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	    }
	  else
	    {
	      gassign *new_stmt;
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
	      else
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
						vec_rhs2);
	      new_stmt_info
		= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		{
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		  else
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
						    new_temp);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		}
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

      prev_stmt_info = new_stmt_info;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}

/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT_INFO is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, stmt_vec_info *vec_stmt,
			  stmt_vector_for_cost *cost_vec)
{
  if (slp_node)
    {
      stmt_vec_info slp_stmt_info;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
	{
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
					       vec_stmt, cost_vec))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (stmt_info)
	   && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
					    vec_stmt, cost_vec))
    return false;

  return true;
}

/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
		   slp_tree node, slp_instance node_instance,
		   stmt_vector_for_cost *cost_vec)
{
  vec_info *vinfo = stmt_info->vinfo;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt_info->stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info pattern_def_stmt_info
	    = vinfo->lookup_stmt (gsi_stmt (si));
	  if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
	      || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
				    pattern_def_stmt_info->stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt_info,
				      need_to_vectorize, node, node_instance,
				      cost_vec))
		return false;
	    }
	}
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original
     statement if the original statement is not relevant.  Otherwise, we
     analyze both statements.  In basic blocks we are called from some SLP
     instance traversal; there we do not analyze pattern stmts instead,
     because the pattern stmts are already part of the SLP instance.  */
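  /* For example (illustrative): when pattern recognition has replaced a
     widening multiplication such as
	 t1 = (int) a;  t2 = (int) b;  prod = t1 * t2;
     by a single WIDEN_MULT_EXPR pattern statement, the original multiply
     is marked irrelevant and the pattern statement is analyzed in its
     stead by the code below.  */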

  stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt_info
	  && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
	      || STMT_VINFO_LIVE_P (pattern_stmt_info)))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt_info = pattern_stmt_info;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt_info
	   && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
	       || STMT_VINFO_LIVE_P (pattern_stmt_info)))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
			      node_instance, cost_vec))
	return false;
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      tree type = gimple_expr_type (stmt_info->stmt);
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (call && gimple_call_lhs (call) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
				cost_vec)
	  || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_reduction (stmt_info, NULL, NULL, node,
				     node_instance, cost_vec)
	  || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
				     cost_vec)
	  || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
				      cost_vec));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
					    cost_vec)
	      || vectorizable_conversion (stmt_info, NULL, NULL, node,
					  cost_vec)
	      || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_assignment (stmt_info, NULL, NULL, node,
					  cost_vec)
	      || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
				    cost_vec)
	      || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
					 cost_vec)
	      || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
					  cost_vec));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			    stmt_info->stmt, 0);
	}

      return false;
    }

  /* Stmts that are (also) "live" (i.e. used outside of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (!bb_vinfo
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			    stmt_info->stmt, 0);
	}

      return false;
    }

  return true;
}


/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT_INFO, and insert it at GSI.  */

bool
vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  vec_info *vinfo = stmt_info->vinfo;
  bool is_store = false;
  stmt_vec_info vec_stmt = NULL;
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);

  bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
		   && nested_in_vect_loop_p
			(LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
			 stmt_info));

  gimple *stmt = stmt_info->stmt;
  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
				      NULL);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
				     NULL);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
				     NULL);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
				      NULL);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
				slp_node_instance, NULL);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their stmt_vec_info shouldn't be freed
	     meanwhile.  */
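	  /* For instance, with a group of four consecutive stores, the
	     calls for the first three stores leave DR_GROUP_STORE_COUNT
	     below DR_GROUP_SIZE and emit no vector code; only the call
	     for the fourth store generates the vector stores for the
	     whole chain.  */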
	  *grouped_store = true;
	  stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
	  if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0,
				     slp_node, NULL);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL,
				      slp_node, NULL);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      stmt = gsi_stmt (*gsi);
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
					   slp_node, NULL);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
				     slp_node_instance, NULL);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && nested_p
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info)
	     == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
	STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	  {
	    stmt_vec_info exit_phi_info
	      = vinfo->lookup_stmt (USE_STMT (use_p));
	    STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt;
	  }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
				       NULL);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}


/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (stmt_vec_info first_stmt_info)
{
  vec_info *vinfo = first_stmt_info->vinfo;
  stmt_vec_info next_stmt_info = first_stmt_info;

  while (next_stmt_info)
    {
      stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
      if (is_pattern_stmt_p (next_stmt_info))
	next_stmt_info = STMT_VINFO_RELATED_STMT (next_stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      vinfo->remove_stmt (next_stmt_info);
      next_stmt_info = tmp;
    }
}

/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  poly_uint64 nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     look up a vector mode of the specified size.  */
  if (known_eq (size, 0U))
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!multiple_p (size, nbytes, &nunits)
	   || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
    return NULL_TREE;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
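
/* As an illustration of the function above: on a target whose preferred
   SIMD mode for SImode is 16 bytes wide, a 32-bit int yields a V4SImode
   vector type (nbytes == 4, nunits == 4); requesting SIZE == 32 instead
   would look up a V8SImode vector, provided the target supports such a
   mode.  */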

poly_uint64 current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && known_eq (current_vector_size, 0U))
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
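
/* Illustrative note: the mask type always has as many elements as the
   data vector it guards; e.g. if SCALAR_TYPE is a 32-bit int and the
   data vector is V4SI, the result is a 4-element boolean vector, whose
   layout (a bitmask or wide elements) is chosen by the target.  */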

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
     case OPERAND is an SSA_NAME that is defined in the vectorizable region
   DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
     the definition could be anywhere in the function
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and
   operands that are defined by the current iteration of the loop.
   Unsupportable operands are those that are defined by a previous
   iteration of the loop (as is the case in reduction/induction
   computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
		    stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
{
  if (def_stmt_info_out)
    *def_stmt_info_out = NULL;
  if (def_stmt_out)
    *def_stmt_out = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      if (TREE_CODE (operand) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (operand))
	dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
      else
	dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
    }

  if (CONSTANT_CLASS_P (operand))
    *dt = vect_constant_def;
  else if (is_gimple_min_invariant (operand))
    *dt = vect_external_def;
  else if (TREE_CODE (operand) != SSA_NAME)
    *dt = vect_unknown_def_type;
  else if (SSA_NAME_IS_DEFAULT_DEF (operand))
    *dt = vect_external_def;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
      stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
      if (!stmt_vinfo)
	*dt = vect_external_def;
      else
	{
	  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
	    {
	      stmt_vinfo = STMT_VINFO_RELATED_STMT (stmt_vinfo);
	      def_stmt = stmt_vinfo->stmt;
	    }
	  switch (gimple_code (def_stmt))
	    {
	    case GIMPLE_PHI:
	    case GIMPLE_ASSIGN:
	    case GIMPLE_CALL:
	      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
	      break;
	    default:
	      *dt = vect_unknown_def_type;
	      break;
	    }
	  if (def_stmt_info_out)
	    *def_stmt_info_out = stmt_vinfo;
	}
      if (def_stmt_out)
	*def_stmt_out = def_stmt;
    }

  if (dump_enabled_p ())
    {
      dump_printf (MSG_NOTE, ", type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  return true;
}
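
/* For example, in the classification above: given 'a * 4' inside the
   vectorized loop, the operand 4 classifies as vect_constant_def; a
   value set before the loop classifies as vect_external_def; and an
   SSA name defined by a statement inside the loop classifies as
   vect_internal_def (or one of the reduction/induction kinds recorded
   in its stmt_vec_info).  */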

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
		    tree *vectype, stmt_vec_info *def_stmt_info_out,
		    gimple **def_stmt_out)
{
  stmt_vec_info def_stmt_info;
  gimple *def_stmt;
  if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
    return false;

  if (def_stmt_out)
    *def_stmt_out = def_stmt;
  if (def_stmt_info_out)
    *def_stmt_info_out = def_stmt_info;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
      gcc_assert (*vectype != NULL_TREE);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_is_simple_use: vectype ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT,
   FIX_TRUNC and WIDEN_MULT.  This function checks if these operations
   are supported by the target platform either directly (via vector
   tree-codes), or via target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
10184 | |
10185 | bool | |
32e8e429 | 10186 | supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info, |
b690cc0f | 10187 | tree vectype_out, tree vectype_in, |
ebfd146a IR |
10188 | enum tree_code *code1, enum tree_code *code2, |
10189 | int *multi_step_cvt, | |
9771b263 | 10190 | vec<tree> *interm_types) |
ebfd146a | 10191 | { |
ebfd146a | 10192 | loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); |
4ef69dfc | 10193 | struct loop *vect_loop = NULL; |
ef4bddc2 | 10194 | machine_mode vec_mode; |
81f40b79 | 10195 | enum insn_code icode1, icode2; |
ebfd146a | 10196 | optab optab1, optab2; |
b690cc0f RG |
10197 | tree vectype = vectype_in; |
10198 | tree wide_vectype = vectype_out; | |
ebfd146a | 10199 | enum tree_code c1, c2; |
4a00c761 JJ |
10200 | int i; |
10201 | tree prev_type, intermediate_type; | |
ef4bddc2 | 10202 | machine_mode intermediate_mode, prev_mode; |
4a00c761 | 10203 | optab optab3, optab4; |
ebfd146a | 10204 | |
4a00c761 | 10205 | *multi_step_cvt = 0; |
4ef69dfc IR |
10206 | if (loop_info) |
10207 | vect_loop = LOOP_VINFO_LOOP (loop_info); | |
10208 | ||
ebfd146a IR |
10209 | switch (code) |
10210 | { | |
10211 | case WIDEN_MULT_EXPR: | |
6ae6116f RH |
10212 | /* The result of a vectorized widening operation usually requires |
10213 | two vectors (because the widened results do not fit into one vector). | |
10214 | The generated vector results would normally be expected to be | |
10215 | generated in the same order as in the original scalar computation, | |
10216 | i.e. if 8 results are generated in each vector iteration, they are | |
10217 | to be organized as follows: | |
10218 | vect1: [res1,res2,res3,res4], | |
10219 | vect2: [res5,res6,res7,res8]. | |
10220 | ||
10221 | However, in the special case that the result of the widening | |
10222 | operation is used in a reduction computation only, the order doesn't | |
10223 | matter (because when vectorizing a reduction we change the order of | |
10224 | the computation). Some targets can take advantage of this and | |
10225 | generate more efficient code. For example, targets like Altivec, | |
10226 | that support widen_mult using a sequence of {mult_even,mult_odd} | |
10227 | generate the following vectors: | |
10228 | vect1: [res1,res3,res5,res7], | |
10229 | vect2: [res2,res4,res6,res8]. | |
10230 | ||
10231 | When vectorizing outer-loops, we execute the inner-loop sequentially | |
10232 | (each vectorized inner-loop iteration contributes to VF outer-loop | |
10233 | iterations in parallel). We therefore don't allow to change the | |
10234 | order of the computation in the inner-loop during outer-loop | |
10235 | vectorization. */ | |
10236 | /* TODO: Another case in which order doesn't *really* matter is when we | |
10237 | widen and then contract again, e.g. (short)((int)x * y >> 8). | |
10238 | Normally, pack_trunc performs an even/odd permute, whereas the | |
10239 | repack from an even/odd expansion would be an interleave, which | |
10240 | would be significantly simpler for e.g. AVX2. */ | |
10241 | /* In any case, in order to avoid duplicating the code below, recurse | |
10242 | on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values | |
10243 | are properly set up for the caller. If we fail, we'll continue with | |
10244 | a VEC_WIDEN_MULT_LO/HI_EXPR check. */ | |
10245 | if (vect_loop | |
10246 | && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction | |
86a91c0a | 10247 | && !nested_in_vect_loop_p (vect_loop, stmt_info) |
6ae6116f | 10248 | && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR, |
86a91c0a RS |
10249 | stmt_info, vectype_out, |
10250 | vectype_in, code1, code2, | |
10251 | multi_step_cvt, interm_types)) | |
ebc047a2 CH |
10252 | { |
10253 | /* Elements in a vector with vect_used_by_reduction property cannot | |
10254 | be reordered if the use chain with this property does not have the | |
10255 | same operation. One such an example is s += a * b, where elements | |
10256 | in a and b cannot be reordered. Here we check if the vector defined | |
10257 | by STMT is only directly used in the reduction statement. */ | |
86a91c0a | 10258 | tree lhs = gimple_assign_lhs (stmt_info->stmt); |
0d0a4e20 RS |
10259 | stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs); |
10260 | if (use_stmt_info | |
10261 | && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def) | |
10262 | return true; | |
ebc047a2 | 10263 | } |
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
      c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
			 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type = vect_halve_mask_nunits (prev_type);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
			     TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
   and FLOAT.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
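
/* For instance, narrowing ints to chars proceeds as int -> short -> char:
   each step applies VEC_PACK_TRUNC_EXPR to two input vectors to produce
   one vector with twice as many, half-width elements, so MULTI_STEP_CVT
   is 1 and INTERM_TYPES holds the short vector type.  */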
ebfd146a IR |
10422 | |
10423 | bool | |
10424 | supportable_narrowing_operation (enum tree_code code, | |
b690cc0f | 10425 | tree vectype_out, tree vectype_in, |
ebfd146a | 10426 | enum tree_code *code1, int *multi_step_cvt, |
9771b263 | 10427 | vec<tree> *interm_types) |
ebfd146a | 10428 | { |
ef4bddc2 | 10429 | machine_mode vec_mode; |
ebfd146a IR |
10430 | enum insn_code icode1; |
10431 | optab optab1, interm_optab; | |
b690cc0f RG |
10432 | tree vectype = vectype_in; |
10433 | tree narrow_vectype = vectype_out; | |
ebfd146a | 10434 | enum tree_code c1; |
3ae0661a | 10435 | tree intermediate_type, prev_type; |
ef4bddc2 | 10436 | machine_mode intermediate_mode, prev_mode; |
ebfd146a | 10437 | int i; |
4a00c761 | 10438 | bool uns; |
ebfd146a | 10439 | |
4a00c761 | 10440 | *multi_step_cvt = 0; |
ebfd146a IR |
10441 | switch (code) |
10442 | { | |
10443 | CASE_CONVERT: | |
10444 | c1 = VEC_PACK_TRUNC_EXPR; | |
10445 | break; | |
10446 | ||
10447 | case FIX_TRUNC_EXPR: | |
10448 | c1 = VEC_PACK_FIX_TRUNC_EXPR; | |
10449 | break; | |
10450 | ||
10451 | case FLOAT_EXPR: | |
1bda738b JJ |
10452 | c1 = VEC_PACK_FLOAT_EXPR; |
10453 | break; | |
ebfd146a IR |
10454 | |
10455 | default: | |
10456 | gcc_unreachable (); | |
10457 | } | |
10458 | ||
10459 | if (code == FIX_TRUNC_EXPR) | |
10460 | /* The signedness is determined from output operand. */ | |
b690cc0f | 10461 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
ebfd146a IR |
10462 | else |
10463 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
10464 | ||
10465 | if (!optab1) | |
10466 | return false; | |
10467 | ||
10468 | vec_mode = TYPE_MODE (vectype); | |
947131ba | 10469 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing) |
ebfd146a IR |
10470 | return false; |
10471 | ||
4a00c761 JJ |
10472 | *code1 = c1; |
10473 | ||
10474 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
10475 | /* For scalar masks we may have different boolean |
10476 | vector types having the same QImode. Thus we | |
10477 | add additional check for elements number. */ | |
10478 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
928686b1 RS |
10479 | || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2, |
10480 | TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 | 10481 | |
1bda738b JJ |
10482 | if (code == FLOAT_EXPR) |
10483 | return false; | |
10484 | ||
ebfd146a IR |
10485 | /* Check if it's a multi-step conversion that can be done using intermediate |
10486 | types. */ | |
4a00c761 | 10487 | prev_mode = vec_mode; |
3ae0661a | 10488 | prev_type = vectype; |
4a00c761 JJ |
10489 | if (code == FIX_TRUNC_EXPR) |
10490 | uns = TYPE_UNSIGNED (vectype_out); | |
10491 | else | |
10492 | uns = TYPE_UNSIGNED (vectype); | |
10493 | ||
10494 | /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer | |
10495 | conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more | |
10496 | costly than signed. */ | |
10497 | if (code == FIX_TRUNC_EXPR && uns) | |
10498 | { | |
10499 | enum insn_code icode2; | |
10500 | ||
10501 | intermediate_type | |
10502 | = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0); | |
10503 | interm_optab | |
10504 | = optab_for_tree_code (c1, intermediate_type, optab_default); | |
2225b9f2 | 10505 | if (interm_optab != unknown_optab |
4a00c761 JJ |
10506 | && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing |
10507 | && insn_data[icode1].operand[0].mode | |
10508 | == insn_data[icode2].operand[0].mode) | |
10509 | { | |
10510 | uns = false; | |
10511 | optab1 = interm_optab; | |
10512 | icode1 = icode2; | |
10513 | } | |
10514 | } | |
ebfd146a | 10515 | |
4a00c761 JJ |
10516 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
10517 | intermediate steps in promotion sequence. We try | |
10518 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */ | |
9771b263 | 10519 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
10520 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
10521 | { | |
10522 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
10523 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
10524 | { | |
7cfb4d93 | 10525 | intermediate_type = vect_double_mask_nunits (prev_type); |
3ae0661a | 10526 | if (intermediate_mode != TYPE_MODE (intermediate_type)) |
7cfb4d93 | 10527 | return false; |
3ae0661a IE |
10528 | } |
10529 | else | |
10530 | intermediate_type | |
10531 | = lang_hooks.types.type_for_mode (intermediate_mode, uns); | |
4a00c761 JJ |
10532 | interm_optab |
10533 | = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type, | |
10534 | optab_default); | |
10535 | if (!interm_optab | |
10536 | || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing) | |
10537 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
10538 | || ((icode1 = optab_handler (interm_optab, intermediate_mode)) | |
10539 | == CODE_FOR_nothing)) | |
10540 | break; | |
10541 | ||
9771b263 | 10542 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
10543 | (*multi_step_cvt)++; |
10544 | ||
10545 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff | 10546 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
928686b1 RS |
10547 | || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2, |
10548 | TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 JJ |
10549 | |
10550 | prev_mode = intermediate_mode; | |
3ae0661a | 10551 | prev_type = intermediate_type; |
4a00c761 | 10552 | optab1 = interm_optab; |
ebfd146a IR |
10553 | } |
10554 | ||
9771b263 | 10555 | interm_types->release (); |
4a00c761 | 10556 | return false; |
ebfd146a | 10557 | } |
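To make the multi-step narrowing loop above concrete, here is a minimal standalone C sketch (not GCC internals) of the same idea: each VEC_PACK_TRUNC step halves the element width, and the conversion fails once the number of intermediate types would exceed the step limit. The helper name narrowing_steps and the value used for MAX_INTERM_CVT_STEPS are illustrative assumptions.

#include <stdio.h>

#define MAX_INTERM_CVT_STEPS 3	/* assumed limit, for illustration only */

/* Count the intermediate types needed to narrow SRC_BITS-wide elements
   to DST_BITS-wide elements when every step halves the width.
   Return -1 if DST_BITS is unreachable or needs too many steps.  */
static int
narrowing_steps (int src_bits, int dst_bits)
{
  int steps = 0;
  while (src_bits > dst_bits)
    {
      src_bits /= 2;
      if (src_bits == dst_bits)
	return steps;		/* the final pack needs no new type */
      if (++steps > MAX_INTERM_CVT_STEPS)
	return -1;
    }
  return -1;			/* not reachable by repeated halving */
}

int
main (void)
{
  /* int -> char on a 128-bit target is V4SI -> V8HI -> V16QI:
     two packs, one intermediate type (V8HI).  */
  printf ("int->short: %d\n", narrowing_steps (32, 16));	/* 0 */
  printf ("int->char:  %d\n", narrowing_steps (32, 8));		/* 1 */
  printf ("long->char: %d\n", narrowing_steps (64, 8));		/* 2 */
  return 0;
}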
7cfb4d93 RS |
10558 | |
10559 | /* Generate and return a statement that sets vector mask MASK such that | |
10560 | MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */ | |
10561 | ||
10562 | gcall * | |
10563 | vect_gen_while (tree mask, tree start_index, tree end_index) | |
10564 | { | |
10565 | tree cmp_type = TREE_TYPE (start_index); | |
10566 | tree mask_type = TREE_TYPE (mask); | |
10567 | gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT, | |
10568 | cmp_type, mask_type, | |
10569 | OPTIMIZE_FOR_SPEED)); | |
10570 | gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3, | |
10571 | start_index, end_index, | |
10572 | build_zero_cst (mask_type)); | |
10573 | gimple_call_set_lhs (call, mask); | |
10574 | return call; | |
10575 | } | |
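The mask built by the IFN_WHILE_ULT call above can be modelled lane by lane: lane I is set iff START_INDEX + I is below END_INDEX, so a full vector gets an all-true mask and a loop tail gets a trailing run of false lanes. A minimal scalar model follows; the lane count of 8 is an assumption for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NUNITS 8	/* assumed number of mask lanes */

/* Model of WHILE_ULT: mask[i] = start_index + i < end_index.  */
static void
while_ult (unsigned long start_index, unsigned long end_index,
	   bool mask[NUNITS])
{
  for (int i = 0; i < NUNITS; i++)
    mask[i] = start_index + i < end_index;
}

int
main (void)
{
  bool mask[NUNITS];
  /* Loop tail: indices 11..15 are active against a bound of 16,
     so lanes 0..4 are true and lanes 5..7 are false.  */
  while_ult (11, 16, mask);
  for (int i = 0; i < NUNITS; i++)
    printf ("%d", mask[i]);
  printf ("\n");	/* prints 11111000 */
  return 0;
}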
535e7c11 RS |
10576 | |
10577 | /* Generate a vector mask of type MASK_TYPE for which index I is false iff | |
10578 | J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */ | |
10579 | ||
10580 | tree | |
10581 | vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index, | |
10582 | tree end_index) | |
10583 | { | |
10584 | tree tmp = make_ssa_name (mask_type); | |
10585 | gcall *call = vect_gen_while (tmp, start_index, end_index); | |
10586 | gimple_seq_add_stmt (seq, call); | |
10587 | return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp); | |
10588 | } | |
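vect_gen_while_not simply complements that predicate with BIT_NOT_EXPR, yielding the lanes whose index has reached or passed the bound; in the scalar model above it corresponds to !mask[i]. Building the WHILE_ULT first and negating the result keeps IFN_WHILE_ULT, which vect_gen_while asserts is directly supported by the target, as the single source of the predicate.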
1f3cb663 RS |
10589 | |
10590 | /* Try to compute the vector types required to vectorize STMT_INFO, | |
10591 | returning true on success and false if vectorization isn't possible. | |
10592 | ||
10593 | On success: | |
10594 | ||
10595 | - Set *STMT_VECTYPE_OUT to: | |
10596 | - NULL_TREE if the statement doesn't need to be vectorized; | |
10597 | - boolean_type_node if the statement is a boolean operation whose | |
10598 | vector type can only be determined once all the other vector types | |
10599 | are known; and | |
10600 | - the equivalent of STMT_VINFO_VECTYPE otherwise. | |
10601 | ||
10602 | - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum | |
10603 | number of units needed to vectorize STMT_INFO, or NULL_TREE if the | |
10604 | statement does not help to determine the overall number of units. */ | |
10605 | ||
10606 | bool | |
10607 | vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, | |
10608 | tree *stmt_vectype_out, | |
10609 | tree *nunits_vectype_out) | |
10610 | { | |
10611 | gimple *stmt = stmt_info->stmt; | |
10612 | ||
10613 | *stmt_vectype_out = NULL_TREE; | |
10614 | *nunits_vectype_out = NULL_TREE; | |
10615 | ||
10616 | if (gimple_get_lhs (stmt) == NULL_TREE | |
10617 | /* MASK_STORE has no lhs, but is ok. */ | |
10618 | && !gimple_call_internal_p (stmt, IFN_MASK_STORE)) | |
10619 | { | |
10620 | if (is_a <gcall *> (stmt)) | |
10621 | { | |
10622 | /* Ignore calls with no lhs. These must be calls to | |
10623 | #pragma omp simd functions, and the vectorization factor | |
10624 | they really need can't be determined until | |
10625 | vectorizable_simd_clone_call. */ | |
10626 | if (dump_enabled_p ()) | |
10627 | dump_printf_loc (MSG_NOTE, vect_location, | |
10628 | "defer to SIMD clone analysis.\n"); | |
10629 | return true; | |
10630 | } | |
10631 | ||
10632 | if (dump_enabled_p ()) | |
10633 | { | |
10634 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10635 | "not vectorized: irregular stmt."); | |
10636 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10637 | } | |
10638 | return false; | |
10639 | } | |
10640 | ||
10641 | if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) | |
10642 | { | |
10643 | if (dump_enabled_p ()) | |
10644 | { | |
10645 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10646 | "not vectorized: vector stmt in loop:"); | |
10647 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10648 | } | |
10649 | return false; | |
10650 | } | |
10651 | ||
10652 | tree vectype; | |
10653 | tree scalar_type = NULL_TREE; | |
10654 | if (STMT_VINFO_VECTYPE (stmt_info)) | |
10655 | *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info); | |
10656 | else | |
10657 | { | |
10658 | gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); | |
10659 | if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) | |
10660 | scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); | |
10661 | else | |
10662 | scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); | |
10663 | ||
10664 | /* Pure bool ops don't participate in number-of-units computation. | |
10665 | For comparisons use the types being compared. */ | |
10666 | if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type) | |
10667 | && is_gimple_assign (stmt) | |
10668 | && gimple_assign_rhs_code (stmt) != COND_EXPR) | |
10669 | { | |
10670 | *stmt_vectype_out = boolean_type_node; | |
10671 | ||
10672 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
10673 | if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison | |
10674 | && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1))) | |
10675 | scalar_type = TREE_TYPE (rhs1); | |
10676 | else | |
10677 | { | |
10678 | if (dump_enabled_p ()) | |
10679 | dump_printf_loc (MSG_NOTE, vect_location, | |
10680 | "pure bool operation.\n"); | |
10681 | return true; | |
10682 | } | |
10683 | } | |
10684 | ||
10685 | if (dump_enabled_p ()) | |
10686 | { | |
10687 | dump_printf_loc (MSG_NOTE, vect_location, | |
10688 | "get vectype for scalar type: "); | |
10689 | dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); | |
10690 | dump_printf (MSG_NOTE, "\n"); | |
10691 | } | |
10692 | vectype = get_vectype_for_scalar_type (scalar_type); | |
10693 | if (!vectype) | |
10694 | { | |
10695 | if (dump_enabled_p ()) | |
10696 | { | |
10697 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10698 | "not vectorized: unsupported data-type "); | |
10699 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10700 | scalar_type); | |
10701 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10702 | } | |
10703 | return false; | |
10704 | } | |
10705 | ||
10706 | if (!*stmt_vectype_out) | |
10707 | *stmt_vectype_out = vectype; | |
10708 | ||
10709 | if (dump_enabled_p ()) | |
10710 | { | |
10711 | dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); | |
10712 | dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); | |
10713 | dump_printf (MSG_NOTE, "\n"); | |
10714 | } | |
10715 | } | |
10716 | ||
10717 | /* Don't try to compute scalar types if the stmt produces a boolean | |
10718 | vector; use the existing vector type instead. */ | |
10719 | tree nunits_vectype; | |
10720 | if (VECTOR_BOOLEAN_TYPE_P (vectype)) | |
10721 | nunits_vectype = vectype; | |
10722 | else | |
10723 | { | |
10724 | /* The number of units is set according to the smallest scalar | |
10725 | type (or the largest vector size, but we only support one | |
10726 | vector size per vectorization). */ | |
10727 | if (*stmt_vectype_out != boolean_type_node) | |
10728 | { | |
10729 | HOST_WIDE_INT dummy; | |
86a91c0a RS |
10730 | scalar_type = vect_get_smallest_scalar_type (stmt_info, |
10731 | &dummy, &dummy); | |
1f3cb663 RS |
10732 | } |
10733 | if (dump_enabled_p ()) | |
10734 | { | |
10735 | dump_printf_loc (MSG_NOTE, vect_location, | |
10736 | "get vectype for scalar type: "); | |
10737 | dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); | |
10738 | dump_printf (MSG_NOTE, "\n"); | |
10739 | } | |
10740 | nunits_vectype = get_vectype_for_scalar_type (scalar_type); | |
10741 | } | |
10742 | if (!nunits_vectype) | |
10743 | { | |
10744 | if (dump_enabled_p ()) | |
10745 | { | |
10746 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10747 | "not vectorized: unsupported data-type "); | |
10748 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); | |
10749 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10750 | } | |
10751 | return false; | |
10752 | } | |
10753 | ||
10754 | if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), | |
10755 | GET_MODE_SIZE (TYPE_MODE (nunits_vectype)))) | |
10756 | { | |
10757 | if (dump_enabled_p ()) | |
10758 | { | |
10759 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10760 | "not vectorized: different sized vector " | |
10761 | "types in statement, "); | |
10762 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); | |
10763 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10764 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype); | |
10765 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10766 | } | |
10767 | return false; | |
10768 | } | |
10769 | ||
10770 | if (dump_enabled_p ()) | |
10771 | { | |
10772 | dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); | |
10773 | dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype); | |
10774 | dump_printf (MSG_NOTE, "\n"); | |
10775 | ||
10776 | dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); | |
10777 | dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype)); | |
10778 | dump_printf (MSG_NOTE, "\n"); | |
10779 | } | |
10780 | ||
10781 | *nunits_vectype_out = nunits_vectype; | |
10782 | return true; | |
10783 | } | |
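A rough standalone model (not GCC internals) of the final check above: with one vector size per vectorization, the statement's vectype and the nunits vectype derived from its smallest scalar type must occupy the same number of bytes, and only the lane count may differ. The 16-byte vector size and the struct below are illustrative assumptions.

#include <stdio.h>

#define VECTOR_BYTES 16		/* assumed vector size: 128-bit SIMD */

struct vectype { int elem_bytes; int nunits; };

static struct vectype
vectype_for_scalar (int elem_bytes)
{
  struct vectype v = { elem_bytes, VECTOR_BYTES / elem_bytes };
  return v;
}

int
main (void)
{
  /* A statement that writes ints but also reads chars: the stmt
     vectype is V4SI, while the nunits vectype (from the smallest
     scalar type, char) is V16QI.  Both are 16 bytes, so the check
     passes and the statement contributes nunits = 16.  */
  struct vectype stmt_vt = vectype_for_scalar (4);
  struct vectype nunits_vt = vectype_for_scalar (1);
  if (stmt_vt.elem_bytes * stmt_vt.nunits
      != nunits_vt.elem_bytes * nunits_vt.nunits)
    printf ("not vectorized: different sized vector types\n");
  else
    printf ("nunits = %d\n", nunits_vt.nunits);
  return 0;
}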
10784 | ||
10785 | /* Try to determine the correct vector type for STMT_INFO, which is a | |
10786 | statement that produces a scalar boolean result. Return the vector | |
10787 | type on success, otherwise return NULL_TREE. */ | |
10788 | ||
10789 | tree | |
10790 | vect_get_mask_type_for_stmt (stmt_vec_info stmt_info) | |
10791 | { | |
10792 | gimple *stmt = stmt_info->stmt; | |
10793 | tree mask_type = NULL; | |
10794 | tree vectype, scalar_type; | |
10795 | ||
10796 | if (is_gimple_assign (stmt) | |
10797 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison | |
10798 | && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) | |
10799 | { | |
10800 | scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); | |
10801 | mask_type = get_mask_type_for_scalar_type (scalar_type); | |
10802 | ||
10803 | if (!mask_type) | |
10804 | { | |
10805 | if (dump_enabled_p ()) | |
10806 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10807 | "not vectorized: unsupported mask\n"); | |
10808 | return NULL_TREE; | |
10809 | } | |
10810 | } | |
10811 | else | |
10812 | { | |
10813 | tree rhs; | |
10814 | ssa_op_iter iter; | |
1f3cb663 RS |
10815 | enum vect_def_type dt; |
10816 | ||
10817 | FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE) | |
10818 | { | |
894dd753 | 10819 | if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype)) |
1f3cb663 RS |
10820 | { |
10821 | if (dump_enabled_p ()) | |
10822 | { | |
10823 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10824 | "not vectorized: can't compute mask type " | |
10825 | "for statement, "); | |
10826 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, | |
10827 | 0); | |
10828 | } | |
10829 | return NULL_TREE; | |
10830 | } | |
10831 | ||
10832 | /* No vectype probably means an external definition. | |
10833 | Allow it in case there is another operand from which | |
10834 | the mask type can be determined. */ | |
10835 | if (!vectype) | |
10836 | continue; | |
10837 | ||
10838 | if (!mask_type) | |
10839 | mask_type = vectype; | |
10840 | else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), | |
10841 | TYPE_VECTOR_SUBPARTS (vectype))) | |
10842 | { | |
10843 | if (dump_enabled_p ()) | |
10844 | { | |
10845 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10846 | "not vectorized: different sized masks " | |
10847 | "types in statement, "); | |
10848 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10849 | mask_type); | |
10850 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10851 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10852 | vectype); | |
10853 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10854 | } | |
10855 | return NULL_TREE; | |
10856 | } | |
10857 | else if (VECTOR_BOOLEAN_TYPE_P (mask_type) | |
10858 | != VECTOR_BOOLEAN_TYPE_P (vectype)) | |
10859 | { | |
10860 | if (dump_enabled_p ()) | |
10861 | { | |
10862 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10863 | "not vectorized: mixed mask and " | |
10864 | "nonmask vector types in statement, "); | |
10865 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10866 | mask_type); | |
10867 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10868 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10869 | vectype); | |
10870 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10871 | } | |
10872 | return NULL_TREE; | |
10873 | } | |
10874 | } | |
10875 | ||
10876 | /* We may compare a boolean value loaded as a vector of integers. | |
10877 | Fix mask_type in that case. */ | |
10878 | if (mask_type | |
10879 | && !VECTOR_BOOLEAN_TYPE_P (mask_type) | |
10880 | && gimple_code (stmt) == GIMPLE_ASSIGN | |
10881 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) | |
10882 | mask_type = build_same_sized_truth_vector_type (mask_type); | |
10883 | } | |
10884 | ||
10885 | /* A missing mask_type probably means a loop-invariant predicate. | |
10886 | This is a possible subject for optimization in if-conversion. */ | |
10887 | if (!mask_type && dump_enabled_p ()) | |
10888 | { | |
10889 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10890 | "not vectorized: can't compute mask type " | |
10891 | "for statement, "); | |
10892 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10893 | } | |
10894 | return mask_type; | |
10895 | } |
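The operand scan in vect_get_mask_type_for_stmt reduces to two invariants, sketched below in a standalone model (illustrative types and names, not GCC internals): every operand whose vector type is already known must agree on the number of lanes, and mask (boolean) vectors must not be mixed with nonmask vectors. Operands with no vector type yet, such as external definitions, are skipped.

#include <stdbool.h>
#include <stdio.h>

struct vectype { int nunits; bool is_mask; };

/* Return the common mask type of the known operands, or NULL if the
   operands disagree on lane count or on mask-ness.  */
static const struct vectype *
resolve_mask_type (const struct vectype *ops[], int nops)
{
  const struct vectype *mask_type = NULL;
  for (int i = 0; i < nops; i++)
    {
      if (!ops[i])		/* external def: no vectype yet */
	continue;
      if (!mask_type)
	mask_type = ops[i];
      else if (mask_type->nunits != ops[i]->nunits)
	return NULL;		/* different sized mask types */
      else if (mask_type->is_mask != ops[i]->is_mask)
	return NULL;		/* mixed mask and nonmask types */
    }
  return mask_type;
}

int
main (void)
{
  struct vectype m8 = { 8, true }, m4 = { 4, true };
  const struct vectype *ok[] = { NULL, &m8, &m8 };
  const struct vectype *bad[] = { &m8, &m4 };
  printf ("ok:  %s\n", resolve_mask_type (ok, 3) ? "resolved" : "rejected");
  printf ("bad: %s\n", resolve_mask_type (bad, 2) ? "resolved" : "rejected");
  return 0;
}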