/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;

  stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
  body_cost_vec->safe_push (si);

  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  return (unsigned)
    (builtin_vectorization_cost (kind, vectype, misalign) * count);
}
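
/* As an illustration of typical use (this is the pattern the cost-model
   helpers further down, e.g. vect_model_simple_cost, follow): costing
   NCOPIES copies of a simple vector statement in the loop body is done
   with

       record_stmt_cost (cost_vec, ncopies, vector_stmt, stmt_info, 0,
                         vect_body);

   The returned value is only a preliminary estimate; the recorded
   entries are handed to the target cost model later.  */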

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT_INFO and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                   tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT_INFO.  */

static void
write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                    tree vect, tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Add a clobber of variable VAR to the vectorization of STMT_INFO.
   Emit the clobber before *GSI.  */

static void
vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                       tree var)
{
  tree clobber = build_clobber (TREE_TYPE (var));
  gimple *new_stmt = gimple_build_assign (var, clobber);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
                    enum vect_relevant relevant, bool live_p)
{
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "last stmt in pattern. don't mark"
                         " relevant/live.\n");
      stmt_vec_info old_stmt_info = stmt_info;
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt_info);
}


/* Function is_simple_and_all_uses_invariant

   Return true if STMT_INFO is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
                                  loop_vec_info loop_vinfo)
{
  tree op;
  ssa_op_iter iter;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }
  return true;
}
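
/* For instance, an assignment such as x_3 = a_1 + 3, where a_1 is defined
   outside the loop (vect_external_def) and 3 is a constant
   (vect_constant_def), is simple and all of its uses are invariant, so
   the predicate above would return true for it.  The SSA names here are
   purely illustrative.  */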

/* Function vect_stmt_relevant_p.

   Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
   is "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt_info->stmt)
      && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt_info->stmt)
        && !gimple_clobber_p (stmt_info->stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}


/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT_INFO.  Check if USE is
   used in STMT_INFO for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
{
  tree operand;

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign || !gimple_assign_copy_p (assign))
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (call && gimple_call_internal_p (call))
        {
          internal_fn ifn = gimple_call_internal_fn (call);
          int mask_index = internal_fn_mask_index (ifn);
          if (mask_index >= 0
              && use == gimple_call_arg (call, mask_index))
            return true;
          int stored_value_index = internal_fn_stored_value_index (ifn);
          if (stored_value_index >= 0
              && use == gimple_call_arg (call, stored_value_index))
            return true;
          if (internal_gather_scatter_fn_p (ifn)
              && use == gimple_call_arg (call, 1))
            return true;
        }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (assign);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
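
/* For example, in the store "a[i_1] = x_2" the use of x_2 is a
   non-indexing operand (it is the value being stored), whereas i_1
   appears only inside the ARRAY_REF and is used purely for address
   computation, so the function above returns true for x_2 and false
   for i_1.  */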


/*
   Function process_use.

   Inputs:
   - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
       STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
   we skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
   "relevant" will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
             enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
             bool force)
{
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!dstmt_vinfo)
    return true;

  def_bb = gimple_bb (dstmt_vinfo->stmt);

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
     DSTMT_VINFO must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DSTMT_VINFO in the loop.  So we just
     check that everything is as expected, and we are done.  */
  bb = gimple_bb (stmt_vinfo->stmt);
  if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = dstmt_vinfo
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = dstmt_vinfo
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
        case vect_used_only_live:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
           && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
           && ! STMT_VINFO_LIVE_P (stmt_vinfo)
           && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
                                      loop_latch_edge (bb->loop_father))
               == use))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "induction value on backedge.\n");
      return true;
    }


  vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i;
  basic_block bb;
  bool live_p;
  enum vect_relevant relevant;

  DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");

  auto_vec<stmt_vec_info, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
            }

          if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi_info, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt_vec_info stmt_vinfo = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant according to the relevance property
         of STMT.  */
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
         propagated as is to the DEF_STMTs of its USEs.

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the relevance to vect_used_by_reduction.
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
        {
        case vect_reduction_def:
          gcc_assert (relevant != vect_unused_in_scope);
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of reduction.\n");
              return false;
            }
          break;

        case vect_nested_cycle:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_outer_by_reduction
              && relevant != vect_used_in_outer)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of nested cycle.\n");

              return false;
            }
          break;

        case vect_double_reduction_def:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of double reduction.\n");

              return false;
            }
          break;

        default:
          break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (assign);
              tree op = gimple_assign_rhs1 (assign);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
                                    loop_vinfo, relevant, &worklist, false)
                      || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
                                       loop_vinfo, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (assign); i++)
                {
                  op = gimple_op (assign, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
            {
              for (i = 0; i < gimple_call_num_args (call); i++)
                {
                  tree arg = gimple_call_arg (call, i);
                  if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          gather_scatter_info gs_info;
          if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
            gcc_unreachable ();
          if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}

/* Compute the prologue cost for invariant or constant operands.  */

static unsigned
vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
                               unsigned opno, enum vect_def_type dt,
                               stmt_vector_for_cost *cost_vec)
{
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
  tree op = gimple_op (stmt, opno);
  unsigned prologue_cost = 0;

  /* Without looking at the actual initializer a vector of
     constants can be implemented as load from the constant pool.
     When all elements are the same we can use a splat.  */
  tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
  unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
  unsigned num_vects_to_check;
  unsigned HOST_WIDE_INT const_nunits;
  unsigned nelt_limit;
  if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
      && ! multiple_p (const_nunits, group_size))
    {
      num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
      nelt_limit = const_nunits;
    }
  else
    {
      /* If either the vector has variable length or the vectors
         are composed of repeated whole groups we only need to
         cost construction once.  All vectors will be the same.  */
      num_vects_to_check = 1;
      nelt_limit = group_size;
    }
  tree elt = NULL_TREE;
  unsigned nelt = 0;
  for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
    {
      unsigned si = j % group_size;
      if (nelt == 0)
        elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
      /* ??? We're just tracking whether all operands of a single
         vector initializer are the same, ideally we'd check if
         we emitted the same one already.  */
      else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
                                 opno))
        elt = NULL_TREE;
      nelt++;
      if (nelt == nelt_limit)
        {
          /* ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          prologue_cost += record_stmt_cost
              (cost_vec, 1,
               dt == vect_external_def
               ? (elt ? scalar_to_vec : vec_construct)
               : vector_load,
               stmt_info, 0, vect_prologue);
          nelt = 0;
        }
    }

  return prologue_cost;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        int ndts,
                        slp_tree node,
                        stmt_vector_for_cost *cost_vec)
{
  int inside_cost = 0, prologue_cost = 0;

  gcc_assert (cost_vec != NULL);

  /* ??? Somehow we need to fix this at the callers.  */
  if (node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);

  if (node)
    {
      /* Scan operands and account for prologue cost of constants/externals.
         ??? This over-estimates cost for multiple uses and should be
         re-engineered.  */
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
      tree lhs = gimple_get_lhs (stmt);
      for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
        {
          tree op = gimple_op (stmt, i);
          enum vect_def_type dt;
          if (!op || op == lhs)
            continue;
          if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
              && (dt == vect_constant_def || dt == vect_external_def))
            prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
                                                            i, dt, cost_vec);
        }
    }
  else
    /* Cost the "broadcast" of a scalar operand into a vector operand.
       Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
       cost model.  */
    for (int i = 0; i < ndts; i++)
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
                                           stmt_info, 0, vect_prologue);

  /* Adjust for two-operator SLP nodes.  */
  if (node && SLP_TREE_TWO_OPERATORS (node))
    {
      ncopies *= 2;
      inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
                                       stmt_info, 0, vect_body);
    }

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                   stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr,
                                    stmt_vector_for_cost *cost_vec)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
                                       vec_promote_demote, stmt_info, 0,
                                       vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
                                         stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

static void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt,
                       vect_memory_access_type memory_access_type,
                       vec_load_store_type vls_type, slp_tree slp_node,
                       stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  stmt_vec_info first_stmt_info = stmt_info;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (slp_node)
        prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
                                                        1, dt, cost_vec);
      else
        prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
                                           stmt_info, 0, vect_prologue);
    }

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
         needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr_info),
                                          vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

static void
vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
                      vect_memory_access_type memory_access_type,
                      slp_instance instance,
                      slp_tree slp_node,
                      stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  gcc_assert (cost_vec);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
    {
      /* If the load is permuted then the alignment is determined by
         the first group element not by the first scalar stmt DR.  */
      stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      /* Record the cost for the permutation.  */
      unsigned n_perms;
      unsigned assumed_nunits
        = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
      unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
      vect_transform_slp_perm_load (slp_node, vNULL, NULL,
                                    slp_vf, instance, true,
                                    &n_perms);
      inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
                                       first_stmt_info, 0, vect_body);
      /* And adjust the number of loads performed.  This handles
         redundancies as well as loads that are later dead.  */
      auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
      bitmap_clear (perm);
      for (unsigned i = 0;
           i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
        bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
      ncopies = 0;
      bool load_seen = false;
      for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
        {
          if (i % assumed_nunits == 0)
            {
              if (load_seen)
                ncopies++;
              load_seen = false;
            }
          if (bitmap_bit_p (perm, i))
            load_seen = true;
        }
      if (load_seen)
        ncopies++;
      gcc_assert (ncopies
                  <= (DR_GROUP_SIZE (first_stmt_info)
                      - DR_GROUP_GAP (first_stmt_info)
                      + assumed_nunits - 1) / assumed_nunits);
    }

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  stmt_vec_info first_stmt_info = stmt_info;
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
         for each needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
                                       stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
                        &inside_cost, &prologue_cost,
                        cost_vec, cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr_info),
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT_VINFO.  */

static void
vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
                    gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt_vinfo))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
12aaf609 | 1380 | gsi_bb_start = gsi_after_labels (bb); |
418b7df3 | 1381 | gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT); |
a70d6342 | 1382 | } |
ebfd146a IR |
1383 | } |
1384 | ||
73fbfcad | 1385 | if (dump_enabled_p ()) |
ebfd146a | 1386 | { |
78c60e3d SS |
1387 | dump_printf_loc (MSG_NOTE, vect_location, |
1388 | "created new init_stmt: "); | |
1389 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); | |
ebfd146a | 1390 | } |
418b7df3 RG |
1391 | } |
1392 | ||
1393 | /* Function vect_init_vector. | |
ebfd146a | 1394 | |
5467ee52 RG |
1395 | Insert a new stmt (INIT_STMT) that initializes a new variable of type |
1396 | TYPE with the value VAL. If TYPE is a vector type and VAL does not have | |
1397 | vector type, a vector with all elements equal to VAL is created first. |
1398 | Place the initialization at GSI if it is not NULL. Otherwise, place the |
1399 | initialization at the loop preheader. | |
418b7df3 | 1400 | Return the DEF of INIT_STMT. |
32e8e429 | 1401 | It will be used in the vectorization of STMT_INFO. */ |
418b7df3 RG |
1402 | |
1403 | tree | |
32e8e429 RS |
1404 | vect_init_vector (stmt_vec_info stmt_info, tree val, tree type, |
1405 | gimple_stmt_iterator *gsi) | |
418b7df3 | 1406 | { |
355fe088 | 1407 | gimple *init_stmt; |
418b7df3 RG |
1408 | tree new_temp; |
1409 | ||
e412ece4 RB |
1410 | /* We abuse this function to produce an SSA name whose initial value is 'val'. */ |
1411 | if (! useless_type_conversion_p (type, TREE_TYPE (val))) | |
418b7df3 | 1412 | { |
e412ece4 RB |
1413 | gcc_assert (TREE_CODE (type) == VECTOR_TYPE); |
1414 | if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val))) | |
418b7df3 | 1415 | { |
5a308cf1 IE |
1416 | /* Scalar boolean value should be transformed into |
1417 | all zeros or all ones value before building a vector. */ | |
1418 | if (VECTOR_BOOLEAN_TYPE_P (type)) | |
1419 | { | |
b3d51f23 IE |
1420 | tree true_val = build_all_ones_cst (TREE_TYPE (type)); |
1421 | tree false_val = build_zero_cst (TREE_TYPE (type)); | |
5a308cf1 IE |
1422 | |
1423 | if (CONSTANT_CLASS_P (val)) | |
1424 | val = integer_zerop (val) ? false_val : true_val; | |
1425 | else | |
1426 | { | |
1427 | new_temp = make_ssa_name (TREE_TYPE (type)); | |
1428 | init_stmt = gimple_build_assign (new_temp, COND_EXPR, | |
1429 | val, true_val, false_val); | |
a1824cfd | 1430 | vect_init_vector_1 (stmt_info, init_stmt, gsi); |
5a308cf1 IE |
1431 | val = new_temp; |
1432 | } | |
1433 | } | |
1434 | else if (CONSTANT_CLASS_P (val)) | |
42fd8198 | 1435 | val = fold_convert (TREE_TYPE (type), val); |
418b7df3 RG |
1436 | else |
1437 | { | |
b731b390 | 1438 | new_temp = make_ssa_name (TREE_TYPE (type)); |
e412ece4 RB |
1439 | if (! INTEGRAL_TYPE_P (TREE_TYPE (val))) |
1440 | init_stmt = gimple_build_assign (new_temp, | |
1441 | fold_build1 (VIEW_CONVERT_EXPR, | |
1442 | TREE_TYPE (type), | |
1443 | val)); | |
1444 | else | |
1445 | init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val); | |
a1824cfd | 1446 | vect_init_vector_1 (stmt_info, init_stmt, gsi); |
5467ee52 | 1447 | val = new_temp; |
418b7df3 RG |
1448 | } |
1449 | } | |
5467ee52 | 1450 | val = build_vector_from_val (type, val); |
418b7df3 RG |
1451 | } |
1452 | ||
0e22bb5a RB |
1453 | new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_"); |
1454 | init_stmt = gimple_build_assign (new_temp, val); | |
a1824cfd | 1455 | vect_init_vector_1 (stmt_info, init_stmt, gsi); |
0e22bb5a | 1456 | return new_temp; |
ebfd146a IR |
1457 | } |
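/* Editorial illustration, not part of the original source: a sketch of what
   vect_init_vector produces for a scalar constant.  Assuming a call roughly
   like

     tree def = vect_init_vector (stmt_info,
                                  build_int_cst (integer_type_node, 5),
                                  v4si_vectype, NULL);

   where v4si_vectype is a hypothetical "vector(4) int" type, the scalar is
   broadcast with build_vector_from_val and, since GSI is null, the init
   statement is emitted in the loop preheader (or at the start of the basic
   block for basic-block vectorization), producing something like

     cst__1 = { 5, 5, 5, 5 };

   The returned SSA name is then used as the invariant vector operand of the
   vectorized statement.  */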
1458 | ||
c83a894c | 1459 | /* Function vect_get_vec_def_for_operand_1. |
a70d6342 | 1460 | |
32e8e429 RS |
1461 | For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def |
1462 | with type DT that will be used in the vectorized stmt. */ | |
ebfd146a IR |
1463 | |
1464 | tree | |
32e8e429 RS |
1465 | vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info, |
1466 | enum vect_def_type dt) | |
ebfd146a IR |
1467 | { |
1468 | tree vec_oprnd; | |
1eede195 | 1469 | stmt_vec_info vec_stmt_info; |
ebfd146a IR |
1470 | |
1471 | switch (dt) | |
1472 | { | |
81c40241 | 1473 | /* operand is a constant or a loop invariant. */ |
ebfd146a | 1474 | case vect_constant_def: |
81c40241 | 1475 | case vect_external_def: |
c83a894c AH |
1476 | /* Code should use vect_get_vec_def_for_operand. */ |
1477 | gcc_unreachable (); | |
ebfd146a | 1478 | |
81c40241 | 1479 | /* operand is defined inside the loop. */ |
8644a673 | 1480 | case vect_internal_def: |
ebfd146a | 1481 | { |
ebfd146a | 1482 | /* Get the def from the vectorized stmt. */ |
1eede195 RS |
1483 | vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info); |
1484 | /* Get vectorized pattern statement. */ | |
1485 | if (!vec_stmt_info | |
1486 | && STMT_VINFO_IN_PATTERN_P (def_stmt_info) | |
1487 | && !STMT_VINFO_RELEVANT (def_stmt_info)) | |
1488 | vec_stmt_info = (STMT_VINFO_VEC_STMT | |
1489 | (STMT_VINFO_RELATED_STMT (def_stmt_info))); | |
1490 | gcc_assert (vec_stmt_info); | |
1491 | if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt)) | |
1492 | vec_oprnd = PHI_RESULT (phi); | |
ebfd146a | 1493 | else |
1eede195 RS |
1494 | vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt); |
1495 | return vec_oprnd; | |
ebfd146a IR |
1496 | } |
1497 | ||
c78e3652 | 1498 | /* operand is defined by a loop header phi. */ |
ebfd146a | 1499 | case vect_reduction_def: |
06066f92 | 1500 | case vect_double_reduction_def: |
7c5222ff | 1501 | case vect_nested_cycle: |
ebfd146a IR |
1502 | case vect_induction_def: |
1503 | { | |
32e8e429 | 1504 | gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI); |
ebfd146a | 1505 | |
1eede195 | 1506 | /* Get the def from the vectorized stmt. */ |
1eede195 RS |
1507 | vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info); |
1508 | if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt)) | |
1509 | vec_oprnd = PHI_RESULT (phi); | |
6dbbece6 | 1510 | else |
1eede195 RS |
1511 | vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt); |
1512 | return vec_oprnd; | |
ebfd146a IR |
1513 | } |
1514 | ||
1515 | default: | |
1516 | gcc_unreachable (); | |
1517 | } | |
1518 | } | |
1519 | ||
1520 | ||
c83a894c AH |
1521 | /* Function vect_get_vec_def_for_operand. |
1522 | ||
32e8e429 RS |
1523 | OP is an operand in STMT_VINFO. This function returns a (vector) def |
1524 | that will be used in the vectorized stmt for STMT_VINFO. | |
c83a894c AH |
1525 | |
1526 | In the case that OP is an SSA_NAME which is defined in the loop, then | |
1527 | STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def. | |
1528 | ||
1529 | In case OP is an invariant or constant, a new stmt that creates a vector def | |
1530 | needs to be introduced. VECTYPE may be used to specify a required type for | |
1531 | vector invariant. */ | |
1532 | ||
1533 | tree | |
32e8e429 | 1534 | vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype) |
c83a894c AH |
1535 | { |
1536 | gimple *def_stmt; | |
1537 | enum vect_def_type dt; | |
1538 | bool is_simple_use; | |
c83a894c AH |
1539 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); |
1540 | ||
1541 | if (dump_enabled_p ()) | |
1542 | { | |
1543 | dump_printf_loc (MSG_NOTE, vect_location, | |
1544 | "vect_get_vec_def_for_operand: "); | |
1545 | dump_generic_expr (MSG_NOTE, TDF_SLIM, op); | |
1546 | dump_printf (MSG_NOTE, "\n"); | |
1547 | } | |
1548 | ||
fef96d8e RS |
1549 | stmt_vec_info def_stmt_info; |
1550 | is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt, | |
1551 | &def_stmt_info, &def_stmt); | |
c83a894c AH |
1552 | gcc_assert (is_simple_use); |
1553 | if (def_stmt && dump_enabled_p ()) | |
1554 | { | |
1555 | dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = "); | |
1556 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); | |
1557 | } | |
1558 | ||
1559 | if (dt == vect_constant_def || dt == vect_external_def) | |
1560 | { | |
1561 | tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo); | |
1562 | tree vector_type; | |
1563 | ||
1564 | if (vectype) | |
1565 | vector_type = vectype; | |
2568d8a1 | 1566 | else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op)) |
c83a894c AH |
1567 | && VECTOR_BOOLEAN_TYPE_P (stmt_vectype)) |
1568 | vector_type = build_same_sized_truth_vector_type (stmt_vectype); | |
1569 | else | |
1570 | vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); | |
1571 | ||
1572 | gcc_assert (vector_type); | |
86a91c0a | 1573 | return vect_init_vector (stmt_vinfo, op, vector_type, NULL); |
c83a894c AH |
1574 | } |
1575 | else | |
fef96d8e | 1576 | return vect_get_vec_def_for_operand_1 (def_stmt_info, dt); |
c83a894c AH |
1577 | } |
1578 | ||
1579 | ||
ebfd146a IR |
1580 | /* Function vect_get_vec_def_for_stmt_copy |
1581 | ||
ff802fa1 | 1582 | Return a vector-def for an operand. This function is used when the |
b8698a0f L |
1583 | vectorized stmt to be created (by the caller to this function) is a "copy" |
1584 | created in case the vectorized result cannot fit in one vector, and several | |
ff802fa1 | 1585 | copies of the vector-stmt are required. In this case the vector-def is |
ebfd146a | 1586 | retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field |
e4057a39 | 1587 | of the stmt that defines VEC_OPRND. VINFO describes the vectorization. |
ebfd146a IR |
1588 | |
1589 | Context: | |
1590 | In case the vectorization factor (VF) is bigger than the number | |
1591 | of elements that can fit in a vectype (nunits), we have to generate | |
ff802fa1 | 1592 | more than one vector stmt to vectorize the scalar stmt. This situation |
b8698a0f | 1593 | arises when there are multiple data-types operated upon in the loop; the |
ebfd146a IR |
1594 | smallest data-type determines the VF, and as a result, when vectorizing |
1595 | stmts operating on wider types we need to create 'VF/nunits' "copies" of the | |
1596 | vector stmt (each computing a vector of 'nunits' results, and together | |
b8698a0f | 1597 | computing 'VF' results in each iteration). This function is called when |
ebfd146a IR |
1598 | vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in |
1599 | which VF=16 and nunits=4, so the number of copies required is 4): | |
1600 | ||
1601 | scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT | |
b8698a0f | 1602 | |
ebfd146a IR |
1603 | S1: x = load VS1.0: vx.0 = memref0 VS1.1 |
1604 | VS1.1: vx.1 = memref1 VS1.2 | |
1605 | VS1.2: vx.2 = memref2 VS1.3 | |
b8698a0f | 1606 | VS1.3: vx.3 = memref3 |
ebfd146a IR |
1607 | |
1608 | S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1 | |
1609 | VSnew.1: vz1 = vx.1 + ... VSnew.2 | |
1610 | VSnew.2: vz2 = vx.2 + ... VSnew.3 | |
1611 | VSnew.3: vz3 = vx.3 + ... | |
1612 | ||
1613 | The vectorization of S1 is explained in vectorizable_load. | |
1614 | The vectorization of S2: | |
b8698a0f L |
1615 | To create the first vector-stmt out of the 4 copies - VSnew.0 - |
1616 | the function 'vect_get_vec_def_for_operand' is called to | |
ff802fa1 | 1617 | get the relevant vector-def for each operand of S2. For operand x it |
ebfd146a IR |
1618 | returns the vector-def 'vx.0'. |
1619 | ||
b8698a0f L |
1620 | To create the remaining copies of the vector-stmt (VSnew.j), this |
1621 | function is called to get the relevant vector-def for each operand. It is | |
1622 | obtained from the respective VS1.j stmt, which is recorded in the | |
ebfd146a IR |
1623 | STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND. |
1624 | ||
b8698a0f L |
1625 | For example, to obtain the vector-def 'vx.1' in order to create the |
1626 | vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'. | |
1627 | Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the | |
ebfd146a IR |
1628 | STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1', |
1629 | and return its def ('vx.1'). | |
1630 | Overall, to create the above sequence this function will be called 3 times: | |
e4057a39 RS |
1631 | vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0); |
1632 | vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1); | |
1633 | vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2); */ | |
ebfd146a IR |
1634 | |
1635 | tree | |
e4057a39 | 1636 | vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd) |
ebfd146a | 1637 | { |
e4057a39 RS |
1638 | stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd); |
1639 | if (!def_stmt_info) | |
1640 | /* Do nothing; can reuse same def. */ | |
ebfd146a IR |
1641 | return vec_oprnd; |
1642 | ||
e4057a39 | 1643 | def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info); |
ebfd146a | 1644 | gcc_assert (def_stmt_info); |
e4057a39 RS |
1645 | if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt)) |
1646 | vec_oprnd = PHI_RESULT (phi); | |
ebfd146a | 1647 | else |
e4057a39 | 1648 | vec_oprnd = gimple_get_lhs (def_stmt_info->stmt); |
ebfd146a IR |
1649 | return vec_oprnd; |
1650 | } | |
1651 | ||
1652 | ||
1653 | /* Get vectorized definitions for the operands to create a copy of an original | |
ff802fa1 | 1654 | stmt. See vect_get_vec_def_for_stmt_copy () for details. */ |
ebfd146a | 1655 | |
c78e3652 | 1656 | void |
e4057a39 | 1657 | vect_get_vec_defs_for_stmt_copy (vec_info *vinfo, |
9771b263 DN |
1658 | vec<tree> *vec_oprnds0, |
1659 | vec<tree> *vec_oprnds1) | |
ebfd146a | 1660 | { |
9771b263 | 1661 | tree vec_oprnd = vec_oprnds0->pop (); |
ebfd146a | 1662 | |
e4057a39 | 1663 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd); |
9771b263 | 1664 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a | 1665 | |
9771b263 | 1666 | if (vec_oprnds1 && vec_oprnds1->length ()) |
ebfd146a | 1667 | { |
9771b263 | 1668 | vec_oprnd = vec_oprnds1->pop (); |
e4057a39 | 1669 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd); |
9771b263 | 1670 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1671 | } |
1672 | } | |
1673 | ||
1674 | ||
c78e3652 | 1675 | /* Get vectorized definitions for OP0 and OP1. */ |
ebfd146a | 1676 | |
c78e3652 | 1677 | void |
32e8e429 | 1678 | vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info, |
9771b263 DN |
1679 | vec<tree> *vec_oprnds0, |
1680 | vec<tree> *vec_oprnds1, | |
306b0c92 | 1681 | slp_tree slp_node) |
ebfd146a IR |
1682 | { |
1683 | if (slp_node) | |
d092494c IR |
1684 | { |
1685 | int nops = (op1 == NULL_TREE) ? 1 : 2; | |
ef062b13 TS |
1686 | auto_vec<tree> ops (nops); |
1687 | auto_vec<vec<tree> > vec_defs (nops); | |
d092494c | 1688 | |
9771b263 | 1689 | ops.quick_push (op0); |
d092494c | 1690 | if (op1) |
9771b263 | 1691 | ops.quick_push (op1); |
d092494c | 1692 | |
306b0c92 | 1693 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
d092494c | 1694 | |
37b5ec8f | 1695 | *vec_oprnds0 = vec_defs[0]; |
d092494c | 1696 | if (op1) |
37b5ec8f | 1697 | *vec_oprnds1 = vec_defs[1]; |
d092494c | 1698 | } |
ebfd146a IR |
1699 | else |
1700 | { | |
1701 | tree vec_oprnd; | |
1702 | ||
9771b263 | 1703 | vec_oprnds0->create (1); |
a1824cfd | 1704 | vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info); |
9771b263 | 1705 | vec_oprnds0->quick_push (vec_oprnd); |
ebfd146a IR |
1706 | |
1707 | if (op1) | |
1708 | { | |
9771b263 | 1709 | vec_oprnds1->create (1); |
a1824cfd | 1710 | vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info); |
9771b263 | 1711 | vec_oprnds1->quick_push (vec_oprnd); |
ebfd146a IR |
1712 | } |
1713 | } | |
1714 | } | |
1715 | ||
bb6c2b68 RS |
1716 | /* Helper function called by vect_finish_replace_stmt and |
1717 | vect_finish_stmt_generation. Set the location of the new | |
e1bd7296 | 1718 | statement and create and return a stmt_vec_info for it. */ |
bb6c2b68 | 1719 | |
e1bd7296 | 1720 | static stmt_vec_info |
32e8e429 | 1721 | vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt) |
bb6c2b68 | 1722 | { |
bb6c2b68 RS |
1723 | vec_info *vinfo = stmt_info->vinfo; |
1724 | ||
e1bd7296 | 1725 | stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt); |
bb6c2b68 RS |
1726 | |
1727 | if (dump_enabled_p ()) | |
1728 | { | |
1729 | dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: "); | |
1730 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0); | |
1731 | } | |
1732 | ||
86a91c0a | 1733 | gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt)); |
bb6c2b68 RS |
1734 | |
1735 | /* While EH edges will generally prevent vectorization, stmt might | |
1736 | e.g. be in a must-not-throw region. Ensure newly created stmts | |
1737 | that could throw are part of the same region. */ | |
86a91c0a | 1738 | int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt); |
bb6c2b68 RS |
1739 | if (lp_nr != 0 && stmt_could_throw_p (vec_stmt)) |
1740 | add_stmt_to_eh_lp (vec_stmt, lp_nr); | |
e1bd7296 RS |
1741 | |
1742 | return vec_stmt_info; | |
bb6c2b68 RS |
1743 | } |
1744 | ||
32e8e429 RS |
1745 | /* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT, |
1746 | which sets the same scalar result as STMT_INFO did. Create and return a | |
e1bd7296 | 1747 | stmt_vec_info for VEC_STMT. */ |
bb6c2b68 | 1748 | |
e1bd7296 | 1749 | stmt_vec_info |
32e8e429 | 1750 | vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt) |
bb6c2b68 | 1751 | { |
a1824cfd | 1752 | gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt)); |
bb6c2b68 | 1753 | |
a1824cfd | 1754 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt); |
bb6c2b68 RS |
1755 | gsi_replace (&gsi, vec_stmt, false); |
1756 | ||
a1824cfd | 1757 | return vect_finish_stmt_generation_1 (stmt_info, vec_stmt); |
bb6c2b68 | 1758 | } |
ebfd146a | 1759 | |
32e8e429 | 1760 | /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it |
e1bd7296 | 1761 | before *GSI. Create and return a stmt_vec_info for VEC_STMT. */ |
ebfd146a | 1762 | |
e1bd7296 | 1763 | stmt_vec_info |
32e8e429 | 1764 | vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt, |
ebfd146a IR |
1765 | gimple_stmt_iterator *gsi) |
1766 | { | |
a1824cfd | 1767 | gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL); |
ebfd146a | 1768 | |
54e8e2c3 RG |
1769 | if (!gsi_end_p (*gsi) |
1770 | && gimple_has_mem_ops (vec_stmt)) | |
1771 | { | |
355fe088 | 1772 | gimple *at_stmt = gsi_stmt (*gsi); |
54e8e2c3 RG |
1773 | tree vuse = gimple_vuse (at_stmt); |
1774 | if (vuse && TREE_CODE (vuse) == SSA_NAME) | |
1775 | { | |
1776 | tree vdef = gimple_vdef (at_stmt); | |
1777 | gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt)); | |
1778 | /* If we have an SSA vuse and insert a store, update virtual | |
1779 | SSA form to avoid triggering the renamer. Do so only | |
1780 | if we can easily see all uses - which is what almost always | |
1781 | happens with the way vectorized stmts are inserted. */ | |
1782 | if ((vdef && TREE_CODE (vdef) == SSA_NAME) | |
1783 | && ((is_gimple_assign (vec_stmt) | |
1784 | && !is_gimple_reg (gimple_assign_lhs (vec_stmt))) | |
1785 | || (is_gimple_call (vec_stmt) | |
1786 | && !(gimple_call_flags (vec_stmt) | |
1787 | & (ECF_CONST|ECF_PURE|ECF_NOVOPS))))) | |
1788 | { | |
1789 | tree new_vdef = copy_ssa_name (vuse, vec_stmt); | |
1790 | gimple_set_vdef (vec_stmt, new_vdef); | |
1791 | SET_USE (gimple_vuse_op (at_stmt), new_vdef); | |
1792 | } | |
1793 | } | |
1794 | } | |
ebfd146a | 1795 | gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); |
a1824cfd | 1796 | return vect_finish_stmt_generation_1 (stmt_info, vec_stmt); |
ebfd146a IR |
1797 | } |
1798 | ||
70439f0d RS |
1799 | /* We want to vectorize a call to combined function CFN with function |
1800 | decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN | |
1801 | as the types of all inputs. Check whether this is possible using | |
1802 | an internal function, returning its code if so or IFN_LAST if not. */ | |
ebfd146a | 1803 | |
70439f0d RS |
1804 | static internal_fn |
1805 | vectorizable_internal_function (combined_fn cfn, tree fndecl, | |
1806 | tree vectype_out, tree vectype_in) | |
ebfd146a | 1807 | { |
70439f0d RS |
1808 | internal_fn ifn; |
1809 | if (internal_fn_p (cfn)) | |
1810 | ifn = as_internal_fn (cfn); | |
1811 | else | |
1812 | ifn = associated_internal_fn (fndecl); | |
1813 | if (ifn != IFN_LAST && direct_internal_fn_p (ifn)) | |
1814 | { | |
1815 | const direct_internal_fn_info &info = direct_internal_fn (ifn); | |
1816 | if (info.vectorizable) | |
1817 | { | |
1818 | tree type0 = (info.type0 < 0 ? vectype_out : vectype_in); | |
1819 | tree type1 = (info.type1 < 0 ? vectype_out : vectype_in); | |
d95ab70a RS |
1820 | if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1), |
1821 | OPTIMIZE_FOR_SPEED)) | |
70439f0d RS |
1822 | return ifn; |
1823 | } | |
1824 | } | |
1825 | return IFN_LAST; | |
ebfd146a IR |
1826 | } |
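/* Editorial illustration: how the mapping above typically plays out.
   Assuming the scalar statement is a call to sqrtf and both VECTYPE_OUT and
   VECTYPE_IN are "vector(4) float", the combined function resolves through
   associated_internal_fn to IFN_SQRT, which is a direct, vectorizable
   internal function; direct_internal_fn_supported_p then asks whether the
   target can do SQRT on that vector mode, and if so the caller can emit a
   single internal-function call on vectors instead of a call to a
   vectorized library routine.  */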
1827 | ||
5ce9450f | 1828 | |
82570274 | 1829 | static tree permute_vec_elements (tree, tree, tree, stmt_vec_info, |
5ce9450f JJ |
1830 | gimple_stmt_iterator *); |
1831 | ||
7cfb4d93 RS |
1832 | /* Check whether a load or store statement in the loop described by |
1833 | LOOP_VINFO is possible in a fully-masked loop. This is testing | |
1834 | whether the vectorizer pass has the appropriate support, as well as | |
1835 | whether the target does. | |
1836 | ||
1837 | VLS_TYPE says whether the statement is a load or store and VECTYPE | |
1838 | is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE | |
1839 | says how the load or store is going to be implemented and GROUP_SIZE | |
1840 | is the number of load or store statements in the containing group. | |
bfaa08b7 RS |
1841 | If the access is a gather load or scatter store, GS_INFO describes |
1842 | its arguments. | |
7cfb4d93 RS |
1843 | |
1844 | Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not | |
1845 | supported, otherwise record the required mask types. */ | |
1846 | ||
1847 | static void | |
1848 | check_load_store_masking (loop_vec_info loop_vinfo, tree vectype, | |
1849 | vec_load_store_type vls_type, int group_size, | |
bfaa08b7 RS |
1850 | vect_memory_access_type memory_access_type, |
1851 | gather_scatter_info *gs_info) | |
7cfb4d93 RS |
1852 | { |
1853 | /* Invariant loads need no special support. */ | |
1854 | if (memory_access_type == VMAT_INVARIANT) | |
1855 | return; | |
1856 | ||
1857 | vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo); | |
1858 | machine_mode vecmode = TYPE_MODE (vectype); | |
1859 | bool is_load = (vls_type == VLS_LOAD); | |
1860 | if (memory_access_type == VMAT_LOAD_STORE_LANES) | |
1861 | { | |
1862 | if (is_load | |
1863 | ? !vect_load_lanes_supported (vectype, group_size, true) | |
1864 | : !vect_store_lanes_supported (vectype, group_size, true)) | |
1865 | { | |
1866 | if (dump_enabled_p ()) | |
1867 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1868 | "can't use a fully-masked loop because the" | |
1869 | " target doesn't have an appropriate masked" | |
1870 | " load/store-lanes instruction.\n"); | |
1871 | LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; | |
1872 | return; | |
1873 | } | |
1874 | unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype); | |
1875 | vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype); | |
1876 | return; | |
1877 | } | |
1878 | ||
bfaa08b7 RS |
1879 | if (memory_access_type == VMAT_GATHER_SCATTER) |
1880 | { | |
f307441a RS |
1881 | internal_fn ifn = (is_load |
1882 | ? IFN_MASK_GATHER_LOAD | |
1883 | : IFN_MASK_SCATTER_STORE); | |
bfaa08b7 | 1884 | tree offset_type = TREE_TYPE (gs_info->offset); |
f307441a | 1885 | if (!internal_gather_scatter_fn_supported_p (ifn, vectype, |
bfaa08b7 RS |
1886 | gs_info->memory_type, |
1887 | TYPE_SIGN (offset_type), | |
1888 | gs_info->scale)) | |
1889 | { | |
1890 | if (dump_enabled_p ()) | |
1891 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1892 | "can't use a fully-masked loop because the" | |
1893 | " target doesn't have an appropriate masked" | |
f307441a | 1894 | " gather load or scatter store instruction.\n"); |
bfaa08b7 RS |
1895 | LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; |
1896 | return; | |
1897 | } | |
1898 | unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype); | |
1899 | vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype); | |
1900 | return; | |
1901 | } | |
1902 | ||
7cfb4d93 RS |
1903 | if (memory_access_type != VMAT_CONTIGUOUS |
1904 | && memory_access_type != VMAT_CONTIGUOUS_PERMUTE) | |
1905 | { | |
1906 | /* Element X of the data must come from iteration i * VF + X of the | |
1907 | scalar loop. We need more work to support other mappings. */ | |
1908 | if (dump_enabled_p ()) | |
1909 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1910 | "can't use a fully-masked loop because an access" | |
1911 | " isn't contiguous.\n"); | |
1912 | LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; | |
1913 | return; | |
1914 | } | |
1915 | ||
1916 | machine_mode mask_mode; | |
1917 | if (!(targetm.vectorize.get_mask_mode | |
1918 | (GET_MODE_NUNITS (vecmode), | |
1919 | GET_MODE_SIZE (vecmode)).exists (&mask_mode)) | |
1920 | || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load)) | |
1921 | { | |
1922 | if (dump_enabled_p ()) | |
1923 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
1924 | "can't use a fully-masked loop because the target" | |
1925 | " doesn't have the appropriate masked load or" | |
1926 | " store.\n"); | |
1927 | LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; | |
1928 | return; | |
1929 | } | |
1930 | /* We might load more scalars than we need for permuting SLP loads. | |
1931 | We checked in get_group_load_store_type that the extra elements | |
1932 | don't leak into a new vector. */ | |
1933 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
1934 | poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
1935 | unsigned int nvectors; | |
1936 | if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors)) | |
1937 | vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype); | |
1938 | else | |
1939 | gcc_unreachable (); | |
1940 | } | |
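/* Editorial illustration: a worked instance of the final computation above,
   with made-up numbers.  For a contiguous access with GROUP_SIZE = 2, a
   vectorization factor of 8 and NUNITS = 8, GROUP_SIZE * VF = 16 and the
   division (rounding away from zero) gives NVECTORS = 2, so two masks of
   type VECTYPE are recorded with vect_record_loop_mask and later control
   the vector accesses generated for the group in the fully-masked loop.  */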
1941 | ||
1942 | /* Return the mask input to a masked load or store. VEC_MASK is the vectorized | |
1943 | form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask | |
1944 | that needs to be applied to all loads and stores in a vectorized loop. | |
1945 | Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK. | |
1946 | ||
1947 | MASK_TYPE is the type of both masks. If new statements are needed, | |
1948 | insert them before GSI. */ | |
1949 | ||
1950 | static tree | |
1951 | prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask, | |
1952 | gimple_stmt_iterator *gsi) | |
1953 | { | |
1954 | gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask))); | |
1955 | if (!loop_mask) | |
1956 | return vec_mask; | |
1957 | ||
1958 | gcc_assert (TREE_TYPE (loop_mask) == mask_type); | |
1959 | tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and"); | |
1960 | gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR, | |
1961 | vec_mask, loop_mask); | |
1962 | gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT); | |
1963 | return and_res; | |
1964 | } | |
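/* Editorial illustration: the statement built above.  When both a
   vectorized scalar condition VEC_MASK and a LOOP_MASK are present (a
   masked access inside a fully-masked loop), the helper emits, before GSI,
   something like

     vec_mask_and_3 = vec_mask_5 & loop_mask_8;

   (SSA names made up) and returns the result, which the caller then passes
   as the mask operand of the IFN_MASK_LOAD / IFN_MASK_STORE call.  When the
   loop is not fully masked, LOOP_MASK is null and VEC_MASK is returned
   unchanged.  */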
1965 | ||
429ef523 | 1966 | /* Determine whether we can use a gather load or scatter store to vectorize |
32e8e429 RS |
1967 | strided load or store STMT_INFO by truncating the current offset to a |
1968 | smaller width. We need to be able to construct an offset vector: | |
429ef523 RS |
1969 | |
1970 | { 0, X, X*2, X*3, ... } | |
1971 | ||
32e8e429 | 1972 | without loss of precision, where X is STMT_INFO's DR_STEP. |
429ef523 RS |
1973 | |
1974 | Return true if this is possible, describing the gather load or scatter | |
1975 | store in GS_INFO. MASKED_P is true if the load or store is conditional. */ | |
1976 | ||
1977 | static bool | |
32e8e429 RS |
1978 | vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info, |
1979 | loop_vec_info loop_vinfo, bool masked_p, | |
429ef523 RS |
1980 | gather_scatter_info *gs_info) |
1981 | { | |
89fa689a RS |
1982 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); |
1983 | data_reference *dr = dr_info->dr; | |
429ef523 RS |
1984 | tree step = DR_STEP (dr); |
1985 | if (TREE_CODE (step) != INTEGER_CST) | |
1986 | { | |
1987 | /* ??? Perhaps we could use range information here? */ | |
1988 | if (dump_enabled_p ()) | |
1989 | dump_printf_loc (MSG_NOTE, vect_location, | |
1990 | "cannot truncate variable step.\n"); | |
1991 | return false; | |
1992 | } | |
1993 | ||
1994 | /* Get the number of bits in an element. */ | |
1995 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
1996 | scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype)); | |
1997 | unsigned int element_bits = GET_MODE_BITSIZE (element_mode); | |
1998 | ||
1999 | /* Set COUNT to the upper limit on the number of elements - 1. | |
2000 | Start with the maximum vectorization factor. */ | |
2001 | unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1; | |
2002 | ||
2003 | /* Try lowering COUNT to the number of scalar latch iterations. */ | |
2004 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2005 | widest_int max_iters; | |
2006 | if (max_loop_iterations (loop, &max_iters) | |
2007 | && max_iters < count) | |
2008 | count = max_iters.to_shwi (); | |
2009 | ||
2010 | /* Try scales of 1 and the element size. */ | |
89fa689a | 2011 | int scales[] = { 1, vect_get_scalar_dr_size (dr_info) }; |
4a669ac3 | 2012 | wi::overflow_type overflow = wi::OVF_NONE; |
429ef523 RS |
2013 | for (int i = 0; i < 2; ++i) |
2014 | { | |
2015 | int scale = scales[i]; | |
2016 | widest_int factor; | |
2017 | if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor)) | |
2018 | continue; | |
2019 | ||
2020 | /* See whether we can calculate (COUNT - 1) * STEP / SCALE | |
2021 | in OFFSET_BITS bits. */ | |
4a669ac3 AH |
2022 | widest_int range = wi::mul (count, factor, SIGNED, &overflow); |
2023 | if (overflow) | |
429ef523 RS |
2024 | continue; |
2025 | signop sign = range >= 0 ? UNSIGNED : SIGNED; | |
2026 | if (wi::min_precision (range, sign) > element_bits) | |
2027 | { | |
4a669ac3 | 2028 | overflow = wi::OVF_UNKNOWN; |
429ef523 RS |
2029 | continue; |
2030 | } | |
2031 | ||
2032 | /* See whether the target supports the operation. */ | |
2033 | tree memory_type = TREE_TYPE (DR_REF (dr)); | |
2034 | if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype, | |
2035 | memory_type, element_bits, sign, scale, | |
2036 | &gs_info->ifn, &gs_info->element_type)) | |
2037 | continue; | |
2038 | ||
2039 | tree offset_type = build_nonstandard_integer_type (element_bits, | |
2040 | sign == UNSIGNED); | |
2041 | ||
2042 | gs_info->decl = NULL_TREE; | |
2043 | /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET, | |
2044 | but we don't need to store that here. */ | |
2045 | gs_info->base = NULL_TREE; | |
2046 | gs_info->offset = fold_convert (offset_type, step); | |
929b4411 | 2047 | gs_info->offset_dt = vect_constant_def; |
429ef523 RS |
2048 | gs_info->offset_vectype = NULL_TREE; |
2049 | gs_info->scale = scale; | |
2050 | gs_info->memory_type = memory_type; | |
2051 | return true; | |
2052 | } | |
2053 | ||
4a669ac3 | 2054 | if (overflow && dump_enabled_p ()) |
429ef523 RS |
2055 | dump_printf_loc (MSG_NOTE, vect_location, |
2056 | "truncating gather/scatter offset to %d bits" | |
2057 | " might change its value.\n", element_bits); | |
2058 | ||
2059 | return false; | |
2060 | } | |
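/* Editorial illustration: a worked instance of the check above, with
   made-up numbers.  Suppose DR_STEP is 12 bytes, the element is a 32-bit
   integer (ELEMENT_BITS = 32, scalar access size 4) and COUNT works out to
   15 (a maximum VF of 16, minus 1, not further reduced by the loop bound).
   For SCALE = 1 the factor is 12 and the range is 15 * 12 = 180, needing
   only 8 bits; for SCALE = 4 the factor is 3 and the range is 45.  Both fit
   comfortably in a 32-bit offset, so the first scale for which
   vect_gather_scatter_fn_p reports target support is recorded in GS_INFO,
   together with a 32-bit offset type and the step as the offset.  */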
2061 | ||
ab2fc782 | 2062 | /* Return true if we can use gather/scatter internal functions to |
82570274 | 2063 | vectorize STMT_INFO, which is a grouped or strided load or store. |
429ef523 RS |
2064 | MASKED_P is true if load or store is conditional. When returning |
2065 | true, fill in GS_INFO with the information required to perform the | |
2066 | operation. */ | |
ab2fc782 RS |
2067 | |
2068 | static bool | |
82570274 RS |
2069 | vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info, |
2070 | loop_vec_info loop_vinfo, bool masked_p, | |
ab2fc782 RS |
2071 | gather_scatter_info *gs_info) |
2072 | { | |
82570274 | 2073 | if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info) |
ab2fc782 | 2074 | || gs_info->decl) |
82570274 | 2075 | return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo, |
429ef523 | 2076 | masked_p, gs_info); |
ab2fc782 RS |
2077 | |
2078 | scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type); | |
2079 | unsigned int element_bits = GET_MODE_BITSIZE (element_mode); | |
2080 | tree offset_type = TREE_TYPE (gs_info->offset); | |
2081 | unsigned int offset_bits = TYPE_PRECISION (offset_type); | |
2082 | ||
2083 | /* Enforced by vect_check_gather_scatter. */ | |
2084 | gcc_assert (element_bits >= offset_bits); | |
2085 | ||
2086 | /* If the elements are wider than the offset, convert the offset to the | |
2087 | same width, without changing its sign. */ | |
2088 | if (element_bits > offset_bits) | |
2089 | { | |
2090 | bool unsigned_p = TYPE_UNSIGNED (offset_type); | |
2091 | offset_type = build_nonstandard_integer_type (element_bits, unsigned_p); | |
2092 | gs_info->offset = fold_convert (offset_type, gs_info->offset); | |
2093 | } | |
2094 | ||
2095 | if (dump_enabled_p ()) | |
2096 | dump_printf_loc (MSG_NOTE, vect_location, | |
2097 | "using gather/scatter for strided/grouped access," | |
2098 | " scale = %d\n", gs_info->scale); | |
2099 | ||
2100 | return true; | |
2101 | } | |
2102 | ||
32e8e429 | 2103 | /* STMT_INFO is a non-strided load or store, meaning that it accesses |
62da9e14 RS |
2104 | elements with a known constant step. Return -1 if that step |
2105 | is negative, 0 if it is zero, and 1 if it is greater than zero. */ | |
2106 | ||
2107 | static int | |
32e8e429 | 2108 | compare_step_with_zero (stmt_vec_info stmt_info) |
62da9e14 | 2109 | { |
89fa689a RS |
2110 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); |
2111 | return tree_int_cst_compare (vect_dr_behavior (dr_info)->step, | |
3f5e8a76 | 2112 | size_zero_node); |
62da9e14 RS |
2113 | } |
2114 | ||
2115 | /* If the target supports a permute mask that reverses the elements in | |
2116 | a vector of type VECTYPE, return that mask, otherwise return null. */ | |
2117 | ||
2118 | static tree | |
2119 | perm_mask_for_reverse (tree vectype) | |
2120 | { | |
928686b1 | 2121 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
62da9e14 | 2122 | |
d980067b RS |
2123 | /* The encoding has a single stepped pattern. */ |
2124 | vec_perm_builder sel (nunits, 1, 3); | |
928686b1 | 2125 | for (int i = 0; i < 3; ++i) |
908a1a16 | 2126 | sel.quick_push (nunits - 1 - i); |
62da9e14 | 2127 | |
e3342de4 RS |
2128 | vec_perm_indices indices (sel, 1, nunits); |
2129 | if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices)) | |
62da9e14 | 2130 | return NULL_TREE; |
e3342de4 | 2131 | return vect_gen_perm_mask_checked (vectype, indices); |
62da9e14 | 2132 | } |
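/* Editorial illustration: for a 4-element vector the three encoded elements
   pushed above are NUNITS-1, NUNITS-2 and NUNITS-3, i.e. { 3, 2, 1 }, and
   the single stepped pattern extends this to the full reversal
   { 3, 2, 1, 0 }; the same encoding also works for variable-length vectors.
   can_vec_perm_const_p then checks whether the target supports that
   constant permutation on the vector mode, and on success the checked mask
   is returned for use in a VEC_PERM_EXPR.  */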
5ce9450f | 2133 | |
32e8e429 | 2134 | /* STMT_INFO is either a masked or unconditional store. Return the value |
c3a8f964 RS |
2135 | being stored. */ |
2136 | ||
f307441a | 2137 | tree |
32e8e429 | 2138 | vect_get_store_rhs (stmt_vec_info stmt_info) |
c3a8f964 | 2139 | { |
32e8e429 | 2140 | if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt)) |
c3a8f964 RS |
2141 | { |
2142 | gcc_assert (gimple_assign_single_p (assign)); | |
2143 | return gimple_assign_rhs1 (assign); | |
2144 | } | |
32e8e429 | 2145 | if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt)) |
c3a8f964 RS |
2146 | { |
2147 | internal_fn ifn = gimple_call_internal_fn (call); | |
f307441a RS |
2148 | int index = internal_fn_stored_value_index (ifn); |
2149 | gcc_assert (index >= 0); | |
32e8e429 | 2150 | return gimple_call_arg (call, index); |
c3a8f964 RS |
2151 | } |
2152 | gcc_unreachable (); | |
2153 | } | |
2154 | ||
2de001ee | 2155 | /* A subroutine of get_load_store_type, with a subset of the same |
32e8e429 | 2156 | arguments. Handle the case where STMT_INFO is part of a grouped load |
2de001ee RS |
2157 | or store. |
2158 | ||
2159 | For stores, the statements in the group are all consecutive | |
2160 | and there is no gap at the end. For loads, the statements in the | |
2161 | group might not be consecutive; there can be gaps between statements | |
2162 | as well as at the end. */ | |
2163 | ||
2164 | static bool | |
32e8e429 | 2165 | get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, |
7e11fc7f | 2166 | bool masked_p, vec_load_store_type vls_type, |
429ef523 RS |
2167 | vect_memory_access_type *memory_access_type, |
2168 | gather_scatter_info *gs_info) | |
2de001ee | 2169 | { |
2de001ee RS |
2170 | vec_info *vinfo = stmt_info->vinfo; |
2171 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
2172 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; | |
bffb8014 | 2173 | stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
89fa689a | 2174 | dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
bffb8014 RS |
2175 | unsigned int group_size = DR_GROUP_SIZE (first_stmt_info); |
2176 | bool single_element_p = (stmt_info == first_stmt_info | |
2c53b149 | 2177 | && !DR_GROUP_NEXT_ELEMENT (stmt_info)); |
bffb8014 | 2178 | unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info); |
928686b1 | 2179 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2de001ee RS |
2180 | |
2181 | /* True if the vectorized statements would access beyond the last | |
2182 | statement in the group. */ | |
2183 | bool overrun_p = false; | |
2184 | ||
2185 | /* True if we can cope with such overrun by peeling for gaps, so that | |
2186 | there is at least one final scalar iteration after the vector loop. */ | |
7e11fc7f RS |
2187 | bool can_overrun_p = (!masked_p |
2188 | && vls_type == VLS_LOAD | |
2189 | && loop_vinfo | |
2190 | && !loop->inner); | |
2de001ee RS |
2191 | |
2192 | /* There can only be a gap at the end of the group if the stride is | |
2193 | known at compile time. */ | |
3ad3b3ac | 2194 | gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0); |
2de001ee RS |
2195 | |
2196 | /* Stores can't yet have gaps. */ | |
2197 | gcc_assert (slp || vls_type == VLS_LOAD || gap == 0); | |
2198 | ||
2199 | if (slp) | |
2200 | { | |
3ad3b3ac | 2201 | if (STMT_VINFO_STRIDED_P (first_stmt_info)) |
2de001ee | 2202 | { |
2c53b149 | 2203 | /* Try to use consecutive accesses of DR_GROUP_SIZE elements, |
2de001ee RS |
2204 | separated by the stride, until we have a complete vector. |
2205 | Fall back to scalar accesses if that isn't possible. */ | |
928686b1 | 2206 | if (multiple_p (nunits, group_size)) |
2de001ee RS |
2207 | *memory_access_type = VMAT_STRIDED_SLP; |
2208 | else | |
2209 | *memory_access_type = VMAT_ELEMENTWISE; | |
2210 | } | |
2211 | else | |
2212 | { | |
2213 | overrun_p = loop_vinfo && gap != 0; | |
2214 | if (overrun_p && vls_type != VLS_LOAD) | |
2215 | { | |
2216 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2217 | "Grouped store with gaps requires" | |
2218 | " non-consecutive accesses\n"); | |
2219 | return false; | |
2220 | } | |
f702e7d4 RS |
2221 | /* An overrun is fine if the trailing elements are smaller |
2222 | than the alignment boundary B. Every vector access will | |
2223 | be a multiple of B and so we are guaranteed to access a | |
2224 | non-gap element in the same B-sized block. */ | |
f9ef2c76 | 2225 | if (overrun_p |
89fa689a RS |
2226 | && gap < (vect_known_alignment_in_bytes (first_dr_info) |
2227 | / vect_get_scalar_dr_size (first_dr_info))) | |
f9ef2c76 | 2228 | overrun_p = false; |
2de001ee RS |
2229 | if (overrun_p && !can_overrun_p) |
2230 | { | |
2231 | if (dump_enabled_p ()) | |
2232 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2233 | "Peeling for outer loop is not supported\n"); | |
2234 | return false; | |
2235 | } | |
2236 | *memory_access_type = VMAT_CONTIGUOUS; | |
2237 | } | |
2238 | } | |
2239 | else | |
2240 | { | |
2241 | /* We can always handle this case using elementwise accesses, | |
2242 | but see if something more efficient is available. */ | |
2243 | *memory_access_type = VMAT_ELEMENTWISE; | |
2244 | ||
2245 | /* If there is a gap at the end of the group then these optimizations | |
2246 | would access excess elements in the last iteration. */ | |
2247 | bool would_overrun_p = (gap != 0); | |
f702e7d4 RS |
2248 | /* An overrun is fine if the trailing elements are smaller than the |
2249 | alignment boundary B. Every vector access will be a multiple of B | |
2250 | and so we are guaranteed to access a non-gap element in the | |
2251 | same B-sized block. */ | |
f9ef2c76 | 2252 | if (would_overrun_p |
7e11fc7f | 2253 | && !masked_p |
89fa689a RS |
2254 | && gap < (vect_known_alignment_in_bytes (first_dr_info) |
2255 | / vect_get_scalar_dr_size (first_dr_info))) | |
f9ef2c76 | 2256 | would_overrun_p = false; |
f702e7d4 | 2257 | |
3ad3b3ac | 2258 | if (!STMT_VINFO_STRIDED_P (first_stmt_info) |
62da9e14 | 2259 | && (can_overrun_p || !would_overrun_p) |
86a91c0a | 2260 | && compare_step_with_zero (stmt_info) > 0) |
2de001ee | 2261 | { |
6737facb RS |
2262 | /* First cope with the degenerate case of a single-element |
2263 | vector. */ | |
2264 | if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)) | |
2265 | *memory_access_type = VMAT_CONTIGUOUS; | |
2266 | ||
2267 | /* Otherwise try using LOAD/STORE_LANES. */ | |
2268 | if (*memory_access_type == VMAT_ELEMENTWISE | |
2269 | && (vls_type == VLS_LOAD | |
7e11fc7f RS |
2270 | ? vect_load_lanes_supported (vectype, group_size, masked_p) |
2271 | : vect_store_lanes_supported (vectype, group_size, | |
2272 | masked_p))) | |
2de001ee RS |
2273 | { |
2274 | *memory_access_type = VMAT_LOAD_STORE_LANES; | |
2275 | overrun_p = would_overrun_p; | |
2276 | } | |
2277 | ||
2278 | /* If that fails, try using permuting loads. */ | |
2279 | if (*memory_access_type == VMAT_ELEMENTWISE | |
2280 | && (vls_type == VLS_LOAD | |
2281 | ? vect_grouped_load_supported (vectype, single_element_p, | |
2282 | group_size) | |
2283 | : vect_grouped_store_supported (vectype, group_size))) | |
2284 | { | |
2285 | *memory_access_type = VMAT_CONTIGUOUS_PERMUTE; | |
2286 | overrun_p = would_overrun_p; | |
2287 | } | |
2288 | } | |
429ef523 RS |
2289 | |
2290 | /* As a last resort, try using a gather load or scatter store. |
2291 | ||
2292 | ??? Although the code can handle all group sizes correctly, | |
2293 | it probably isn't a win to use separate strided accesses based | |
2294 | on nearby locations. Or, even if it's a win over scalar code, | |
2295 | it might not be a win over vectorizing at a lower VF, if that | |
2296 | allows us to use contiguous accesses. */ | |
2297 | if (*memory_access_type == VMAT_ELEMENTWISE | |
2298 | && single_element_p | |
2299 | && loop_vinfo | |
86a91c0a | 2300 | && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo, |
429ef523 RS |
2301 | masked_p, gs_info)) |
2302 | *memory_access_type = VMAT_GATHER_SCATTER; | |
2de001ee RS |
2303 | } |
2304 | ||
bffb8014 | 2305 | if (vls_type != VLS_LOAD && first_stmt_info == stmt_info) |
2de001ee RS |
2306 | { |
2307 | /* STMT_INFO is the leader of the group. Check the operands of all the |
2308 | stmts of the group. */ | |
bffb8014 RS |
2309 | stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info); |
2310 | while (next_stmt_info) | |
2de001ee | 2311 | { |
bffb8014 | 2312 | tree op = vect_get_store_rhs (next_stmt_info); |
2de001ee | 2313 | enum vect_def_type dt; |
894dd753 | 2314 | if (!vect_is_simple_use (op, vinfo, &dt)) |
2de001ee RS |
2315 | { |
2316 | if (dump_enabled_p ()) | |
2317 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2318 | "use not simple.\n"); | |
2319 | return false; | |
2320 | } | |
bffb8014 | 2321 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
2de001ee RS |
2322 | } |
2323 | } | |
2324 | ||
2325 | if (overrun_p) | |
2326 | { | |
2327 | gcc_assert (can_overrun_p); | |
2328 | if (dump_enabled_p ()) | |
2329 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2330 | "Data access with gaps requires scalar " | |
2331 | "epilogue loop\n"); | |
2332 | LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; | |
2333 | } | |
2334 | ||
2335 | return true; | |
2336 | } | |
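/* Editorial illustration: a worked instance of the trailing-gap rule above,
   with made-up numbers.  Suppose a grouped load of 32-bit elements has
   DR_GROUP_GAP = 1, i.e. the scalar code never uses the last element of the
   group, so a vectorized access would read one excess element.  If the
   first access is known to be 16-byte aligned, the bound is
   vect_known_alignment_in_bytes / vect_get_scalar_dr_size = 16 / 4 = 4; a
   gap of 1 is below it, so every vector access still reads a non-gap
   element within the same 16-byte block and the overrun is treated as
   harmless.  Larger gaps are only acceptable for unmasked loads in a loop,
   where LOOP_VINFO_PEELING_FOR_GAPS requests a scalar epilogue
   iteration.  */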
2337 | ||
62da9e14 | 2338 | /* A subroutine of get_load_store_type, with a subset of the same |
32e8e429 | 2339 | arguments. Handle the case where STMT_INFO is a load or store that |
62da9e14 RS |
2340 | accesses consecutive elements with a negative step. */ |
2341 | ||
2342 | static vect_memory_access_type | |
32e8e429 | 2343 | get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype, |
62da9e14 RS |
2344 | vec_load_store_type vls_type, |
2345 | unsigned int ncopies) | |
2346 | { | |
89fa689a | 2347 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info); |
62da9e14 RS |
2348 | dr_alignment_support alignment_support_scheme; |
2349 | ||
2350 | if (ncopies > 1) | |
2351 | { | |
2352 | if (dump_enabled_p ()) | |
2353 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2354 | "multiple types with negative step.\n"); | |
2355 | return VMAT_ELEMENTWISE; | |
2356 | } | |
2357 | ||
89fa689a | 2358 | alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false); |
62da9e14 RS |
2359 | if (alignment_support_scheme != dr_aligned |
2360 | && alignment_support_scheme != dr_unaligned_supported) | |
2361 | { | |
2362 | if (dump_enabled_p ()) | |
2363 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2364 | "negative step but alignment required.\n"); | |
2365 | return VMAT_ELEMENTWISE; | |
2366 | } | |
2367 | ||
2368 | if (vls_type == VLS_STORE_INVARIANT) | |
2369 | { | |
2370 | if (dump_enabled_p ()) | |
2371 | dump_printf_loc (MSG_NOTE, vect_location, | |
2372 | "negative step with invariant source;" | |
2373 | " no permute needed.\n"); | |
2374 | return VMAT_CONTIGUOUS_DOWN; | |
2375 | } | |
2376 | ||
2377 | if (!perm_mask_for_reverse (vectype)) | |
2378 | { | |
2379 | if (dump_enabled_p ()) | |
2380 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2381 | "negative step and reversing not supported.\n"); | |
2382 | return VMAT_ELEMENTWISE; | |
2383 | } | |
2384 | ||
2385 | return VMAT_CONTIGUOUS_REVERSE; | |
2386 | } | |
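/* Editorial illustration: the typical VMAT_CONTIGUOUS_REVERSE case.  For a
   load such as a[n-1-i] of 32-bit elements the step is -4 bytes; with a
   single copy (NCOPIES = 1), a supported alignment scheme and a reverse
   permutation available (see perm_mask_for_reverse above), the access is
   classified VMAT_CONTIGUOUS_REVERSE: the vectorized code can load the
   elements in memory order and restore the original order with a
   VEC_PERM_EXPR, instead of falling back to elementwise scalar
   accesses.  */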
2387 | ||
32e8e429 | 2388 | /* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true |
2de001ee RS |
2389 | if there is a memory access type that the vectorized form can use, |
2390 | storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers | |
2391 | or scatters, fill in GS_INFO accordingly. | |
2392 | ||
2393 | SLP says whether we're performing SLP rather than loop vectorization. | |
7e11fc7f | 2394 | MASKED_P is true if the statement is conditional on a vectorized mask. |
62da9e14 RS |
2395 | VECTYPE is the vector type that the vectorized statements will use. |
2396 | NCOPIES is the number of vector statements that will be needed. */ | |
2de001ee RS |
2397 | |
2398 | static bool | |
32e8e429 RS |
2399 | get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp, |
2400 | bool masked_p, vec_load_store_type vls_type, | |
2401 | unsigned int ncopies, | |
2de001ee RS |
2402 | vect_memory_access_type *memory_access_type, |
2403 | gather_scatter_info *gs_info) | |
2404 | { | |
2de001ee RS |
2405 | vec_info *vinfo = stmt_info->vinfo; |
2406 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4d694b27 | 2407 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
2de001ee RS |
2408 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
2409 | { | |
2410 | *memory_access_type = VMAT_GATHER_SCATTER; | |
86a91c0a | 2411 | if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)) |
2de001ee | 2412 | gcc_unreachable (); |
894dd753 | 2413 | else if (!vect_is_simple_use (gs_info->offset, vinfo, |
2de001ee RS |
2414 | &gs_info->offset_dt, |
2415 | &gs_info->offset_vectype)) | |
2416 | { | |
2417 | if (dump_enabled_p ()) | |
2418 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2419 | "%s index use not simple.\n", | |
2420 | vls_type == VLS_LOAD ? "gather" : "scatter"); | |
2421 | return false; | |
2422 | } | |
2423 | } | |
2424 | else if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) | |
2425 | { | |
86a91c0a RS |
2426 | if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p, |
2427 | vls_type, memory_access_type, gs_info)) | |
2de001ee RS |
2428 | return false; |
2429 | } | |
2430 | else if (STMT_VINFO_STRIDED_P (stmt_info)) | |
2431 | { | |
2432 | gcc_assert (!slp); | |
ab2fc782 | 2433 | if (loop_vinfo |
86a91c0a | 2434 | && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo, |
429ef523 | 2435 | masked_p, gs_info)) |
ab2fc782 RS |
2436 | *memory_access_type = VMAT_GATHER_SCATTER; |
2437 | else | |
2438 | *memory_access_type = VMAT_ELEMENTWISE; | |
2de001ee RS |
2439 | } |
2440 | else | |
62da9e14 | 2441 | { |
86a91c0a | 2442 | int cmp = compare_step_with_zero (stmt_info); |
62da9e14 RS |
2443 | if (cmp < 0) |
2444 | *memory_access_type = get_negative_load_store_type | |
86a91c0a | 2445 | (stmt_info, vectype, vls_type, ncopies); |
62da9e14 RS |
2446 | else if (cmp == 0) |
2447 | { | |
2448 | gcc_assert (vls_type == VLS_LOAD); | |
2449 | *memory_access_type = VMAT_INVARIANT; | |
2450 | } | |
2451 | else | |
2452 | *memory_access_type = VMAT_CONTIGUOUS; | |
2453 | } | |
2de001ee | 2454 | |
4d694b27 RS |
2455 | if ((*memory_access_type == VMAT_ELEMENTWISE |
2456 | || *memory_access_type == VMAT_STRIDED_SLP) | |
2457 | && !nunits.is_constant ()) | |
2458 | { | |
2459 | if (dump_enabled_p ()) | |
2460 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2461 | "Not using elementwise accesses due to variable " | |
2462 | "vectorization factor.\n"); | |
2463 | return false; | |
2464 | } | |
2465 | ||
2de001ee RS |
2466 | /* FIXME: At the moment the cost model seems to underestimate the |
2467 | cost of using elementwise accesses. This check preserves the | |
2468 | traditional behavior until that can be fixed. */ | |
3ad3b3ac RS |
2469 | stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
2470 | if (!first_stmt_info) | |
2471 | first_stmt_info = stmt_info; | |
2de001ee | 2472 | if (*memory_access_type == VMAT_ELEMENTWISE |
3ad3b3ac | 2473 | && !STMT_VINFO_STRIDED_P (first_stmt_info) |
bffb8014 | 2474 | && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info) |
2c53b149 RB |
2475 | && !DR_GROUP_NEXT_ELEMENT (stmt_info) |
2476 | && !pow2p_hwi (DR_GROUP_SIZE (stmt_info)))) | |
2de001ee RS |
2477 | { |
2478 | if (dump_enabled_p ()) | |
2479 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2480 | "not falling back to elementwise accesses\n"); | |
2481 | return false; | |
2482 | } | |
2483 | return true; | |
2484 | } | |
2485 | ||
aaeefd88 | 2486 | /* Return true if boolean argument MASK is suitable for vectorizing |
32e8e429 | 2487 | conditional load or store STMT_INFO. When returning true, store the type |
929b4411 RS |
2488 | of the definition in *MASK_DT_OUT and the type of the vectorized mask |
2489 | in *MASK_VECTYPE_OUT. */ | |
aaeefd88 RS |
2490 | |
2491 | static bool | |
32e8e429 | 2492 | vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask, |
929b4411 RS |
2493 | vect_def_type *mask_dt_out, |
2494 | tree *mask_vectype_out) | |
aaeefd88 RS |
2495 | { |
2496 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask))) | |
2497 | { | |
2498 | if (dump_enabled_p ()) | |
2499 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2500 | "mask argument is not a boolean.\n"); | |
2501 | return false; | |
2502 | } | |
2503 | ||
2504 | if (TREE_CODE (mask) != SSA_NAME) | |
2505 | { | |
2506 | if (dump_enabled_p ()) | |
2507 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2508 | "mask argument is not an SSA name.\n"); | |
2509 | return false; | |
2510 | } | |
2511 | ||
929b4411 | 2512 | enum vect_def_type mask_dt; |
aaeefd88 | 2513 | tree mask_vectype; |
894dd753 | 2514 | if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype)) |
aaeefd88 RS |
2515 | { |
2516 | if (dump_enabled_p ()) | |
2517 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2518 | "mask use not simple.\n"); | |
2519 | return false; | |
2520 | } | |
2521 | ||
2522 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2523 | if (!mask_vectype) | |
2524 | mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype)); | |
2525 | ||
2526 | if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)) | |
2527 | { | |
2528 | if (dump_enabled_p ()) | |
2529 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2530 | "could not find an appropriate vector mask type.\n"); | |
2531 | return false; | |
2532 | } | |
2533 | ||
2534 | if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype), | |
2535 | TYPE_VECTOR_SUBPARTS (vectype))) | |
2536 | { | |
2537 | if (dump_enabled_p ()) | |
2538 | { | |
2539 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2540 | "vector mask type "); | |
2541 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype); | |
2542 | dump_printf (MSG_MISSED_OPTIMIZATION, | |
2543 | " does not match vector data type "); | |
2544 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); | |
2545 | dump_printf (MSG_MISSED_OPTIMIZATION, ".\n"); | |
2546 | } | |
2547 | return false; | |
2548 | } | |
2549 | ||
929b4411 | 2550 | *mask_dt_out = mask_dt; |
aaeefd88 RS |
2551 | *mask_vectype_out = mask_vectype; |
2552 | return true; | |
2553 | } | |
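
/* Editorial sketch (not part of the original file): the kind of loop whose
   mask reaches vect_check_load_store_mask.  If-conversion rewrites the
   conditional accesses below as IFN_MASK_LOAD / IFN_MASK_STORE calls that
   carry a boolean mask operand; this function then checks that the mask is
   a scalar boolean SSA name whose vector mask type has the same number of
   elements as the data vector type.  Assumes a target with masked loads
   and stores.  */

void
masked_access_example (int *restrict dst, const int *restrict src,
		       const unsigned char *restrict cond, int n)
{
  for (int i = 0; i < n; i++)
    if (cond[i])		/* cond[i] != 0 becomes the mask operand.  */
      dst[i] = src[i];		/* Conditional load and store of int lanes.  */
}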
2554 | ||
3133c3b6 | 2555 | /* Return true if stored value RHS is suitable for vectorizing store |
32e8e429 | 2556 | statement STMT_INFO. When returning true, store the type of the |
929b4411 RS |
2557 | definition in *RHS_DT_OUT, the type of the vectorized store value in |
2558 | *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */ | |
3133c3b6 RS |
2559 | |
2560 | static bool | |
32e8e429 RS |
2561 | vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs, |
2562 | vect_def_type *rhs_dt_out, tree *rhs_vectype_out, | |
2563 | vec_load_store_type *vls_type_out) | |
3133c3b6 RS |
2564 | { |
2565 | /* If this is a store from a constant, make sure | |
2566 | native_encode_expr can handle it. */ | |
2567 | if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0) | |
2568 | { | |
2569 | if (dump_enabled_p ()) | |
2570 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2571 | "cannot encode constant as a byte sequence.\n"); | |
2572 | return false; | |
2573 | } | |
2574 | ||
929b4411 | 2575 | enum vect_def_type rhs_dt; |
3133c3b6 | 2576 | tree rhs_vectype; |
894dd753 | 2577 | if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype)) |
3133c3b6 RS |
2578 | { |
2579 | if (dump_enabled_p ()) | |
2580 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2581 | "use not simple.\n"); | |
2582 | return false; | |
2583 | } | |
2584 | ||
2585 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2586 | if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype)) | |
2587 | { | |
2588 | if (dump_enabled_p ()) | |
2589 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
2590 | "incompatible vector types.\n"); | |
2591 | return false; | |
2592 | } | |
2593 | ||
929b4411 | 2594 | *rhs_dt_out = rhs_dt; |
3133c3b6 | 2595 | *rhs_vectype_out = rhs_vectype; |
929b4411 | 2596 | if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def) |
3133c3b6 RS |
2597 | *vls_type_out = VLS_STORE_INVARIANT; |
2598 | else | |
2599 | *vls_type_out = VLS_STORE; | |
2600 | return true; | |
2601 | } | |
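
/* Editorial sketch (not part of the original file): the two classifications
   returned in *VLS_TYPE_OUT.  A stored value that is constant or defined
   outside the loop gives VLS_STORE_INVARIANT (the vector of stored values
   can be built once, outside the loop); a value computed inside the loop
   gives VLS_STORE.  */

void
store_rhs_example (int *restrict a, const int *restrict b, int c, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = c;			/* External def: VLS_STORE_INVARIANT.  */
  for (int i = 0; i < n; i++)
    a[i] = b[i] + 1;		/* Internal def: VLS_STORE.  */
}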
2602 | ||
82570274 | 2603 | /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO. |
bc9587eb RS |
2604 | Note that we support masks with floating-point type, in which case the |
2605 | floats are interpreted as a bitmask. */ | |
2606 | ||
2607 | static tree | |
82570274 | 2608 | vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype) |
bc9587eb RS |
2609 | { |
2610 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
2611 | return build_int_cst (masktype, -1); | |
2612 | else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE) | |
2613 | { | |
2614 | tree mask = build_int_cst (TREE_TYPE (masktype), -1); | |
2615 | mask = build_vector_from_val (masktype, mask); | |
82570274 | 2616 | return vect_init_vector (stmt_info, mask, masktype, NULL); |
bc9587eb RS |
2617 | } |
2618 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype))) | |
2619 | { | |
2620 | REAL_VALUE_TYPE r; | |
2621 | long tmp[6]; | |
2622 | for (int j = 0; j < 6; ++j) | |
2623 | tmp[j] = -1; | |
2624 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype))); | |
2625 | tree mask = build_real (TREE_TYPE (masktype), r); | |
2626 | mask = build_vector_from_val (masktype, mask); | |
82570274 | 2627 | return vect_init_vector (stmt_info, mask, masktype, NULL); |
bc9587eb RS |
2628 | } |
2629 | gcc_unreachable (); | |
2630 | } | |
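
/* Editorial sketch (not part of the original file): what the floating-point
   branch above builds.  Some gather builtins (e.g. the x86 ones) take their
   mask in a float or double vector, and "all ones" then means a lane whose
   bit pattern is all 1s reinterpreted as a float; the hardware typically
   tests only the element's sign bit.  A host-side equivalent of that
   reinterpretation: */

#include <stdint.h>
#include <string.h>

static double
all_ones_as_double (void)
{
  uint64_t bits = ~(uint64_t) 0;	/* Every mask bit set.  */
  double d;
  memcpy (&d, &bits, sizeof d);		/* Reinterpret, as real_from_target does.  */
  return d;
}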
2631 | ||
2632 | /* Build an all-zero merge value of type VECTYPE while vectorizing | |
82570274 | 2633 | STMT_INFO as a gather load. */ |
bc9587eb RS |
2634 | |
2635 | static tree | |
82570274 | 2636 | vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype) |
bc9587eb RS |
2637 | { |
2638 | tree merge; | |
2639 | if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE) | |
2640 | merge = build_int_cst (TREE_TYPE (vectype), 0); | |
2641 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype))) | |
2642 | { | |
2643 | REAL_VALUE_TYPE r; | |
2644 | long tmp[6]; | |
2645 | for (int j = 0; j < 6; ++j) | |
2646 | tmp[j] = 0; | |
2647 | real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype))); | |
2648 | merge = build_real (TREE_TYPE (vectype), r); | |
2649 | } | |
2650 | else | |
2651 | gcc_unreachable (); | |
2652 | merge = build_vector_from_val (vectype, merge); | |
82570274 | 2653 | return vect_init_vector (stmt_info, merge, vectype, NULL); |
bc9587eb RS |
2654 | } |
2655 | ||
32e8e429 RS |
2656 | /* Build a gather load call while vectorizing STMT_INFO. Insert new |
2657 | instructions before GSI and add them to VEC_STMT. GS_INFO describes | |
2658 | the gather load operation. If the load is conditional, MASK is the | |
2659 | unvectorized condition and MASK_DT is its definition type, otherwise | |
2660 | MASK is null. */ | |
c48d2d35 RS |
2661 | |
2662 | static void | |
32e8e429 RS |
2663 | vect_build_gather_load_calls (stmt_vec_info stmt_info, |
2664 | gimple_stmt_iterator *gsi, | |
1eede195 | 2665 | stmt_vec_info *vec_stmt, |
32e8e429 | 2666 | gather_scatter_info *gs_info, |
e4057a39 | 2667 | tree mask) |
c48d2d35 | 2668 | { |
c48d2d35 RS |
2669 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
2670 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2671 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2672 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
2673 | int ncopies = vect_get_num_copies (loop_vinfo, vectype); | |
2674 | edge pe = loop_preheader_edge (loop); | |
2675 | enum { NARROW, NONE, WIDEN } modifier; | |
2676 | poly_uint64 gather_off_nunits | |
2677 | = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype); | |
2678 | ||
2679 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl)); | |
2680 | tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl)); | |
2681 | tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2682 | tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2683 | tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2684 | tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
2685 | tree scaletype = TREE_VALUE (arglist); | |
2686 | gcc_checking_assert (types_compatible_p (srctype, rettype) | |
2687 | && (!mask || types_compatible_p (srctype, masktype))); | |
2688 | ||
2689 | tree perm_mask = NULL_TREE; | |
2690 | tree mask_perm_mask = NULL_TREE; | |
2691 | if (known_eq (nunits, gather_off_nunits)) | |
2692 | modifier = NONE; | |
2693 | else if (known_eq (nunits * 2, gather_off_nunits)) | |
2694 | { | |
2695 | modifier = WIDEN; | |
2696 | ||
2697 | /* Currently widening gathers and scatters are only supported for | |
2698 | fixed-length vectors. */ | |
2699 | int count = gather_off_nunits.to_constant (); | |
2700 | vec_perm_builder sel (count, count, 1); | |
2701 | for (int i = 0; i < count; ++i) | |
2702 | sel.quick_push (i | (count / 2)); | |
2703 | ||
2704 | vec_perm_indices indices (sel, 1, count); | |
2705 | perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype, | |
2706 | indices); | |
2707 | } | |
2708 | else if (known_eq (nunits, gather_off_nunits * 2)) | |
2709 | { | |
2710 | modifier = NARROW; | |
2711 | ||
2712 | /* Currently narrowing gathers and scatters are only supported for | |
2713 | fixed-length vectors. */ | |
2714 | int count = nunits.to_constant (); | |
2715 | vec_perm_builder sel (count, count, 1); | |
2716 | sel.quick_grow (count); | |
2717 | for (int i = 0; i < count; ++i) | |
2718 | sel[i] = i < count / 2 ? i : i + count / 2; | |
2719 | vec_perm_indices indices (sel, 2, count); | |
2720 | perm_mask = vect_gen_perm_mask_checked (vectype, indices); | |
2721 | ||
2722 | ncopies *= 2; | |
2723 | ||
2724 | if (mask) | |
2725 | { | |
2726 | for (int i = 0; i < count; ++i) | |
2727 | sel[i] = i | (count / 2); | |
2728 | indices.new_vector (sel, 2, count); | |
2729 | mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices); | |
2730 | } | |
2731 | } | |
2732 | else | |
2733 | gcc_unreachable (); | |
2734 | ||
86a91c0a RS |
2735 | tree scalar_dest = gimple_get_lhs (stmt_info->stmt); |
2736 | tree vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
c48d2d35 RS |
2737 | |
2738 | tree ptr = fold_convert (ptrtype, gs_info->base); | |
2739 | if (!is_gimple_min_invariant (ptr)) | |
2740 | { | |
2741 | gimple_seq seq; | |
2742 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
2743 | basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
2744 | gcc_assert (!new_bb); | |
2745 | } | |
2746 | ||
2747 | tree scale = build_int_cst (scaletype, gs_info->scale); | |
2748 | ||
2749 | tree vec_oprnd0 = NULL_TREE; | |
2750 | tree vec_mask = NULL_TREE; | |
2751 | tree src_op = NULL_TREE; | |
2752 | tree mask_op = NULL_TREE; | |
2753 | tree prev_res = NULL_TREE; | |
2754 | stmt_vec_info prev_stmt_info = NULL; | |
2755 | ||
2756 | if (!mask) | |
2757 | { | |
86a91c0a RS |
2758 | src_op = vect_build_zero_merge_argument (stmt_info, rettype); |
2759 | mask_op = vect_build_all_ones_mask (stmt_info, masktype); | |
c48d2d35 RS |
2760 | } |
2761 | ||
2762 | for (int j = 0; j < ncopies; ++j) | |
2763 | { | |
2764 | tree op, var; | |
c48d2d35 RS |
2765 | if (modifier == WIDEN && (j & 1)) |
2766 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, | |
86a91c0a | 2767 | perm_mask, stmt_info, gsi); |
c48d2d35 RS |
2768 | else if (j == 0) |
2769 | op = vec_oprnd0 | |
86a91c0a | 2770 | = vect_get_vec_def_for_operand (gs_info->offset, stmt_info); |
c48d2d35 | 2771 | else |
e4057a39 RS |
2772 | op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo, |
2773 | vec_oprnd0); | |
c48d2d35 RS |
2774 | |
2775 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
2776 | { | |
2777 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), | |
2778 | TYPE_VECTOR_SUBPARTS (idxtype))); | |
2779 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); | |
2780 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); | |
e1bd7296 | 2781 | gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
86a91c0a | 2782 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c48d2d35 RS |
2783 | op = var; |
2784 | } | |
2785 | ||
2786 | if (mask) | |
2787 | { | |
2788 | if (mask_perm_mask && (j & 1)) | |
2789 | mask_op = permute_vec_elements (mask_op, mask_op, | |
86a91c0a | 2790 | mask_perm_mask, stmt_info, gsi); |
c48d2d35 RS |
2791 | else |
2792 | { | |
2793 | if (j == 0) | |
86a91c0a | 2794 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info); |
c48d2d35 | 2795 | else |
e4057a39 RS |
2796 | vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo, |
2797 | vec_mask); | |
c48d2d35 RS |
2798 | |
2799 | mask_op = vec_mask; | |
2800 | if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask))) | |
2801 | { | |
2802 | gcc_assert | |
2803 | (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)), | |
2804 | TYPE_VECTOR_SUBPARTS (masktype))); | |
2805 | var = vect_get_new_ssa_name (masktype, vect_simple_var); | |
2806 | mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); | |
e1bd7296 RS |
2807 | gassign *new_stmt |
2808 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); | |
86a91c0a | 2809 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c48d2d35 RS |
2810 | mask_op = var; |
2811 | } | |
2812 | } | |
2813 | src_op = mask_op; | |
2814 | } | |
2815 | ||
e1bd7296 RS |
2816 | gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op, |
2817 | mask_op, scale); | |
c48d2d35 | 2818 | |
e1bd7296 | 2819 | stmt_vec_info new_stmt_info; |
c48d2d35 RS |
2820 | if (!useless_type_conversion_p (vectype, rettype)) |
2821 | { | |
2822 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype), | |
2823 | TYPE_VECTOR_SUBPARTS (rettype))); | |
2824 | op = vect_get_new_ssa_name (rettype, vect_simple_var); | |
e1bd7296 | 2825 | gimple_call_set_lhs (new_call, op); |
86a91c0a | 2826 | vect_finish_stmt_generation (stmt_info, new_call, gsi); |
c48d2d35 RS |
2827 | var = make_ssa_name (vec_dest); |
2828 | op = build1 (VIEW_CONVERT_EXPR, vectype, op); | |
e1bd7296 | 2829 | gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); |
86a91c0a RS |
2830 | new_stmt_info |
2831 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
c48d2d35 RS |
2832 | } |
2833 | else | |
2834 | { | |
e1bd7296 RS |
2835 | var = make_ssa_name (vec_dest, new_call); |
2836 | gimple_call_set_lhs (new_call, var); | |
86a91c0a RS |
2837 | new_stmt_info |
2838 | = vect_finish_stmt_generation (stmt_info, new_call, gsi); | |
c48d2d35 RS |
2839 | } |
2840 | ||
c48d2d35 RS |
2841 | if (modifier == NARROW) |
2842 | { | |
2843 | if ((j & 1) == 0) | |
2844 | { | |
2845 | prev_res = var; | |
2846 | continue; | |
2847 | } | |
86a91c0a RS |
2848 | var = permute_vec_elements (prev_res, var, perm_mask, |
2849 | stmt_info, gsi); | |
e1bd7296 | 2850 | new_stmt_info = loop_vinfo->lookup_def (var); |
c48d2d35 RS |
2851 | } |
2852 | ||
ddf98a96 | 2853 | if (prev_stmt_info == NULL) |
e1bd7296 | 2854 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
c48d2d35 | 2855 | else |
e1bd7296 RS |
2856 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
2857 | prev_stmt_info = new_stmt_info; | |
c48d2d35 RS |
2858 | } |
2859 | } | |
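
/* Editorial sketch (not part of the original file): the permutation
   selectors built in the WIDEN and NARROW branches above, shown for a
   concrete fixed-length case with COUNT == 8.  The WIDEN selector repeats
   the high half of the offset vector so that the second gather of a pair
   uses offsets 4..7; the NARROW selector takes the low half of each of the
   two intermediate gather results when gluing them back together.  */

static void
gather_perm_selectors (unsigned widen_sel[8], unsigned narrow_sel[8])
{
  const unsigned count = 8;
  for (unsigned i = 0; i < count; ++i)
    widen_sel[i] = i | (count / 2);			/* 4 5 6 7 4 5 6 7 */
  for (unsigned i = 0; i < count; ++i)
    narrow_sel[i] = i < count / 2 ? i : i + count / 2;	/* 0 1 2 3 8 9 10 11 */
}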
2860 | ||
bfaa08b7 RS |
2861 | /* Prepare the base and offset in GS_INFO for vectorization. |
2862 | Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET | |
82570274 RS |
2863 | to the vectorized offset argument for the first copy of STMT_INFO. |
2864 | STMT_INFO is the statement described by GS_INFO and LOOP is the | |
2865 | containing loop. */ | |
bfaa08b7 RS |
2866 | |
2867 | static void | |
82570274 | 2868 | vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info, |
bfaa08b7 RS |
2869 | gather_scatter_info *gs_info, |
2870 | tree *dataref_ptr, tree *vec_offset) | |
2871 | { | |
2872 | gimple_seq stmts = NULL; | |
2873 | *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE); | |
2874 | if (stmts != NULL) | |
2875 | { | |
2876 | basic_block new_bb; | |
2877 | edge pe = loop_preheader_edge (loop); | |
2878 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
2879 | gcc_assert (!new_bb); | |
2880 | } | |
2881 | tree offset_type = TREE_TYPE (gs_info->offset); | |
2882 | tree offset_vectype = get_vectype_for_scalar_type (offset_type); | |
82570274 | 2883 | *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info, |
bfaa08b7 RS |
2884 | offset_vectype); |
2885 | } | |
2886 | ||
ab2fc782 RS |
2887 | /* Prepare to implement a grouped or strided load or store using |
2888 | the gather load or scatter store operation described by GS_INFO. | |
32e8e429 | 2889 | STMT_INFO is the load or store statement. |
ab2fc782 RS |
2890 | |
2891 | Set *DATAREF_BUMP to the amount that should be added to the base | |
2892 | address after each copy of the vectorized statement. Set *VEC_OFFSET | |
2893 | to an invariant offset vector in which element I has the value | |
2894 | I * DR_STEP / SCALE. */ | |
2895 | ||
2896 | static void | |
32e8e429 RS |
2897 | vect_get_strided_load_store_ops (stmt_vec_info stmt_info, |
2898 | loop_vec_info loop_vinfo, | |
ab2fc782 RS |
2899 | gather_scatter_info *gs_info, |
2900 | tree *dataref_bump, tree *vec_offset) | |
2901 | { | |
ab2fc782 RS |
2902 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
2903 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2904 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
2905 | gimple_seq stmts; | |
2906 | ||
2907 | tree bump = size_binop (MULT_EXPR, | |
2908 | fold_convert (sizetype, DR_STEP (dr)), | |
2909 | size_int (TYPE_VECTOR_SUBPARTS (vectype))); | |
2910 | *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE); | |
2911 | if (stmts) | |
2912 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
2913 | ||
2914 | /* The offset given in GS_INFO can have pointer type, so use the element | |
2915 | type of the vector instead. */ | |
2916 | tree offset_type = TREE_TYPE (gs_info->offset); | |
2917 | tree offset_vectype = get_vectype_for_scalar_type (offset_type); | |
2918 | offset_type = TREE_TYPE (offset_vectype); | |
2919 | ||
2920 | /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */ | |
2921 | tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr), | |
2922 | ssize_int (gs_info->scale)); | |
2923 | step = fold_convert (offset_type, step); | |
2924 | step = force_gimple_operand (step, &stmts, true, NULL_TREE); | |
2925 | ||
2926 | /* Create {0, X, X*2, X*3, ...}. */ | |
2927 | *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype, | |
2928 | build_zero_cst (offset_type), step); | |
2929 | if (stmts) | |
2930 | gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); | |
2931 | } | |
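
/* Editorial sketch (not part of the original file): a strided load that the
   code above prepares for vectorization via gathers.  For the access a[4*i]
   with int elements, DR_STEP is 16 bytes; with 4-element vectors and
   (assuming) scale 1, the invariant offset vector is { 0, 16, 32, 48 }
   (element I has value I * DR_STEP / SCALE) and the per-copy base-address
   bump is DR_STEP * nunits = 64 bytes.  */

void
strided_load_example (int *restrict dst, const int *restrict a, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = a[4 * i];		/* Stride of four ints between consecutive lanes.  */
}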
2932 | ||
2933 | /* Return the amount that should be added to a vector pointer to move | |
89fa689a | 2934 | to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference |
ab2fc782 RS |
2935 | being vectorized and MEMORY_ACCESS_TYPE describes the type of |
2936 | vectorization. */ | |
2937 | ||
2938 | static tree | |
89fa689a | 2939 | vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type, |
ab2fc782 RS |
2940 | vect_memory_access_type memory_access_type) |
2941 | { | |
2942 | if (memory_access_type == VMAT_INVARIANT) | |
2943 | return size_zero_node; | |
2944 | ||
2945 | tree iv_step = TYPE_SIZE_UNIT (aggr_type); | |
89fa689a | 2946 | tree step = vect_dr_behavior (dr_info)->step; |
ab2fc782 RS |
2947 | if (tree_int_cst_sgn (step) == -1) |
2948 | iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step); | |
2949 | return iv_step; | |
2950 | } | |
2951 | ||
37b14185 RB |
2952 | /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */ |
2953 | ||
2954 | static bool | |
32e8e429 | 2955 | vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 2956 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
e4057a39 | 2957 | tree vectype_in, stmt_vector_for_cost *cost_vec) |
37b14185 RB |
2958 | { |
2959 | tree op, vectype; | |
32e8e429 | 2960 | gcall *stmt = as_a <gcall *> (stmt_info->stmt); |
e4057a39 | 2961 | vec_info *vinfo = stmt_info->vinfo; |
37b14185 | 2962 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
928686b1 | 2963 | unsigned ncopies; |
37b14185 RB |
2964 | |
2965 | op = gimple_call_arg (stmt, 0); | |
2966 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
8c2f568c | 2967 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
37b14185 RB |
2968 | |
2969 | /* Multiple types in SLP are handled by creating the appropriate number of | |
2970 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
2971 | case of SLP. */ | |
2972 | if (slp_node) | |
2973 | ncopies = 1; | |
2974 | else | |
e8f142e2 | 2975 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
37b14185 RB |
2976 | |
2977 | gcc_assert (ncopies >= 1); | |
2978 | ||
2979 | tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in); | |
2980 | if (! char_vectype) | |
2981 | return false; | |
2982 | ||
8c2f568c RS |
2983 | poly_uint64 num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype); |
2984 | unsigned word_bytes; | |
2985 | if (!constant_multiple_p (num_bytes, nunits, &word_bytes)) | |
928686b1 RS |
2986 | return false; |
2987 | ||
d980067b RS |
2988 | /* The encoding uses one stepped pattern for each byte in the word. */ |
2989 | vec_perm_builder elts (num_bytes, word_bytes, 3); | |
2990 | for (unsigned i = 0; i < 3; ++i) | |
37b14185 | 2991 | for (unsigned j = 0; j < word_bytes; ++j) |
908a1a16 | 2992 | elts.quick_push ((i + 1) * word_bytes - j - 1); |
37b14185 | 2993 | |
e3342de4 RS |
2994 | vec_perm_indices indices (elts, 1, num_bytes); |
2995 | if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices)) | |
37b14185 RB |
2996 | return false; |
2997 | ||
2998 | if (! vec_stmt) | |
2999 | { | |
3000 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
adac3a68 | 3001 | DUMP_VECT_SCOPE ("vectorizable_bswap"); |
78604de0 | 3002 | if (! slp_node) |
37b14185 | 3003 | { |
68435eb2 RB |
3004 | record_stmt_cost (cost_vec, |
3005 | 1, vector_stmt, stmt_info, 0, vect_prologue); | |
3006 | record_stmt_cost (cost_vec, | |
3007 | ncopies, vec_perm, stmt_info, 0, vect_body); | |
37b14185 RB |
3008 | } |
3009 | return true; | |
3010 | } | |
3011 | ||
736d0f28 | 3012 | tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices); |
37b14185 RB |
3013 | |
3014 | /* Transform. */ | |
3015 | vec<tree> vec_oprnds = vNULL; | |
e1bd7296 | 3016 | stmt_vec_info new_stmt_info = NULL; |
37b14185 RB |
3017 | stmt_vec_info prev_stmt_info = NULL; |
3018 | for (unsigned j = 0; j < ncopies; j++) | |
3019 | { | |
3020 | /* Handle uses. */ | |
3021 | if (j == 0) | |
86a91c0a | 3022 | vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); |
37b14185 | 3023 | else |
e4057a39 | 3024 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); |
37b14185 RB |
3025 | |
3026 | /* Arguments are ready. Create the new vector stmt. */ | |
3027 | unsigned i; | |
3028 | tree vop; | |
3029 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) | |
3030 | { | |
e1bd7296 | 3031 | gimple *new_stmt; |
37b14185 RB |
3032 | tree tem = make_ssa_name (char_vectype); |
3033 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
3034 | char_vectype, vop)); | |
86a91c0a | 3035 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
37b14185 RB |
3036 | tree tem2 = make_ssa_name (char_vectype); |
3037 | new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR, | |
3038 | tem, tem, bswap_vconst); | |
86a91c0a | 3039 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
37b14185 RB |
3040 | tem = make_ssa_name (vectype); |
3041 | new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
3042 | vectype, tem2)); | |
86a91c0a RS |
3043 | new_stmt_info |
3044 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
37b14185 | 3045 | if (slp_node) |
e1bd7296 | 3046 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
37b14185 RB |
3047 | } |
3048 | ||
3049 | if (slp_node) | |
3050 | continue; | |
3051 | ||
3052 | if (j == 0) | |
e1bd7296 | 3053 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
37b14185 | 3054 | else |
e1bd7296 | 3055 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
37b14185 | 3056 | |
e1bd7296 | 3057 | prev_stmt_info = new_stmt_info; |
37b14185 RB |
3058 | } |
3059 | ||
3060 | vec_oprnds.release (); | |
3061 | return true; | |
3062 | } | |
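
/* Editorial sketch (not part of the original file): the kind of statement
   vectorizable_bswap handles and the permutation it emits.  For
   __builtin_bswap32 on a 16-byte vector, word_bytes is 4, so the selector
   built from (i + 1) * word_bytes - j - 1 begins { 3 2 1 0  7 6 5 4
   11 10 9 8 } and continues stepwise: each 4-byte word is reversed in
   place by one VEC_PERM_EXPR on the char-vector view of the operand.  */

void
bswap_loop_example (unsigned int *restrict dst,
		    const unsigned int *restrict src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = __builtin_bswap32 (src[i]);
}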
3063 | ||
b1b6836e RS |
3064 | /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have |
3065 | integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT | |
3066 | in a single step. On success, store the binary pack code in | |
3067 | *CONVERT_CODE. */ | |
3068 | ||
3069 | static bool | |
3070 | simple_integer_narrowing (tree vectype_out, tree vectype_in, | |
3071 | tree_code *convert_code) | |
3072 | { | |
3073 | if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out)) | |
3074 | || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in))) | |
3075 | return false; | |
3076 | ||
3077 | tree_code code; | |
3078 | int multi_step_cvt = 0; | |
3079 | auto_vec <tree, 8> interm_types; | |
3080 | if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in, | |
3081 | &code, &multi_step_cvt, | |
3082 | &interm_types) | |
3083 | || multi_step_cvt) | |
3084 | return false; | |
3085 | ||
3086 | *convert_code = code; | |
3087 | return true; | |
3088 | } | |
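
/* Editorial sketch (not part of the original file): the single-step
   narrowing this helper tests for.  When the vectorized call produces
   vectors of wide integer elements but the scalar statement's result is
   the narrower integer type, two intermediate results are combined with
   one pack-truncate operation (the *CONVERT_CODE returned here), e.g. two
   V2DI vectors into one V4SI.  Written with GCC's vector extensions, and
   assuming little-endian element order: */

typedef long long v2di_example __attribute__ ((vector_size (16)));
typedef int v4si_example __attribute__ ((vector_size (16)));

static v4si_example
pack_trunc_example (v2di_example lo, v2di_example hi)
{
  /* Truncate each 64-bit lane to 32 bits and concatenate the two halves,
     mirroring the effect of VEC_PACK_TRUNC_EXPR (lo, hi).  */
  return (v4si_example) { (int) lo[0], (int) lo[1], (int) hi[0], (int) hi[1] };
}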
5ce9450f | 3089 | |
ebfd146a IR |
3090 | /* Function vectorizable_call. |
3091 | ||
32e8e429 RS |
3092 | Check if STMT_INFO performs a function call that can be vectorized. |
3093 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
3094 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
3095 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
3096 | |
3097 | static bool | |
32e8e429 | 3098 | vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
3099 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
3100 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 3101 | { |
538dd0b7 | 3102 | gcall *stmt; |
ebfd146a IR |
3103 | tree vec_dest; |
3104 | tree scalar_dest; | |
0267732b | 3105 | tree op; |
ebfd146a | 3106 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; |
32e8e429 | 3107 | stmt_vec_info prev_stmt_info; |
ebfd146a | 3108 | tree vectype_out, vectype_in; |
c7bda0f4 RS |
3109 | poly_uint64 nunits_in; |
3110 | poly_uint64 nunits_out; | |
ebfd146a | 3111 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
190c2236 | 3112 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 3113 | vec_info *vinfo = stmt_info->vinfo; |
81c40241 | 3114 | tree fndecl, new_temp, rhs_type; |
2c58d42c RS |
3115 | enum vect_def_type dt[4] |
3116 | = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type, | |
3117 | vect_unknown_def_type }; | |
3118 | int ndts = ARRAY_SIZE (dt); | |
ebfd146a | 3119 | int ncopies, j; |
2c58d42c RS |
3120 | auto_vec<tree, 8> vargs; |
3121 | auto_vec<tree, 8> orig_vargs; | |
ebfd146a IR |
3122 | enum { NARROW, NONE, WIDEN } modifier; |
3123 | size_t i, nargs; | |
9d5e7640 | 3124 | tree lhs; |
ebfd146a | 3125 | |
190c2236 | 3126 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
3127 | return false; |
3128 | ||
66c16fd9 RB |
3129 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3130 | && ! vec_stmt) | |
ebfd146a IR |
3131 | return false; |
3132 | ||
86a91c0a RS |
3133 | /* Is STMT_INFO a vectorizable call? */ |
3134 | stmt = dyn_cast <gcall *> (stmt_info->stmt); | |
538dd0b7 | 3135 | if (!stmt) |
ebfd146a IR |
3136 | return false; |
3137 | ||
5ce9450f | 3138 | if (gimple_call_internal_p (stmt) |
bfaa08b7 | 3139 | && (internal_load_fn_p (gimple_call_internal_fn (stmt)) |
f307441a | 3140 | || internal_store_fn_p (gimple_call_internal_fn (stmt)))) |
c3a8f964 RS |
3141 | /* Handled by vectorizable_load and vectorizable_store. */ |
3142 | return false; | |
5ce9450f | 3143 | |
0136f8f0 AH |
3144 | if (gimple_call_lhs (stmt) == NULL_TREE |
3145 | || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
ebfd146a IR |
3146 | return false; |
3147 | ||
0136f8f0 | 3148 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); |
5a2c1986 | 3149 | |
b690cc0f RG |
3150 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
3151 | ||
ebfd146a IR |
3152 | /* Process function arguments. */ |
3153 | rhs_type = NULL_TREE; | |
b690cc0f | 3154 | vectype_in = NULL_TREE; |
ebfd146a IR |
3155 | nargs = gimple_call_num_args (stmt); |
3156 | ||
1b1562a5 MM |
3157 | /* Bail out if the function has more than four arguments; we do not have | |
3158 | interesting builtin functions to vectorize with more than two arguments | |
3159 | except for fma. No arguments is also not good. */ | |
2c58d42c | 3160 | if (nargs == 0 || nargs > 4) |
ebfd146a IR |
3161 | return false; |
3162 | ||
74bf76ed | 3163 | /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */ |
2c58d42c RS |
3164 | combined_fn cfn = gimple_call_combined_fn (stmt); |
3165 | if (cfn == CFN_GOMP_SIMD_LANE) | |
74bf76ed JJ |
3166 | { |
3167 | nargs = 0; | |
3168 | rhs_type = unsigned_type_node; | |
3169 | } | |
3170 | ||
2c58d42c RS |
3171 | int mask_opno = -1; |
3172 | if (internal_fn_p (cfn)) | |
3173 | mask_opno = internal_fn_mask_index (as_internal_fn (cfn)); | |
3174 | ||
ebfd146a IR |
3175 | for (i = 0; i < nargs; i++) |
3176 | { | |
b690cc0f RG |
3177 | tree opvectype; |
3178 | ||
ebfd146a | 3179 | op = gimple_call_arg (stmt, i); |
2c58d42c RS |
3180 | if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype)) |
3181 | { | |
3182 | if (dump_enabled_p ()) | |
3183 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3184 | "use not simple.\n"); | |
3185 | return false; | |
3186 | } | |
3187 | ||
3188 | /* Skip the mask argument to an internal function. This operand | |
3189 | has been converted via a pattern if necessary. */ | |
3190 | if ((int) i == mask_opno) | |
3191 | continue; | |
ebfd146a IR |
3192 | |
3193 | /* We can only handle calls with arguments of the same type. */ | |
3194 | if (rhs_type | |
8533c9d8 | 3195 | && !types_compatible_p (rhs_type, TREE_TYPE (op))) |
ebfd146a | 3196 | { |
73fbfcad | 3197 | if (dump_enabled_p ()) |
78c60e3d | 3198 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3199 | "argument types differ.\n"); |
ebfd146a IR |
3200 | return false; |
3201 | } | |
b690cc0f RG |
3202 | if (!rhs_type) |
3203 | rhs_type = TREE_TYPE (op); | |
ebfd146a | 3204 | |
b690cc0f RG |
3205 | if (!vectype_in) |
3206 | vectype_in = opvectype; | |
3207 | else if (opvectype | |
3208 | && opvectype != vectype_in) | |
3209 | { | |
73fbfcad | 3210 | if (dump_enabled_p ()) |
78c60e3d | 3211 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3212 | "argument vector types differ.\n"); |
b690cc0f RG |
3213 | return false; |
3214 | } | |
3215 | } | |
3216 | /* If all arguments are external or constant defs use a vector type with | |
3217 | the same size as the output vector type. */ | |
ebfd146a | 3218 | if (!vectype_in) |
b690cc0f | 3219 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
3220 | if (vec_stmt) |
3221 | gcc_assert (vectype_in); | |
3222 | if (!vectype_in) | |
3223 | { | |
73fbfcad | 3224 | if (dump_enabled_p ()) |
7d8930a0 | 3225 | { |
78c60e3d SS |
3226 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3227 | "no vectype for scalar type "); | |
3228 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 3229 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
3230 | } |
3231 | ||
3232 | return false; | |
3233 | } | |
ebfd146a IR |
3234 | |
3235 | /* FORNOW */ | |
b690cc0f RG |
3236 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
3237 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
c7bda0f4 | 3238 | if (known_eq (nunits_in * 2, nunits_out)) |
ebfd146a | 3239 | modifier = NARROW; |
c7bda0f4 | 3240 | else if (known_eq (nunits_out, nunits_in)) |
ebfd146a | 3241 | modifier = NONE; |
c7bda0f4 | 3242 | else if (known_eq (nunits_out * 2, nunits_in)) |
ebfd146a IR |
3243 | modifier = WIDEN; |
3244 | else | |
3245 | return false; | |
3246 | ||
70439f0d RS |
3247 | /* We only handle functions that do not read or clobber memory. */ |
3248 | if (gimple_vuse (stmt)) | |
3249 | { | |
3250 | if (dump_enabled_p ()) | |
3251 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3252 | "function reads from or writes to memory.\n"); | |
3253 | return false; | |
3254 | } | |
3255 | ||
ebfd146a IR |
3256 | /* For now, we only vectorize functions if a target specific builtin |
3257 | is available. TODO -- in some cases, it might be profitable to | |
3258 | insert the calls for pieces of the vector, in order to be able | |
3259 | to vectorize other operations in the loop. */ | |
70439f0d RS |
3260 | fndecl = NULL_TREE; |
3261 | internal_fn ifn = IFN_LAST; | |
70439f0d RS |
3262 | tree callee = gimple_call_fndecl (stmt); |
3263 | ||
3264 | /* First try using an internal function. */ | |
b1b6836e RS |
3265 | tree_code convert_code = ERROR_MARK; |
3266 | if (cfn != CFN_LAST | |
3267 | && (modifier == NONE | |
3268 | || (modifier == NARROW | |
3269 | && simple_integer_narrowing (vectype_out, vectype_in, | |
3270 | &convert_code)))) | |
70439f0d RS |
3271 | ifn = vectorizable_internal_function (cfn, callee, vectype_out, |
3272 | vectype_in); | |
3273 | ||
3274 | /* If that fails, try asking for a target-specific built-in function. */ | |
3275 | if (ifn == IFN_LAST) | |
3276 | { | |
3277 | if (cfn != CFN_LAST) | |
3278 | fndecl = targetm.vectorize.builtin_vectorized_function | |
3279 | (cfn, vectype_out, vectype_in); | |
7672aa9b | 3280 | else if (callee) |
70439f0d RS |
3281 | fndecl = targetm.vectorize.builtin_md_vectorized_function |
3282 | (callee, vectype_out, vectype_in); | |
3283 | } | |
3284 | ||
3285 | if (ifn == IFN_LAST && !fndecl) | |
ebfd146a | 3286 | { |
70439f0d | 3287 | if (cfn == CFN_GOMP_SIMD_LANE |
74bf76ed JJ |
3288 | && !slp_node |
3289 | && loop_vinfo | |
3290 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
3291 | && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME | |
3292 | && LOOP_VINFO_LOOP (loop_vinfo)->simduid | |
3293 | == SSA_NAME_VAR (gimple_call_arg (stmt, 0))) | |
3294 | { | |
3295 | /* We can handle IFN_GOMP_SIMD_LANE by returning a | |
3296 | { 0, 1, 2, ... vf - 1 } vector. */ | |
3297 | gcc_assert (nargs == 0); | |
3298 | } | |
37b14185 RB |
3299 | else if (modifier == NONE |
3300 | && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16) | |
3301 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32) | |
3302 | || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64))) | |
86a91c0a | 3303 | return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node, |
e4057a39 | 3304 | vectype_in, cost_vec); |
74bf76ed JJ |
3305 | else |
3306 | { | |
3307 | if (dump_enabled_p ()) | |
3308 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
e645e942 | 3309 | "function is not vectorizable.\n"); |
74bf76ed JJ |
3310 | return false; |
3311 | } | |
ebfd146a IR |
3312 | } |
3313 | ||
fce57248 | 3314 | if (slp_node) |
190c2236 | 3315 | ncopies = 1; |
b1b6836e | 3316 | else if (modifier == NARROW && ifn == IFN_LAST) |
e8f142e2 | 3317 | ncopies = vect_get_num_copies (loop_vinfo, vectype_out); |
ebfd146a | 3318 | else |
e8f142e2 | 3319 | ncopies = vect_get_num_copies (loop_vinfo, vectype_in); |
ebfd146a IR |
3320 | |
3321 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
3322 | needs to be generated. */ | |
3323 | gcc_assert (ncopies >= 1); | |
3324 | ||
ed623edb | 3325 | vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL); |
ebfd146a IR |
3326 | if (!vec_stmt) /* transformation not required. */ |
3327 | { | |
3328 | STMT_VINFO_TYPE (stmt_info) = call_vec_info_type; | |
adac3a68 | 3329 | DUMP_VECT_SCOPE ("vectorizable_call"); |
68435eb2 RB |
3330 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
3331 | if (ifn != IFN_LAST && modifier == NARROW && !slp_node) | |
3332 | record_stmt_cost (cost_vec, ncopies / 2, | |
3333 | vec_promote_demote, stmt_info, 0, vect_body); | |
b1b6836e | 3334 | |
2c58d42c RS |
3335 | if (loop_vinfo && mask_opno >= 0) |
3336 | { | |
3337 | unsigned int nvectors = (slp_node | |
3338 | ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) | |
3339 | : ncopies); | |
3340 | vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out); | |
3341 | } | |
ebfd146a IR |
3342 | return true; |
3343 | } | |
3344 | ||
67b8dbac | 3345 | /* Transform. */ |
ebfd146a | 3346 | |
73fbfcad | 3347 | if (dump_enabled_p ()) |
e645e942 | 3348 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); |
ebfd146a IR |
3349 | |
3350 | /* Handle def. */ | |
3351 | scalar_dest = gimple_call_lhs (stmt); | |
3352 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
3353 | ||
2c58d42c RS |
3354 | bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); |
3355 | ||
e1bd7296 | 3356 | stmt_vec_info new_stmt_info = NULL; |
ebfd146a | 3357 | prev_stmt_info = NULL; |
b1b6836e | 3358 | if (modifier == NONE || ifn != IFN_LAST) |
ebfd146a | 3359 | { |
b1b6836e | 3360 | tree prev_res = NULL_TREE; |
2c58d42c RS |
3361 | vargs.safe_grow (nargs); |
3362 | orig_vargs.safe_grow (nargs); | |
ebfd146a IR |
3363 | for (j = 0; j < ncopies; ++j) |
3364 | { | |
3365 | /* Build argument list for the vectorized call. */ | |
190c2236 JJ |
3366 | if (slp_node) |
3367 | { | |
ef062b13 | 3368 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 3369 | vec<tree> vec_oprnds0; |
190c2236 JJ |
3370 | |
3371 | for (i = 0; i < nargs; i++) | |
2c58d42c | 3372 | vargs[i] = gimple_call_arg (stmt, i); |
306b0c92 | 3373 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 3374 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
3375 | |
3376 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 3377 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0) |
190c2236 JJ |
3378 | { |
3379 | size_t k; | |
3380 | for (k = 0; k < nargs; k++) | |
3381 | { | |
37b5ec8f | 3382 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 | 3383 | vargs[k] = vec_oprndsk[i]; |
190c2236 | 3384 | } |
b1b6836e RS |
3385 | if (modifier == NARROW) |
3386 | { | |
2c58d42c RS |
3387 | /* We don't define any narrowing conditional functions |
3388 | at present. */ | |
3389 | gcc_assert (mask_opno < 0); | |
b1b6836e | 3390 | tree half_res = make_ssa_name (vectype_in); |
a844293d RS |
3391 | gcall *call |
3392 | = gimple_build_call_internal_vec (ifn, vargs); | |
3393 | gimple_call_set_lhs (call, half_res); | |
3394 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3395 | new_stmt_info |
86a91c0a | 3396 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
b1b6836e RS |
3397 | if ((i & 1) == 0) |
3398 | { | |
3399 | prev_res = half_res; | |
3400 | continue; | |
3401 | } | |
3402 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
3403 | gimple *new_stmt |
3404 | = gimple_build_assign (new_temp, convert_code, | |
3405 | prev_res, half_res); | |
3406 | new_stmt_info | |
86a91c0a RS |
3407 | = vect_finish_stmt_generation (stmt_info, new_stmt, |
3408 | gsi); | |
b1b6836e | 3409 | } |
70439f0d | 3410 | else |
b1b6836e | 3411 | { |
2c58d42c RS |
3412 | if (mask_opno >= 0 && masked_loop_p) |
3413 | { | |
3414 | unsigned int vec_num = vec_oprnds0.length (); | |
3415 | /* Always true for SLP. */ | |
3416 | gcc_assert (ncopies == 1); | |
3417 | tree mask = vect_get_loop_mask (gsi, masks, vec_num, | |
3418 | vectype_out, i); | |
3419 | vargs[mask_opno] = prepare_load_store_mask | |
3420 | (TREE_TYPE (mask), mask, vargs[mask_opno], gsi); | |
3421 | } | |
3422 | ||
a844293d | 3423 | gcall *call; |
b1b6836e | 3424 | if (ifn != IFN_LAST) |
a844293d | 3425 | call = gimple_build_call_internal_vec (ifn, vargs); |
b1b6836e | 3426 | else |
a844293d RS |
3427 | call = gimple_build_call_vec (fndecl, vargs); |
3428 | new_temp = make_ssa_name (vec_dest, call); | |
3429 | gimple_call_set_lhs (call, new_temp); | |
3430 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3431 | new_stmt_info |
86a91c0a | 3432 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
b1b6836e | 3433 | } |
e1bd7296 | 3434 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
190c2236 JJ |
3435 | } |
3436 | ||
3437 | for (i = 0; i < nargs; i++) | |
3438 | { | |
37b5ec8f | 3439 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3440 | vec_oprndsi.release (); |
190c2236 | 3441 | } |
190c2236 JJ |
3442 | continue; |
3443 | } | |
3444 | ||
ebfd146a IR |
3445 | for (i = 0; i < nargs; i++) |
3446 | { | |
3447 | op = gimple_call_arg (stmt, i); | |
3448 | if (j == 0) | |
3449 | vec_oprnd0 | |
86a91c0a | 3450 | = vect_get_vec_def_for_operand (op, stmt_info); |
ebfd146a | 3451 | else |
2c58d42c | 3452 | vec_oprnd0 |
e4057a39 | 3453 | = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]); |
2c58d42c RS |
3454 | |
3455 | orig_vargs[i] = vargs[i] = vec_oprnd0; | |
3456 | } | |
ebfd146a | 3457 | |
2c58d42c RS |
3458 | if (mask_opno >= 0 && masked_loop_p) |
3459 | { | |
3460 | tree mask = vect_get_loop_mask (gsi, masks, ncopies, | |
3461 | vectype_out, j); | |
3462 | vargs[mask_opno] | |
3463 | = prepare_load_store_mask (TREE_TYPE (mask), mask, | |
3464 | vargs[mask_opno], gsi); | |
ebfd146a IR |
3465 | } |
3466 | ||
2c58d42c | 3467 | if (cfn == CFN_GOMP_SIMD_LANE) |
74bf76ed | 3468 | { |
c7bda0f4 | 3469 | tree cst = build_index_vector (vectype_out, j * nunits_out, 1); |
74bf76ed | 3470 | tree new_var |
0e22bb5a | 3471 | = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_"); |
355fe088 | 3472 | gimple *init_stmt = gimple_build_assign (new_var, cst); |
86a91c0a | 3473 | vect_init_vector_1 (stmt_info, init_stmt, NULL); |
b731b390 | 3474 | new_temp = make_ssa_name (vec_dest); |
e1bd7296 RS |
3475 | gimple *new_stmt = gimple_build_assign (new_temp, new_var); |
3476 | new_stmt_info | |
86a91c0a | 3477 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
74bf76ed | 3478 | } |
b1b6836e RS |
3479 | else if (modifier == NARROW) |
3480 | { | |
2c58d42c RS |
3481 | /* We don't define any narrowing conditional functions at |
3482 | present. */ | |
3483 | gcc_assert (mask_opno < 0); | |
b1b6836e | 3484 | tree half_res = make_ssa_name (vectype_in); |
a844293d RS |
3485 | gcall *call = gimple_build_call_internal_vec (ifn, vargs); |
3486 | gimple_call_set_lhs (call, half_res); | |
3487 | gimple_call_set_nothrow (call, true); | |
86a91c0a RS |
3488 | new_stmt_info |
3489 | = vect_finish_stmt_generation (stmt_info, call, gsi); | |
b1b6836e RS |
3490 | if ((j & 1) == 0) |
3491 | { | |
3492 | prev_res = half_res; | |
3493 | continue; | |
3494 | } | |
3495 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
3496 | gassign *new_stmt = gimple_build_assign (new_temp, convert_code, |
3497 | prev_res, half_res); | |
3498 | new_stmt_info | |
86a91c0a | 3499 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
b1b6836e | 3500 | } |
74bf76ed JJ |
3501 | else |
3502 | { | |
a844293d | 3503 | gcall *call; |
70439f0d | 3504 | if (ifn != IFN_LAST) |
a844293d | 3505 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 3506 | else |
a844293d | 3507 | call = gimple_build_call_vec (fndecl, vargs); |
e1bd7296 | 3508 | new_temp = make_ssa_name (vec_dest, call); |
a844293d RS |
3509 | gimple_call_set_lhs (call, new_temp); |
3510 | gimple_call_set_nothrow (call, true); | |
86a91c0a RS |
3511 | new_stmt_info |
3512 | = vect_finish_stmt_generation (stmt_info, call, gsi); | |
74bf76ed | 3513 | } |
ebfd146a | 3514 | |
b1b6836e | 3515 | if (j == (modifier == NARROW ? 1 : 0)) |
e1bd7296 | 3516 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
ebfd146a | 3517 | else |
e1bd7296 | 3518 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
ebfd146a | 3519 | |
e1bd7296 | 3520 | prev_stmt_info = new_stmt_info; |
ebfd146a | 3521 | } |
b1b6836e RS |
3522 | } |
3523 | else if (modifier == NARROW) | |
3524 | { | |
2c58d42c RS |
3525 | /* We don't define any narrowing conditional functions at present. */ |
3526 | gcc_assert (mask_opno < 0); | |
ebfd146a IR |
3527 | for (j = 0; j < ncopies; ++j) |
3528 | { | |
3529 | /* Build argument list for the vectorized call. */ | |
3530 | if (j == 0) | |
9771b263 | 3531 | vargs.create (nargs * 2); |
ebfd146a | 3532 | else |
9771b263 | 3533 | vargs.truncate (0); |
ebfd146a | 3534 | |
190c2236 JJ |
3535 | if (slp_node) |
3536 | { | |
ef062b13 | 3537 | auto_vec<vec<tree> > vec_defs (nargs); |
9771b263 | 3538 | vec<tree> vec_oprnds0; |
190c2236 JJ |
3539 | |
3540 | for (i = 0; i < nargs; i++) | |
9771b263 | 3541 | vargs.quick_push (gimple_call_arg (stmt, i)); |
306b0c92 | 3542 | vect_get_slp_defs (vargs, slp_node, &vec_defs); |
37b5ec8f | 3543 | vec_oprnds0 = vec_defs[0]; |
190c2236 JJ |
3544 | |
3545 | /* Arguments are ready. Create the new vector stmt. */ | |
9771b263 | 3546 | for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2) |
190c2236 JJ |
3547 | { |
3548 | size_t k; | |
9771b263 | 3549 | vargs.truncate (0); |
190c2236 JJ |
3550 | for (k = 0; k < nargs; k++) |
3551 | { | |
37b5ec8f | 3552 | vec<tree> vec_oprndsk = vec_defs[k]; |
9771b263 DN |
3553 | vargs.quick_push (vec_oprndsk[i]); |
3554 | vargs.quick_push (vec_oprndsk[i + 1]); | |
190c2236 | 3555 | } |
a844293d | 3556 | gcall *call; |
70439f0d | 3557 | if (ifn != IFN_LAST) |
a844293d | 3558 | call = gimple_build_call_internal_vec (ifn, vargs); |
70439f0d | 3559 | else |
a844293d RS |
3560 | call = gimple_build_call_vec (fndecl, vargs); |
3561 | new_temp = make_ssa_name (vec_dest, call); | |
3562 | gimple_call_set_lhs (call, new_temp); | |
3563 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 3564 | new_stmt_info |
86a91c0a | 3565 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
e1bd7296 | 3566 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
190c2236 JJ |
3567 | } |
3568 | ||
3569 | for (i = 0; i < nargs; i++) | |
3570 | { | |
37b5ec8f | 3571 | vec<tree> vec_oprndsi = vec_defs[i]; |
9771b263 | 3572 | vec_oprndsi.release (); |
190c2236 | 3573 | } |
190c2236 JJ |
3574 | continue; |
3575 | } | |
3576 | ||
ebfd146a IR |
3577 | for (i = 0; i < nargs; i++) |
3578 | { | |
3579 | op = gimple_call_arg (stmt, i); | |
3580 | if (j == 0) | |
3581 | { | |
3582 | vec_oprnd0 | |
86a91c0a | 3583 | = vect_get_vec_def_for_operand (op, stmt_info); |
ebfd146a | 3584 | vec_oprnd1 |
e4057a39 | 3585 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
ebfd146a IR |
3586 | } |
3587 | else | |
3588 | { | |
e1bd7296 RS |
3589 | vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt, |
3590 | 2 * i + 1); | |
ebfd146a | 3591 | vec_oprnd0 |
e4057a39 | 3592 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
ebfd146a | 3593 | vec_oprnd1 |
e4057a39 | 3594 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
ebfd146a IR |
3595 | } |
3596 | ||
9771b263 DN |
3597 | vargs.quick_push (vec_oprnd0); |
3598 | vargs.quick_push (vec_oprnd1); | |
ebfd146a IR |
3599 | } |
3600 | ||
e1bd7296 | 3601 | gcall *new_stmt = gimple_build_call_vec (fndecl, vargs); |
ebfd146a IR |
3602 | new_temp = make_ssa_name (vec_dest, new_stmt); |
3603 | gimple_call_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
3604 | new_stmt_info |
3605 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
ebfd146a IR |
3606 | |
3607 | if (j == 0) | |
e1bd7296 | 3608 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info; |
ebfd146a | 3609 | else |
e1bd7296 | 3610 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
ebfd146a | 3611 | |
e1bd7296 | 3612 | prev_stmt_info = new_stmt_info; |
ebfd146a IR |
3613 | } |
3614 | ||
3615 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a | 3616 | } |
b1b6836e RS |
3617 | else |
3618 | /* No current target implements this case. */ | |
3619 | return false; | |
ebfd146a | 3620 | |
9771b263 | 3621 | vargs.release (); |
ebfd146a | 3622 | |
ebfd146a IR |
3623 | /* The call in STMT might prevent it from being removed in dce. |
3624 | We however cannot remove it here, due to the way the ssa name | |
3625 | it defines is mapped to the new definition. So just replace | |
3626 | rhs of the statement with something harmless. */ | |
3627 | ||
dd34c087 JJ |
3628 | if (slp_node) |
3629 | return true; | |
3630 | ||
211cd1e2 | 3631 | stmt_info = vect_orig_stmt (stmt_info); |
ed7b8123 | 3632 | lhs = gimple_get_lhs (stmt_info->stmt); |
3cc2fa2a | 3633 | |
e1bd7296 RS |
3634 | gassign *new_stmt |
3635 | = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs))); | |
9d97912b | 3636 | vinfo->replace_stmt (gsi, stmt_info, new_stmt); |
ebfd146a IR |
3637 | |
3638 | return true; | |
3639 | } | |
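
/* Editorial sketch (not part of the original file): a loop vectorizable_call
   handles.  Each copy of the statement becomes one vector call, found either
   as an internal function or through the two target hooks queried above;
   all arguments must share one vector type, and the scalar call is finally
   replaced by a harmless zero assignment so DCE can remove it.  copysignf is
   used here only as a typical example of such a call.  */

#include <math.h>

void
vector_call_example (float *restrict a, const float *restrict b,
		     const float *restrict c, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = copysignf (b[i], c[i]);
}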
3640 | ||
3641 | ||
0136f8f0 AH |
3642 | struct simd_call_arg_info |
3643 | { | |
3644 | tree vectype; | |
3645 | tree op; | |
0136f8f0 | 3646 | HOST_WIDE_INT linear_step; |
34e82342 | 3647 | enum vect_def_type dt; |
0136f8f0 | 3648 | unsigned int align; |
17b658af | 3649 | bool simd_lane_linear; |
0136f8f0 AH |
3650 | }; |
3651 | ||
17b658af JJ |
3652 | /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME, |
3653 | is linear within simd lane (but not within whole loop), note it in | |
3654 | *ARGINFO. */ | |
3655 | ||
3656 | static void | |
3657 | vect_simd_lane_linear (tree op, struct loop *loop, | |
3658 | struct simd_call_arg_info *arginfo) | |
3659 | { | |
355fe088 | 3660 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
17b658af JJ |
3661 | |
3662 | if (!is_gimple_assign (def_stmt) | |
3663 | || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR | |
3664 | || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))) | |
3665 | return; | |
3666 | ||
3667 | tree base = gimple_assign_rhs1 (def_stmt); | |
3668 | HOST_WIDE_INT linear_step = 0; | |
3669 | tree v = gimple_assign_rhs2 (def_stmt); | |
3670 | while (TREE_CODE (v) == SSA_NAME) | |
3671 | { | |
3672 | tree t; | |
3673 | def_stmt = SSA_NAME_DEF_STMT (v); | |
3674 | if (is_gimple_assign (def_stmt)) | |
3675 | switch (gimple_assign_rhs_code (def_stmt)) | |
3676 | { | |
3677 | case PLUS_EXPR: | |
3678 | t = gimple_assign_rhs2 (def_stmt); | |
3679 | if (linear_step || TREE_CODE (t) != INTEGER_CST) | |
3680 | return; | |
3681 | base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t); | |
3682 | v = gimple_assign_rhs1 (def_stmt); | |
3683 | continue; | |
3684 | case MULT_EXPR: | |
3685 | t = gimple_assign_rhs2 (def_stmt); | |
3686 | if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t)) | |
3687 | return; | |
3688 | linear_step = tree_to_shwi (t); | |
3689 | v = gimple_assign_rhs1 (def_stmt); | |
3690 | continue; | |
3691 | CASE_CONVERT: | |
3692 | t = gimple_assign_rhs1 (def_stmt); | |
3693 | if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE | |
3694 | || (TYPE_PRECISION (TREE_TYPE (v)) | |
3695 | < TYPE_PRECISION (TREE_TYPE (t)))) | |
3696 | return; | |
3697 | if (!linear_step) | |
3698 | linear_step = 1; | |
3699 | v = t; | |
3700 | continue; | |
3701 | default: | |
3702 | return; | |
3703 | } | |
8e4284d0 | 3704 | else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE) |
17b658af JJ |
3705 | && loop->simduid |
3706 | && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME | |
3707 | && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0)) | |
3708 | == loop->simduid)) | |
3709 | { | |
3710 | if (!linear_step) | |
3711 | linear_step = 1; | |
3712 | arginfo->linear_step = linear_step; | |
3713 | arginfo->op = base; | |
3714 | arginfo->simd_lane_linear = true; | |
3715 | return; | |
3716 | } | |
3717 | } | |
3718 | } | |
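
/* Editorial sketch (not part of the original file): where a simd-lane-linear
   address comes from.  A variable that is private to an "omp simd" lane is
   expanded into a per-lane slot indexed by IFN_GOMP_SIMD_LANE, so the
   address of that slot is BASE + LANE * STEP; the walk above follows the
   POINTER_PLUS_EXPR / MULT_EXPR chain back to the IFN_GOMP_SIMD_LANE call
   and records STEP as arginfo->linear_step.  The clause syntax below is
   illustrative; compile with -fopenmp-simd for the pragmas to take effect.  */

#pragma omp declare simd linear (p)
extern float simd_fn (float *p);

void
simd_lane_linear_example (float *restrict out, int n)
{
  float priv;
#pragma omp simd private (priv)
  for (int i = 0; i < n; i++)
    {
      priv = i * 0.25f;
      out[i] = simd_fn (&priv);	/* &priv is linear within the simd lane.  */
    }
}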
3719 | ||
cf1b2ba4 RS |
3720 | /* Return the number of elements in vector type VECTYPE, which is associated |
3721 | with a SIMD clone. At present these vectors always have a constant | |
3722 | length. */ | |
3723 | ||
3724 | static unsigned HOST_WIDE_INT | |
3725 | simd_clone_subparts (tree vectype) | |
3726 | { | |
928686b1 | 3727 | return TYPE_VECTOR_SUBPARTS (vectype).to_constant (); |
cf1b2ba4 RS |
3728 | } |
3729 | ||
0136f8f0 AH |
3730 | /* Function vectorizable_simd_clone_call. |
3731 | ||
32e8e429 | 3732 | Check if STMT_INFO performs a function call that can be vectorized |
0136f8f0 | 3733 | by calling a simd clone of the function. |
32e8e429 RS |
3734 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized |
3735 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
3736 | Return true if STMT_INFO is vectorizable in this way. */ | |
0136f8f0 AH |
3737 | |
3738 | static bool | |
32e8e429 RS |
3739 | vectorizable_simd_clone_call (stmt_vec_info stmt_info, |
3740 | gimple_stmt_iterator *gsi, | |
1eede195 | 3741 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 3742 | stmt_vector_for_cost *) |
0136f8f0 AH |
3743 | { |
3744 | tree vec_dest; | |
3745 | tree scalar_dest; | |
3746 | tree op, type; | |
3747 | tree vec_oprnd0 = NULL_TREE; | |
32e8e429 | 3748 | stmt_vec_info prev_stmt_info; |
0136f8f0 AH |
3749 | tree vectype; |
3750 | unsigned int nunits; | |
3751 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
3752 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
310213d4 | 3753 | vec_info *vinfo = stmt_info->vinfo; |
0136f8f0 | 3754 | struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL; |
81c40241 | 3755 | tree fndecl, new_temp; |
0136f8f0 | 3756 | int ncopies, j; |
00426f9a | 3757 | auto_vec<simd_call_arg_info> arginfo; |
0136f8f0 AH |
3758 | vec<tree> vargs = vNULL; |
3759 | size_t i, nargs; | |
3760 | tree lhs, rtype, ratype; | |
e7a74006 | 3761 | vec<constructor_elt, va_gc> *ret_ctor_elts = NULL; |
0136f8f0 AH |
3762 | |
3763 | /* Is STMT a vectorizable call? */ | |
32e8e429 RS |
3764 | gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt); |
3765 | if (!stmt) | |
0136f8f0 AH |
3766 | return false; |
3767 | ||
3768 | fndecl = gimple_call_fndecl (stmt); | |
3769 | if (fndecl == NULL_TREE) | |
3770 | return false; | |
3771 | ||
d52f5295 | 3772 | struct cgraph_node *node = cgraph_node::get (fndecl); |
0136f8f0 AH |
3773 | if (node == NULL || node->simd_clones == NULL) |
3774 | return false; | |
3775 | ||
3776 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
3777 | return false; | |
3778 | ||
66c16fd9 RB |
3779 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
3780 | && ! vec_stmt) | |
0136f8f0 AH |
3781 | return false; |
3782 | ||
3783 | if (gimple_call_lhs (stmt) | |
3784 | && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME) | |
3785 | return false; | |
3786 | ||
3787 | gcc_checking_assert (!stmt_can_throw_internal (stmt)); | |
3788 | ||
3789 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
3790 | ||
86a91c0a | 3791 | if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info)) |
0136f8f0 AH |
3792 | return false; |
3793 | ||
3794 | /* FORNOW */ | |
fce57248 | 3795 | if (slp_node) |
0136f8f0 AH |
3796 | return false; |
3797 | ||
3798 | /* Process function arguments. */ | |
3799 | nargs = gimple_call_num_args (stmt); | |
3800 | ||
3801 | /* Bail out if the function has zero arguments. */ | |
3802 | if (nargs == 0) | |
3803 | return false; | |
3804 | ||
00426f9a | 3805 | arginfo.reserve (nargs, true); |
0136f8f0 AH |
3806 | |
3807 | for (i = 0; i < nargs; i++) | |
3808 | { | |
3809 | simd_call_arg_info thisarginfo; | |
3810 | affine_iv iv; | |
3811 | ||
3812 | thisarginfo.linear_step = 0; | |
3813 | thisarginfo.align = 0; | |
3814 | thisarginfo.op = NULL_TREE; | |
17b658af | 3815 | thisarginfo.simd_lane_linear = false; |
0136f8f0 AH |
3816 | |
3817 | op = gimple_call_arg (stmt, i); | |
894dd753 | 3818 | if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt, |
81c40241 | 3819 | &thisarginfo.vectype) |
0136f8f0 AH |
3820 | || thisarginfo.dt == vect_uninitialized_def) |
3821 | { | |
3822 | if (dump_enabled_p ()) | |
3823 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3824 | "use not simple.\n"); | |
0136f8f0 AH |
3825 | return false; |
3826 | } | |
3827 | ||
3828 | if (thisarginfo.dt == vect_constant_def | |
3829 | || thisarginfo.dt == vect_external_def) | |
3830 | gcc_assert (thisarginfo.vectype == NULL_TREE); | |
3831 | else | |
3832 | gcc_assert (thisarginfo.vectype != NULL_TREE); | |
3833 | ||
6c9e85fb JJ |
3834 | /* For linear arguments, the analyze phase should have saved |
3835 | the base and step in STMT_VINFO_SIMD_CLONE_INFO. */ | |
17b658af JJ |
3836 | if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length () |
3837 | && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]) | |
6c9e85fb JJ |
3838 | { |
3839 | gcc_assert (vec_stmt); | |
3840 | thisarginfo.linear_step | |
17b658af | 3841 | = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]); |
6c9e85fb | 3842 | thisarginfo.op |
17b658af JJ |
3843 | = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1]; |
3844 | thisarginfo.simd_lane_linear | |
3845 | = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3] | |
3846 | == boolean_true_node); | |
6c9e85fb JJ |
3847 | /* If loop has been peeled for alignment, we need to adjust it. */ |
3848 | tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo); | |
3849 | tree n2 = LOOP_VINFO_NITERS (loop_vinfo); | |
17b658af | 3850 | if (n1 != n2 && !thisarginfo.simd_lane_linear) |
6c9e85fb JJ |
3851 | { |
3852 | tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2); | |
17b658af | 3853 | tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]; |
6c9e85fb JJ |
3854 | tree opt = TREE_TYPE (thisarginfo.op); |
3855 | bias = fold_convert (TREE_TYPE (step), bias); | |
3856 | bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step); | |
3857 | thisarginfo.op | |
3858 | = fold_build2 (POINTER_TYPE_P (opt) | |
3859 | ? POINTER_PLUS_EXPR : PLUS_EXPR, opt, | |
3860 | thisarginfo.op, bias); | |
3861 | } | |
3862 | } | |
3863 | else if (!vec_stmt | |
3864 | && thisarginfo.dt != vect_constant_def | |
3865 | && thisarginfo.dt != vect_external_def | |
3866 | && loop_vinfo | |
3867 | && TREE_CODE (op) == SSA_NAME | |
3868 | && simple_iv (loop, loop_containing_stmt (stmt), op, | |
3869 | &iv, false) | |
3870 | && tree_fits_shwi_p (iv.step)) | |
0136f8f0 AH |
3871 | { |
3872 | thisarginfo.linear_step = tree_to_shwi (iv.step); | |
3873 | thisarginfo.op = iv.base; | |
3874 | } | |
3875 | else if ((thisarginfo.dt == vect_constant_def | |
3876 | || thisarginfo.dt == vect_external_def) | |
3877 | && POINTER_TYPE_P (TREE_TYPE (op))) | |
3878 | thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT; | |
17b658af JJ |
3879 | /* Addresses of array elements indexed by GOMP_SIMD_LANE are |
3880 | linear too. */ | |
3881 | if (POINTER_TYPE_P (TREE_TYPE (op)) | |
3882 | && !thisarginfo.linear_step | |
3883 | && !vec_stmt | |
3884 | && thisarginfo.dt != vect_constant_def | |
3885 | && thisarginfo.dt != vect_external_def | |
3886 | && loop_vinfo | |
3887 | && !slp_node | |
3888 | && TREE_CODE (op) == SSA_NAME) | |
3889 | vect_simd_lane_linear (op, loop, &thisarginfo); | |
0136f8f0 AH |
3890 | |
3891 | arginfo.quick_push (thisarginfo); | |
3892 | } | |
3893 | ||
d9f21f6a RS |
3894 | unsigned HOST_WIDE_INT vf; |
3895 | if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)) | |
3896 | { | |
3897 | if (dump_enabled_p ()) | |
3898 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3899 | "not considering SIMD clones; not yet supported" | |
3900 | " for variable-width vectors.\n"); | |
3901 | return NULL; | |
3902 | } | |
3903 | ||
0136f8f0 AH |
3904 | unsigned int badness = 0; |
3905 | struct cgraph_node *bestn = NULL; | |
6c9e85fb JJ |
3906 | if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ()) |
3907 | bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]); | |
0136f8f0 AH |
3908 | else |
3909 | for (struct cgraph_node *n = node->simd_clones; n != NULL; | |
3910 | n = n->simdclone->next_clone) | |
3911 | { | |
3912 | unsigned int this_badness = 0; | |
d9f21f6a | 3913 | if (n->simdclone->simdlen > vf |
0136f8f0 AH |
3914 | || n->simdclone->nargs != nargs) |
3915 | continue; | |
d9f21f6a RS |
3916 | if (n->simdclone->simdlen < vf) |
3917 | this_badness += (exact_log2 (vf) | |
0136f8f0 AH |
3918 | - exact_log2 (n->simdclone->simdlen)) * 1024; |
3919 | if (n->simdclone->inbranch) | |
3920 | this_badness += 2048; | |
3921 | int target_badness = targetm.simd_clone.usable (n); | |
3922 | if (target_badness < 0) | |
3923 | continue; | |
3924 | this_badness += target_badness * 512; | |
3925 | /* FORNOW: Have to add code to add the mask argument. */ | |
3926 | if (n->simdclone->inbranch) | |
3927 | continue; | |
3928 | for (i = 0; i < nargs; i++) | |
3929 | { | |
3930 | switch (n->simdclone->args[i].arg_type) | |
3931 | { | |
3932 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
3933 | if (!useless_type_conversion_p | |
3934 | (n->simdclone->args[i].orig_type, | |
3935 | TREE_TYPE (gimple_call_arg (stmt, i)))) | |
3936 | i = -1; | |
3937 | else if (arginfo[i].dt == vect_constant_def | |
3938 | || arginfo[i].dt == vect_external_def | |
3939 | || arginfo[i].linear_step) | |
3940 | this_badness += 64; | |
3941 | break; | |
3942 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
3943 | if (arginfo[i].dt != vect_constant_def | |
3944 | && arginfo[i].dt != vect_external_def) | |
3945 | i = -1; | |
3946 | break; | |
3947 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
d9a6bd32 | 3948 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
3949 | if (arginfo[i].dt == vect_constant_def |
3950 | || arginfo[i].dt == vect_external_def | |
3951 | || (arginfo[i].linear_step | |
3952 | != n->simdclone->args[i].linear_step)) | |
3953 | i = -1; | |
3954 | break; | |
3955 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: | |
d9a6bd32 JJ |
3956 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
3957 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
e01d41e5 JJ |
3958 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
3959 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
3960 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
3961 | /* FORNOW */ |
3962 | i = -1; | |
3963 | break; | |
3964 | case SIMD_CLONE_ARG_TYPE_MASK: | |
3965 | gcc_unreachable (); | |
3966 | } | |
3967 | if (i == (size_t) -1) | |
3968 | break; | |
3969 | if (n->simdclone->args[i].alignment > arginfo[i].align) | |
3970 | { | |
3971 | i = -1; | |
3972 | break; | |
3973 | } | |
3974 | if (arginfo[i].align) | |
3975 | this_badness += (exact_log2 (arginfo[i].align) | |
3976 | - exact_log2 (n->simdclone->args[i].alignment)); | |
3977 | } | |
3978 | if (i == (size_t) -1) | |
3979 | continue; | |
3980 | if (bestn == NULL || this_badness < badness) | |
3981 | { | |
3982 | bestn = n; | |
3983 | badness = this_badness; | |
3984 | } | |
3985 | } | |
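/* Editor's illustrative sketch (not part of the original source): the
   clone-selection heuristic above, restated over a simplified clone
   descriptor.  The weights mirror the loop above -- 1024 per halving of
   simdlen relative to the VF, 2048 for an inbranch clone (currently
   skipped anyway), 512 per unit of target badness and 64 per vector
   argument fed by a constant, external or linear operand; alignment
   tuning is omitted.  Struct and function names are hypothetical, and
   the snippet is guarded so it is not compiled as part of this file.  */
#if 0
struct toy_clone
{
  unsigned simdlen;		/* lanes processed per clone invocation */
  int inbranch;			/* clone expects a mask argument */
  int target_badness;		/* < 0 means unusable on this target */
  unsigned bad_vector_args;	/* vector args fed by constants/externals */
};

/* Return the badness of C for vectorization factor VF (a power of two),
   or -1 if C is not a candidate at all.  Lower is better.  */
static int
toy_clone_badness (const struct toy_clone *c, unsigned vf)
{
  if (c->simdlen > vf || c->target_badness < 0 || c->inbranch)
    return -1;
  int badness = 0;
  for (unsigned len = c->simdlen; len < vf; len *= 2)
    badness += 1024;
  badness += c->target_badness * 512;
  badness += c->bad_vector_args * 64;
  return badness;
}
#endif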
3986 | ||
3987 | if (bestn == NULL) | |
00426f9a | 3988 | return false; |
0136f8f0 AH |
3989 | |
3990 | for (i = 0; i < nargs; i++) | |
3991 | if ((arginfo[i].dt == vect_constant_def | |
3992 | || arginfo[i].dt == vect_external_def) | |
3993 | && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR) | |
3994 | { | |
3995 | arginfo[i].vectype | |
3996 | = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt, | |
3997 | i))); | |
3998 | if (arginfo[i].vectype == NULL | |
cf1b2ba4 | 3999 | || (simd_clone_subparts (arginfo[i].vectype) |
0136f8f0 | 4000 | > bestn->simdclone->simdlen)) |
00426f9a | 4001 | return false; |
0136f8f0 AH |
4002 | } |
4003 | ||
4004 | fndecl = bestn->decl; | |
4005 | nunits = bestn->simdclone->simdlen; | |
d9f21f6a | 4006 | ncopies = vf / nunits; |
0136f8f0 AH |
4007 | |
4008 | /* If the function isn't const, only allow it in simd loops where the user
4009 | has asserted that at least nunits consecutive iterations can be | |
4010 | performed using SIMD instructions. */ | |
4011 | if ((loop == NULL || (unsigned) loop->safelen < nunits) | |
4012 | && gimple_vuse (stmt)) | |
00426f9a | 4013 | return false; |
0136f8f0 AH |
4014 | |
4015 | /* Sanity check: make sure that at least one copy of the vectorized stmt | |
4016 | needs to be generated. */ | |
4017 | gcc_assert (ncopies >= 1); | |
4018 | ||
4019 | if (!vec_stmt) /* transformation not required. */ | |
4020 | { | |
6c9e85fb JJ |
4021 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl); |
4022 | for (i = 0; i < nargs; i++) | |
7adb26f2 JJ |
4023 | if ((bestn->simdclone->args[i].arg_type |
4024 | == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) | |
4025 | || (bestn->simdclone->args[i].arg_type | |
4026 | == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)) | |
6c9e85fb | 4027 | { |
17b658af | 4028 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3 |
6c9e85fb JJ |
4029 | + 1); |
4030 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op); | |
4031 | tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op)) | |
4032 | ? size_type_node : TREE_TYPE (arginfo[i].op); | |
4033 | tree ls = build_int_cst (lst, arginfo[i].linear_step); | |
4034 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls); | |
17b658af JJ |
4035 | tree sll = arginfo[i].simd_lane_linear |
4036 | ? boolean_true_node : boolean_false_node; | |
4037 | STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll); | |
6c9e85fb | 4038 | } |
0136f8f0 | 4039 | STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; |
adac3a68 | 4040 | DUMP_VECT_SCOPE ("vectorizable_simd_clone_call"); |
68435eb2 | 4041 | /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */ |
0136f8f0 AH |
4042 | return true; |
4043 | } | |
4044 | ||
67b8dbac | 4045 | /* Transform. */ |
0136f8f0 AH |
4046 | |
4047 | if (dump_enabled_p ()) | |
4048 | dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n"); | |
4049 | ||
4050 | /* Handle def. */ | |
4051 | scalar_dest = gimple_call_lhs (stmt); | |
4052 | vec_dest = NULL_TREE; | |
4053 | rtype = NULL_TREE; | |
4054 | ratype = NULL_TREE; | |
4055 | if (scalar_dest) | |
4056 | { | |
4057 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
4058 | rtype = TREE_TYPE (TREE_TYPE (fndecl)); | |
4059 | if (TREE_CODE (rtype) == ARRAY_TYPE) | |
4060 | { | |
4061 | ratype = rtype; | |
4062 | rtype = TREE_TYPE (ratype); | |
4063 | } | |
4064 | } | |
4065 | ||
4066 | prev_stmt_info = NULL; | |
4067 | for (j = 0; j < ncopies; ++j) | |
4068 | { | |
4069 | /* Build argument list for the vectorized call. */ | |
4070 | if (j == 0) | |
4071 | vargs.create (nargs); | |
4072 | else | |
4073 | vargs.truncate (0); | |
4074 | ||
4075 | for (i = 0; i < nargs; i++) | |
4076 | { | |
4077 | unsigned int k, l, m, o; | |
4078 | tree atype; | |
4079 | op = gimple_call_arg (stmt, i); | |
4080 | switch (bestn->simdclone->args[i].arg_type) | |
4081 | { | |
4082 | case SIMD_CLONE_ARG_TYPE_VECTOR: | |
4083 | atype = bestn->simdclone->args[i].vector_type; | |
cf1b2ba4 | 4084 | o = nunits / simd_clone_subparts (atype); |
0136f8f0 AH |
4085 | for (m = j * o; m < (j + 1) * o; m++) |
4086 | { | |
cf1b2ba4 RS |
4087 | if (simd_clone_subparts (atype) |
4088 | < simd_clone_subparts (arginfo[i].vectype)) | |
0136f8f0 | 4089 | { |
73a699ae | 4090 | poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype)); |
cf1b2ba4 RS |
4091 | k = (simd_clone_subparts (arginfo[i].vectype) |
4092 | / simd_clone_subparts (atype)); | |
0136f8f0 AH |
4093 | gcc_assert ((k & (k - 1)) == 0); |
4094 | if (m == 0) | |
4095 | vec_oprnd0 | |
86a91c0a | 4096 | = vect_get_vec_def_for_operand (op, stmt_info); |
0136f8f0 AH |
4097 | else |
4098 | { | |
4099 | vec_oprnd0 = arginfo[i].op; | |
4100 | if ((m & (k - 1)) == 0) | |
4101 | vec_oprnd0 | |
e4057a39 | 4102 | = vect_get_vec_def_for_stmt_copy (vinfo, |
0136f8f0 AH |
4103 | vec_oprnd0); |
4104 | } | |
4105 | arginfo[i].op = vec_oprnd0; | |
4106 | vec_oprnd0 | |
4107 | = build3 (BIT_FIELD_REF, atype, vec_oprnd0, | |
92e29a5e | 4108 | bitsize_int (prec), |
0136f8f0 | 4109 | bitsize_int ((m & (k - 1)) * prec)); |
e1bd7296 | 4110 | gassign *new_stmt |
b731b390 | 4111 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 | 4112 | vec_oprnd0); |
86a91c0a | 4113 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
0136f8f0 AH |
4114 | vargs.safe_push (gimple_assign_lhs (new_stmt)); |
4115 | } | |
4116 | else | |
4117 | { | |
cf1b2ba4 RS |
4118 | k = (simd_clone_subparts (atype) |
4119 | / simd_clone_subparts (arginfo[i].vectype)); | |
0136f8f0 AH |
4120 | gcc_assert ((k & (k - 1)) == 0); |
4121 | vec<constructor_elt, va_gc> *ctor_elts; | |
4122 | if (k != 1) | |
4123 | vec_alloc (ctor_elts, k); | |
4124 | else | |
4125 | ctor_elts = NULL; | |
4126 | for (l = 0; l < k; l++) | |
4127 | { | |
4128 | if (m == 0 && l == 0) | |
4129 | vec_oprnd0 | |
86a91c0a | 4130 | = vect_get_vec_def_for_operand (op, stmt_info); |
0136f8f0 AH |
4131 | else |
4132 | vec_oprnd0 | |
e4057a39 | 4133 | = vect_get_vec_def_for_stmt_copy (vinfo, |
0136f8f0 AH |
4134 | arginfo[i].op); |
4135 | arginfo[i].op = vec_oprnd0; | |
4136 | if (k == 1) | |
4137 | break; | |
4138 | CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, | |
4139 | vec_oprnd0); | |
4140 | } | |
4141 | if (k == 1) | |
4142 | vargs.safe_push (vec_oprnd0); | |
4143 | else | |
4144 | { | |
4145 | vec_oprnd0 = build_constructor (atype, ctor_elts); | |
e1bd7296 | 4146 | gassign *new_stmt |
b731b390 | 4147 | = gimple_build_assign (make_ssa_name (atype), |
0136f8f0 | 4148 | vec_oprnd0); |
86a91c0a RS |
4149 | vect_finish_stmt_generation (stmt_info, new_stmt, |
4150 | gsi); | |
0136f8f0 AH |
4151 | vargs.safe_push (gimple_assign_lhs (new_stmt)); |
4152 | } | |
4153 | } | |
4154 | } | |
4155 | break; | |
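/* Editor's note (illustrative, not part of the original source): the
   VECTOR case above reconciles the loop's operand vectors with the
   clone's argument type ATYPE.  When ATYPE has fewer subparts than the
   operand vector, each operand is split into
   k = subparts (operand) / subparts (atype) pieces with BIT_FIELD_REF;
   when it has more, k operand vectors are glued together with a
   CONSTRUCTOR.  E.g. a V8SI operand passed to a clone taking V4SI gives
   k = 2, so each copy of the call consumes half of one operand vector.  */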
4156 | case SIMD_CLONE_ARG_TYPE_UNIFORM: | |
4157 | vargs.safe_push (op); | |
4158 | break; | |
4159 | case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: | |
7adb26f2 | 4160 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: |
0136f8f0 AH |
4161 | if (j == 0) |
4162 | { | |
4163 | gimple_seq stmts; | |
4164 | arginfo[i].op | |
4165 | = force_gimple_operand (arginfo[i].op, &stmts, true, | |
4166 | NULL_TREE); | |
4167 | if (stmts != NULL) | |
4168 | { | |
4169 | basic_block new_bb; | |
4170 | edge pe = loop_preheader_edge (loop); | |
4171 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
4172 | gcc_assert (!new_bb); | |
4173 | } | |
17b658af JJ |
4174 | if (arginfo[i].simd_lane_linear) |
4175 | { | |
4176 | vargs.safe_push (arginfo[i].op); | |
4177 | break; | |
4178 | } | |
b731b390 | 4179 | tree phi_res = copy_ssa_name (op); |
538dd0b7 | 4180 | gphi *new_phi = create_phi_node (phi_res, loop->header); |
4fbeb363 | 4181 | loop_vinfo->add_stmt (new_phi); |
0136f8f0 AH |
4182 | add_phi_arg (new_phi, arginfo[i].op, |
4183 | loop_preheader_edge (loop), UNKNOWN_LOCATION); | |
4184 | enum tree_code code | |
4185 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
4186 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
4187 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
4188 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
4189 | widest_int cst |
4190 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
4191 | ncopies * nunits); | |
4192 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 4193 | tree phi_arg = copy_ssa_name (op); |
e1bd7296 | 4194 | gassign *new_stmt |
0d0e4a03 | 4195 | = gimple_build_assign (phi_arg, code, phi_res, tcst); |
0136f8f0 AH |
4196 | gimple_stmt_iterator si = gsi_after_labels (loop->header); |
4197 | gsi_insert_after (&si, new_stmt, GSI_NEW_STMT); | |
4fbeb363 | 4198 | loop_vinfo->add_stmt (new_stmt); |
0136f8f0 AH |
4199 | add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop), |
4200 | UNKNOWN_LOCATION); | |
4201 | arginfo[i].op = phi_res; | |
4202 | vargs.safe_push (phi_res); | |
4203 | } | |
4204 | else | |
4205 | { | |
4206 | enum tree_code code | |
4207 | = POINTER_TYPE_P (TREE_TYPE (op)) | |
4208 | ? POINTER_PLUS_EXPR : PLUS_EXPR; | |
4209 | tree type = POINTER_TYPE_P (TREE_TYPE (op)) | |
4210 | ? sizetype : TREE_TYPE (op); | |
807e902e KZ |
4211 | widest_int cst |
4212 | = wi::mul (bestn->simdclone->args[i].linear_step, | |
4213 | j * nunits); | |
4214 | tree tcst = wide_int_to_tree (type, cst); | |
b731b390 | 4215 | new_temp = make_ssa_name (TREE_TYPE (op)); |
e1bd7296 RS |
4216 | gassign *new_stmt |
4217 | = gimple_build_assign (new_temp, code, | |
4218 | arginfo[i].op, tcst); | |
86a91c0a | 4219 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
0136f8f0 AH |
4220 | vargs.safe_push (new_temp); |
4221 | } | |
4222 | break; | |
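/* Editor's note (illustrative, not part of the original source): for a
   linear argument with constant step S, the first copy (j == 0) builds a
   real induction variable in the vector loop -- a PHI whose latch
   increment is S * ncopies * nunits, the amount the scalar argument
   advances across one full vector iteration.  Subsequent copies (j > 0)
   just pass phi_result + j * nunits * S, the value the argument has at
   the first scalar lane handled by that copy.  */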
7adb26f2 JJ |
4223 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: |
4224 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: | |
0136f8f0 | 4225 | case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: |
e01d41e5 JJ |
4226 | case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: |
4227 | case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: | |
4228 | case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: | |
0136f8f0 AH |
4229 | default: |
4230 | gcc_unreachable (); | |
4231 | } | |
4232 | } | |
4233 | ||
e1bd7296 | 4234 | gcall *new_call = gimple_build_call_vec (fndecl, vargs); |
0136f8f0 AH |
4235 | if (vec_dest) |
4236 | { | |
cf1b2ba4 | 4237 | gcc_assert (ratype || simd_clone_subparts (rtype) == nunits); |
0136f8f0 | 4238 | if (ratype) |
b731b390 | 4239 | new_temp = create_tmp_var (ratype); |
cf1b2ba4 RS |
4240 | else if (simd_clone_subparts (vectype) |
4241 | == simd_clone_subparts (rtype)) | |
e1bd7296 | 4242 | new_temp = make_ssa_name (vec_dest, new_call); |
0136f8f0 | 4243 | else |
e1bd7296 RS |
4244 | new_temp = make_ssa_name (rtype, new_call); |
4245 | gimple_call_set_lhs (new_call, new_temp); | |
0136f8f0 | 4246 | } |
e1bd7296 | 4247 | stmt_vec_info new_stmt_info |
86a91c0a | 4248 | = vect_finish_stmt_generation (stmt_info, new_call, gsi); |
0136f8f0 AH |
4249 | |
4250 | if (vec_dest) | |
4251 | { | |
cf1b2ba4 | 4252 | if (simd_clone_subparts (vectype) < nunits) |
0136f8f0 AH |
4253 | { |
4254 | unsigned int k, l; | |
73a699ae RS |
4255 | poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype)); |
4256 | poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype)); | |
cf1b2ba4 | 4257 | k = nunits / simd_clone_subparts (vectype); |
0136f8f0 AH |
4258 | gcc_assert ((k & (k - 1)) == 0); |
4259 | for (l = 0; l < k; l++) | |
4260 | { | |
4261 | tree t; | |
4262 | if (ratype) | |
4263 | { | |
4264 | t = build_fold_addr_expr (new_temp); | |
4265 | t = build2 (MEM_REF, vectype, t, | |
73a699ae | 4266 | build_int_cst (TREE_TYPE (t), l * bytes)); |
0136f8f0 AH |
4267 | } |
4268 | else | |
4269 | t = build3 (BIT_FIELD_REF, vectype, new_temp, | |
92e29a5e | 4270 | bitsize_int (prec), bitsize_int (l * prec)); |
e1bd7296 | 4271 | gimple *new_stmt |
b731b390 | 4272 | = gimple_build_assign (make_ssa_name (vectype), t); |
e1bd7296 | 4273 | new_stmt_info |
86a91c0a | 4274 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
e1bd7296 | 4275 | |
0136f8f0 | 4276 | if (j == 0 && l == 0) |
e1bd7296 RS |
4277 | STMT_VINFO_VEC_STMT (stmt_info) |
4278 | = *vec_stmt = new_stmt_info; | |
0136f8f0 | 4279 | else |
e1bd7296 | 4280 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
0136f8f0 | 4281 | |
e1bd7296 | 4282 | prev_stmt_info = new_stmt_info; |
0136f8f0 AH |
4283 | } |
4284 | ||
4285 | if (ratype) | |
86a91c0a | 4286 | vect_clobber_variable (stmt_info, gsi, new_temp); |
0136f8f0 AH |
4287 | continue; |
4288 | } | |
cf1b2ba4 | 4289 | else if (simd_clone_subparts (vectype) > nunits) |
0136f8f0 | 4290 | { |
cf1b2ba4 RS |
4291 | unsigned int k = (simd_clone_subparts (vectype) |
4292 | / simd_clone_subparts (rtype)); | |
0136f8f0 AH |
4293 | gcc_assert ((k & (k - 1)) == 0); |
4294 | if ((j & (k - 1)) == 0) | |
4295 | vec_alloc (ret_ctor_elts, k); | |
4296 | if (ratype) | |
4297 | { | |
cf1b2ba4 | 4298 | unsigned int m, o = nunits / simd_clone_subparts (rtype); |
0136f8f0 AH |
4299 | for (m = 0; m < o; m++) |
4300 | { | |
4301 | tree tem = build4 (ARRAY_REF, rtype, new_temp, | |
4302 | size_int (m), NULL_TREE, NULL_TREE); | |
e1bd7296 | 4303 | gimple *new_stmt |
b731b390 | 4304 | = gimple_build_assign (make_ssa_name (rtype), tem); |
e1bd7296 | 4305 | new_stmt_info |
86a91c0a RS |
4306 | = vect_finish_stmt_generation (stmt_info, new_stmt, |
4307 | gsi); | |
0136f8f0 AH |
4308 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, |
4309 | gimple_assign_lhs (new_stmt)); | |
4310 | } | |
86a91c0a | 4311 | vect_clobber_variable (stmt_info, gsi, new_temp); |
0136f8f0 AH |
4312 | } |
4313 | else | |
4314 | CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp); | |
4315 | if ((j & (k - 1)) != k - 1) | |
4316 | continue; | |
4317 | vec_oprnd0 = build_constructor (vectype, ret_ctor_elts); | |
e1bd7296 | 4318 | gimple *new_stmt |
b731b390 | 4319 | = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0); |
e1bd7296 | 4320 | new_stmt_info |
86a91c0a | 4321 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
0136f8f0 AH |
4322 | |
4323 | if ((unsigned) j == k - 1) | |
e1bd7296 | 4324 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
0136f8f0 | 4325 | else |
e1bd7296 | 4326 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
0136f8f0 | 4327 | |
e1bd7296 | 4328 | prev_stmt_info = new_stmt_info; |
0136f8f0 AH |
4329 | continue; |
4330 | } | |
4331 | else if (ratype) | |
4332 | { | |
4333 | tree t = build_fold_addr_expr (new_temp); | |
4334 | t = build2 (MEM_REF, vectype, t, | |
4335 | build_int_cst (TREE_TYPE (t), 0)); | |
e1bd7296 | 4336 | gimple *new_stmt |
b731b390 | 4337 | = gimple_build_assign (make_ssa_name (vec_dest), t); |
e1bd7296 | 4338 | new_stmt_info |
86a91c0a RS |
4339 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4340 | vect_clobber_variable (stmt_info, gsi, new_temp); | |
0136f8f0 AH |
4341 | } |
4342 | } | |
4343 | ||
4344 | if (j == 0) | |
e1bd7296 | 4345 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
0136f8f0 | 4346 | else |
e1bd7296 | 4347 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
0136f8f0 | 4348 | |
e1bd7296 | 4349 | prev_stmt_info = new_stmt_info; |
0136f8f0 AH |
4350 | } |
4351 | ||
4352 | vargs.release (); | |
4353 | ||
4354 | /* The call in STMT might prevent it from being removed in dce. | |
4355 | However, we cannot remove it here, due to the way the SSA name |
4356 | it defines is mapped to the new definition. So just replace the |
4357 | rhs of the statement with something harmless. */ |
4358 | ||
4359 | if (slp_node) | |
4360 | return true; | |
4361 | ||
e1bd7296 | 4362 | gimple *new_stmt; |
0136f8f0 AH |
4363 | if (scalar_dest) |
4364 | { | |
4365 | type = TREE_TYPE (scalar_dest); | |
211cd1e2 | 4366 | lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt); |
0136f8f0 AH |
4367 | new_stmt = gimple_build_assign (lhs, build_zero_cst (type)); |
4368 | } | |
4369 | else | |
4370 | new_stmt = gimple_build_nop (); | |
41b6b80e | 4371 | vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt); |
0136f8f0 AH |
4372 | unlink_stmt_vdef (stmt); |
4373 | ||
4374 | return true; | |
4375 | } | |
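/* Editor's illustrative example (not part of the original source): the
   kind of user code the routine above handles.  A function declared with
   "#pragma omp declare simd" gets SIMD clones; when the loop below is
   vectorized, the scalar call is replaced by a call to the clone whose
   simdlen best matches the vectorization factor, with uniform and linear
   arguments handled as described above.  All names here are hypothetical
   and the snippet is guarded so it is not compiled as part of this file.  */
#if 0
#pragma omp declare simd uniform (scale) linear (i : 1) notinbranch
extern float toy_kernel (float x, float scale, int i);

void
toy_loop (float *restrict out, const float *restrict in, float scale, int n)
{
#pragma omp simd
  for (int i = 0; i < n; i++)
    out[i] = toy_kernel (in[i], scale, i);	/* becomes a clone call */
}
#endif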
4376 | ||
4377 | ||
ebfd146a IR |
4378 | /* Function vect_gen_widened_results_half |
4379 | ||
4380 | Create a vector stmt whose code, number of arguments, and result |
b8698a0f | 4381 | variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are |
ff802fa1 | 4382 | VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI. |
ebfd146a IR |
4383 | In the case that CODE is a CALL_EXPR, this means that a call to DECL |
4384 | needs to be created (DECL is a function-decl of a target-builtin). | |
82570274 | 4385 | STMT_INFO is the original scalar stmt that we are vectorizing. */ |
ebfd146a | 4386 | |
355fe088 | 4387 | static gimple * |
ebfd146a IR |
4388 | vect_gen_widened_results_half (enum tree_code code, |
4389 | tree decl, | |
4390 | tree vec_oprnd0, tree vec_oprnd1, int op_type, | |
4391 | tree vec_dest, gimple_stmt_iterator *gsi, | |
82570274 | 4392 | stmt_vec_info stmt_info) |
b8698a0f | 4393 | { |
355fe088 | 4394 | gimple *new_stmt; |
b8698a0f L |
4395 | tree new_temp; |
4396 | ||
4397 | /* Generate half of the widened result: */ | |
4398 | if (code == CALL_EXPR) | |
4399 | { | |
4400 | /* Target specific support */ | |
ebfd146a IR |
4401 | if (op_type == binary_op) |
4402 | new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1); | |
4403 | else | |
4404 | new_stmt = gimple_build_call (decl, 1, vec_oprnd0); | |
4405 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4406 | gimple_call_set_lhs (new_stmt, new_temp); | |
b8698a0f L |
4407 | } |
4408 | else | |
ebfd146a | 4409 | { |
b8698a0f L |
4410 | /* Generic support */ |
4411 | gcc_assert (op_type == TREE_CODE_LENGTH (code)); | |
ebfd146a IR |
4412 | if (op_type != binary_op) |
4413 | vec_oprnd1 = NULL; | |
0d0e4a03 | 4414 | new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1); |
ebfd146a IR |
4415 | new_temp = make_ssa_name (vec_dest, new_stmt); |
4416 | gimple_assign_set_lhs (new_stmt, new_temp); | |
b8698a0f | 4417 | } |
82570274 | 4418 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
ebfd146a | 4419 | |
ebfd146a IR |
4420 | return new_stmt; |
4421 | } | |
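/* Editor's illustrative sketch (not part of the original source): what
   the two "halves" produced with this helper compute.  Widening, say,
   eight shorts to eight ints cannot fit in a single result vector of the
   same size, so the vectorizer emits two statements, one per half; the
   toy code shows the scalar equivalent, assuming 128-bit vectors and
   that the low half takes the first four lanes (the actual lane split
   is target-specific).  Names are hypothetical and the snippet is
   guarded so it is not compiled as part of this file.  */
#if 0
static void
toy_widen_v8hi_to_2xv4si (const short src[8], int lo[4], int hi[4])
{
  for (int i = 0; i < 4; i++)
    lo[i] = (int) src[i];	/* result of the "low half" statement */
  for (int i = 0; i < 4; i++)
    hi[i] = (int) src[i + 4];	/* result of the "high half" statement */
}
#endif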
4422 | ||
4a00c761 | 4423 | |
82570274 RS |
4424 | /* Get vectorized definitions for loop-based vectorization of STMT_INFO. |
4425 | For the first operand we call vect_get_vec_def_for_operand (with OPRND | |
4426 | containing the scalar operand), and for the rest we get a copy with |
4a00c761 JJ |
4427 | vect_get_vec_def_for_stmt_copy() using the previous vector definition |
4428 | (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details. | |
4429 | The vectors are collected into VEC_OPRNDS. */ | |
4430 | ||
4431 | static void | |
82570274 | 4432 | vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info, |
e4057a39 | 4433 | vec<tree> *vec_oprnds, int multi_step_cvt) |
4a00c761 | 4434 | { |
e4057a39 | 4435 | vec_info *vinfo = stmt_info->vinfo; |
4a00c761 JJ |
4436 | tree vec_oprnd; |
4437 | ||
4438 | /* Get first vector operand. */ | |
4439 | /* All the vector operands except the very first one (that is scalar oprnd) | |
4440 | are stmt copies. */ | |
4441 | if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE) | |
82570274 | 4442 | vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info); |
4a00c761 | 4443 | else |
e4057a39 | 4444 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd); |
4a00c761 | 4445 | |
9771b263 | 4446 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
4447 | |
4448 | /* Get second vector operand. */ | |
e4057a39 | 4449 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd); |
9771b263 | 4450 | vec_oprnds->quick_push (vec_oprnd); |
4a00c761 JJ |
4451 | |
4452 | *oprnd = vec_oprnd; | |
4453 | ||
4454 | /* For conversion in multiple steps, continue to get operands | |
4455 | recursively. */ | |
4456 | if (multi_step_cvt) | |
e4057a39 | 4457 | vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds, |
82570274 | 4458 | multi_step_cvt - 1); |
4a00c761 JJ |
4459 | } |
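/* Editor's note (illustrative, not part of the original source): each
   invocation above pushes two vector defs and recurses while
   MULTI_STEP_CVT is nonzero.  The caller in vectorizable_conversion
   passes vect_pow2 (multi_step_cvt) - 1, so 2^multi_step_cvt invocations
   collect the 2^(multi_step_cvt + 1) source vectors that one multi-step
   narrowing consumes per result vector (e.g. 4 int vectors for an
   int -> char conversion that goes through short).  */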
4460 | ||
4461 | ||
4462 | /* Create vectorized demotion statements for vector operands from VEC_OPRNDS. | |
4463 | For multi-step conversions store the resulting vectors and call the function | |
4464 | recursively. */ | |
4465 | ||
4466 | static void | |
9771b263 | 4467 | vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds, |
32e8e429 RS |
4468 | int multi_step_cvt, |
4469 | stmt_vec_info stmt_info, | |
9771b263 | 4470 | vec<tree> vec_dsts, |
4a00c761 JJ |
4471 | gimple_stmt_iterator *gsi, |
4472 | slp_tree slp_node, enum tree_code code, | |
4473 | stmt_vec_info *prev_stmt_info) | |
4474 | { | |
4475 | unsigned int i; | |
4476 | tree vop0, vop1, new_tmp, vec_dest; | |
4a00c761 | 4477 | |
9771b263 | 4478 | vec_dest = vec_dsts.pop (); |
4a00c761 | 4479 | |
9771b263 | 4480 | for (i = 0; i < vec_oprnds->length (); i += 2) |
4a00c761 JJ |
4481 | { |
4482 | /* Create demotion operation. */ | |
9771b263 DN |
4483 | vop0 = (*vec_oprnds)[i]; |
4484 | vop1 = (*vec_oprnds)[i + 1]; | |
e1bd7296 | 4485 | gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
4a00c761 JJ |
4486 | new_tmp = make_ssa_name (vec_dest, new_stmt); |
4487 | gimple_assign_set_lhs (new_stmt, new_tmp); | |
e1bd7296 | 4488 | stmt_vec_info new_stmt_info |
86a91c0a | 4489 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4a00c761 JJ |
4490 | |
4491 | if (multi_step_cvt) | |
4492 | /* Store the resulting vector for next recursive call. */ | |
9771b263 | 4493 | (*vec_oprnds)[i/2] = new_tmp; |
4a00c761 JJ |
4494 | else |
4495 | { | |
4496 | /* This is the last step of the conversion sequence. Store the | |
4497 | vectors in SLP_NODE or in vector info of the scalar statement | |
4498 | (or in STMT_VINFO_RELATED_STMT chain). */ | |
4499 | if (slp_node) | |
e1bd7296 | 4500 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
4a00c761 | 4501 | else |
c689ce1e RB |
4502 | { |
4503 | if (!*prev_stmt_info) | |
e1bd7296 | 4504 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info; |
c689ce1e | 4505 | else |
e1bd7296 | 4506 | STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info; |
4a00c761 | 4507 | |
e1bd7296 | 4508 | *prev_stmt_info = new_stmt_info; |
c689ce1e | 4509 | } |
4a00c761 JJ |
4510 | } |
4511 | } | |
4512 | ||
4513 | /* For multi-step demotion operations we first generate demotion operations | |
4514 | from the source type to the intermediate types, and then combine the | |
4515 | results (stored in VEC_OPRNDS) in a demotion operation to the destination |
4516 | type. */ | |
4517 | if (multi_step_cvt) | |
4518 | { | |
4519 | /* At each level of recursion we have half of the operands we had at the | |
4520 | previous level. */ | |
9771b263 | 4521 | vec_oprnds->truncate ((i+1)/2); |
4a00c761 | 4522 | vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1, |
86a91c0a RS |
4523 | stmt_info, vec_dsts, gsi, |
4524 | slp_node, VEC_PACK_TRUNC_EXPR, | |
4a00c761 JJ |
4525 | prev_stmt_info); |
4526 | } | |
4527 | ||
9771b263 | 4528 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4529 | } |
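/* Editor's illustrative sketch (not part of the original source): the
   shape of a multi-step demotion.  With multi_step_cvt == 1, narrowing
   int to char first packs pairs of int vectors into short vectors and
   then packs pairs of short vectors into the final char vector, so each
   level of the recursion above halves the number of operands.  The toy
   code shows the equivalent scalar data flow for four 4-element int
   vectors; lane ordering is simplified, names are hypothetical, and the
   snippet is guarded so it is not compiled as part of this file.  */
#if 0
/* 4 x V4SI  --VEC_PACK_TRUNC-->  2 x V8HI  --VEC_PACK_TRUNC-->  1 x V16QI.  */
static void
toy_two_level_pack (const int src[4][4], signed char dst[16])
{
  short mid[2][8];
  for (int v = 0; v < 2; v++)		/* level 1: 4 int vectors -> 2 short vectors */
    for (int i = 0; i < 8; i++)
      mid[v][i] = (short) src[2 * v + i / 4][i % 4];
  for (int i = 0; i < 16; i++)		/* level 2: 2 short vectors -> 1 char vector */
    dst[i] = (signed char) mid[i / 8][i % 8];
}
#endif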
4530 | ||
4531 | ||
4532 | /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0 | |
82570274 RS |
4533 | and VEC_OPRNDS1, for a binary operation associated with scalar statement |
4534 | STMT_INFO. For multi-step conversions store the resulting vectors and | |
4535 | call the function recursively. */ | |
4a00c761 JJ |
4536 | |
4537 | static void | |
9771b263 DN |
4538 | vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0, |
4539 | vec<tree> *vec_oprnds1, | |
82570274 | 4540 | stmt_vec_info stmt_info, tree vec_dest, |
4a00c761 JJ |
4541 | gimple_stmt_iterator *gsi, |
4542 | enum tree_code code1, | |
4543 | enum tree_code code2, tree decl1, | |
4544 | tree decl2, int op_type) | |
4545 | { | |
4546 | int i; | |
4547 | tree vop0, vop1, new_tmp1, new_tmp2; | |
355fe088 | 4548 | gimple *new_stmt1, *new_stmt2; |
6e1aa848 | 4549 | vec<tree> vec_tmp = vNULL; |
4a00c761 | 4550 | |
9771b263 DN |
4551 | vec_tmp.create (vec_oprnds0->length () * 2); |
4552 | FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0) | |
4a00c761 JJ |
4553 | { |
4554 | if (op_type == binary_op) | |
9771b263 | 4555 | vop1 = (*vec_oprnds1)[i]; |
4a00c761 JJ |
4556 | else |
4557 | vop1 = NULL_TREE; | |
4558 | ||
4559 | /* Generate the two halves of promotion operation. */ | |
4560 | new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1, | |
82570274 RS |
4561 | op_type, vec_dest, gsi, |
4562 | stmt_info); | |
4a00c761 | 4563 | new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1, |
82570274 RS |
4564 | op_type, vec_dest, gsi, |
4565 | stmt_info); | |
4a00c761 JJ |
4566 | if (is_gimple_call (new_stmt1)) |
4567 | { | |
4568 | new_tmp1 = gimple_call_lhs (new_stmt1); | |
4569 | new_tmp2 = gimple_call_lhs (new_stmt2); | |
4570 | } | |
4571 | else | |
4572 | { | |
4573 | new_tmp1 = gimple_assign_lhs (new_stmt1); | |
4574 | new_tmp2 = gimple_assign_lhs (new_stmt2); | |
4575 | } | |
4576 | ||
4577 | /* Store the results for the next step. */ | |
9771b263 DN |
4578 | vec_tmp.quick_push (new_tmp1); |
4579 | vec_tmp.quick_push (new_tmp2); | |
4a00c761 JJ |
4580 | } |
4581 | ||
689eaba3 | 4582 | vec_oprnds0->release (); |
4a00c761 JJ |
4583 | *vec_oprnds0 = vec_tmp; |
4584 | } | |
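/* Editor's note (illustrative, not part of the original source):
   promotion is the mirror image of the demotion above -- every input
   vector yields two output vectors (a low and a high half built by
   vect_gen_widened_results_half, see the sketch after that function),
   so VEC_OPRNDS0 doubles in length at each step instead of halving.  */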
4585 | ||
4586 | ||
32e8e429 RS |
4587 | /* Check if STMT_INFO performs a conversion operation that can be vectorized. |
4588 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
4a00c761 | 4589 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. |
32e8e429 | 4590 | Return true if STMT_INFO is vectorizable in this way. */ |
ebfd146a IR |
4591 | |
4592 | static bool | |
32e8e429 | 4593 | vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 4594 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 4595 | stmt_vector_for_cost *cost_vec) |
ebfd146a IR |
4596 | { |
4597 | tree vec_dest; | |
4598 | tree scalar_dest; | |
4a00c761 | 4599 | tree op0, op1 = NULL_TREE; |
ebfd146a | 4600 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE; |
ebfd146a IR |
4601 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
4602 | enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK; | |
4a00c761 | 4603 | enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK; |
ebfd146a IR |
4604 | tree decl1 = NULL_TREE, decl2 = NULL_TREE; |
4605 | tree new_temp; | |
ebfd146a | 4606 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 4607 | int ndts = 2; |
ebfd146a | 4608 | stmt_vec_info prev_stmt_info; |
062d5ccc RS |
4609 | poly_uint64 nunits_in; |
4610 | poly_uint64 nunits_out; | |
ebfd146a | 4611 | tree vectype_out, vectype_in; |
4a00c761 JJ |
4612 | int ncopies, i, j; |
4613 | tree lhs_type, rhs_type; | |
ebfd146a | 4614 | enum { NARROW, NONE, WIDEN } modifier; |
6e1aa848 DN |
4615 | vec<tree> vec_oprnds0 = vNULL; |
4616 | vec<tree> vec_oprnds1 = vNULL; | |
ebfd146a | 4617 | tree vop0; |
4a00c761 | 4618 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 4619 | vec_info *vinfo = stmt_info->vinfo; |
4a00c761 | 4620 | int multi_step_cvt = 0; |
6e1aa848 | 4621 | vec<tree> interm_types = vNULL; |
4a00c761 JJ |
4622 | tree last_oprnd, intermediate_type, cvt_type = NULL_TREE; |
4623 | int op_type; | |
4a00c761 | 4624 | unsigned short fltsz; |
ebfd146a IR |
4625 | |
4626 | /* Is STMT a vectorizable conversion? */ | |
4627 | ||
4a00c761 | 4628 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
4629 | return false; |
4630 | ||
66c16fd9 RB |
4631 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
4632 | && ! vec_stmt) | |
ebfd146a IR |
4633 | return false; |
4634 | ||
32e8e429 RS |
4635 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
4636 | if (!stmt) | |
ebfd146a IR |
4637 | return false; |
4638 | ||
4639 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
4640 | return false; | |
4641 | ||
4642 | code = gimple_assign_rhs_code (stmt); | |
4a00c761 JJ |
4643 | if (!CONVERT_EXPR_CODE_P (code) |
4644 | && code != FIX_TRUNC_EXPR | |
4645 | && code != FLOAT_EXPR | |
4646 | && code != WIDEN_MULT_EXPR | |
4647 | && code != WIDEN_LSHIFT_EXPR) | |
ebfd146a IR |
4648 | return false; |
4649 | ||
4a00c761 JJ |
4650 | op_type = TREE_CODE_LENGTH (code); |
4651 | ||
ebfd146a | 4652 | /* Check types of lhs and rhs. */ |
b690cc0f | 4653 | scalar_dest = gimple_assign_lhs (stmt); |
4a00c761 | 4654 | lhs_type = TREE_TYPE (scalar_dest); |
b690cc0f RG |
4655 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); |
4656 | ||
ebfd146a IR |
4657 | op0 = gimple_assign_rhs1 (stmt); |
4658 | rhs_type = TREE_TYPE (op0); | |
4a00c761 JJ |
4659 | |
4660 | if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4661 | && !((INTEGRAL_TYPE_P (lhs_type) | |
4662 | && INTEGRAL_TYPE_P (rhs_type)) | |
4663 | || (SCALAR_FLOAT_TYPE_P (lhs_type) | |
4664 | && SCALAR_FLOAT_TYPE_P (rhs_type)))) | |
4665 | return false; | |
4666 | ||
e6f5c25d IE |
4667 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4668 | && ((INTEGRAL_TYPE_P (lhs_type) | |
2be65d9e | 4669 | && !type_has_mode_precision_p (lhs_type)) |
e6f5c25d | 4670 | || (INTEGRAL_TYPE_P (rhs_type) |
2be65d9e | 4671 | && !type_has_mode_precision_p (rhs_type)))) |
4a00c761 | 4672 | { |
73fbfcad | 4673 | if (dump_enabled_p ()) |
78c60e3d | 4674 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 TJ |
4675 | "type conversion to/from bit-precision unsupported." |
4676 | "\n"); | |
4a00c761 JJ |
4677 | return false; |
4678 | } | |
4679 | ||
b690cc0f | 4680 | /* Check the operands of the operation. */ |
894dd753 | 4681 | if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in)) |
b690cc0f | 4682 | { |
73fbfcad | 4683 | if (dump_enabled_p ()) |
78c60e3d | 4684 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4685 | "use not simple.\n"); |
b690cc0f RG |
4686 | return false; |
4687 | } | |
4a00c761 JJ |
4688 | if (op_type == binary_op) |
4689 | { | |
4690 | bool ok; | |
4691 | ||
4692 | op1 = gimple_assign_rhs2 (stmt); | |
4693 | gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR); | |
4694 | /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of | |
4695 | OP1. */ | |
4696 | if (CONSTANT_CLASS_P (op0)) | |
894dd753 | 4697 | ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in); |
4a00c761 | 4698 | else |
894dd753 | 4699 | ok = vect_is_simple_use (op1, vinfo, &dt[1]); |
4a00c761 JJ |
4700 | |
4701 | if (!ok) | |
4702 | { | |
73fbfcad | 4703 | if (dump_enabled_p ()) |
78c60e3d | 4704 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4705 | "use not simple.\n"); |
4a00c761 JJ |
4706 | return false; |
4707 | } | |
4708 | } | |
4709 | ||
b690cc0f RG |
4710 | /* If op0 is an external or constant defs use a vector type of |
4711 | the same size as the output vector type. */ | |
ebfd146a | 4712 | if (!vectype_in) |
b690cc0f | 4713 | vectype_in = get_same_sized_vectype (rhs_type, vectype_out); |
7d8930a0 IR |
4714 | if (vec_stmt) |
4715 | gcc_assert (vectype_in); | |
4716 | if (!vectype_in) | |
4717 | { | |
73fbfcad | 4718 | if (dump_enabled_p ()) |
4a00c761 | 4719 | { |
78c60e3d SS |
4720 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
4721 | "no vectype for scalar type "); | |
4722 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
e645e942 | 4723 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
4a00c761 | 4724 | } |
7d8930a0 IR |
4725 | |
4726 | return false; | |
4727 | } | |
ebfd146a | 4728 | |
e6f5c25d IE |
4729 | if (VECTOR_BOOLEAN_TYPE_P (vectype_out) |
4730 | && !VECTOR_BOOLEAN_TYPE_P (vectype_in)) | |
4731 | { | |
4732 | if (dump_enabled_p ()) | |
4733 | { | |
4734 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
4735 | "can't convert between boolean and non " | |
4736 | "boolean vectors"); | |
4737 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type); | |
4738 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
4739 | } | |
4740 | ||
4741 | return false; | |
4742 | } | |
4743 | ||
b690cc0f RG |
4744 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in); |
4745 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
062d5ccc | 4746 | if (known_eq (nunits_out, nunits_in)) |
ebfd146a | 4747 | modifier = NONE; |
062d5ccc RS |
4748 | else if (multiple_p (nunits_out, nunits_in)) |
4749 | modifier = NARROW; | |
ebfd146a | 4750 | else |
062d5ccc RS |
4751 | { |
4752 | gcc_checking_assert (multiple_p (nunits_in, nunits_out)); | |
4753 | modifier = WIDEN; | |
4754 | } | |
ebfd146a | 4755 | |
ff802fa1 IR |
4756 | /* Multiple types in SLP are handled by creating the appropriate number of |
4757 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
4758 | case of SLP. */ | |
fce57248 | 4759 | if (slp_node) |
ebfd146a | 4760 | ncopies = 1; |
4a00c761 | 4761 | else if (modifier == NARROW) |
e8f142e2 | 4762 | ncopies = vect_get_num_copies (loop_vinfo, vectype_out); |
4a00c761 | 4763 | else |
e8f142e2 | 4764 | ncopies = vect_get_num_copies (loop_vinfo, vectype_in); |
b8698a0f | 4765 | |
ebfd146a IR |
4766 | /* Sanity check: make sure that at least one copy of the vectorized stmt |
4767 | needs to be generated. */ | |
4768 | gcc_assert (ncopies >= 1); | |
4769 | ||
16d22000 RS |
4770 | bool found_mode = false; |
4771 | scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type); | |
4772 | scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type); | |
4773 | opt_scalar_mode rhs_mode_iter; | |
b397965c | 4774 | |
ebfd146a | 4775 | /* Supportable by target? */ |
4a00c761 | 4776 | switch (modifier) |
ebfd146a | 4777 | { |
4a00c761 JJ |
4778 | case NONE: |
4779 | if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR) | |
4780 | return false; | |
4781 | if (supportable_convert_operation (code, vectype_out, vectype_in, | |
4782 | &decl1, &code1)) | |
4783 | break; | |
4784 | /* FALLTHRU */ | |
4785 | unsupported: | |
73fbfcad | 4786 | if (dump_enabled_p ()) |
78c60e3d | 4787 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 4788 | "conversion not supported by target.\n"); |
ebfd146a | 4789 | return false; |
ebfd146a | 4790 | |
4a00c761 | 4791 | case WIDEN: |
86a91c0a RS |
4792 | if (supportable_widening_operation (code, stmt_info, vectype_out, |
4793 | vectype_in, &code1, &code2, | |
4794 | &multi_step_cvt, &interm_types)) | |
4a00c761 JJ |
4795 | { |
4796 | /* Binary widening operation can only be supported directly by the | |
4797 | architecture. */ | |
4798 | gcc_assert (!(multi_step_cvt && op_type == binary_op)); | |
4799 | break; | |
4800 | } | |
4801 | ||
4802 | if (code != FLOAT_EXPR | |
b397965c | 4803 | || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode)) |
4a00c761 JJ |
4804 | goto unsupported; |
4805 | ||
b397965c | 4806 | fltsz = GET_MODE_SIZE (lhs_mode); |
16d22000 | 4807 | FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode) |
4a00c761 | 4808 | { |
16d22000 | 4809 | rhs_mode = rhs_mode_iter.require (); |
c94843d2 RS |
4810 | if (GET_MODE_SIZE (rhs_mode) > fltsz) |
4811 | break; | |
4812 | ||
4a00c761 JJ |
4813 | cvt_type |
4814 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4815 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4816 | if (cvt_type == NULL_TREE) | |
4817 | goto unsupported; | |
4818 | ||
4819 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4820 | { | |
4821 | if (!supportable_convert_operation (code, vectype_out, | |
4822 | cvt_type, &decl1, &codecvt1)) | |
4823 | goto unsupported; | |
4824 | } | |
86a91c0a RS |
4825 | else if (!supportable_widening_operation (code, stmt_info, |
4826 | vectype_out, cvt_type, | |
4827 | &codecvt1, &codecvt2, | |
4828 | &multi_step_cvt, | |
4a00c761 JJ |
4829 | &interm_types)) |
4830 | continue; | |
4831 | else | |
4832 | gcc_assert (multi_step_cvt == 0); | |
4833 | ||
86a91c0a | 4834 | if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type, |
a86ec597 RH |
4835 | vectype_in, &code1, &code2, |
4836 | &multi_step_cvt, &interm_types)) | |
16d22000 RS |
4837 | { |
4838 | found_mode = true; | |
4839 | break; | |
4840 | } | |
4a00c761 JJ |
4841 | } |
4842 | ||
16d22000 | 4843 | if (!found_mode) |
4a00c761 JJ |
4844 | goto unsupported; |
4845 | ||
4846 | if (GET_MODE_SIZE (rhs_mode) == fltsz) | |
4847 | codecvt2 = ERROR_MARK; | |
4848 | else | |
4849 | { | |
4850 | multi_step_cvt++; | |
9771b263 | 4851 | interm_types.safe_push (cvt_type); |
4a00c761 JJ |
4852 | cvt_type = NULL_TREE; |
4853 | } | |
4854 | break; | |
4855 | ||
4856 | case NARROW: | |
4857 | gcc_assert (op_type == unary_op); | |
4858 | if (supportable_narrowing_operation (code, vectype_out, vectype_in, | |
4859 | &code1, &multi_step_cvt, | |
4860 | &interm_types)) | |
4861 | break; | |
4862 | ||
4863 | if (code != FIX_TRUNC_EXPR | |
b397965c | 4864 | || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode)) |
4a00c761 JJ |
4865 | goto unsupported; |
4866 | ||
4a00c761 JJ |
4867 | cvt_type |
4868 | = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0); | |
4869 | cvt_type = get_same_sized_vectype (cvt_type, vectype_in); | |
4870 | if (cvt_type == NULL_TREE) | |
4871 | goto unsupported; | |
4872 | if (!supportable_convert_operation (code, cvt_type, vectype_in, | |
4873 | &decl1, &codecvt1)) | |
4874 | goto unsupported; | |
4875 | if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type, | |
4876 | &code1, &multi_step_cvt, | |
4877 | &interm_types)) | |
4878 | break; | |
4879 | goto unsupported; | |
4880 | ||
4881 | default: | |
4882 | gcc_unreachable (); | |
ebfd146a IR |
4883 | } |
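  /* Editor's note (illustrative, not part of the original source): the
     WIDEN case above covers e.g. FLOAT_EXPR from a narrow integer to a
     wider float when no single widening operation exists.  It searches
     successively wider integer modes RHS_MODE and splits the conversion
     into an integer widening (NOP_EXPR) from VECTYPE_IN to CVT_TYPE
     followed by an integer-to-float step from CVT_TYPE to VECTYPE_OUT;
     for instance short -> double may become short -> int followed by
     int -> double, with the extra intermediate types counted by
     MULTI_STEP_CVT and recorded in INTERM_TYPES.  The NARROW case does
     the reverse for FIX_TRUNC_EXPR: convert to an intermediate integer
     of the source width first, then narrow it to the destination.  */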
4884 | ||
4885 | if (!vec_stmt) /* transformation not required. */ | |
4886 | { | |
adac3a68 | 4887 | DUMP_VECT_SCOPE ("vectorizable_conversion"); |
4a00c761 | 4888 | if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR) |
8bd37302 BS |
4889 | { |
4890 | STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type; | |
68435eb2 RB |
4891 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, |
4892 | cost_vec); | |
8bd37302 | 4893 | } |
4a00c761 JJ |
4894 | else if (modifier == NARROW) |
4895 | { | |
4896 | STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type; | |
68435eb2 RB |
4897 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt, |
4898 | cost_vec); | |
4a00c761 JJ |
4899 | } |
4900 | else | |
4901 | { | |
4902 | STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type; | |
68435eb2 RB |
4903 | vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt, |
4904 | cost_vec); | |
4a00c761 | 4905 | } |
9771b263 | 4906 | interm_types.release (); |
ebfd146a IR |
4907 | return true; |
4908 | } | |
4909 | ||
67b8dbac | 4910 | /* Transform. */ |
73fbfcad | 4911 | if (dump_enabled_p ()) |
78c60e3d | 4912 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 4913 | "transform conversion. ncopies = %d.\n", ncopies); |
ebfd146a | 4914 | |
4a00c761 JJ |
4915 | if (op_type == binary_op) |
4916 | { | |
4917 | if (CONSTANT_CLASS_P (op0)) | |
4918 | op0 = fold_convert (TREE_TYPE (op1), op0); | |
4919 | else if (CONSTANT_CLASS_P (op1)) | |
4920 | op1 = fold_convert (TREE_TYPE (op0), op1); | |
4921 | } | |
4922 | ||
4923 | /* In case of multi-step conversion, we first generate conversion operations | |
4924 | to the intermediate types, and then from those types to the final one. |
4925 | We create vector destinations for the intermediate types (TYPES) received |
4926 | from supportable_*_operation, and store them in the correct order | |
4927 | for future use in vect_create_vectorized_*_stmts (). */ | |
8c681247 | 4928 | auto_vec<tree> vec_dsts (multi_step_cvt + 1); |
82294ec1 JJ |
4929 | vec_dest = vect_create_destination_var (scalar_dest, |
4930 | (cvt_type && modifier == WIDEN) | |
4931 | ? cvt_type : vectype_out); | |
9771b263 | 4932 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4933 | |
4934 | if (multi_step_cvt) | |
4935 | { | |
9771b263 DN |
4936 | for (i = interm_types.length () - 1; |
4937 | interm_types.iterate (i, &intermediate_type); i--) | |
4a00c761 JJ |
4938 | { |
4939 | vec_dest = vect_create_destination_var (scalar_dest, | |
4940 | intermediate_type); | |
9771b263 | 4941 | vec_dsts.quick_push (vec_dest); |
4a00c761 JJ |
4942 | } |
4943 | } | |
ebfd146a | 4944 | |
4a00c761 | 4945 | if (cvt_type) |
82294ec1 JJ |
4946 | vec_dest = vect_create_destination_var (scalar_dest, |
4947 | modifier == WIDEN | |
4948 | ? vectype_out : cvt_type); | |
4a00c761 JJ |
4949 | |
4950 | if (!slp_node) | |
4951 | { | |
30862efc | 4952 | if (modifier == WIDEN) |
4a00c761 | 4953 | { |
c3284718 | 4954 | vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1); |
4a00c761 | 4955 | if (op_type == binary_op) |
9771b263 | 4956 | vec_oprnds1.create (1); |
4a00c761 | 4957 | } |
30862efc | 4958 | else if (modifier == NARROW) |
9771b263 DN |
4959 | vec_oprnds0.create ( |
4960 | 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1)); | |
4a00c761 JJ |
4961 | } |
4962 | else if (code == WIDEN_LSHIFT_EXPR) | |
9771b263 | 4963 | vec_oprnds1.create (slp_node->vec_stmts_size); |
ebfd146a | 4964 | |
4a00c761 | 4965 | last_oprnd = op0; |
ebfd146a IR |
4966 | prev_stmt_info = NULL; |
4967 | switch (modifier) | |
4968 | { | |
4969 | case NONE: | |
4970 | for (j = 0; j < ncopies; j++) | |
4971 | { | |
ebfd146a | 4972 | if (j == 0) |
86a91c0a RS |
4973 | vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0, |
4974 | NULL, slp_node); | |
ebfd146a | 4975 | else |
e4057a39 | 4976 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL); |
ebfd146a | 4977 | |
9771b263 | 4978 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 | 4979 | { |
e1bd7296 | 4980 | stmt_vec_info new_stmt_info; |
4a00c761 JJ |
4981 | /* Arguments are ready, create the new vector stmt. */ |
4982 | if (code1 == CALL_EXPR) | |
4983 | { | |
e1bd7296 | 4984 | gcall *new_stmt = gimple_build_call (decl1, 1, vop0); |
4a00c761 JJ |
4985 | new_temp = make_ssa_name (vec_dest, new_stmt); |
4986 | gimple_call_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 4987 | new_stmt_info |
86a91c0a | 4988 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4a00c761 JJ |
4989 | } |
4990 | else | |
4991 | { | |
4992 | gcc_assert (TREE_CODE_LENGTH (code1) == unary_op); | |
e1bd7296 RS |
4993 | gassign *new_stmt |
4994 | = gimple_build_assign (vec_dest, code1, vop0); | |
4a00c761 JJ |
4995 | new_temp = make_ssa_name (vec_dest, new_stmt); |
4996 | gimple_assign_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 4997 | new_stmt_info |
86a91c0a | 4998 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4a00c761 JJ |
4999 | } |
5000 | ||
4a00c761 | 5001 | if (slp_node) |
e1bd7296 | 5002 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
225ce44b RB |
5003 | else |
5004 | { | |
5005 | if (!prev_stmt_info) | |
e1bd7296 RS |
5006 | STMT_VINFO_VEC_STMT (stmt_info) |
5007 | = *vec_stmt = new_stmt_info; | |
225ce44b | 5008 | else |
e1bd7296 RS |
5009 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
5010 | prev_stmt_info = new_stmt_info; | |
225ce44b | 5011 | } |
4a00c761 | 5012 | } |
ebfd146a IR |
5013 | } |
5014 | break; | |
5015 | ||
5016 | case WIDEN: | |
5017 | /* In case the vectorization factor (VF) is bigger than the number | |
5018 | of elements that we can fit in a vectype (nunits), we have to | |
5019 | generate more than one vector stmt - i.e. - we need to "unroll" |
5020 | the vector stmt by a factor VF/nunits. */ | |
5021 | for (j = 0; j < ncopies; j++) | |
5022 | { | |
4a00c761 | 5023 | /* Handle uses. */ |
ebfd146a | 5024 | if (j == 0) |
4a00c761 JJ |
5025 | { |
5026 | if (slp_node) | |
5027 | { | |
5028 | if (code == WIDEN_LSHIFT_EXPR) | |
5029 | { | |
5030 | unsigned int k; | |
ebfd146a | 5031 | |
4a00c761 JJ |
5032 | vec_oprnd1 = op1; |
5033 | /* Store vec_oprnd1 for every vector stmt to be created | |
5034 | for SLP_NODE. We check during the analysis that all | |
5035 | the shift arguments are the same. */ | |
5036 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 5037 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 | 5038 | |
86a91c0a RS |
5039 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, |
5040 | &vec_oprnds0, NULL, slp_node); | |
4a00c761 JJ |
5041 | } |
5042 | else | |
86a91c0a | 5043 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, |
306b0c92 | 5044 | &vec_oprnds1, slp_node); |
4a00c761 JJ |
5045 | } |
5046 | else | |
5047 | { | |
86a91c0a | 5048 | vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info); |
9771b263 | 5049 | vec_oprnds0.quick_push (vec_oprnd0); |
4a00c761 JJ |
5050 | if (op_type == binary_op) |
5051 | { | |
5052 | if (code == WIDEN_LSHIFT_EXPR) | |
5053 | vec_oprnd1 = op1; | |
5054 | else | |
86a91c0a RS |
5055 | vec_oprnd1 |
5056 | = vect_get_vec_def_for_operand (op1, stmt_info); | |
9771b263 | 5057 | vec_oprnds1.quick_push (vec_oprnd1); |
4a00c761 JJ |
5058 | } |
5059 | } | |
5060 | } | |
ebfd146a | 5061 | else |
4a00c761 | 5062 | { |
e4057a39 | 5063 | vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
9771b263 DN |
5064 | vec_oprnds0.truncate (0); |
5065 | vec_oprnds0.quick_push (vec_oprnd0); | |
4a00c761 JJ |
5066 | if (op_type == binary_op) |
5067 | { | |
5068 | if (code == WIDEN_LSHIFT_EXPR) | |
5069 | vec_oprnd1 = op1; | |
5070 | else | |
e4057a39 | 5071 | vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo, |
4a00c761 | 5072 | vec_oprnd1); |
9771b263 DN |
5073 | vec_oprnds1.truncate (0); |
5074 | vec_oprnds1.quick_push (vec_oprnd1); | |
4a00c761 JJ |
5075 | } |
5076 | } | |
ebfd146a | 5077 | |
4a00c761 JJ |
5078 | /* Arguments are ready. Create the new vector stmts. */ |
5079 | for (i = multi_step_cvt; i >= 0; i--) | |
5080 | { | |
9771b263 | 5081 | tree this_dest = vec_dsts[i]; |
4a00c761 JJ |
5082 | enum tree_code c1 = code1, c2 = code2; |
5083 | if (i == 0 && codecvt2 != ERROR_MARK) | |
5084 | { | |
5085 | c1 = codecvt1; | |
5086 | c2 = codecvt2; | |
5087 | } | |
5088 | vect_create_vectorized_promotion_stmts (&vec_oprnds0, | |
86a91c0a RS |
5089 | &vec_oprnds1, stmt_info, |
5090 | this_dest, gsi, | |
4a00c761 JJ |
5091 | c1, c2, decl1, decl2, |
5092 | op_type); | |
5093 | } | |
5094 | ||
9771b263 | 5095 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 | 5096 | { |
e1bd7296 | 5097 | stmt_vec_info new_stmt_info; |
4a00c761 JJ |
5098 | if (cvt_type) |
5099 | { | |
5100 | if (codecvt1 == CALL_EXPR) | |
5101 | { | |
e1bd7296 | 5102 | gcall *new_stmt = gimple_build_call (decl1, 1, vop0); |
4a00c761 JJ |
5103 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5104 | gimple_call_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 5105 | new_stmt_info |
86a91c0a RS |
5106 | = vect_finish_stmt_generation (stmt_info, new_stmt, |
5107 | gsi); | |
4a00c761 JJ |
5108 | } |
5109 | else | |
5110 | { | |
5111 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 5112 | new_temp = make_ssa_name (vec_dest); |
e1bd7296 RS |
5113 | gassign *new_stmt |
5114 | = gimple_build_assign (new_temp, codecvt1, vop0); | |
5115 | new_stmt_info | |
86a91c0a RS |
5116 | = vect_finish_stmt_generation (stmt_info, new_stmt, |
5117 | gsi); | |
4a00c761 | 5118 | } |
4a00c761 JJ |
5119 | } |
5120 | else | |
e1bd7296 | 5121 | new_stmt_info = vinfo->lookup_def (vop0); |
4a00c761 JJ |
5122 | |
5123 | if (slp_node) | |
e1bd7296 | 5124 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
4a00c761 | 5125 | else |
c689ce1e RB |
5126 | { |
5127 | if (!prev_stmt_info) | |
e1bd7296 | 5128 | STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info; |
c689ce1e | 5129 | else |
e1bd7296 RS |
5130 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
5131 | prev_stmt_info = new_stmt_info; | |
c689ce1e | 5132 | } |
4a00c761 | 5133 | } |
ebfd146a | 5134 | } |
4a00c761 JJ |
5135 | |
5136 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
ebfd146a IR |
5137 | break; |
5138 | ||
5139 | case NARROW: | |
5140 | /* In case the vectorization factor (VF) is bigger than the number | |
5141 | of elements that we can fit in a vectype (nunits), we have to | |
5142 | generate more than one vector stmt - i.e. - we need to "unroll" |
5143 | the vector stmt by a factor VF/nunits. */ | |
5144 | for (j = 0; j < ncopies; j++) | |
5145 | { | |
5146 | /* Handle uses. */ | |
4a00c761 | 5147 | if (slp_node) |
86a91c0a | 5148 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, |
306b0c92 | 5149 | slp_node); |
ebfd146a IR |
5150 | else |
5151 | { | |
9771b263 | 5152 | vec_oprnds0.truncate (0); |
e4057a39 | 5153 | vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0, |
4a00c761 | 5154 | vect_pow2 (multi_step_cvt) - 1); |
ebfd146a IR |
5155 | } |
5156 | ||
4a00c761 JJ |
5157 | /* Arguments are ready. Create the new vector stmts. */ |
5158 | if (cvt_type) | |
9771b263 | 5159 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
4a00c761 JJ |
5160 | { |
5161 | if (codecvt1 == CALL_EXPR) | |
5162 | { | |
e1bd7296 | 5163 | gcall *new_stmt = gimple_build_call (decl1, 1, vop0); |
4a00c761 JJ |
5164 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5165 | gimple_call_set_lhs (new_stmt, new_temp); | |
86a91c0a | 5166 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4a00c761 JJ |
5167 | } |
5168 | else | |
5169 | { | |
5170 | gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op); | |
b731b390 | 5171 | new_temp = make_ssa_name (vec_dest); |
e1bd7296 RS |
5172 | gassign *new_stmt |
5173 | = gimple_build_assign (new_temp, codecvt1, vop0); | |
86a91c0a | 5174 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
4a00c761 | 5175 | } |
ebfd146a | 5176 | |
9771b263 | 5177 | vec_oprnds0[i] = new_temp; |
4a00c761 | 5178 | } |
ebfd146a | 5179 | |
4a00c761 | 5180 | vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt, |
86a91c0a | 5181 | stmt_info, vec_dsts, gsi, |
4a00c761 JJ |
5182 | slp_node, code1, |
5183 | &prev_stmt_info); | |
ebfd146a IR |
5184 | } |
5185 | ||
5186 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
4a00c761 | 5187 | break; |
ebfd146a IR |
5188 | } |
5189 | ||
9771b263 DN |
5190 | vec_oprnds0.release (); |
5191 | vec_oprnds1.release (); | |
9771b263 | 5192 | interm_types.release (); |
ebfd146a IR |
5193 | |
5194 | return true; | |
5195 | } | |
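/* Editor's illustrative sketch (not part of the original source): how
   the NONE / WIDEN / NARROW modifier used throughout
   vectorizable_conversion follows from the subpart counts of the input
   and output vector types.  The toy function assumes both counts are
   constant; names are hypothetical and the snippet is guarded so it is
   not compiled as part of this file.  */
#if 0
enum toy_modifier { TOY_NONE, TOY_WIDEN, TOY_NARROW };

static enum toy_modifier
toy_classify_conversion (unsigned nunits_in, unsigned nunits_out)
{
  if (nunits_out == nunits_in)
    return TOY_NONE;		/* same lane count, e.g. int -> float */
  if (nunits_out % nunits_in == 0)
    return TOY_NARROW;		/* more, narrower lanes out, e.g. int -> short */
  /* Otherwise nunits_in must be a multiple of nunits_out.  */
  return TOY_WIDEN;		/* fewer, wider lanes out, e.g. short -> int */
}
#endif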
ff802fa1 IR |
5196 | |
5197 | ||
ebfd146a IR |
5198 | /* Function vectorizable_assignment. |
5199 | ||
32e8e429 RS |
5200 | Check if STMT_INFO performs an assignment (copy) that can be vectorized. |
5201 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized |
5202 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5203 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
5204 | |
5205 | static bool | |
32e8e429 | 5206 | vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 5207 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 5208 | stmt_vector_for_cost *cost_vec) |
ebfd146a IR |
5209 | { |
5210 | tree vec_dest; | |
5211 | tree scalar_dest; | |
5212 | tree op; | |
ebfd146a IR |
5213 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
5214 | tree new_temp; | |
4fc5ebf1 JG |
5215 | enum vect_def_type dt[1] = {vect_unknown_def_type}; |
5216 | int ndts = 1; | |
ebfd146a | 5217 | int ncopies; |
f18b55bd | 5218 | int i, j; |
6e1aa848 | 5219 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 5220 | tree vop; |
a70d6342 | 5221 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5222 | vec_info *vinfo = stmt_info->vinfo; |
f18b55bd | 5223 | stmt_vec_info prev_stmt_info = NULL; |
fde9c428 RG |
5224 | enum tree_code code; |
5225 | tree vectype_in; | |
ebfd146a | 5226 | |
a70d6342 | 5227 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5228 | return false; |
5229 | ||
66c16fd9 RB |
5230 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5231 | && ! vec_stmt) | |
ebfd146a IR |
5232 | return false; |
5233 | ||
5234 | /* Is vectorizable assignment? */ | |
32e8e429 RS |
5235 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
5236 | if (!stmt) | |
ebfd146a IR |
5237 | return false; |
5238 | ||
5239 | scalar_dest = gimple_assign_lhs (stmt); | |
5240 | if (TREE_CODE (scalar_dest) != SSA_NAME) | |
5241 | return false; | |
5242 | ||
fde9c428 | 5243 | code = gimple_assign_rhs_code (stmt); |
ebfd146a | 5244 | if (gimple_assign_single_p (stmt) |
fde9c428 RG |
5245 | || code == PAREN_EXPR |
5246 | || CONVERT_EXPR_CODE_P (code)) | |
ebfd146a IR |
5247 | op = gimple_assign_rhs1 (stmt); |
5248 | else | |
5249 | return false; | |
5250 | ||
7b7ec6c5 RG |
5251 | if (code == VIEW_CONVERT_EXPR) |
5252 | op = TREE_OPERAND (op, 0); | |
5253 | ||
465c8c19 | 5254 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
928686b1 | 5255 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
465c8c19 JJ |
5256 | |
5257 | /* Multiple types in SLP are handled by creating the appropriate number of | |
5258 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
5259 | case of SLP. */ | |
fce57248 | 5260 | if (slp_node) |
465c8c19 JJ |
5261 | ncopies = 1; |
5262 | else | |
e8f142e2 | 5263 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
465c8c19 JJ |
5264 | |
5265 | gcc_assert (ncopies >= 1); | |
5266 | ||
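   /* E.g. with a vectorization factor of 8 and a four-element vectype
      such as V4SI, vect_get_num_copies returns 2, so two vector copy
      stmts are generated for the scalar stmt in the non-SLP case.  */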
894dd753 | 5267 | if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in)) |
ebfd146a | 5268 | { |
73fbfcad | 5269 | if (dump_enabled_p ()) |
78c60e3d | 5270 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5271 | "use not simple.\n"); |
ebfd146a IR |
5272 | return false; |
5273 | } | |
5274 | ||
fde9c428 RG |
5275 | /* We can handle NOP_EXPR conversions that do not change the number |
5276 | of elements or the vector size. */ | |
7b7ec6c5 RG |
5277 | if ((CONVERT_EXPR_CODE_P (code) |
5278 | || code == VIEW_CONVERT_EXPR) | |
fde9c428 | 5279 | && (!vectype_in |
928686b1 | 5280 | || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits) |
cf098191 RS |
5281 | || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), |
5282 | GET_MODE_SIZE (TYPE_MODE (vectype_in))))) | |
fde9c428 RG |
5283 | return false; |
5284 | ||
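   /* For example, a conversion from a vector of four "int" elements to a
      vector of four "unsigned int" elements keeps both the element count
      and the vector size and is treated as a simple copy here, whereas an
      int -> short conversion halves the vector size and is rejected by
      the check above (it is handled as a real conversion instead).  */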
7b7b1813 RG |
5285 | /* We do not handle bit-precision changes. */ |
5286 | if ((CONVERT_EXPR_CODE_P (code) | |
5287 | || code == VIEW_CONVERT_EXPR) | |
5288 | && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) | |
2be65d9e RS |
5289 | && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
5290 | || !type_has_mode_precision_p (TREE_TYPE (op))) | |
7b7b1813 RG |
5291 | /* But a conversion that does not change the bit-pattern is ok. */ |
5292 | && !((TYPE_PRECISION (TREE_TYPE (scalar_dest)) | |
5293 | > TYPE_PRECISION (TREE_TYPE (op))) | |
2dab46d5 IE |
5294 | && TYPE_UNSIGNED (TREE_TYPE (op))) |
5296 | /* Conversion between boolean types of different sizes is | |
5296 | a simple assignment when their vectypes are the same | |
5297 | boolean vectors. */ | |
5298 | && (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
5299 | || !VECTOR_BOOLEAN_TYPE_P (vectype_in))) | |
7b7b1813 | 5300 | { |
73fbfcad | 5301 | if (dump_enabled_p ()) |
78c60e3d SS |
5302 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5303 | "type conversion to/from bit-precision " | |
e645e942 | 5304 | "unsupported.\n"); |
7b7b1813 RG |
5305 | return false; |
5306 | } | |
5307 | ||
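   /* E.g. a conversion from an unsigned 24-bit bit-field type to unsigned
      int leaves the stored bit-pattern unchanged (the value already fits
      in 24 bits), so it is accepted, whereas the reverse direction would
      have to clear the upper bits and is rejected here.  */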
ebfd146a IR |
5308 | if (!vec_stmt) /* transformation not required. */ |
5309 | { | |
5310 | STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; | |
adac3a68 | 5311 | DUMP_VECT_SCOPE ("vectorizable_assignment"); |
68435eb2 | 5312 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
ebfd146a IR |
5313 | return true; |
5314 | } | |
5315 | ||
67b8dbac | 5316 | /* Transform. */ |
73fbfcad | 5317 | if (dump_enabled_p ()) |
e645e942 | 5318 | dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n"); |
ebfd146a IR |
5319 | |
5320 | /* Handle def. */ | |
5321 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5322 | ||
5323 | /* Handle use. */ | |
f18b55bd | 5324 | for (j = 0; j < ncopies; j++) |
ebfd146a | 5325 | { |
f18b55bd IR |
5326 | /* Handle uses. */ |
5327 | if (j == 0) | |
86a91c0a | 5328 | vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node); |
f18b55bd | 5329 | else |
e4057a39 | 5330 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL); |
f18b55bd IR |
5331 | |
5332 | /* Arguments are ready. Create the new vector stmt. */ | |
e1bd7296 | 5333 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 5334 | FOR_EACH_VEC_ELT (vec_oprnds, i, vop) |
f18b55bd | 5335 | { |
7b7ec6c5 RG |
5336 | if (CONVERT_EXPR_CODE_P (code) |
5337 | || code == VIEW_CONVERT_EXPR) | |
4a73490d | 5338 | vop = build1 (VIEW_CONVERT_EXPR, vectype, vop); |
e1bd7296 | 5339 | gassign *new_stmt = gimple_build_assign (vec_dest, vop); |
f18b55bd IR |
5340 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5341 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
5342 | new_stmt_info |
5343 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
f18b55bd | 5344 | if (slp_node) |
e1bd7296 | 5345 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
f18b55bd | 5346 | } |
ebfd146a IR |
5347 | |
5348 | if (slp_node) | |
f18b55bd IR |
5349 | continue; |
5350 | ||
5351 | if (j == 0) | |
e1bd7296 | 5352 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
f18b55bd | 5353 | else |
e1bd7296 | 5354 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
f18b55bd | 5355 | |
e1bd7296 | 5356 | prev_stmt_info = new_stmt_info; |
f18b55bd | 5357 | } |
b8698a0f | 5358 | |
9771b263 | 5359 | vec_oprnds.release (); |
ebfd146a IR |
5360 | return true; |
5361 | } | |
5362 | ||
9dc3f7de | 5363 | |
1107f3ae IR |
5364 | /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE |
5365 | either as shift by a scalar or by a vector. */ | |
5366 | ||
5367 | bool | |
5368 | vect_supportable_shift (enum tree_code code, tree scalar_type) | |
5369 | { | |
5370 | ||
ef4bddc2 | 5371 | machine_mode vec_mode; |
1107f3ae IR |
5372 | optab optab; |
5373 | int icode; | |
5374 | tree vectype; | |
5375 | ||
5376 | vectype = get_vectype_for_scalar_type (scalar_type); | |
5377 | if (!vectype) | |
5378 | return false; | |
5379 | ||
5380 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5381 | if (!optab | |
5382 | || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing) | |
5383 | { | |
5384 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5385 | if (!optab | |
5386 | || (optab_handler (optab, TYPE_MODE (vectype)) | |
5387 | == CODE_FOR_nothing)) | |
5388 | return false; | |
5389 | } | |
5390 | ||
5391 | vec_mode = TYPE_MODE (vectype); | |
5392 | icode = (int) optab_handler (optab, vec_mode); | |
5393 | if (icode == CODE_FOR_nothing) | |
5394 | return false; | |
5395 | ||
5396 | return true; | |
5397 | } | |
5398 | ||
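/* For example, vect_supportable_shift (LSHIFT_EXPR, int_type_node) returns
   true if the target implements either a vector-shift-by-scalar or a
   vector-shift-by-vector pattern for the corresponding vectype; callers
   such as the pattern recognizer can use this to check whether a shift
   they want to synthesize will actually vectorize.  */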
5399 | ||
9dc3f7de IR |
5400 | /* Function vectorizable_shift. |
5401 | ||
32e8e429 RS |
5402 | Check if STMT_INFO performs a shift operation that can be vectorized. |
5403 | If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized | |
5404 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5405 | Return true if STMT_INFO is vectorizable in this way. */ | |
9dc3f7de IR |
5406 | |
5407 | static bool | |
32e8e429 | 5408 | vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 5409 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 5410 | stmt_vector_for_cost *cost_vec) |
9dc3f7de IR |
5411 | { |
5412 | tree vec_dest; | |
5413 | tree scalar_dest; | |
5414 | tree op0, op1 = NULL; | |
5415 | tree vec_oprnd1 = NULL_TREE; | |
9dc3f7de IR |
5416 | tree vectype; |
5417 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
5418 | enum tree_code code; | |
ef4bddc2 | 5419 | machine_mode vec_mode; |
9dc3f7de IR |
5420 | tree new_temp; |
5421 | optab optab; | |
5422 | int icode; | |
ef4bddc2 | 5423 | machine_mode optab_op2_mode; |
9dc3f7de | 5424 | enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type}; |
4fc5ebf1 | 5425 | int ndts = 2; |
9dc3f7de | 5426 | stmt_vec_info prev_stmt_info; |
928686b1 RS |
5427 | poly_uint64 nunits_in; |
5428 | poly_uint64 nunits_out; | |
9dc3f7de | 5429 | tree vectype_out; |
cede2577 | 5430 | tree op1_vectype; |
9dc3f7de IR |
5431 | int ncopies; |
5432 | int j, i; | |
6e1aa848 DN |
5433 | vec<tree> vec_oprnds0 = vNULL; |
5434 | vec<tree> vec_oprnds1 = vNULL; | |
9dc3f7de IR |
5435 | tree vop0, vop1; |
5436 | unsigned int k; | |
49eab32e | 5437 | bool scalar_shift_arg = true; |
9dc3f7de | 5438 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5439 | vec_info *vinfo = stmt_info->vinfo; |
9dc3f7de IR |
5440 | |
5441 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
5442 | return false; | |
5443 | ||
66c16fd9 RB |
5444 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5445 | && ! vec_stmt) | |
9dc3f7de IR |
5446 | return false; |
5447 | ||
5448 | /* Is STMT a vectorizable shift operation? */ | |
32e8e429 RS |
5449 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
5450 | if (!stmt) | |
9dc3f7de IR |
5451 | return false; |
5452 | ||
5453 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5454 | return false; | |
5455 | ||
5456 | code = gimple_assign_rhs_code (stmt); | |
5457 | ||
5458 | if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR | |
5459 | || code == RROTATE_EXPR)) | |
5460 | return false; | |
5461 | ||
5462 | scalar_dest = gimple_assign_lhs (stmt); | |
5463 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
2be65d9e | 5464 | if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))) |
7b7b1813 | 5465 | { |
73fbfcad | 5466 | if (dump_enabled_p ()) |
78c60e3d | 5467 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5468 | "bit-precision shifts not supported.\n"); |
7b7b1813 RG |
5469 | return false; |
5470 | } | |
9dc3f7de IR |
5471 | |
5472 | op0 = gimple_assign_rhs1 (stmt); | |
894dd753 | 5473 | if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype)) |
9dc3f7de | 5474 | { |
73fbfcad | 5475 | if (dump_enabled_p ()) |
78c60e3d | 5476 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5477 | "use not simple.\n"); |
9dc3f7de IR |
5478 | return false; |
5479 | } | |
5480 | /* If op0 is an external or constant def use a vector type with | |
5481 | the same size as the output vector type. */ | |
5482 | if (!vectype) | |
5483 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5484 | if (vec_stmt) | |
5485 | gcc_assert (vectype); | |
5486 | if (!vectype) | |
5487 | { | |
73fbfcad | 5488 | if (dump_enabled_p ()) |
78c60e3d | 5489 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5490 | "no vectype for scalar type\n"); |
9dc3f7de IR |
5491 | return false; |
5492 | } | |
5493 | ||
5494 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5495 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
928686b1 | 5496 | if (maybe_ne (nunits_out, nunits_in)) |
9dc3f7de IR |
5497 | return false; |
5498 | ||
5499 | op1 = gimple_assign_rhs2 (stmt); | |
fef96d8e RS |
5500 | stmt_vec_info op1_def_stmt_info; |
5501 | if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype, | |
5502 | &op1_def_stmt_info)) | |
9dc3f7de | 5503 | { |
73fbfcad | 5504 | if (dump_enabled_p ()) |
78c60e3d | 5505 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5506 | "use not simple.\n"); |
9dc3f7de IR |
5507 | return false; |
5508 | } | |
5509 | ||
9dc3f7de IR |
5510 | /* Multiple types in SLP are handled by creating the appropriate number of |
5511 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
5512 | case of SLP. */ | |
fce57248 | 5513 | if (slp_node) |
9dc3f7de IR |
5514 | ncopies = 1; |
5515 | else | |
e8f142e2 | 5516 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
9dc3f7de IR |
5517 | |
5518 | gcc_assert (ncopies >= 1); | |
5519 | ||
5520 | /* Determine whether the shift amount is a vector or a scalar. If the | |
5521 | shift/rotate amount is a vector, use the vector/vector shift optabs. */ | |
5522 | ||
dbfa87aa YR |
5523 | if ((dt[1] == vect_internal_def |
5524 | || dt[1] == vect_induction_def) | |
5525 | && !slp_node) | |
49eab32e JJ |
5526 | scalar_shift_arg = false; |
5527 | else if (dt[1] == vect_constant_def | |
5528 | || dt[1] == vect_external_def | |
5529 | || dt[1] == vect_internal_def) | |
5530 | { | |
5531 | /* In SLP, we need to check whether the shift count is the same in | |
5532 | all the stmts; in loops, if it is a constant or invariant, it is | |
5533 | always a scalar shift. */ | |
5534 | if (slp_node) | |
5535 | { | |
b9787581 RS |
5536 | vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node); |
5537 | stmt_vec_info slpstmt_info; | |
49eab32e | 5538 | |
b9787581 RS |
5539 | FOR_EACH_VEC_ELT (stmts, k, slpstmt_info) |
5540 | { | |
5541 | gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt); | |
5542 | if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0)) | |
5543 | scalar_shift_arg = false; | |
5544 | } | |
49eab32e | 5545 | } |
60d393e8 RB |
5546 | |
5547 | /* If the shift amount is computed by a pattern stmt we cannot | |
5548 | use the scalar amount directly, so give up and use a vector | |
5549 | shift. */ | |
fef96d8e RS |
5550 | if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info)) |
5551 | scalar_shift_arg = false; | |
49eab32e JJ |
5552 | } |
5553 | else | |
5554 | { | |
73fbfcad | 5555 | if (dump_enabled_p ()) |
78c60e3d | 5556 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5557 | "operand mode requires invariant argument.\n"); |
49eab32e JJ |
5558 | return false; |
5559 | } | |
5560 | ||
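  /* For example, in "a[i] = b[i] << c[i]" the shift amount varies per
     iteration (vect_internal_def), so scalar_shift_arg is false and the
     vector/vector optab is tried below; in "a[i] = b[i] << 3" the amount
     is a constant and is kept as a scalar shift argument.  */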
9dc3f7de | 5561 | /* Vector shifted by vector. */ |
49eab32e | 5562 | if (!scalar_shift_arg) |
9dc3f7de IR |
5563 | { |
5564 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
73fbfcad | 5565 | if (dump_enabled_p ()) |
78c60e3d | 5566 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5567 | "vector/vector shift/rotate found.\n"); |
78c60e3d | 5568 | |
aa948027 JJ |
5569 | if (!op1_vectype) |
5570 | op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out); | |
5571 | if (op1_vectype == NULL_TREE | |
5572 | || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype)) | |
cede2577 | 5573 | { |
73fbfcad | 5574 | if (dump_enabled_p ()) |
78c60e3d SS |
5575 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5576 | "unusable type for last operand in" | |
e645e942 | 5577 | " vector/vector shift/rotate.\n"); |
cede2577 JJ |
5578 | return false; |
5579 | } | |
9dc3f7de IR |
5580 | } |
5581 | /* See if the machine has a vector shifted by scalar insn and if not | |
5582 | then see if it has a vector shifted by vector insn. */ | |
49eab32e | 5583 | else |
9dc3f7de IR |
5584 | { |
5585 | optab = optab_for_tree_code (code, vectype, optab_scalar); | |
5586 | if (optab | |
5587 | && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing) | |
5588 | { | |
73fbfcad | 5589 | if (dump_enabled_p ()) |
78c60e3d | 5590 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5591 | "vector/scalar shift/rotate found.\n"); |
9dc3f7de IR |
5592 | } |
5593 | else | |
5594 | { | |
5595 | optab = optab_for_tree_code (code, vectype, optab_vector); | |
5596 | if (optab | |
5597 | && (optab_handler (optab, TYPE_MODE (vectype)) | |
5598 | != CODE_FOR_nothing)) | |
5599 | { | |
49eab32e JJ |
5600 | scalar_shift_arg = false; |
5601 | ||
73fbfcad | 5602 | if (dump_enabled_p ()) |
78c60e3d | 5603 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5604 | "vector/vector shift/rotate found.\n"); |
9dc3f7de IR |
5605 | |
5606 | /* Unlike the other binary operators, shifts/rotates take an rhs | |
5607 | of type int rather than one of the same type as the lhs, so | |
5608 | make sure the scalar has the right type if we are dealing | |
aa948027 | 5609 | with vectors of long long/long/short/char. */ |
9dc3f7de IR |
5610 | if (dt[1] == vect_constant_def) |
5611 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
aa948027 JJ |
5612 | else if (!useless_type_conversion_p (TREE_TYPE (vectype), |
5613 | TREE_TYPE (op1))) | |
5614 | { | |
5615 | if (slp_node | |
5616 | && TYPE_MODE (TREE_TYPE (vectype)) | |
5617 | != TYPE_MODE (TREE_TYPE (op1))) | |
5618 | { | |
73fbfcad | 5619 | if (dump_enabled_p ()) |
78c60e3d SS |
5620 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5621 | "unusable type for last operand in" | |
e645e942 | 5622 | " vector/vector shift/rotate.\n"); |
21c0a521 | 5623 | return false; |
aa948027 JJ |
5624 | } |
5625 | if (vec_stmt && !slp_node) | |
5626 | { | |
5627 | op1 = fold_convert (TREE_TYPE (vectype), op1); | |
86a91c0a | 5628 | op1 = vect_init_vector (stmt_info, op1, |
aa948027 JJ |
5629 | TREE_TYPE (vectype), NULL); |
5630 | } | |
5631 | } | |
9dc3f7de IR |
5632 | } |
5633 | } | |
5634 | } | |
9dc3f7de IR |
5635 | |
5636 | /* Supportable by target? */ | |
5637 | if (!optab) | |
5638 | { | |
73fbfcad | 5639 | if (dump_enabled_p ()) |
78c60e3d | 5640 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5641 | "no optab.\n"); |
9dc3f7de IR |
5642 | return false; |
5643 | } | |
5644 | vec_mode = TYPE_MODE (vectype); | |
5645 | icode = (int) optab_handler (optab, vec_mode); | |
5646 | if (icode == CODE_FOR_nothing) | |
5647 | { | |
73fbfcad | 5648 | if (dump_enabled_p ()) |
78c60e3d | 5649 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5650 | "op not supported by target.\n"); |
9dc3f7de | 5651 | /* Check only during analysis. */ |
cf098191 | 5652 | if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) |
ca09abcb RS |
5653 | || (!vec_stmt |
5654 | && !vect_worthwhile_without_simd_p (vinfo, code))) | |
9dc3f7de | 5655 | return false; |
73fbfcad | 5656 | if (dump_enabled_p ()) |
e645e942 TJ |
5657 | dump_printf_loc (MSG_NOTE, vect_location, |
5658 | "proceeding using word mode.\n"); | |
9dc3f7de IR |
5659 | } |
5660 | ||
5661 | /* Worthwhile without SIMD support? Check only during analysis. */ | |
ca09abcb RS |
5662 | if (!vec_stmt |
5663 | && !VECTOR_MODE_P (TYPE_MODE (vectype)) | |
5664 | && !vect_worthwhile_without_simd_p (vinfo, code)) | |
9dc3f7de | 5665 | { |
73fbfcad | 5666 | if (dump_enabled_p ()) |
78c60e3d | 5667 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5668 | "not worthwhile without SIMD support.\n"); |
9dc3f7de IR |
5669 | return false; |
5670 | } | |
5671 | ||
5672 | if (!vec_stmt) /* transformation not required. */ | |
5673 | { | |
5674 | STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type; | |
adac3a68 | 5675 | DUMP_VECT_SCOPE ("vectorizable_shift"); |
68435eb2 | 5676 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
9dc3f7de IR |
5677 | return true; |
5678 | } | |
5679 | ||
67b8dbac | 5680 | /* Transform. */ |
9dc3f7de | 5681 | |
73fbfcad | 5682 | if (dump_enabled_p ()) |
78c60e3d | 5683 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5684 | "transform binary/unary operation.\n"); |
9dc3f7de IR |
5685 | |
5686 | /* Handle def. */ | |
5687 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
5688 | ||
9dc3f7de IR |
5689 | prev_stmt_info = NULL; |
5690 | for (j = 0; j < ncopies; j++) | |
5691 | { | |
5692 | /* Handle uses. */ | |
5693 | if (j == 0) | |
5694 | { | |
5695 | if (scalar_shift_arg) | |
5696 | { | |
5697 | /* Vector shl and shr insn patterns can be defined with scalar | |
5698 | operand 2 (shift operand). In this case, use constant or loop | |
5699 | invariant op1 directly, without extending it to vector mode | |
5700 | first. */ | |
5701 | optab_op2_mode = insn_data[icode].operand[2].mode; | |
5702 | if (!VECTOR_MODE_P (optab_op2_mode)) | |
5703 | { | |
73fbfcad | 5704 | if (dump_enabled_p ()) |
78c60e3d | 5705 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 5706 | "operand 1 using scalar mode.\n"); |
9dc3f7de | 5707 | vec_oprnd1 = op1; |
8930f723 | 5708 | vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1); |
9771b263 | 5709 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5710 | if (slp_node) |
5711 | { | |
5712 | /* Store vec_oprnd1 for every vector stmt to be created | |
5713 | for SLP_NODE. We check during the analysis that all | |
5714 | the shift arguments are the same. | |
5715 | TODO: Allow different constants for different vector | |
5716 | stmts generated for an SLP instance. */ | |
5717 | for (k = 0; k < slp_node->vec_stmts_size - 1; k++) | |
9771b263 | 5718 | vec_oprnds1.quick_push (vec_oprnd1); |
9dc3f7de IR |
5719 | } |
5720 | } | |
5721 | } | |
5722 | ||
5723 | /* vec_oprnd1 is available if operand 1 should be of a scalar type | |
5724 | (a special case for certain kinds of vector shifts); otherwise, | |
5725 | operand 1 should be of a vector type (the usual case). */ | |
5726 | if (vec_oprnd1) | |
86a91c0a RS |
5727 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, |
5728 | slp_node); | |
9dc3f7de | 5729 | else |
86a91c0a RS |
5730 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, |
5731 | slp_node); | |
9dc3f7de IR |
5732 | } |
5733 | else | |
e4057a39 | 5734 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1); |
9dc3f7de IR |
5735 | |
5736 | /* Arguments are ready. Create the new vector stmt. */ | |
e1bd7296 | 5737 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 5738 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
9dc3f7de | 5739 | { |
9771b263 | 5740 | vop1 = vec_oprnds1[i]; |
e1bd7296 | 5741 | gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1); |
9dc3f7de IR |
5742 | new_temp = make_ssa_name (vec_dest, new_stmt); |
5743 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
5744 | new_stmt_info |
5745 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
9dc3f7de | 5746 | if (slp_node) |
e1bd7296 | 5747 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
9dc3f7de IR |
5748 | } |
5749 | ||
5750 | if (slp_node) | |
5751 | continue; | |
5752 | ||
5753 | if (j == 0) | |
e1bd7296 | 5754 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
9dc3f7de | 5755 | else |
e1bd7296 RS |
5756 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
5757 | prev_stmt_info = new_stmt_info; | |
9dc3f7de IR |
5758 | } |
5759 | ||
9771b263 DN |
5760 | vec_oprnds0.release (); |
5761 | vec_oprnds1.release (); | |
9dc3f7de IR |
5762 | |
5763 | return true; | |
5764 | } | |
5765 | ||
5766 | ||
ebfd146a IR |
5767 | /* Function vectorizable_operation. |
5768 | ||
32e8e429 | 5769 | Check if STMT_INFO performs a binary, unary or ternary operation that can |
16949072 | 5770 | be vectorized. |
32e8e429 RS |
5771 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized |
5772 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
5773 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
5774 | |
5775 | static bool | |
32e8e429 | 5776 | vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 5777 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
68435eb2 | 5778 | stmt_vector_for_cost *cost_vec) |
ebfd146a | 5779 | { |
00f07b86 | 5780 | tree vec_dest; |
ebfd146a | 5781 | tree scalar_dest; |
16949072 | 5782 | tree op0, op1 = NULL_TREE, op2 = NULL_TREE; |
00f07b86 | 5783 | tree vectype; |
ebfd146a | 5784 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
0eb952ea | 5785 | enum tree_code code, orig_code; |
ef4bddc2 | 5786 | machine_mode vec_mode; |
ebfd146a IR |
5787 | tree new_temp; |
5788 | int op_type; | |
00f07b86 | 5789 | optab optab; |
523ba738 | 5790 | bool target_support_p; |
16949072 RG |
5791 | enum vect_def_type dt[3] |
5792 | = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 5793 | int ndts = 3; |
ebfd146a | 5794 | stmt_vec_info prev_stmt_info; |
928686b1 RS |
5795 | poly_uint64 nunits_in; |
5796 | poly_uint64 nunits_out; | |
ebfd146a IR |
5797 | tree vectype_out; |
5798 | int ncopies; | |
5799 | int j, i; | |
6e1aa848 DN |
5800 | vec<tree> vec_oprnds0 = vNULL; |
5801 | vec<tree> vec_oprnds1 = vNULL; | |
5802 | vec<tree> vec_oprnds2 = vNULL; | |
16949072 | 5803 | tree vop0, vop1, vop2; |
a70d6342 | 5804 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 5805 | vec_info *vinfo = stmt_info->vinfo; |
a70d6342 | 5806 | |
a70d6342 | 5807 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
5808 | return false; |
5809 | ||
66c16fd9 RB |
5810 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
5811 | && ! vec_stmt) | |
ebfd146a IR |
5812 | return false; |
5813 | ||
5814 | /* Is STMT a vectorizable binary/unary/ternary operation? */ | |
32e8e429 RS |
5815 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
5816 | if (!stmt) | |
ebfd146a IR |
5817 | return false; |
5818 | ||
5819 | if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME) | |
5820 | return false; | |
5821 | ||
0eb952ea | 5822 | orig_code = code = gimple_assign_rhs_code (stmt); |
ebfd146a | 5823 | |
1af4ebf5 MG |
5824 | /* For pointer addition and subtraction, we should use the normal |
5825 | plus and minus for the vector operation. */ | |
ebfd146a IR |
5826 | if (code == POINTER_PLUS_EXPR) |
5827 | code = PLUS_EXPR; | |
1af4ebf5 MG |
5828 | if (code == POINTER_DIFF_EXPR) |
5829 | code = MINUS_EXPR; | |
ebfd146a IR |
5830 | |
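  /* E.g. "q = p + 4" (POINTER_PLUS_EXPR) is vectorized as an ordinary
     PLUS_EXPR on pointer-sized elements, and "n = p - q"
     (POINTER_DIFF_EXPR) as a MINUS_EXPR; the latter additionally needs
     the VIEW_CONVERT_EXPR of the result that is set up further down.  */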
5831 | /* Support only unary or binary operations. */ | |
5832 | op_type = TREE_CODE_LENGTH (code); | |
16949072 | 5833 | if (op_type != unary_op && op_type != binary_op && op_type != ternary_op) |
ebfd146a | 5834 | { |
73fbfcad | 5835 | if (dump_enabled_p ()) |
78c60e3d | 5836 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5837 | "num. args = %d (not unary/binary/ternary op).\n", |
78c60e3d | 5838 | op_type); |
ebfd146a IR |
5839 | return false; |
5840 | } | |
5841 | ||
b690cc0f RG |
5842 | scalar_dest = gimple_assign_lhs (stmt); |
5843 | vectype_out = STMT_VINFO_VECTYPE (stmt_info); | |
5844 | ||
7b7b1813 RG |
5845 | /* Most operations cannot handle bit-precision types without extra |
5846 | truncations. */ | |
045c1278 | 5847 | if (!VECTOR_BOOLEAN_TYPE_P (vectype_out) |
2be65d9e | 5848 | && !type_has_mode_precision_p (TREE_TYPE (scalar_dest)) |
7b7b1813 RG |
5849 | /* Exceptions are bitwise binary operations. */ | |
5850 | && code != BIT_IOR_EXPR | |
5851 | && code != BIT_XOR_EXPR | |
5852 | && code != BIT_AND_EXPR) | |
5853 | { | |
73fbfcad | 5854 | if (dump_enabled_p ()) |
78c60e3d | 5855 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5856 | "bit-precision arithmetic not supported.\n"); |
7b7b1813 RG |
5857 | return false; |
5858 | } | |
5859 | ||
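  /* For instance, a PLUS_EXPR whose result is a 24-bit bit-field type
     would need the result truncated back to 24 bits after every vector
     addition and is rejected, while BIT_AND_EXPR on the same type is fine
     because AND cannot set bits outside the operands' precision.  */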
ebfd146a | 5860 | op0 = gimple_assign_rhs1 (stmt); |
894dd753 | 5861 | if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype)) |
ebfd146a | 5862 | { |
73fbfcad | 5863 | if (dump_enabled_p ()) |
78c60e3d | 5864 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5865 | "use not simple.\n"); |
ebfd146a IR |
5866 | return false; |
5867 | } | |
b690cc0f RG |
5868 | /* If op0 is an external or constant def use a vector type with |
5869 | the same size as the output vector type. */ | |
5870 | if (!vectype) | |
b036c6c5 IE |
5871 | { |
5872 | /* For a boolean type we cannot determine the vectype from an | |
5873 | invariant value (we don't know whether it is a vector | |
5874 | of booleans or a vector of integers). We use the output | |
5875 | vectype because operations on booleans don't change the | |
5876 | type. */ | |
2568d8a1 | 5877 | if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0))) |
b036c6c5 | 5878 | { |
2568d8a1 | 5879 | if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest))) |
b036c6c5 IE |
5880 | { |
5881 | if (dump_enabled_p ()) | |
5882 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5883 | "not supported operation on bool value.\n"); | |
5884 | return false; | |
5885 | } | |
5886 | vectype = vectype_out; | |
5887 | } | |
5888 | else | |
5889 | vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out); | |
5890 | } | |
7d8930a0 IR |
5891 | if (vec_stmt) |
5892 | gcc_assert (vectype); | |
5893 | if (!vectype) | |
5894 | { | |
73fbfcad | 5895 | if (dump_enabled_p ()) |
7d8930a0 | 5896 | { |
78c60e3d SS |
5897 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
5898 | "no vectype for scalar type "); | |
5899 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
5900 | TREE_TYPE (op0)); | |
e645e942 | 5901 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
7d8930a0 IR |
5902 | } |
5903 | ||
5904 | return false; | |
5905 | } | |
b690cc0f RG |
5906 | |
5907 | nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); | |
5908 | nunits_in = TYPE_VECTOR_SUBPARTS (vectype); | |
928686b1 | 5909 | if (maybe_ne (nunits_out, nunits_in)) |
b690cc0f | 5910 | return false; |
ebfd146a | 5911 | |
16949072 | 5912 | if (op_type == binary_op || op_type == ternary_op) |
ebfd146a IR |
5913 | { |
5914 | op1 = gimple_assign_rhs2 (stmt); | |
894dd753 | 5915 | if (!vect_is_simple_use (op1, vinfo, &dt[1])) |
ebfd146a | 5916 | { |
73fbfcad | 5917 | if (dump_enabled_p ()) |
78c60e3d | 5918 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5919 | "use not simple.\n"); |
ebfd146a IR |
5920 | return false; |
5921 | } | |
5922 | } | |
16949072 RG |
5923 | if (op_type == ternary_op) |
5924 | { | |
5925 | op2 = gimple_assign_rhs3 (stmt); | |
894dd753 | 5926 | if (!vect_is_simple_use (op2, vinfo, &dt[2])) |
16949072 | 5927 | { |
73fbfcad | 5928 | if (dump_enabled_p ()) |
78c60e3d | 5929 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5930 | "use not simple.\n"); |
16949072 RG |
5931 | return false; |
5932 | } | |
5933 | } | |
ebfd146a | 5934 | |
b690cc0f | 5935 | /* Multiple types in SLP are handled by creating the appropriate number of |
ff802fa1 | 5936 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
b690cc0f | 5937 | case of SLP. */ |
fce57248 | 5938 | if (slp_node) |
b690cc0f RG |
5939 | ncopies = 1; |
5940 | else | |
e8f142e2 | 5941 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
b690cc0f RG |
5942 | |
5943 | gcc_assert (ncopies >= 1); | |
5944 | ||
9dc3f7de | 5945 | /* Shifts are handled in vectorizable_shift (). */ |
ebfd146a IR |
5946 | if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR |
5947 | || code == RROTATE_EXPR) | |
9dc3f7de | 5948 | return false; |
ebfd146a | 5949 | |
ebfd146a | 5950 | /* Supportable by target? */ |
00f07b86 RH |
5951 | |
5952 | vec_mode = TYPE_MODE (vectype); | |
5953 | if (code == MULT_HIGHPART_EXPR) | |
523ba738 | 5954 | target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)); |
00f07b86 RH |
5955 | else |
5956 | { | |
5957 | optab = optab_for_tree_code (code, vectype, optab_default); | |
5958 | if (!optab) | |
5deb57cb | 5959 | { |
73fbfcad | 5960 | if (dump_enabled_p ()) |
78c60e3d | 5961 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5962 | "no optab.\n"); |
00f07b86 | 5963 | return false; |
5deb57cb | 5964 | } |
523ba738 RS |
5965 | target_support_p = (optab_handler (optab, vec_mode) |
5966 | != CODE_FOR_nothing); | |
5deb57cb JJ |
5967 | } |
5968 | ||
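  /* E.g. on a 64-bit target a BIT_AND_EXPR on a vector of eight chars can
     still be carried out in word mode even if there is no V8QI and
     pattern: the checks below only allow that fallback when the vector
     size equals UNITS_PER_WORD and vect_worthwhile_without_simd_p accepts
     the operation code.  */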
523ba738 | 5969 | if (!target_support_p) |
ebfd146a | 5970 | { |
73fbfcad | 5971 | if (dump_enabled_p ()) |
78c60e3d | 5972 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5973 | "op not supported by target.\n"); |
ebfd146a | 5974 | /* Check only during analysis. */ |
cf098191 | 5975 | if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) |
ca09abcb | 5976 | || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code))) |
ebfd146a | 5977 | return false; |
73fbfcad | 5978 | if (dump_enabled_p ()) |
e645e942 TJ |
5979 | dump_printf_loc (MSG_NOTE, vect_location, |
5980 | "proceeding using word mode.\n"); | |
383d9c83 IR |
5981 | } |
5982 | ||
4a00c761 | 5983 | /* Worthwhile without SIMD support? Check only during analysis. */ |
5deb57cb JJ |
5984 | if (!VECTOR_MODE_P (vec_mode) |
5985 | && !vec_stmt | |
ca09abcb | 5986 | && !vect_worthwhile_without_simd_p (vinfo, code)) |
7d8930a0 | 5987 | { |
73fbfcad | 5988 | if (dump_enabled_p ()) |
78c60e3d | 5989 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 5990 | "not worthwhile without SIMD support.\n"); |
e34842c6 | 5991 | return false; |
7d8930a0 | 5992 | } |
ebfd146a | 5993 | |
ebfd146a IR |
5994 | if (!vec_stmt) /* transformation not required. */ |
5995 | { | |
4a00c761 | 5996 | STMT_VINFO_TYPE (stmt_info) = op_vec_info_type; |
adac3a68 | 5997 | DUMP_VECT_SCOPE ("vectorizable_operation"); |
68435eb2 | 5998 | vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec); |
ebfd146a IR |
5999 | return true; |
6000 | } | |
6001 | ||
67b8dbac | 6002 | /* Transform. */ |
ebfd146a | 6003 | |
73fbfcad | 6004 | if (dump_enabled_p ()) |
78c60e3d | 6005 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6006 | "transform binary/unary operation.\n"); |
383d9c83 | 6007 | |
0eb952ea JJ |
6008 | /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as |
6009 | vectors with unsigned elements, but the result is signed. So, we | |
6010 | need to compute the MINUS_EXPR into a vectype temporary and | |
6011 | VIEW_CONVERT_EXPR it into the final vectype_out result. */ | |
6012 | tree vec_cvt_dest = NULL_TREE; | |
6013 | if (orig_code == POINTER_DIFF_EXPR) | |
7b76867b RB |
6014 | { |
6015 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
6016 | vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
6017 | } | |
6018 | /* Handle def. */ | |
6019 | else | |
6020 | vec_dest = vect_create_destination_var (scalar_dest, vectype_out); | |
0eb952ea | 6021 | |
ebfd146a IR |
6022 | /* In case the vectorization factor (VF) is bigger than the number |
6023 | of elements that we can fit in a vectype (nunits), we have to generate | |
6024 | more than one vector stmt - i.e - we need to "unroll" the | |
4a00c761 JJ |
6025 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
6026 | from one copy of the vector stmt to the next, in the field | |
6027 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following | |
6028 | stages to find the correct vector defs to be used when vectorizing | |
6029 | stmts that use the defs of the current stmt. The example below | |
6030 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., | |
6031 | we need to create 4 vectorized stmts): | |
6032 | ||
6033 | before vectorization: | |
6034 | RELATED_STMT VEC_STMT | |
6035 | S1: x = memref - - | |
6036 | S2: z = x + 1 - - | |
6037 | ||
6038 | step 1: vectorize stmt S1 (done in vectorizable_load. See more details | |
6039 | there): | |
6040 | RELATED_STMT VEC_STMT | |
6041 | VS1_0: vx0 = memref0 VS1_1 - | |
6042 | VS1_1: vx1 = memref1 VS1_2 - | |
6043 | VS1_2: vx2 = memref2 VS1_3 - | |
6044 | VS1_3: vx3 = memref3 - - | |
6045 | S1: x = load - VS1_0 | |
6046 | S2: z = x + 1 - - | |
6047 | ||
6048 | step2: vectorize stmt S2 (done here): | |
6049 | To vectorize stmt S2 we first need to find the relevant vector | |
6050 | def for the first operand 'x'. This is, as usual, obtained from | |
6051 | the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt | |
6052 | that defines 'x' (S1). This way we find the stmt VS1_0, and the | |
6053 | relevant vector def 'vx0'. Having found 'vx0' we can generate | |
6054 | the vector stmt VS2_0, and as usual, record it in the | |
6055 | STMT_VINFO_VEC_STMT of stmt S2. | |
6056 | When creating the second copy (VS2_1), we obtain the relevant vector | |
6057 | def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of | |
6058 | stmt VS1_0. This way we find the stmt VS1_1 and the relevant | |
6059 | vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a | |
6060 | pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0. | |
6061 | Similarly when creating stmts VS2_2 and VS2_3. This is the resulting | |
6062 | chain of stmts and pointers: | |
6063 | RELATED_STMT VEC_STMT | |
6064 | VS1_0: vx0 = memref0 VS1_1 - | |
6065 | VS1_1: vx1 = memref1 VS1_2 - | |
6066 | VS1_2: vx2 = memref2 VS1_3 - | |
6067 | VS1_3: vx3 = memref3 - - | |
6068 | S1: x = load - VS1_0 | |
6069 | VS2_0: vz0 = vx0 + v1 VS2_1 - | |
6070 | VS2_1: vz1 = vx1 + v1 VS2_2 - | |
6071 | VS2_2: vz2 = vx2 + v1 VS2_3 - | |
6072 | VS2_3: vz3 = vx3 + v1 - - | |
6073 | S2: z = x + 1 - VS2_0 */ | |
ebfd146a IR |
6074 | |
6075 | prev_stmt_info = NULL; | |
6076 | for (j = 0; j < ncopies; j++) | |
6077 | { | |
6078 | /* Handle uses. */ | |
6079 | if (j == 0) | |
4a00c761 | 6080 | { |
d6476f90 | 6081 | if (op_type == binary_op) |
86a91c0a | 6082 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1, |
306b0c92 | 6083 | slp_node); |
d6476f90 RB |
6084 | else if (op_type == ternary_op) |
6085 | { | |
6086 | if (slp_node) | |
6087 | { | |
6088 | auto_vec<tree> ops(3); | |
6089 | ops.quick_push (op0); | |
6090 | ops.quick_push (op1); | |
6091 | ops.quick_push (op2); | |
6092 | auto_vec<vec<tree> > vec_defs(3); | |
6093 | vect_get_slp_defs (ops, slp_node, &vec_defs); | |
6094 | vec_oprnds0 = vec_defs[0]; | |
6095 | vec_oprnds1 = vec_defs[1]; | |
6096 | vec_oprnds2 = vec_defs[2]; | |
6097 | } | |
6098 | else | |
6099 | { | |
86a91c0a RS |
6100 | vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, |
6101 | &vec_oprnds1, NULL); | |
6102 | vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2, | |
6103 | NULL, NULL); | |
d6476f90 RB |
6104 | } |
6105 | } | |
4a00c761 | 6106 | else |
86a91c0a | 6107 | vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL, |
306b0c92 | 6108 | slp_node); |
4a00c761 | 6109 | } |
ebfd146a | 6110 | else |
4a00c761 | 6111 | { |
e4057a39 | 6112 | vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1); |
4a00c761 JJ |
6113 | if (op_type == ternary_op) |
6114 | { | |
9771b263 | 6115 | tree vec_oprnd = vec_oprnds2.pop (); |
e4057a39 | 6116 | vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 6117 | vec_oprnd)); |
4a00c761 JJ |
6118 | } |
6119 | } | |
6120 | ||
6121 | /* Arguments are ready. Create the new vector stmt. */ | |
e1bd7296 | 6122 | stmt_vec_info new_stmt_info = NULL; |
9771b263 | 6123 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0) |
ebfd146a | 6124 | { |
4a00c761 | 6125 | vop1 = ((op_type == binary_op || op_type == ternary_op) |
9771b263 | 6126 | ? vec_oprnds1[i] : NULL_TREE); |
4a00c761 | 6127 | vop2 = ((op_type == ternary_op) |
9771b263 | 6128 | ? vec_oprnds2[i] : NULL_TREE); |
e1bd7296 RS |
6129 | gassign *new_stmt = gimple_build_assign (vec_dest, code, |
6130 | vop0, vop1, vop2); | |
4a00c761 JJ |
6131 | new_temp = make_ssa_name (vec_dest, new_stmt); |
6132 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
6133 | new_stmt_info |
6134 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
0eb952ea JJ |
6135 | if (vec_cvt_dest) |
6136 | { | |
6137 | new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp); | |
e1bd7296 RS |
6138 | gassign *new_stmt |
6139 | = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR, | |
6140 | new_temp); | |
0eb952ea JJ |
6141 | new_temp = make_ssa_name (vec_cvt_dest, new_stmt); |
6142 | gimple_assign_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 6143 | new_stmt_info |
86a91c0a | 6144 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
0eb952ea | 6145 | } |
4a00c761 | 6146 | if (slp_node) |
e1bd7296 | 6147 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
ebfd146a IR |
6148 | } |
6149 | ||
4a00c761 JJ |
6150 | if (slp_node) |
6151 | continue; | |
6152 | ||
6153 | if (j == 0) | |
e1bd7296 | 6154 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
4a00c761 | 6155 | else |
e1bd7296 RS |
6156 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
6157 | prev_stmt_info = new_stmt_info; | |
ebfd146a IR |
6158 | } |
6159 | ||
9771b263 DN |
6160 | vec_oprnds0.release (); |
6161 | vec_oprnds1.release (); | |
6162 | vec_oprnds2.release (); | |
ebfd146a | 6163 | |
ebfd146a IR |
6164 | return true; |
6165 | } | |
6166 | ||
89fa689a | 6167 | /* A helper function to ensure data reference DR_INFO's base alignment. */ |
c716e67f XDL |
6168 | |
6169 | static void | |
89fa689a | 6170 | ensure_base_align (dr_vec_info *dr_info) |
c716e67f | 6171 | { |
89fa689a | 6172 | if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED) |
c716e67f XDL |
6173 | return; |
6174 | ||
89fa689a | 6175 | if (dr_info->base_misaligned) |
c716e67f | 6176 | { |
89fa689a | 6177 | tree base_decl = dr_info->base_decl; |
c716e67f | 6178 | |
89fa689a RS |
6179 | unsigned int align_base_to |
6180 | = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT; | |
f702e7d4 | 6181 | |
428f0c67 | 6182 | if (decl_in_symtab_p (base_decl)) |
f702e7d4 | 6183 | symtab_node::get (base_decl)->increase_alignment (align_base_to); |
428f0c67 JH |
6184 | else |
6185 | { | |
f702e7d4 | 6186 | SET_DECL_ALIGN (base_decl, align_base_to); |
428f0c67 JH |
6187 | DECL_USER_ALIGN (base_decl) = 1; |
6188 | } | |
89fa689a | 6189 | dr_info->base_misaligned = false; |
c716e67f XDL |
6190 | } |
6191 | } | |
6192 | ||
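/* For example, if a vectorized loop accesses a global array whose
   declaration is only 8-byte aligned while DR_TARGET_ALIGNMENT asks for
   32 bytes, the declaration's alignment is raised here (through the
   symbol table for symtab decls, or SET_DECL_ALIGN for local decls) so
   that the aligned vector accesses generated later are valid.  */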
ebfd146a | 6193 | |
44fc7854 BE |
6194 | /* Function get_group_alias_ptr_type. |
6195 | ||
32e8e429 | 6196 | Return the alias type for the group starting at FIRST_STMT_INFO. */ |
44fc7854 BE |
6197 | |
6198 | static tree | |
32e8e429 | 6199 | get_group_alias_ptr_type (stmt_vec_info first_stmt_info) |
44fc7854 BE |
6200 | { |
6201 | struct data_reference *first_dr, *next_dr; | |
44fc7854 | 6202 | |
91987857 RS |
6203 | first_dr = STMT_VINFO_DATA_REF (first_stmt_info); |
6204 | stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info); | |
bffb8014 | 6205 | while (next_stmt_info) |
44fc7854 | 6206 | { |
bffb8014 | 6207 | next_dr = STMT_VINFO_DATA_REF (next_stmt_info); |
44fc7854 BE |
6208 | if (get_alias_set (DR_REF (first_dr)) |
6209 | != get_alias_set (DR_REF (next_dr))) | |
6210 | { | |
6211 | if (dump_enabled_p ()) | |
6212 | dump_printf_loc (MSG_NOTE, vect_location, | |
6213 | "conflicting alias set types.\n"); | |
6214 | return ptr_type_node; | |
6215 | } | |
bffb8014 | 6216 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
44fc7854 BE |
6217 | } |
6218 | return reference_alias_ptr_type (DR_REF (first_dr)); | |
6219 | } | |
6220 | ||
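/* E.g. for an interleaved group that stores an "int" member and a "float"
   member of the same struct, the alias sets of the two references differ,
   so ptr_type_node is returned and the vector accesses built for the group
   use the most conservative alias set; otherwise the more precise alias
   pointer type of the first reference is kept.  */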
6221 | ||
ebfd146a IR |
6222 | /* Function vectorizable_store. |
6223 | ||
32e8e429 RS |
6224 | Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure) |
6225 | that can be vectorized. | |
6226 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
6227 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
6228 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
6229 | |
6230 | static bool | |
32e8e429 | 6231 | vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
6232 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
6233 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 6234 | { |
ebfd146a IR |
6235 | tree data_ref; |
6236 | tree op; | |
6237 | tree vec_oprnd = NULL_TREE; | |
272c6793 | 6238 | tree elem_type; |
ebfd146a | 6239 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 6240 | struct loop *loop = NULL; |
ef4bddc2 | 6241 | machine_mode vec_mode; |
ebfd146a IR |
6242 | tree dummy; |
6243 | enum dr_alignment_support alignment_support_scheme; | |
929b4411 RS |
6244 | enum vect_def_type rhs_dt = vect_unknown_def_type; |
6245 | enum vect_def_type mask_dt = vect_unknown_def_type; | |
ebfd146a IR |
6246 | stmt_vec_info prev_stmt_info = NULL; |
6247 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 6248 | tree dataref_offset = NULL_TREE; |
355fe088 | 6249 | gimple *ptr_incr = NULL; |
ebfd146a IR |
6250 | int ncopies; |
6251 | int j; | |
bffb8014 | 6252 | stmt_vec_info first_stmt_info; |
2de001ee | 6253 | bool grouped_store; |
ebfd146a | 6254 | unsigned int group_size, i; |
6e1aa848 DN |
6255 | vec<tree> oprnds = vNULL; |
6256 | vec<tree> result_chain = vNULL; | |
09dfa495 | 6257 | tree offset = NULL_TREE; |
6e1aa848 | 6258 | vec<tree> vec_oprnds = vNULL; |
ebfd146a | 6259 | bool slp = (slp_node != NULL); |
ebfd146a | 6260 | unsigned int vec_num; |
a70d6342 | 6261 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
310213d4 | 6262 | vec_info *vinfo = stmt_info->vinfo; |
272c6793 | 6263 | tree aggr_type; |
134c85ca | 6264 | gather_scatter_info gs_info; |
d9f21f6a | 6265 | poly_uint64 vf; |
2de001ee | 6266 | vec_load_store_type vls_type; |
44fc7854 | 6267 | tree ref_type; |
a70d6342 | 6268 | |
a70d6342 | 6269 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
ebfd146a IR |
6270 | return false; |
6271 | ||
66c16fd9 RB |
6272 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
6273 | && ! vec_stmt) | |
ebfd146a IR |
6274 | return false; |
6275 | ||
6276 | /* Is vectorizable store? */ | |
6277 | ||
c3a8f964 | 6278 | tree mask = NULL_TREE, mask_vectype = NULL_TREE; |
86a91c0a | 6279 | if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt)) |
c3a8f964 | 6280 | { |
beb456c3 | 6281 | tree scalar_dest = gimple_assign_lhs (assign); |
c3a8f964 RS |
6282 | if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR |
6283 | && is_pattern_stmt_p (stmt_info)) | |
6284 | scalar_dest = TREE_OPERAND (scalar_dest, 0); | |
6285 | if (TREE_CODE (scalar_dest) != ARRAY_REF | |
6286 | && TREE_CODE (scalar_dest) != BIT_FIELD_REF | |
6287 | && TREE_CODE (scalar_dest) != INDIRECT_REF | |
6288 | && TREE_CODE (scalar_dest) != COMPONENT_REF | |
6289 | && TREE_CODE (scalar_dest) != IMAGPART_EXPR | |
6290 | && TREE_CODE (scalar_dest) != REALPART_EXPR | |
6291 | && TREE_CODE (scalar_dest) != MEM_REF) | |
6292 | return false; | |
6293 | } | |
6294 | else | |
6295 | { | |
86a91c0a | 6296 | gcall *call = dyn_cast <gcall *> (stmt_info->stmt); |
f307441a RS |
6297 | if (!call || !gimple_call_internal_p (call)) |
6298 | return false; | |
6299 | ||
6300 | internal_fn ifn = gimple_call_internal_fn (call); | |
6301 | if (!internal_store_fn_p (ifn)) | |
c3a8f964 | 6302 | return false; |
ebfd146a | 6303 | |
c3a8f964 RS |
6304 | if (slp_node != NULL) |
6305 | { | |
6306 | if (dump_enabled_p ()) | |
6307 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6308 | "SLP of masked stores not supported.\n"); | |
6309 | return false; | |
6310 | } | |
6311 | ||
f307441a RS |
6312 | int mask_index = internal_fn_mask_index (ifn); |
6313 | if (mask_index >= 0) | |
6314 | { | |
6315 | mask = gimple_call_arg (call, mask_index); | |
86a91c0a | 6316 | if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt, |
929b4411 | 6317 | &mask_vectype)) |
f307441a RS |
6318 | return false; |
6319 | } | |
c3a8f964 RS |
6320 | } |
6321 | ||
86a91c0a | 6322 | op = vect_get_store_rhs (stmt_info); |
ebfd146a | 6323 | |
fce57248 RS |
6324 | /* Cannot have hybrid store SLP -- that would mean storing to the |
6325 | same location twice. */ | |
6326 | gcc_assert (slp == PURE_SLP_STMT (stmt_info)); | |
6327 | ||
f4d09712 | 6328 | tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE; |
4d694b27 | 6329 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
465c8c19 JJ |
6330 | |
6331 | if (loop_vinfo) | |
b17dc4d4 RB |
6332 | { |
6333 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
6334 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
6335 | } | |
6336 | else | |
6337 | vf = 1; | |
465c8c19 JJ |
6338 | |
6339 | /* Multiple types in SLP are handled by creating the appropriate number of | |
6340 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in | |
6341 | case of SLP. */ | |
fce57248 | 6342 | if (slp) |
465c8c19 JJ |
6343 | ncopies = 1; |
6344 | else | |
e8f142e2 | 6345 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
465c8c19 JJ |
6346 | |
6347 | gcc_assert (ncopies >= 1); | |
6348 | ||
6349 | /* FORNOW. This restriction should be relaxed. */ | |
86a91c0a | 6350 | if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1) |
465c8c19 JJ |
6351 | { |
6352 | if (dump_enabled_p ()) | |
6353 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6354 | "multiple types in nested loop.\n"); | |
6355 | return false; | |
6356 | } | |
6357 | ||
86a91c0a | 6358 | if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type)) |
f4d09712 KY |
6359 | return false; |
6360 | ||
272c6793 | 6361 | elem_type = TREE_TYPE (vectype); |
ebfd146a | 6362 | vec_mode = TYPE_MODE (vectype); |
7b7b1813 | 6363 | |
ebfd146a IR |
6364 | if (!STMT_VINFO_DATA_REF (stmt_info)) |
6365 | return false; | |
6366 | ||
2de001ee | 6367 | vect_memory_access_type memory_access_type; |
86a91c0a | 6368 | if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies, |
2de001ee RS |
6369 | &memory_access_type, &gs_info)) |
6370 | return false; | |
3bab6342 | 6371 | |
c3a8f964 RS |
6372 | if (mask) |
6373 | { | |
7e11fc7f RS |
6374 | if (memory_access_type == VMAT_CONTIGUOUS) |
6375 | { | |
6376 | if (!VECTOR_MODE_P (vec_mode) | |
6377 | || !can_vec_mask_load_store_p (vec_mode, | |
6378 | TYPE_MODE (mask_vectype), false)) | |
6379 | return false; | |
6380 | } | |
f307441a RS |
6381 | else if (memory_access_type != VMAT_LOAD_STORE_LANES |
6382 | && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl)) | |
c3a8f964 RS |
6383 | { |
6384 | if (dump_enabled_p ()) | |
6385 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
6386 | "unsupported access type for masked store.\n"); | |
6387 | return false; | |
6388 | } | |
c3a8f964 RS |
6389 | } |
6390 | else | |
6391 | { | |
6392 | /* FORNOW. In some cases we can vectorize even if the data-type is | |
6393 | not supported (e.g. array initialization with 0). */ | |
6394 | if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing) | |
6395 | return false; | |
6396 | } | |
6397 | ||
89fa689a | 6398 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL; |
f307441a | 6399 | grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
b5ec4de7 RS |
6400 | && memory_access_type != VMAT_GATHER_SCATTER |
6401 | && (slp || memory_access_type != VMAT_CONTIGUOUS)); | |
7cfb4d93 RS |
6402 | if (grouped_store) |
6403 | { | |
bffb8014 | 6404 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
89fa689a | 6405 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
bffb8014 | 6406 | group_size = DR_GROUP_SIZE (first_stmt_info); |
7cfb4d93 RS |
6407 | } |
6408 | else | |
6409 | { | |
bffb8014 | 6410 | first_stmt_info = stmt_info; |
89fa689a | 6411 | first_dr_info = dr_info; |
7cfb4d93 RS |
6412 | group_size = vec_num = 1; |
6413 | } | |
6414 | ||
ebfd146a IR |
6415 | if (!vec_stmt) /* transformation not required. */ |
6416 | { | |
2de001ee | 6417 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; |
7cfb4d93 RS |
6418 | |
6419 | if (loop_vinfo | |
6420 | && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) | |
6421 | check_load_store_masking (loop_vinfo, vectype, vls_type, group_size, | |
bfaa08b7 | 6422 | memory_access_type, &gs_info); |
7cfb4d93 | 6423 | |
ebfd146a | 6424 | STMT_VINFO_TYPE (stmt_info) = store_vec_info_type; |
68435eb2 RB |
6425 | vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type, |
6426 | vls_type, slp_node, cost_vec); | |
ebfd146a IR |
6427 | return true; |
6428 | } | |
2de001ee | 6429 | gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); |
ebfd146a | 6430 | |
67b8dbac | 6431 | /* Transform. */ |
ebfd146a | 6432 | |
89fa689a | 6433 | ensure_base_align (dr_info); |
c716e67f | 6434 | |
f307441a | 6435 | if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
3bab6342 | 6436 | { |
c3a8f964 | 6437 | tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src; |
134c85ca | 6438 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
6439 | tree rettype, srctype, ptrtype, idxtype, masktype, scaletype; |
6440 | tree ptr, mask, var, scale, perm_mask = NULL_TREE; | |
6441 | edge pe = loop_preheader_edge (loop); | |
6442 | gimple_seq seq; | |
6443 | basic_block new_bb; | |
6444 | enum { NARROW, NONE, WIDEN } modifier; | |
4d694b27 RS |
6445 | poly_uint64 scatter_off_nunits |
6446 | = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype); | |
3bab6342 | 6447 | |
4d694b27 | 6448 | if (known_eq (nunits, scatter_off_nunits)) |
3bab6342 | 6449 | modifier = NONE; |
4d694b27 | 6450 | else if (known_eq (nunits * 2, scatter_off_nunits)) |
3bab6342 | 6451 | { |
3bab6342 AT |
6452 | modifier = WIDEN; |
6453 | ||
4d694b27 RS |
6454 | /* Currently gathers and scatters are only supported for |
6455 | fixed-length vectors. */ | |
6456 | unsigned int count = scatter_off_nunits.to_constant (); | |
6457 | vec_perm_builder sel (count, count, 1); | |
6458 | for (i = 0; i < (unsigned int) count; ++i) | |
6459 | sel.quick_push (i | (count / 2)); | |
3bab6342 | 6460 | |
4d694b27 | 6461 | vec_perm_indices indices (sel, 1, count); |
e3342de4 RS |
6462 | perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, |
6463 | indices); | |
3bab6342 AT |
6464 | gcc_assert (perm_mask != NULL_TREE); |
6465 | } | |
4d694b27 | 6466 | else if (known_eq (nunits, scatter_off_nunits * 2)) |
3bab6342 | 6467 | { |
3bab6342 AT |
6468 | modifier = NARROW; |
6469 | ||
4d694b27 RS |
6470 | /* Currently gathers and scatters are only supported for |
6471 | fixed-length vectors. */ | |
6472 | unsigned int count = nunits.to_constant (); | |
6473 | vec_perm_builder sel (count, count, 1); | |
6474 | for (i = 0; i < (unsigned int) count; ++i) | |
6475 | sel.quick_push (i | (count / 2)); | |
3bab6342 | 6476 | |
4d694b27 | 6477 | vec_perm_indices indices (sel, 2, count); |
e3342de4 | 6478 | perm_mask = vect_gen_perm_mask_checked (vectype, indices); |
3bab6342 AT |
6479 | gcc_assert (perm_mask != NULL_TREE); |
6480 | ncopies *= 2; | |
6481 | } | |
6482 | else | |
6483 | gcc_unreachable (); | |
6484 | ||
134c85ca | 6485 | rettype = TREE_TYPE (TREE_TYPE (gs_info.decl)); |
3bab6342 AT |
6486 | ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); |
6487 | masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6488 | idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6489 | srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); | |
6490 | scaletype = TREE_VALUE (arglist); | |
6491 | ||
6492 | gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE | |
6493 | && TREE_CODE (rettype) == VOID_TYPE); | |
6494 | ||
134c85ca | 6495 | ptr = fold_convert (ptrtype, gs_info.base); |
3bab6342 AT |
6496 | if (!is_gimple_min_invariant (ptr)) |
6497 | { | |
6498 | ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); | |
6499 | new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); | |
6500 | gcc_assert (!new_bb); | |
6501 | } | |
6502 | ||
6503 | /* Currently we support only unconditional scatter stores, | |
6504 | so mask should be all ones. */ | |
6505 | mask = build_int_cst (masktype, -1); | |
86a91c0a | 6506 | mask = vect_init_vector (stmt_info, mask, masktype, NULL); |
3bab6342 | 6507 | |
134c85ca | 6508 | scale = build_int_cst (scaletype, gs_info.scale); |
3bab6342 AT |
6509 | |
6510 | prev_stmt_info = NULL; | |
6511 | for (j = 0; j < ncopies; ++j) | |
6512 | { | |
6513 | if (j == 0) | |
6514 | { | |
6515 | src = vec_oprnd1 | |
86a91c0a | 6516 | = vect_get_vec_def_for_operand (op, stmt_info); |
3bab6342 | 6517 | op = vec_oprnd0 |
86a91c0a | 6518 | = vect_get_vec_def_for_operand (gs_info.offset, stmt_info); |
3bab6342 AT |
6519 | } |
6520 | else if (modifier != NONE && (j & 1)) | |
6521 | { | |
6522 | if (modifier == WIDEN) | |
6523 | { | |
6524 | src = vec_oprnd1 | |
e4057a39 | 6525 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
3bab6342 | 6526 | op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask, |
86a91c0a | 6527 | stmt_info, gsi); |
3bab6342 AT |
6528 | } |
6529 | else if (modifier == NARROW) | |
6530 | { | |
6531 | src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask, | |
86a91c0a | 6532 | stmt_info, gsi); |
3bab6342 | 6533 | op = vec_oprnd0 |
e4057a39 | 6534 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
3bab6342 AT |
6535 | } |
6536 | else | |
6537 | gcc_unreachable (); | |
6538 | } | |
6539 | else | |
6540 | { | |
6541 | src = vec_oprnd1 | |
e4057a39 | 6542 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1); |
3bab6342 | 6543 | op = vec_oprnd0 |
e4057a39 | 6544 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0); |
3bab6342 AT |
6545 | } |
6546 | ||
6547 | if (!useless_type_conversion_p (srctype, TREE_TYPE (src))) | |
6548 | { | |
928686b1 RS |
6549 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)), |
6550 | TYPE_VECTOR_SUBPARTS (srctype))); | |
0e22bb5a | 6551 | var = vect_get_new_ssa_name (srctype, vect_simple_var); |
3bab6342 | 6552 | src = build1 (VIEW_CONVERT_EXPR, srctype, src); |
e1bd7296 RS |
6553 | gassign *new_stmt |
6554 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, src); | |
86a91c0a | 6555 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 AT |
6556 | src = var; |
6557 | } | |
6558 | ||
6559 | if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) | |
6560 | { | |
928686b1 RS |
6561 | gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), |
6562 | TYPE_VECTOR_SUBPARTS (idxtype))); | |
0e22bb5a | 6563 | var = vect_get_new_ssa_name (idxtype, vect_simple_var); |
3bab6342 | 6564 | op = build1 (VIEW_CONVERT_EXPR, idxtype, op); |
e1bd7296 RS |
6565 | gassign *new_stmt |
6566 | = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); | |
86a91c0a | 6567 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 AT |
6568 | op = var; |
6569 | } | |
6570 | ||
e1bd7296 | 6571 | gcall *new_stmt |
134c85ca | 6572 | = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale); |
e1bd7296 | 6573 | stmt_vec_info new_stmt_info |
86a91c0a | 6574 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
3bab6342 | 6575 | |
ddf98a96 | 6576 | if (prev_stmt_info == NULL) |
e1bd7296 | 6577 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
3bab6342 | 6578 | else |
e1bd7296 RS |
6579 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
6580 | prev_stmt_info = new_stmt_info; | |
3bab6342 AT |
6581 | } |
6582 | return true; | |
6583 | } | |
6584 | ||
f307441a | 6585 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
bffb8014 | 6586 | DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++; |
ebfd146a | 6587 | |
f307441a RS |
6588 | if (grouped_store) |
6589 | { | |
ebfd146a | 6590 | /* FORNOW */ |
86a91c0a | 6591 | gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info)); |
ebfd146a IR |
6592 | |
6593 | /* We vectorize all the stmts of the interleaving group when we | |
6594 | reach the last stmt in the group. */ | |
bffb8014 RS |
6595 | if (DR_GROUP_STORE_COUNT (first_stmt_info) |
6596 | < DR_GROUP_SIZE (first_stmt_info) | |
ebfd146a IR |
6597 | && !slp) |
6598 | { | |
6599 | *vec_stmt = NULL; | |
6600 | return true; | |
6601 | } | |
6602 | ||
6603 | if (slp) | |
4b5caab7 | 6604 | { |
0d0293ac | 6605 | grouped_store = false; |
4b5caab7 IR |
6606 | /* VEC_NUM is the number of vect stmts to be created for this |
6607 | group. */ | |
6608 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
bffb8014 RS |
6609 | first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
6610 | gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info) | |
6611 | == first_stmt_info); | |
89fa689a | 6612 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
bffb8014 | 6613 | op = vect_get_store_rhs (first_stmt_info); |
4b5caab7 | 6614 | } |
ebfd146a | 6615 | else |
4b5caab7 IR |
6616 | /* VEC_NUM is the number of vect stmts to be created for this |
6617 | group. */ | |
ebfd146a | 6618 | vec_num = group_size; |
44fc7854 | 6619 | |
bffb8014 | 6620 | ref_type = get_group_alias_ptr_type (first_stmt_info); |
ebfd146a | 6621 | } |
b8698a0f | 6622 | else |
89fa689a | 6623 | ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); |
b8698a0f | 6624 | |
73fbfcad | 6625 | if (dump_enabled_p ()) |
78c60e3d | 6626 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 6627 | "transform store. ncopies = %d\n", ncopies); |
ebfd146a | 6628 | |
2de001ee RS |
6629 | if (memory_access_type == VMAT_ELEMENTWISE |
6630 | || memory_access_type == VMAT_STRIDED_SLP) | |
f2e2a985 MM |
6631 | { |
6632 | gimple_stmt_iterator incr_gsi; | |
6633 | bool insert_after; | |
355fe088 | 6634 | gimple *incr; |
f2e2a985 MM |
6635 | tree offvar; |
6636 | tree ivstep; | |
6637 | tree running_off; | |
f2e2a985 MM |
6638 | tree stride_base, stride_step, alias_off; |
6639 | tree vec_oprnd; | |
f502d50e | 6640 | unsigned int g; |
4d694b27 RS |
6641 | /* Checked by get_load_store_type. */ |
6642 | unsigned int const_nunits = nunits.to_constant (); | |
f2e2a985 | 6643 | |
7cfb4d93 | 6644 | gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); |
86a91c0a | 6645 | gcc_assert (!nested_in_vect_loop_p (loop, stmt_info)); |
f2e2a985 MM |
6646 | |
6647 | stride_base | |
6648 | = fold_build_pointer_plus | |
89fa689a | 6649 | (DR_BASE_ADDRESS (first_dr_info->dr), |
f2e2a985 | 6650 | size_binop (PLUS_EXPR, |
89fa689a RS |
6651 | convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)), |
6652 | convert_to_ptrofftype (DR_INIT (first_dr_info->dr)))); | |
6653 | stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr)); | |
f2e2a985 MM |
6654 | |
6655 | /* For a store with loop-invariant (but other than power-of-2) | |
6656 | stride (i.e. not a grouped access) like so: | |
6657 | ||
6658 | for (i = 0; i < n; i += stride) | |
6659 | array[i] = ...; | |
6660 | ||
6661 | we generate a new induction variable and new stores from | |
6662 | the components of the (vectorized) rhs: | |
6663 | ||
6664 | for (j = 0; ; j += VF*stride) | |
6665 | vectemp = ...; | |
6666 | tmp1 = vectemp[0]; | |
6667 | array[j] = tmp1; | |
6668 | tmp2 = vectemp[1]; | |
6669 | array[j + stride] = tmp2; | |
6670 | ... | |
6671 | */ | |
6672 | ||
4d694b27 | 6673 | unsigned nstores = const_nunits; |
b17dc4d4 | 6674 | unsigned lnel = 1; |
cee62fee | 6675 | tree ltype = elem_type; |
04199738 | 6676 | tree lvectype = vectype; |
cee62fee MM |
6677 | if (slp) |
6678 | { | |
4d694b27 RS |
6679 | if (group_size < const_nunits |
6680 | && const_nunits % group_size == 0) | |
b17dc4d4 | 6681 | { |
4d694b27 | 6682 | nstores = const_nunits / group_size; |
b17dc4d4 RB |
6683 | lnel = group_size; |
6684 | ltype = build_vector_type (elem_type, group_size); | |
04199738 RB |
6685 | lvectype = vectype; |
6686 | ||
6687 | /* First check if vec_extract optab doesn't support extraction | |
6688 | of vector elts directly. */ | |
b397965c | 6689 | scalar_mode elmode = SCALAR_TYPE_MODE (elem_type); |
9da15d40 RS |
6690 | machine_mode vmode; |
6691 | if (!mode_for_vector (elmode, group_size).exists (&vmode) | |
6692 | || !VECTOR_MODE_P (vmode) | |
414fef4e | 6693 | || !targetm.vector_mode_supported_p (vmode) |
04199738 RB |
6694 | || (convert_optab_handler (vec_extract_optab, |
6695 | TYPE_MODE (vectype), vmode) | |
6696 | == CODE_FOR_nothing)) | |
6697 | { | |
6698 | /* Try to avoid emitting an extract of vector elements | |
6699 | by performing the extracts using an integer type of the | |
6700 | same size, extracting from a vector of those and then | |
6701 | re-interpreting it as the original vector type if | |
6702 | supported. */ | |
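		 /* Illustrative worked example (an assumption for clarity, not part
		    of the blamed source): with a V16QI vectype and SLP group_size 4,
		    LSIZE below is 32 bits, so each group of four QImode elements is
		    extracted as a single SImode value from a V4SI view of the vector,
		    giving four stores per vector instead of sixteen.  */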
6703 | unsigned lsize | |
6704 | = group_size * GET_MODE_BITSIZE (elmode); | |
fffbab82 | 6705 | elmode = int_mode_for_size (lsize, 0).require (); |
4d694b27 | 6706 | unsigned int lnunits = const_nunits / group_size; |
04199738 RB |
6707 | /* If we can't construct such a vector fall back to |
6708 | element extracts from the original vector type and | |
6709 | element size stores. */ | |
4d694b27 | 6710 | if (mode_for_vector (elmode, lnunits).exists (&vmode) |
9da15d40 | 6711 | && VECTOR_MODE_P (vmode) |
414fef4e | 6712 | && targetm.vector_mode_supported_p (vmode) |
04199738 RB |
6713 | && (convert_optab_handler (vec_extract_optab, |
6714 | vmode, elmode) | |
6715 | != CODE_FOR_nothing)) | |
6716 | { | |
4d694b27 | 6717 | nstores = lnunits; |
04199738 RB |
6718 | lnel = group_size; |
6719 | ltype = build_nonstandard_integer_type (lsize, 1); | |
6720 | lvectype = build_vector_type (ltype, nstores); | |
6721 | } | |
6722 | /* Else fall back to vector extraction anyway. | |
6723 | Fewer stores are more important than avoiding spilling | |
6724 | of the vector we extract from. Compared to the | |
6725 | construction case in vectorizable_load no store-forwarding | |
6726 | issue exists here for reasonable archs. */ | |
6727 | } | |
b17dc4d4 | 6728 | } |
4d694b27 RS |
6729 | else if (group_size >= const_nunits |
6730 | && group_size % const_nunits == 0) | |
b17dc4d4 RB |
6731 | { |
6732 | nstores = 1; | |
4d694b27 | 6733 | lnel = const_nunits; |
b17dc4d4 | 6734 | ltype = vectype; |
04199738 | 6735 | lvectype = vectype; |
b17dc4d4 | 6736 | } |
cee62fee MM |
6737 | ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type)); |
6738 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
6739 | } | |
6740 | ||
f2e2a985 MM |
6741 | ivstep = stride_step; |
6742 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep, | |
b17dc4d4 | 6743 | build_int_cst (TREE_TYPE (ivstep), vf)); |
f2e2a985 MM |
6744 | |
6745 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
6746 | ||
b210f45f RB |
6747 | stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base); |
6748 | ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep); | |
f2e2a985 MM |
6749 | create_iv (stride_base, ivstep, NULL, |
6750 | loop, &incr_gsi, insert_after, | |
6751 | &offvar, NULL); | |
6752 | incr = gsi_stmt (incr_gsi); | |
4fbeb363 | 6753 | loop_vinfo->add_stmt (incr); |
f2e2a985 | 6754 | |
b210f45f | 6755 | stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step); |
f2e2a985 MM |
6756 | |
6757 | prev_stmt_info = NULL; | |
44fc7854 | 6758 | alias_off = build_int_cst (ref_type, 0); |
bffb8014 | 6759 | stmt_vec_info next_stmt_info = first_stmt_info; |
f502d50e | 6760 | for (g = 0; g < group_size; g++) |
f2e2a985 | 6761 | { |
f502d50e MM |
6762 | running_off = offvar; |
6763 | if (g) | |
f2e2a985 | 6764 | { |
f502d50e MM |
6765 | tree size = TYPE_SIZE_UNIT (ltype); |
6766 | tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g), | |
f2e2a985 | 6767 | size); |
f502d50e | 6768 | tree newoff = copy_ssa_name (running_off, NULL); |
f2e2a985 | 6769 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, |
f502d50e | 6770 | running_off, pos); |
86a91c0a | 6771 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
f2e2a985 | 6772 | running_off = newoff; |
f502d50e | 6773 | } |
b17dc4d4 RB |
6774 | unsigned int group_el = 0; |
6775 | unsigned HOST_WIDE_INT | |
6776 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
f502d50e MM |
6777 | for (j = 0; j < ncopies; j++) |
6778 | { | |
c3a8f964 | 6779 | /* We've set op and dt above, from vect_get_store_rhs, |
bffb8014 | 6780 | and first_stmt_info == stmt_info. */ |
f502d50e MM |
6781 | if (j == 0) |
6782 | { | |
6783 | if (slp) | |
6784 | { | |
86a91c0a RS |
6785 | vect_get_vec_defs (op, NULL_TREE, stmt_info, |
6786 | &vec_oprnds, NULL, slp_node); | |
f502d50e MM |
6787 | vec_oprnd = vec_oprnds[0]; |
6788 | } | |
6789 | else | |
6790 | { | |
bffb8014 RS |
6791 | op = vect_get_store_rhs (next_stmt_info); |
6792 | vec_oprnd = vect_get_vec_def_for_operand | |
6793 | (op, next_stmt_info); | |
f502d50e MM |
6794 | } |
6795 | } | |
f2e2a985 | 6796 | else |
f502d50e MM |
6797 | { |
6798 | if (slp) | |
6799 | vec_oprnd = vec_oprnds[j]; | |
6800 | else | |
e4057a39 RS |
6801 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, |
6802 | vec_oprnd); | |
f502d50e | 6803 | } |
04199738 RB |
6804 | /* Pun the vector to extract from if necessary. */ |
6805 | if (lvectype != vectype) | |
6806 | { | |
6807 | tree tem = make_ssa_name (lvectype); | |
6808 | gimple *pun | |
6809 | = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR, | |
6810 | lvectype, vec_oprnd)); | |
86a91c0a | 6811 | vect_finish_stmt_generation (stmt_info, pun, gsi); |
04199738 RB |
6812 | vec_oprnd = tem; |
6813 | } | |
f502d50e MM |
6814 | for (i = 0; i < nstores; i++) |
6815 | { | |
6816 | tree newref, newoff; | |
355fe088 | 6817 | gimple *incr, *assign; |
f502d50e MM |
6818 | tree size = TYPE_SIZE (ltype); |
6819 | /* Extract the i'th component. */ | |
6820 | tree pos = fold_build2 (MULT_EXPR, bitsizetype, | |
6821 | bitsize_int (i), size); | |
6822 | tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd, | |
6823 | size, pos); | |
6824 | ||
6825 | elem = force_gimple_operand_gsi (gsi, elem, true, | |
6826 | NULL_TREE, true, | |
6827 | GSI_SAME_STMT); | |
6828 | ||
b17dc4d4 RB |
6829 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
6830 | group_el * elsz); | |
f502d50e | 6831 | newref = build2 (MEM_REF, ltype, |
b17dc4d4 | 6832 | running_off, this_off); |
89fa689a | 6833 | vect_copy_ref_info (newref, DR_REF (first_dr_info->dr)); |
f502d50e MM |
6834 | |
6835 | /* And store it to *running_off. */ | |
6836 | assign = gimple_build_assign (newref, elem); | |
e1bd7296 | 6837 | stmt_vec_info assign_info |
86a91c0a | 6838 | = vect_finish_stmt_generation (stmt_info, assign, gsi); |
f502d50e | 6839 | |
b17dc4d4 RB |
6840 | group_el += lnel; |
6841 | if (! slp | |
6842 | || group_el == group_size) | |
6843 | { | |
6844 | newoff = copy_ssa_name (running_off, NULL); | |
6845 | incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
6846 | running_off, stride_step); | |
86a91c0a | 6847 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
f502d50e | 6848 | |
b17dc4d4 RB |
6849 | running_off = newoff; |
6850 | group_el = 0; | |
6851 | } | |
225ce44b RB |
6852 | if (g == group_size - 1 |
6853 | && !slp) | |
f502d50e MM |
6854 | { |
6855 | if (j == 0 && i == 0) | |
225ce44b | 6856 | STMT_VINFO_VEC_STMT (stmt_info) |
e1bd7296 | 6857 | = *vec_stmt = assign_info; |
f502d50e | 6858 | else |
e1bd7296 RS |
6859 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info; |
6860 | prev_stmt_info = assign_info; | |
f502d50e MM |
6861 | } |
6862 | } | |
f2e2a985 | 6863 | } |
bffb8014 | 6864 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
b17dc4d4 RB |
6865 | if (slp) |
6866 | break; | |
f2e2a985 | 6867 | } |
778dd3b6 RB |
6868 | |
6869 | vec_oprnds.release (); | |
f2e2a985 MM |
6870 | return true; |
6871 | } | |
6872 | ||
8c681247 | 6873 | auto_vec<tree> dr_chain (group_size); |
9771b263 | 6874 | oprnds.create (group_size); |
ebfd146a | 6875 | |
89fa689a RS |
6876 | alignment_support_scheme |
6877 | = vect_supportable_dr_alignment (first_dr_info, false); | |
ebfd146a | 6878 | gcc_assert (alignment_support_scheme); |
70088b95 RS |
6879 | vec_loop_masks *loop_masks |
6880 | = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) | |
6881 | ? &LOOP_VINFO_MASKS (loop_vinfo) | |
6882 | : NULL); | |
272c6793 | 6883 | /* Targets with store-lane instructions must not require explicit |
c3a8f964 RS |
6884 | realignment. vect_supportable_dr_alignment always returns either |
6885 | dr_aligned or dr_unaligned_supported for masked operations. */ | |
7cfb4d93 RS |
6886 | gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES |
6887 | && !mask | |
70088b95 | 6888 | && !loop_masks) |
272c6793 RS |
6889 | || alignment_support_scheme == dr_aligned |
6890 | || alignment_support_scheme == dr_unaligned_supported); | |
6891 | ||
62da9e14 RS |
6892 | if (memory_access_type == VMAT_CONTIGUOUS_DOWN |
6893 | || memory_access_type == VMAT_CONTIGUOUS_REVERSE) | |
09dfa495 BM |
6894 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
6895 | ||
f307441a RS |
6896 | tree bump; |
6897 | tree vec_offset = NULL_TREE; | |
6898 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
6899 | { | |
6900 | aggr_type = NULL_TREE; | |
6901 | bump = NULL_TREE; | |
6902 | } | |
6903 | else if (memory_access_type == VMAT_GATHER_SCATTER) | |
6904 | { | |
6905 | aggr_type = elem_type; | |
86a91c0a | 6906 | vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info, |
f307441a RS |
6907 | &bump, &vec_offset); |
6908 | } | |
272c6793 | 6909 | else |
f307441a RS |
6910 | { |
6911 | if (memory_access_type == VMAT_LOAD_STORE_LANES) | |
6912 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); | |
6913 | else | |
6914 | aggr_type = vectype; | |
89fa689a RS |
6915 | bump = vect_get_data_ptr_increment (dr_info, aggr_type, |
6916 | memory_access_type); | |
f307441a | 6917 | } |
ebfd146a | 6918 | |
c3a8f964 RS |
6919 | if (mask) |
6920 | LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true; | |
6921 | ||
ebfd146a IR |
6922 | /* In case the vectorization factor (VF) is bigger than the number |
6923 | of elements that we can fit in a vectype (nunits), we have to generate | |
6924 | more than one vector stmt - i.e. - we need to "unroll" the | |
b8698a0f | 6925 | vector stmt by a factor VF/nunits. For more details see documentation in |
ebfd146a IR |
6926 | vect_get_vec_def_for_copy_stmt. */ |
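  /* For instance (illustrative numbers, not from the blamed source): with
     V4SI vectors and a vectorization factor of 8, ncopies == 8 / 4 == 2,
     so two vector stores are emitted per scalar store.  */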
6927 | ||
0d0293ac | 6928 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
6929 | |
6930 | S1: &base + 2 = x2 | |
6931 | S2: &base = x0 | |
6932 | S3: &base + 1 = x1 | |
6933 | S4: &base + 3 = x3 | |
6934 | ||
6935 | We create vectorized stores starting from base address (the access of the | |
6936 | first stmt in the chain (S2 in the above example), when the last store stmt | |
6937 | of the chain (S4) is reached: | |
6938 | ||
6939 | VS1: &base = vx2 | |
6940 | VS2: &base + vec_size*1 = vx0 | |
6941 | VS3: &base + vec_size*2 = vx1 | |
6942 | VS4: &base + vec_size*3 = vx3 | |
6943 | ||
6944 | Then permutation statements are generated: | |
6945 | ||
3fcc1b55 JJ |
6946 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} > |
6947 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} > | |
ebfd146a | 6948 | ... |
b8698a0f | 6949 | |
ebfd146a IR |
6950 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts |
6951 | (the order of the data-refs in the output of vect_permute_store_chain | |
6952 | corresponds to the order of scalar stmts in the interleaving chain - see | |
6953 | the documentation of vect_permute_store_chain()). | |
6954 | ||
6955 | In case of both multiple types and interleaving, above vector stores and | |
ff802fa1 | 6956 | permutation stmts are created for every copy. The result vector stmts are |
ebfd146a | 6957 | put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding |
b8698a0f | 6958 | STMT_VINFO_RELATED_STMT for the next copies. |
ebfd146a IR |
6959 | */ |
6960 | ||
6961 | prev_stmt_info = NULL; | |
c3a8f964 | 6962 | tree vec_mask = NULL_TREE; |
ebfd146a IR |
6963 | for (j = 0; j < ncopies; j++) |
6964 | { | |
e1bd7296 | 6965 | stmt_vec_info new_stmt_info; |
ebfd146a IR |
6966 | if (j == 0) |
6967 | { | |
6968 | if (slp) | |
6969 | { | |
6970 | /* Get vectorized arguments for SLP_NODE. */ | |
86a91c0a RS |
6971 | vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds, |
6972 | NULL, slp_node); | |
ebfd146a | 6973 | |
9771b263 | 6974 | vec_oprnd = vec_oprnds[0]; |
ebfd146a IR |
6975 | } |
6976 | else | |
6977 | { | |
b8698a0f L |
6978 | /* For interleaved stores we collect vectorized defs for all the |
6979 | stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then | |
6980 | used as an input to vect_permute_store_chain(), and OPRNDS as | |
ebfd146a IR |
6981 | an input to vect_get_vec_def_for_stmt_copy() for the next copy. |
6982 | ||
2c53b149 | 6983 | If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a | 6984 | OPRNDS are of size 1. */ |
bffb8014 | 6985 | stmt_vec_info next_stmt_info = first_stmt_info; |
ebfd146a IR |
6986 | for (i = 0; i < group_size; i++) |
6987 | { | |
b8698a0f | 6988 | /* Since gaps are not supported for interleaved stores, |
2c53b149 | 6989 | DR_GROUP_SIZE is the exact number of stmts in the chain. |
bffb8014 RS |
6990 | Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case |
6991 | that there is no interleaving, DR_GROUP_SIZE is 1, | |
6992 | and only one iteration of the loop will be executed. */ | |
6993 | op = vect_get_store_rhs (next_stmt_info); | |
6994 | vec_oprnd = vect_get_vec_def_for_operand | |
6995 | (op, next_stmt_info); | |
9771b263 DN |
6996 | dr_chain.quick_push (vec_oprnd); |
6997 | oprnds.quick_push (vec_oprnd); | |
bffb8014 | 6998 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
ebfd146a | 6999 | } |
c3a8f964 | 7000 | if (mask) |
86a91c0a | 7001 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, |
c3a8f964 | 7002 | mask_vectype); |
ebfd146a IR |
7003 | } |
7004 | ||
7005 | /* We should have caught mismatched types earlier. */ | |
7006 | gcc_assert (useless_type_conversion_p (vectype, | |
7007 | TREE_TYPE (vec_oprnd))); | |
74bf76ed JJ |
7008 | bool simd_lane_access_p |
7009 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
7010 | if (simd_lane_access_p | |
89fa689a RS |
7011 | && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR |
7012 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) | |
7013 | && integer_zerop (DR_OFFSET (first_dr_info->dr)) | |
7014 | && integer_zerop (DR_INIT (first_dr_info->dr)) | |
74bf76ed | 7015 | && alias_sets_conflict_p (get_alias_set (aggr_type), |
44fc7854 | 7016 | get_alias_set (TREE_TYPE (ref_type)))) |
74bf76ed | 7017 | { |
89fa689a | 7018 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); |
44fc7854 | 7019 | dataref_offset = build_int_cst (ref_type, 0); |
74bf76ed | 7020 | } |
f307441a | 7021 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
2d4bca81 RS |
7022 | vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, |
7023 | &dataref_ptr, &vec_offset); | |
74bf76ed JJ |
7024 | else |
7025 | dataref_ptr | |
bffb8014 | 7026 | = vect_create_data_ref_ptr (first_stmt_info, aggr_type, |
74bf76ed | 7027 | simd_lane_access_p ? loop : NULL, |
09dfa495 | 7028 | offset, &dummy, gsi, &ptr_incr, |
2d4bca81 | 7029 | simd_lane_access_p, NULL_TREE, bump); |
ebfd146a | 7030 | } |
b8698a0f | 7031 | else |
ebfd146a | 7032 | { |
b8698a0f L |
7033 | /* For interleaved stores we created vectorized defs for all the |
7034 | defs stored in OPRNDS in the previous iteration (previous copy). | |
7035 | DR_CHAIN is then used as an input to vect_permute_store_chain(), | |
ebfd146a IR |
7036 | and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the |
7037 | next copy. | |
2c53b149 | 7038 | If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and |
ebfd146a IR |
7039 | OPRNDS are of size 1. */ |
7040 | for (i = 0; i < group_size; i++) | |
7041 | { | |
9771b263 | 7042 | op = oprnds[i]; |
e4057a39 | 7043 | vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op); |
9771b263 DN |
7044 | dr_chain[i] = vec_oprnd; |
7045 | oprnds[i] = vec_oprnd; | |
ebfd146a | 7046 | } |
c3a8f964 | 7047 | if (mask) |
e4057a39 | 7048 | vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask); |
74bf76ed JJ |
7049 | if (dataref_offset) |
7050 | dataref_offset | |
f307441a RS |
7051 | = int_const_binop (PLUS_EXPR, dataref_offset, bump); |
7052 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
e4057a39 | 7053 | vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); |
74bf76ed | 7054 | else |
86a91c0a RS |
7055 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
7056 | stmt_info, bump); | |
ebfd146a IR |
7057 | } |
7058 | ||
2de001ee | 7059 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 7060 | { |
272c6793 | 7061 | tree vec_array; |
267d3070 | 7062 | |
3ba4ff41 | 7063 | /* Get an array into which we can store the individual vectors. */ |
272c6793 | 7064 | vec_array = create_vector_array (vectype, vec_num); |
3ba4ff41 RS |
7065 | |
7066 | /* Invalidate the current contents of VEC_ARRAY. This should | |
7067 | become an RTL clobber too, which prevents the vector registers | |
7068 | from being upward-exposed. */ | |
86a91c0a | 7069 | vect_clobber_variable (stmt_info, gsi, vec_array); |
3ba4ff41 RS |
7070 | |
7071 | /* Store the individual vectors into the array. */ | |
272c6793 | 7072 | for (i = 0; i < vec_num; i++) |
c2d7ab2a | 7073 | { |
9771b263 | 7074 | vec_oprnd = dr_chain[i]; |
86a91c0a | 7075 | write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i); |
267d3070 | 7076 | } |
b8698a0f | 7077 | |
7cfb4d93 | 7078 | tree final_mask = NULL; |
70088b95 RS |
7079 | if (loop_masks) |
7080 | final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies, | |
7081 | vectype, j); | |
7cfb4d93 RS |
7082 | if (vec_mask) |
7083 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
7084 | vec_mask, gsi); | |
7085 | ||
7e11fc7f | 7086 | gcall *call; |
7cfb4d93 | 7087 | if (final_mask) |
7e11fc7f RS |
7088 | { |
7089 | /* Emit: | |
7090 | MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK, | |
7091 | VEC_ARRAY). */ | |
7092 | unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype)); | |
7093 | tree alias_ptr = build_int_cst (ref_type, align); | |
7094 | call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4, | |
7095 | dataref_ptr, alias_ptr, | |
7cfb4d93 | 7096 | final_mask, vec_array); |
7e11fc7f RS |
7097 | } |
7098 | else | |
7099 | { | |
7100 | /* Emit: | |
7101 | MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */ | |
7102 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); | |
7103 | call = gimple_build_call_internal (IFN_STORE_LANES, 1, | |
7104 | vec_array); | |
7105 | gimple_call_set_lhs (call, data_ref); | |
7106 | } | |
a844293d | 7107 | gimple_call_set_nothrow (call, true); |
86a91c0a | 7108 | new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); |
3ba4ff41 RS |
7109 | |
7110 | /* Record that VEC_ARRAY is now dead. */ | |
86a91c0a | 7111 | vect_clobber_variable (stmt_info, gsi, vec_array); |
272c6793 RS |
7112 | } |
7113 | else | |
7114 | { | |
e1bd7296 | 7115 | new_stmt_info = NULL; |
0d0293ac | 7116 | if (grouped_store) |
272c6793 | 7117 | { |
b6b9227d JJ |
7118 | if (j == 0) |
7119 | result_chain.create (group_size); | |
272c6793 | 7120 | /* Permute. */ |
86a91c0a | 7121 | vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi, |
272c6793 RS |
7122 | &result_chain); |
7123 | } | |
c2d7ab2a | 7124 | |
bffb8014 | 7125 | stmt_vec_info next_stmt_info = first_stmt_info; |
272c6793 RS |
7126 | for (i = 0; i < vec_num; i++) |
7127 | { | |
644ffefd | 7128 | unsigned align, misalign; |
272c6793 | 7129 | |
7cfb4d93 | 7130 | tree final_mask = NULL_TREE; |
70088b95 RS |
7131 | if (loop_masks) |
7132 | final_mask = vect_get_loop_mask (gsi, loop_masks, | |
7133 | vec_num * ncopies, | |
7cfb4d93 RS |
7134 | vectype, vec_num * j + i); |
7135 | if (vec_mask) | |
7136 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
7137 | vec_mask, gsi); | |
7138 | ||
f307441a RS |
7139 | if (memory_access_type == VMAT_GATHER_SCATTER) |
7140 | { | |
7141 | tree scale = size_int (gs_info.scale); | |
7142 | gcall *call; | |
70088b95 | 7143 | if (loop_masks) |
f307441a RS |
7144 | call = gimple_build_call_internal |
7145 | (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset, | |
7146 | scale, vec_oprnd, final_mask); | |
7147 | else | |
7148 | call = gimple_build_call_internal | |
7149 | (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset, | |
7150 | scale, vec_oprnd); | |
7151 | gimple_call_set_nothrow (call, true); | |
e1bd7296 | 7152 | new_stmt_info |
86a91c0a | 7153 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
f307441a RS |
7154 | break; |
7155 | } | |
7156 | ||
272c6793 RS |
7157 | if (i > 0) |
7158 | /* Bump the vector pointer. */ | |
7159 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
86a91c0a | 7160 | stmt_info, bump); |
272c6793 RS |
7161 | |
7162 | if (slp) | |
9771b263 | 7163 | vec_oprnd = vec_oprnds[i]; |
0d0293ac MM |
7164 | else if (grouped_store) |
7165 | /* For grouped stores vectorized defs are interleaved in | |
272c6793 | 7166 | vect_permute_store_chain(). */ |
9771b263 | 7167 | vec_oprnd = result_chain[i]; |
272c6793 | 7168 | |
89fa689a RS |
7169 | align = DR_TARGET_ALIGNMENT (first_dr_info); |
7170 | if (aligned_access_p (first_dr_info)) | |
644ffefd | 7171 | misalign = 0; |
89fa689a | 7172 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
272c6793 | 7173 | { |
89fa689a | 7174 | align = dr_alignment (vect_dr_behavior (first_dr_info)); |
52639a61 | 7175 | misalign = 0; |
272c6793 RS |
7176 | } |
7177 | else | |
89fa689a | 7178 | misalign = DR_MISALIGNMENT (first_dr_info); |
aed93b23 RB |
7179 | if (dataref_offset == NULL_TREE |
7180 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
7181 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, |
7182 | misalign); | |
c2d7ab2a | 7183 | |
62da9e14 | 7184 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
09dfa495 BM |
7185 | { |
7186 | tree perm_mask = perm_mask_for_reverse (vectype); | |
86a91c0a RS |
7187 | tree perm_dest = vect_create_destination_var |
7188 | (vect_get_store_rhs (stmt_info), vectype); | |
b731b390 | 7189 | tree new_temp = make_ssa_name (perm_dest); |
09dfa495 BM |
7190 | |
7191 | /* Generate the permute statement. */ | |
355fe088 | 7192 | gimple *perm_stmt |
0d0e4a03 JJ |
7193 | = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, |
7194 | vec_oprnd, perm_mask); | |
86a91c0a | 7195 | vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); |
09dfa495 BM |
7196 | |
7197 | perm_stmt = SSA_NAME_DEF_STMT (new_temp); | |
7198 | vec_oprnd = new_temp; | |
7199 | } | |
7200 | ||
272c6793 | 7201 | /* Arguments are ready. Create the new vector stmt. */ |
7cfb4d93 | 7202 | if (final_mask) |
c3a8f964 RS |
7203 | { |
7204 | align = least_bit_hwi (misalign | align); | |
7205 | tree ptr = build_int_cst (ref_type, align); | |
7206 | gcall *call | |
7207 | = gimple_build_call_internal (IFN_MASK_STORE, 4, | |
7208 | dataref_ptr, ptr, | |
7cfb4d93 | 7209 | final_mask, vec_oprnd); |
c3a8f964 | 7210 | gimple_call_set_nothrow (call, true); |
e1bd7296 | 7211 | new_stmt_info |
86a91c0a | 7212 | = vect_finish_stmt_generation (stmt_info, call, gsi); |
c3a8f964 RS |
7213 | } |
7214 | else | |
7215 | { | |
7216 | data_ref = fold_build2 (MEM_REF, vectype, | |
7217 | dataref_ptr, | |
7218 | dataref_offset | |
7219 | ? dataref_offset | |
7220 | : build_int_cst (ref_type, 0)); | |
89fa689a | 7221 | if (aligned_access_p (first_dr_info)) |
c3a8f964 | 7222 | ; |
89fa689a | 7223 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
c3a8f964 RS |
7224 | TREE_TYPE (data_ref) |
7225 | = build_aligned_type (TREE_TYPE (data_ref), | |
7226 | align * BITS_PER_UNIT); | |
7227 | else | |
7228 | TREE_TYPE (data_ref) | |
7229 | = build_aligned_type (TREE_TYPE (data_ref), | |
7230 | TYPE_ALIGN (elem_type)); | |
89fa689a | 7231 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
e1bd7296 RS |
7232 | gassign *new_stmt |
7233 | = gimple_build_assign (data_ref, vec_oprnd); | |
7234 | new_stmt_info | |
86a91c0a | 7235 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
c3a8f964 | 7236 | } |
272c6793 RS |
7237 | |
7238 | if (slp) | |
7239 | continue; | |
7240 | ||
bffb8014 RS |
7241 | next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
7242 | if (!next_stmt_info) | |
272c6793 RS |
7243 | break; |
7244 | } | |
ebfd146a | 7245 | } |
1da0876c RS |
7246 | if (!slp) |
7247 | { | |
7248 | if (j == 0) | |
e1bd7296 | 7249 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
1da0876c | 7250 | else |
e1bd7296 RS |
7251 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
7252 | prev_stmt_info = new_stmt_info; | |
1da0876c | 7253 | } |
ebfd146a IR |
7254 | } |
7255 | ||
9771b263 DN |
7256 | oprnds.release (); |
7257 | result_chain.release (); | |
7258 | vec_oprnds.release (); | |
ebfd146a IR |
7259 | |
7260 | return true; | |
7261 | } | |
7262 | ||
557be5a8 AL |
7263 | /* Given a vector type VECTYPE, turns permutation SEL into the equivalent |
7264 | VECTOR_CST mask. No checks are made that the target platform supports the | |
7ac7e286 | 7265 | mask, so callers may wish to test can_vec_perm_const_p separately, or use |
557be5a8 | 7266 | vect_gen_perm_mask_checked. */ |
a1e53f3f | 7267 | |
3fcc1b55 | 7268 | tree |
4aae3cb3 | 7269 | vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel) |
a1e53f3f | 7270 | { |
b00cb3bf | 7271 | tree mask_type; |
a1e53f3f | 7272 | |
0ecc2b7d RS |
7273 | poly_uint64 nunits = sel.length (); |
7274 | gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype))); | |
b00cb3bf RS |
7275 | |
7276 | mask_type = build_vector_type (ssizetype, nunits); | |
736d0f28 | 7277 | return vec_perm_indices_to_tree (mask_type, sel); |
a1e53f3f L |
7278 | } |
7279 | ||
7ac7e286 | 7280 | /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p, |
cf7aa6a3 | 7281 | i.e. that the target supports the pattern _for arbitrary input vectors_. */ |
557be5a8 AL |
7282 | |
7283 | tree | |
4aae3cb3 | 7284 | vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel) |
557be5a8 | 7285 | { |
7ac7e286 | 7286 | gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel)); |
557be5a8 AL |
7287 | return vect_gen_perm_mask_any (vectype, sel); |
7288 | } | |
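As an illustration of how these two helpers are used together (a sketch modelled on this file's perm_mask_for_reverse, which the reverse-order store and load paths call; the function name below is hypothetical and the snippet is not part of the blamed hunk):

    static tree
    reverse_mask_sketch (tree vectype)
    {
      poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

      /* Encode { nunits-1, nunits-2, ... } with a single stepped pattern.  */
      vec_perm_builder sel (nunits, 1, 3);
      for (int i = 0; i < 3; ++i)
	sel.quick_push (nunits - 1 - i);

      vec_perm_indices indices (sel, 1, nunits);
      /* Check target support before using the checked variant.  */
      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
	return NULL_TREE;
      return vect_gen_perm_mask_checked (vectype, indices);
    }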
7289 | ||
aec7ae7d | 7290 | /* Given vector variables X and Y that were generated for the scalar |
82570274 | 7291 | STMT_INFO, generate instructions to permute the vector elements of X and Y |
aec7ae7d JJ |
7292 | using permutation mask MASK_VEC, insert them at *GSI and return the |
7293 | permuted vector variable. */ | |
a1e53f3f L |
7294 | |
7295 | static tree | |
82570274 | 7296 | permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info, |
aec7ae7d | 7297 | gimple_stmt_iterator *gsi) |
a1e53f3f L |
7298 | { |
7299 | tree vectype = TREE_TYPE (x); | |
aec7ae7d | 7300 | tree perm_dest, data_ref; |
355fe088 | 7301 | gimple *perm_stmt; |
a1e53f3f | 7302 | |
82570274 | 7303 | tree scalar_dest = gimple_get_lhs (stmt_info->stmt); |
7ad429a4 RS |
7304 | if (TREE_CODE (scalar_dest) == SSA_NAME) |
7305 | perm_dest = vect_create_destination_var (scalar_dest, vectype); | |
7306 | else | |
7307 | perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL); | |
b731b390 | 7308 | data_ref = make_ssa_name (perm_dest); |
a1e53f3f L |
7309 | |
7310 | /* Generate the permute statement. */ | |
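  /* E.g. for a four-element reversal the emitted statement has the form
     (illustrative SSA names):  vect_perm_7 = VEC_PERM_EXPR <x_1, y_2, { 3, 2, 1, 0 }>;  */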
0d0e4a03 | 7311 | perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec); |
82570274 | 7312 | vect_finish_stmt_generation (stmt_info, perm_stmt, gsi); |
a1e53f3f L |
7313 | |
7314 | return data_ref; | |
7315 | } | |
7316 | ||
32e8e429 | 7317 | /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP, |
6b916b36 | 7318 | inserting them on the loop's preheader edge. Returns true if we |
32e8e429 | 7319 | were successful in doing so (and thus STMT_INFO can then be moved), |
6b916b36 RB |
7320 | otherwise returns false. */ |
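   /* Illustrative example (not in the blamed source): if the loop body contains

	a_1 = invariant_2 + 4;
	x_3 = MEM[base_4 + a_1];   <-- STMT_INFO, an invariant load

      then the definition of a_1 (whose own operands are defined outside the
      loop) is moved to the loop preheader, after which the caller can emit
      the load itself on the preheader edge.  */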
7321 | ||
7322 | static bool | |
32e8e429 | 7323 | hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop) |
6b916b36 RB |
7324 | { |
7325 | ssa_op_iter i; | |
7326 | tree op; | |
7327 | bool any = false; | |
7328 | ||
32e8e429 | 7329 | FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) |
6b916b36 | 7330 | { |
355fe088 | 7331 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
7332 | if (!gimple_nop_p (def_stmt) |
7333 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
7334 | { | |
7335 | /* Make sure we don't need to recurse. While we could do | |
7336 | so in simple cases, when there are more complex use webs | |
7337 | we don't have an easy way to preserve stmt order to fulfil | |
7338 | dependencies within them. */ | |
7339 | tree op2; | |
7340 | ssa_op_iter i2; | |
d1417442 JJ |
7341 | if (gimple_code (def_stmt) == GIMPLE_PHI) |
7342 | return false; | |
6b916b36 RB |
7343 | FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE) |
7344 | { | |
355fe088 | 7345 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2); |
6b916b36 RB |
7346 | if (!gimple_nop_p (def_stmt2) |
7347 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2))) | |
7348 | return false; | |
7349 | } | |
7350 | any = true; | |
7351 | } | |
7352 | } | |
7353 | ||
7354 | if (!any) | |
7355 | return true; | |
7356 | ||
32e8e429 | 7357 | FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE) |
6b916b36 | 7358 | { |
355fe088 | 7359 | gimple *def_stmt = SSA_NAME_DEF_STMT (op); |
6b916b36 RB |
7360 | if (!gimple_nop_p (def_stmt) |
7361 | && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) | |
7362 | { | |
7363 | gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt); | |
7364 | gsi_remove (&gsi, false); | |
7365 | gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt); | |
7366 | } | |
7367 | } | |
7368 | ||
7369 | return true; | |
7370 | } | |
7371 | ||
ebfd146a IR |
7372 | /* vectorizable_load. |
7373 | ||
32e8e429 RS |
7374 | Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure) | |
7375 | that can be vectorized. | |
7376 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
7377 | stmt to replace it, put it in VEC_STMT, and insert it at GSI. | |
7378 | Return true if STMT_INFO is vectorizable in this way. */ | |
ebfd146a IR |
7379 | |
7380 | static bool | |
32e8e429 | 7381 | vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
7382 | stmt_vec_info *vec_stmt, slp_tree slp_node, |
7383 | slp_instance slp_node_instance, | |
68435eb2 | 7384 | stmt_vector_for_cost *cost_vec) |
ebfd146a IR |
7385 | { |
7386 | tree scalar_dest; | |
7387 | tree vec_dest = NULL; | |
7388 | tree data_ref = NULL; | |
b8698a0f | 7389 | stmt_vec_info prev_stmt_info; |
ebfd146a | 7390 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
a70d6342 | 7391 | struct loop *loop = NULL; |
32e8e429 | 7392 | struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father; |
a70d6342 | 7393 | bool nested_in_vect_loop = false; |
272c6793 | 7394 | tree elem_type; |
ebfd146a | 7395 | tree new_temp; |
ef4bddc2 | 7396 | machine_mode mode; |
ebfd146a IR |
7397 | tree dummy; |
7398 | enum dr_alignment_support alignment_support_scheme; | |
7399 | tree dataref_ptr = NULL_TREE; | |
74bf76ed | 7400 | tree dataref_offset = NULL_TREE; |
355fe088 | 7401 | gimple *ptr_incr = NULL; |
ebfd146a | 7402 | int ncopies; |
4d694b27 RS |
7403 | int i, j; |
7404 | unsigned int group_size; | |
7405 | poly_uint64 group_gap_adj; | |
ebfd146a IR |
7406 | tree msq = NULL_TREE, lsq; |
7407 | tree offset = NULL_TREE; | |
356bbc4c | 7408 | tree byte_offset = NULL_TREE; |
ebfd146a | 7409 | tree realignment_token = NULL_TREE; |
538dd0b7 | 7410 | gphi *phi = NULL; |
6e1aa848 | 7411 | vec<tree> dr_chain = vNULL; |
0d0293ac | 7412 | bool grouped_load = false; |
bffb8014 | 7413 | stmt_vec_info first_stmt_info; |
b9787581 | 7414 | stmt_vec_info first_stmt_info_for_drptr = NULL; |
ebfd146a IR |
7415 | bool compute_in_loop = false; |
7416 | struct loop *at_loop; | |
7417 | int vec_num; | |
7418 | bool slp = (slp_node != NULL); | |
7419 | bool slp_perm = false; | |
a70d6342 | 7420 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
d9f21f6a | 7421 | poly_uint64 vf; |
272c6793 | 7422 | tree aggr_type; |
134c85ca | 7423 | gather_scatter_info gs_info; |
310213d4 | 7424 | vec_info *vinfo = stmt_info->vinfo; |
44fc7854 | 7425 | tree ref_type; |
929b4411 | 7426 | enum vect_def_type mask_dt = vect_unknown_def_type; |
a70d6342 | 7427 | |
465c8c19 JJ |
7428 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
7429 | return false; | |
7430 | ||
66c16fd9 RB |
7431 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
7432 | && ! vec_stmt) | |
465c8c19 JJ |
7433 | return false; |
7434 | ||
c3a8f964 | 7435 | tree mask = NULL_TREE, mask_vectype = NULL_TREE; |
86a91c0a | 7436 | if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt)) |
c3a8f964 | 7437 | { |
beb456c3 | 7438 | scalar_dest = gimple_assign_lhs (assign); |
c3a8f964 RS |
7439 | if (TREE_CODE (scalar_dest) != SSA_NAME) |
7440 | return false; | |
465c8c19 | 7441 | |
beb456c3 | 7442 | tree_code code = gimple_assign_rhs_code (assign); |
c3a8f964 RS |
7443 | if (code != ARRAY_REF |
7444 | && code != BIT_FIELD_REF | |
7445 | && code != INDIRECT_REF | |
7446 | && code != COMPONENT_REF | |
7447 | && code != IMAGPART_EXPR | |
7448 | && code != REALPART_EXPR | |
7449 | && code != MEM_REF | |
7450 | && TREE_CODE_CLASS (code) != tcc_declaration) | |
7451 | return false; | |
7452 | } | |
7453 | else | |
7454 | { | |
86a91c0a | 7455 | gcall *call = dyn_cast <gcall *> (stmt_info->stmt); |
bfaa08b7 RS |
7456 | if (!call || !gimple_call_internal_p (call)) |
7457 | return false; | |
7458 | ||
7459 | internal_fn ifn = gimple_call_internal_fn (call); | |
7460 | if (!internal_load_fn_p (ifn)) | |
c3a8f964 | 7461 | return false; |
465c8c19 | 7462 | |
c3a8f964 RS |
7463 | scalar_dest = gimple_call_lhs (call); |
7464 | if (!scalar_dest) | |
7465 | return false; | |
7466 | ||
7467 | if (slp_node != NULL) | |
7468 | { | |
7469 | if (dump_enabled_p ()) | |
7470 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7471 | "SLP of masked loads not supported.\n"); | |
7472 | return false; | |
7473 | } | |
7474 | ||
bfaa08b7 RS |
7475 | int mask_index = internal_fn_mask_index (ifn); |
7476 | if (mask_index >= 0) | |
7477 | { | |
7478 | mask = gimple_call_arg (call, mask_index); | |
86a91c0a | 7479 | if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt, |
929b4411 | 7480 | &mask_vectype)) |
bfaa08b7 RS |
7481 | return false; |
7482 | } | |
c3a8f964 | 7483 | } |
465c8c19 JJ |
7484 | |
7485 | if (!STMT_VINFO_DATA_REF (stmt_info)) | |
7486 | return false; | |
7487 | ||
7488 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
4d694b27 | 7489 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); |
465c8c19 | 7490 | |
a70d6342 IR |
7491 | if (loop_vinfo) |
7492 | { | |
7493 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
86a91c0a | 7494 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info); |
a70d6342 IR |
7495 | vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); |
7496 | } | |
7497 | else | |
3533e503 | 7498 | vf = 1; |
ebfd146a IR |
7499 | |
7500 | /* Multiple types in SLP are handled by creating the appropriate number of | |
ff802fa1 | 7501 | vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in |
ebfd146a | 7502 | case of SLP. */ |
fce57248 | 7503 | if (slp) |
ebfd146a IR |
7504 | ncopies = 1; |
7505 | else | |
e8f142e2 | 7506 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
ebfd146a IR |
7507 | |
7508 | gcc_assert (ncopies >= 1); | |
7509 | ||
7510 | /* FORNOW. This restriction should be relaxed. */ | |
7511 | if (nested_in_vect_loop && ncopies > 1) | |
7512 | { | |
73fbfcad | 7513 | if (dump_enabled_p ()) |
78c60e3d | 7514 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 7515 | "multiple types in nested loop.\n"); |
ebfd146a IR |
7516 | return false; |
7517 | } | |
7518 | ||
f2556b68 RB |
7519 | /* Invalidate assumptions made by dependence analysis when vectorization |
7520 | on the unrolled body effectively re-orders stmts. */ | |
7521 | if (ncopies > 1 | |
7522 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
d9f21f6a RS |
7523 | && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
7524 | STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
f2556b68 RB |
7525 | { |
7526 | if (dump_enabled_p ()) | |
7527 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7528 | "cannot perform implicit CSE when unrolling " | |
7529 | "with negative dependence distance\n"); | |
7530 | return false; | |
7531 | } | |
7532 | ||
7b7b1813 | 7533 | elem_type = TREE_TYPE (vectype); |
947131ba | 7534 | mode = TYPE_MODE (vectype); |
ebfd146a IR |
7535 | |
7536 | /* FORNOW. In some cases can vectorize even if data-type not supported | |
7537 | (e.g. - data copies). */ | |
947131ba | 7538 | if (optab_handler (mov_optab, mode) == CODE_FOR_nothing) |
ebfd146a | 7539 | { |
73fbfcad | 7540 | if (dump_enabled_p ()) |
78c60e3d | 7541 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 7542 | "Aligned load, but unsupported type.\n"); |
ebfd146a IR |
7543 | return false; |
7544 | } | |
7545 | ||
ebfd146a | 7546 | /* Check if the load is a part of an interleaving chain. */ |
0d0293ac | 7547 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
ebfd146a | 7548 | { |
0d0293ac | 7549 | grouped_load = true; |
ebfd146a | 7550 | /* FORNOW */ |
2de001ee RS |
7551 | gcc_assert (!nested_in_vect_loop); |
7552 | gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info)); | |
ebfd146a | 7553 | |
bffb8014 RS |
7554 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
7555 | group_size = DR_GROUP_SIZE (first_stmt_info); | |
d5f035ea | 7556 | |
b1af7da6 RB |
7557 | if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) |
7558 | slp_perm = true; | |
7559 | ||
f2556b68 RB |
7560 | /* Invalidate assumptions made by dependence analysis when vectorization |
7561 | on the unrolled body effectively re-orders stmts. */ | |
7562 | if (!PURE_SLP_STMT (stmt_info) | |
7563 | && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0 | |
d9f21f6a RS |
7564 | && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo), |
7565 | STMT_VINFO_MIN_NEG_DIST (stmt_info))) | |
f2556b68 RB |
7566 | { |
7567 | if (dump_enabled_p ()) | |
7568 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7569 | "cannot perform implicit CSE when performing " | |
7570 | "group loads with negative dependence distance\n"); | |
7571 | return false; | |
7572 | } | |
96bb56b2 RB |
7573 | |
7574 | /* Similarly, when the stmt is a load that is both part of a SLP | |
7575 | instance and a loop vectorized stmt via the same-dr mechanism, | |
7576 | we have to give up. */ | |
2c53b149 | 7577 | if (DR_GROUP_SAME_DR_STMT (stmt_info) |
96bb56b2 | 7578 | && (STMT_SLP_TYPE (stmt_info) |
c26228d4 | 7579 | != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info)))) |
96bb56b2 RB |
7580 | { |
7581 | if (dump_enabled_p ()) | |
7582 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7583 | "conflicting SLP types for CSEd load\n"); | |
7584 | return false; | |
7585 | } | |
ebfd146a | 7586 | } |
7cfb4d93 RS |
7587 | else |
7588 | group_size = 1; | |
ebfd146a | 7589 | |
2de001ee | 7590 | vect_memory_access_type memory_access_type; |
86a91c0a | 7591 | if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies, |
2de001ee RS |
7592 | &memory_access_type, &gs_info)) |
7593 | return false; | |
a1e53f3f | 7594 | |
c3a8f964 RS |
7595 | if (mask) |
7596 | { | |
7597 | if (memory_access_type == VMAT_CONTIGUOUS) | |
7598 | { | |
7e11fc7f RS |
7599 | machine_mode vec_mode = TYPE_MODE (vectype); |
7600 | if (!VECTOR_MODE_P (vec_mode) | |
7601 | || !can_vec_mask_load_store_p (vec_mode, | |
c3a8f964 RS |
7602 | TYPE_MODE (mask_vectype), true)) |
7603 | return false; | |
7604 | } | |
bfaa08b7 | 7605 | else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
c3a8f964 RS |
7606 | { |
7607 | tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl)); | |
7608 | tree masktype | |
7609 | = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); | |
7610 | if (TREE_CODE (masktype) == INTEGER_TYPE) | |
7611 | { | |
7612 | if (dump_enabled_p ()) | |
7613 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7614 | "masked gather with integer mask not" | |
7615 | " supported."); | |
7616 | return false; | |
7617 | } | |
7618 | } | |
bfaa08b7 RS |
7619 | else if (memory_access_type != VMAT_LOAD_STORE_LANES |
7620 | && memory_access_type != VMAT_GATHER_SCATTER) | |
c3a8f964 RS |
7621 | { |
7622 | if (dump_enabled_p ()) | |
7623 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
7624 | "unsupported access type for masked load.\n"); | |
7625 | return false; | |
7626 | } | |
7627 | } | |
7628 | ||
ebfd146a IR |
7629 | if (!vec_stmt) /* transformation not required. */ |
7630 | { | |
2de001ee RS |
7631 | if (!slp) |
7632 | STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type; | |
7cfb4d93 RS |
7633 | |
7634 | if (loop_vinfo | |
7635 | && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) | |
7636 | check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size, | |
bfaa08b7 | 7637 | memory_access_type, &gs_info); |
7cfb4d93 | 7638 | |
ebfd146a | 7639 | STMT_VINFO_TYPE (stmt_info) = load_vec_info_type; |
68435eb2 RB |
7640 | vect_model_load_cost (stmt_info, ncopies, memory_access_type, |
7641 | slp_node_instance, slp_node, cost_vec); | |
ebfd146a IR |
7642 | return true; |
7643 | } | |
7644 | ||
2de001ee RS |
7645 | if (!slp) |
7646 | gcc_assert (memory_access_type | |
7647 | == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info)); | |
7648 | ||
73fbfcad | 7649 | if (dump_enabled_p ()) |
78c60e3d | 7650 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 7651 | "transform load. ncopies = %d\n", ncopies); |
ebfd146a | 7652 | |
67b8dbac | 7653 | /* Transform. */ |
ebfd146a | 7654 | |
89fa689a RS |
7655 | dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL; |
7656 | ensure_base_align (dr_info); | |
c716e67f | 7657 | |
bfaa08b7 | 7658 | if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl) |
aec7ae7d | 7659 | { |
e4057a39 | 7660 | vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask); |
aec7ae7d JJ |
7661 | return true; |
7662 | } | |
2de001ee | 7663 | |
2d4bca81 RS |
7664 | if (memory_access_type == VMAT_INVARIANT) |
7665 | { | |
7666 | gcc_assert (!grouped_load && !mask && !bb_vinfo); | |
7667 | /* If we have versioned for aliasing or the loop doesn't | |
7668 | have any data dependencies that would preclude this, | |
7669 | then we are sure this is a loop invariant load and | |
7670 | thus we can insert it on the preheader edge. */ | |
7671 | bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) | |
7672 | && !nested_in_vect_loop | |
7673 | && hoist_defs_of_uses (stmt_info, loop)); | |
7674 | if (hoist_p) | |
7675 | { | |
7676 | gassign *stmt = as_a <gassign *> (stmt_info->stmt); | |
7677 | if (dump_enabled_p ()) | |
7678 | { | |
7679 | dump_printf_loc (MSG_NOTE, vect_location, | |
7680 | "hoisting out of the vectorized loop: "); | |
7681 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
7682 | } | |
7683 | scalar_dest = copy_ssa_name (scalar_dest); | |
7684 | tree rhs = unshare_expr (gimple_assign_rhs1 (stmt)); | |
7685 | gsi_insert_on_edge_immediate | |
7686 | (loop_preheader_edge (loop), | |
7687 | gimple_build_assign (scalar_dest, rhs)); | |
7688 | } | |
7689 | /* These copies are all equivalent, but currently the representation | |
7690 | requires a separate STMT_VINFO_VEC_STMT for each one. */ | |
7691 | prev_stmt_info = NULL; | |
7692 | gimple_stmt_iterator gsi2 = *gsi; | |
7693 | gsi_next (&gsi2); | |
7694 | for (j = 0; j < ncopies; j++) | |
7695 | { | |
7696 | stmt_vec_info new_stmt_info; | |
7697 | if (hoist_p) | |
7698 | { | |
7699 | new_temp = vect_init_vector (stmt_info, scalar_dest, | |
7700 | vectype, NULL); | |
7701 | gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp); | |
7702 | new_stmt_info = vinfo->add_stmt (new_stmt); | |
7703 | } | |
7704 | else | |
7705 | { | |
7706 | new_temp = vect_init_vector (stmt_info, scalar_dest, | |
7707 | vectype, &gsi2); | |
7708 | new_stmt_info = vinfo->lookup_def (new_temp); | |
7709 | } | |
7710 | if (slp) | |
7711 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); | |
7712 | else if (j == 0) | |
7713 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; | |
7714 | else | |
7715 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; | |
7716 | prev_stmt_info = new_stmt_info; | |
7717 | } | |
7718 | return true; | |
7719 | } | |
7720 | ||
2de001ee RS |
7721 | if (memory_access_type == VMAT_ELEMENTWISE |
7722 | || memory_access_type == VMAT_STRIDED_SLP) | |
7d75abc8 MM |
7723 | { |
7724 | gimple_stmt_iterator incr_gsi; | |
7725 | bool insert_after; | |
355fe088 | 7726 | gimple *incr; |
7d75abc8 | 7727 | tree offvar; |
7d75abc8 MM |
7728 | tree ivstep; |
7729 | tree running_off; | |
9771b263 | 7730 | vec<constructor_elt, va_gc> *v = NULL; |
14ac6aa2 | 7731 | tree stride_base, stride_step, alias_off; |
4d694b27 RS |
7732 | /* Checked by get_load_store_type. */ |
7733 | unsigned int const_nunits = nunits.to_constant (); | |
b210f45f | 7734 | unsigned HOST_WIDE_INT cst_offset = 0; |
14ac6aa2 | 7735 | |
7cfb4d93 | 7736 | gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); |
14ac6aa2 | 7737 | gcc_assert (!nested_in_vect_loop); |
7d75abc8 | 7738 | |
b210f45f | 7739 | if (grouped_load) |
44fc7854 | 7740 | { |
bffb8014 | 7741 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
89fa689a | 7742 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
44fc7854 | 7743 | } |
ab313a8c | 7744 | else |
44fc7854 | 7745 | { |
bffb8014 | 7746 | first_stmt_info = stmt_info; |
89fa689a | 7747 | first_dr_info = dr_info; |
b210f45f RB |
7748 | } |
7749 | if (slp && grouped_load) | |
7750 | { | |
bffb8014 RS |
7751 | group_size = DR_GROUP_SIZE (first_stmt_info); |
7752 | ref_type = get_group_alias_ptr_type (first_stmt_info); | |
b210f45f RB |
7753 | } |
7754 | else | |
7755 | { | |
7756 | if (grouped_load) | |
7757 | cst_offset | |
7758 | = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))) | |
86a91c0a | 7759 | * vect_get_place_in_interleaving_chain (stmt_info, |
bffb8014 | 7760 | first_stmt_info)); |
44fc7854 | 7761 | group_size = 1; |
89fa689a | 7762 | ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)); |
44fc7854 | 7763 | } |
ab313a8c | 7764 | |
14ac6aa2 RB |
7765 | stride_base |
7766 | = fold_build_pointer_plus | |
89fa689a | 7767 | (DR_BASE_ADDRESS (first_dr_info->dr), |
14ac6aa2 | 7768 | size_binop (PLUS_EXPR, |
89fa689a RS |
7769 | convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)), |
7770 | convert_to_ptrofftype (DR_INIT (first_dr_info->dr)))); | |
7771 | stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr)); | |
7d75abc8 MM |
7772 | |
7773 | /* For a load with loop-invariant (but other than power-of-2) | |
7774 | stride (i.e. not a grouped access) like so: | |
7775 | ||
7776 | for (i = 0; i < n; i += stride) | |
7777 | ... = array[i]; | |
7778 | ||
7779 | we generate a new induction variable and new accesses to | |
7780 | form a new vector (or vectors, depending on ncopies): | |
7781 | ||
7782 | for (j = 0; ; j += VF*stride) | |
7783 | tmp1 = array[j]; | |
7784 | tmp2 = array[j + stride]; | |
7785 | ... | |
7786 | vectemp = {tmp1, tmp2, ...} | |
7787 | */ | |
7788 | ||
ab313a8c RB |
7789 | ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step, |
7790 | build_int_cst (TREE_TYPE (stride_step), vf)); | |
7d75abc8 MM |
7791 | |
7792 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
7793 | ||
b210f45f RB |
7794 | stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base); |
7795 | ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep); | |
7796 | create_iv (stride_base, ivstep, NULL, | |
7d75abc8 MM |
7797 | loop, &incr_gsi, insert_after, |
7798 | &offvar, NULL); | |
7799 | incr = gsi_stmt (incr_gsi); | |
4fbeb363 | 7800 | loop_vinfo->add_stmt (incr); |
7d75abc8 | 7801 | |
b210f45f | 7802 | stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step); |
7d75abc8 MM |
7803 | |
7804 | prev_stmt_info = NULL; | |
7805 | running_off = offvar; | |
44fc7854 | 7806 | alias_off = build_int_cst (ref_type, 0); |
4d694b27 | 7807 | int nloads = const_nunits; |
e09b4c37 | 7808 | int lnel = 1; |
7b5fc413 | 7809 | tree ltype = TREE_TYPE (vectype); |
ea60dd34 | 7810 | tree lvectype = vectype; |
b266b968 | 7811 | auto_vec<tree> dr_chain; |
2de001ee | 7812 | if (memory_access_type == VMAT_STRIDED_SLP) |
7b5fc413 | 7813 | { |
4d694b27 | 7814 | if (group_size < const_nunits) |
e09b4c37 | 7815 | { |
ff03930a JJ |
7816 | /* First check if vec_init optab supports construction from |
7817 | vector elts directly. */ | |
b397965c | 7818 | scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype)); |
9da15d40 RS |
7819 | machine_mode vmode; |
7820 | if (mode_for_vector (elmode, group_size).exists (&vmode) | |
7821 | && VECTOR_MODE_P (vmode) | |
414fef4e | 7822 | && targetm.vector_mode_supported_p (vmode) |
ff03930a JJ |
7823 | && (convert_optab_handler (vec_init_optab, |
7824 | TYPE_MODE (vectype), vmode) | |
7825 | != CODE_FOR_nothing)) | |
ea60dd34 | 7826 | { |
4d694b27 | 7827 | nloads = const_nunits / group_size; |
ea60dd34 | 7828 | lnel = group_size; |
ff03930a JJ |
7829 | ltype = build_vector_type (TREE_TYPE (vectype), group_size); |
7830 | } | |
7831 | else | |
7832 | { | |
7833 | /* Otherwise avoid emitting a constructor of vector elements | |
7834 | by performing the loads using an integer type of the same | |
7835 | size, constructing a vector of those and then | |
7836 | re-interpreting it as the original vector type. | |
7837 | This avoids a huge runtime penalty due to the general | |
7838 | inability to perform store forwarding from smaller stores | |
7839 | to a larger load. */ | |
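		  /* Illustrative worked example (an assumption for clarity, not
		     part of the blamed source): for a strided SLP group of four
		     QImode loads with a V16QI vectype, LSIZE below is 32 bits, so
		     each group is loaded as one SImode value; four such values are
		     assembled into a V4SI vector and view-converted back to V16QI,
		     i.e. four loads instead of sixteen.  */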
7840 | unsigned lsize | |
7841 | = group_size * TYPE_PRECISION (TREE_TYPE (vectype)); | |
fffbab82 | 7842 | elmode = int_mode_for_size (lsize, 0).require (); |
4d694b27 | 7843 | unsigned int lnunits = const_nunits / group_size; |
ff03930a JJ |
7844 | /* If we can't construct such a vector fall back to |
7845 | element loads of the original vector type. */ | |
4d694b27 | 7846 | if (mode_for_vector (elmode, lnunits).exists (&vmode) |
9da15d40 | 7847 | && VECTOR_MODE_P (vmode) |
414fef4e | 7848 | && targetm.vector_mode_supported_p (vmode) |
ff03930a JJ |
7849 | && (convert_optab_handler (vec_init_optab, vmode, elmode) |
7850 | != CODE_FOR_nothing)) | |
7851 | { | |
4d694b27 | 7852 | nloads = lnunits; |
ff03930a JJ |
7853 | lnel = group_size; |
7854 | ltype = build_nonstandard_integer_type (lsize, 1); | |
7855 | lvectype = build_vector_type (ltype, nloads); | |
7856 | } | |
ea60dd34 | 7857 | } |
e09b4c37 | 7858 | } |
2de001ee | 7859 | else |
e09b4c37 | 7860 | { |
ea60dd34 | 7861 | nloads = 1; |
4d694b27 | 7862 | lnel = const_nunits; |
e09b4c37 | 7863 | ltype = vectype; |
e09b4c37 | 7864 | } |
2de001ee RS |
7865 | ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype))); |
7866 | } | |
bb4e4747 BC |
7867 | /* Load the vector(1) scalar_type as a whole if the vectype has just one element. */ | |
7868 | else if (nloads == 1) | |
7869 | ltype = vectype; | |
7870 | ||
2de001ee RS |
7871 | if (slp) |
7872 | { | |
66c16fd9 RB |
7873 | /* For SLP permutation support we need to load the whole group, |
7874 | not only the number of vector stmts the permutation result | |
7875 | fits in. */ | |
b266b968 | 7876 | if (slp_perm) |
66c16fd9 | 7877 | { |
d9f21f6a RS |
7878 | /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for |
7879 | variable VF. */ | |
7880 | unsigned int const_vf = vf.to_constant (); | |
4d694b27 | 7881 | ncopies = CEIL (group_size * const_vf, const_nunits); |
66c16fd9 RB |
7882 | dr_chain.create (ncopies); |
7883 | } | |
7884 | else | |
7885 | ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
7b5fc413 | 7886 | } |
4d694b27 | 7887 | unsigned int group_el = 0; |
e09b4c37 RB |
7888 | unsigned HOST_WIDE_INT |
7889 | elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype))); | |
7d75abc8 MM |
7890 | for (j = 0; j < ncopies; j++) |
7891 | { | |
7b5fc413 | 7892 | if (nloads > 1) |
e09b4c37 | 7893 | vec_alloc (v, nloads); |
e1bd7296 | 7894 | stmt_vec_info new_stmt_info = NULL; |
e09b4c37 | 7895 | for (i = 0; i < nloads; i++) |
7b5fc413 | 7896 | { |
e09b4c37 | 7897 | tree this_off = build_int_cst (TREE_TYPE (alias_off), |
b210f45f | 7898 | group_el * elsz + cst_offset); |
19986382 | 7899 | tree data_ref = build2 (MEM_REF, ltype, running_off, this_off); |
89fa689a | 7900 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
e1bd7296 RS |
7901 | gassign *new_stmt |
7902 | = gimple_build_assign (make_ssa_name (ltype), data_ref); | |
7903 | new_stmt_info | |
86a91c0a | 7904 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
e09b4c37 RB |
7905 | if (nloads > 1) |
7906 | CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, | |
7907 | gimple_assign_lhs (new_stmt)); | |
7908 | ||
7909 | group_el += lnel; | |
7910 | if (! slp | |
7911 | || group_el == group_size) | |
7b5fc413 | 7912 | { |
e09b4c37 RB |
7913 | tree newoff = copy_ssa_name (running_off); |
7914 | gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR, | |
7915 | running_off, stride_step); | |
86a91c0a | 7916 | vect_finish_stmt_generation (stmt_info, incr, gsi); |
7b5fc413 RB |
7917 | |
7918 | running_off = newoff; | |
e09b4c37 | 7919 | group_el = 0; |
7b5fc413 | 7920 | } |
7b5fc413 | 7921 | } |
e09b4c37 | 7922 | if (nloads > 1) |
7d75abc8 | 7923 | { |
ea60dd34 | 7924 | tree vec_inv = build_constructor (lvectype, v); |
86a91c0a | 7925 | new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi); |
e1bd7296 | 7926 | new_stmt_info = vinfo->lookup_def (new_temp); |
ea60dd34 RB |
7927 | if (lvectype != vectype) |
7928 | { | |
e1bd7296 RS |
7929 | gassign *new_stmt |
7930 | = gimple_build_assign (make_ssa_name (vectype), | |
7931 | VIEW_CONVERT_EXPR, | |
7932 | build1 (VIEW_CONVERT_EXPR, | |
7933 | vectype, new_temp)); | |
7934 | new_stmt_info | |
86a91c0a | 7935 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
ea60dd34 | 7936 | } |
7d75abc8 MM |
7937 | } |
7938 | ||
7b5fc413 | 7939 | if (slp) |
b266b968 | 7940 | { |
b266b968 | 7941 | if (slp_perm) |
e1bd7296 | 7942 | dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt)); |
66c16fd9 | 7943 | else |
e1bd7296 | 7944 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
b266b968 | 7945 | } |
7d75abc8 | 7946 | else |
225ce44b RB |
7947 | { |
7948 | if (j == 0) | |
e1bd7296 | 7949 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
225ce44b | 7950 | else |
e1bd7296 RS |
7951 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
7952 | prev_stmt_info = new_stmt_info; | |
225ce44b | 7953 | } |
7d75abc8 | 7954 | } |
b266b968 | 7955 | if (slp_perm) |
29afecdf RB |
7956 | { |
7957 | unsigned n_perms; | |
7958 | vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, | |
7959 | slp_node_instance, false, &n_perms); | |
7960 | } | |
7d75abc8 MM |
7961 | return true; |
7962 | } | |
aec7ae7d | 7963 | |
b5ec4de7 RS |
7964 | if (memory_access_type == VMAT_GATHER_SCATTER |
7965 | || (!slp && memory_access_type == VMAT_CONTIGUOUS)) | |
ab2fc782 RS |
7966 | grouped_load = false; |
7967 | ||
0d0293ac | 7968 | if (grouped_load) |
ebfd146a | 7969 | { |
bffb8014 RS |
7970 | first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
7971 | group_size = DR_GROUP_SIZE (first_stmt_info); | |
4f0a0218 | 7972 | /* For SLP vectorization we directly vectorize a subchain |
52eab378 RB |
7973 | without permutation. */ |
7974 | if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()) | |
bffb8014 | 7975 | first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
4f0a0218 RB |
7976 | /* For BB vectorization always use the first stmt to base |
7977 | the data ref pointer on. */ | |
7978 | if (bb_vinfo) | |
b9787581 | 7979 | first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0]; |
6aa904c4 | 7980 | |
ebfd146a | 7981 | /* Check if the chain of loads is already vectorized. */ |
bffb8014 | 7982 | if (STMT_VINFO_VEC_STMT (first_stmt_info) |
01d8bf07 RB |
7983 | /* For SLP we would need to copy over SLP_TREE_VEC_STMTS. |
7984 | ??? But we can only do so if there is exactly one | |
7985 | as we have no way to get at the rest. Leave the CSE | |
7986 | opportunity alone. | |
7987 | ??? With the group load eventually participating | |
7988 | in multiple different permutations (having multiple | |
7989 | slp nodes which refer to the same group) the CSE | |
7990 | is even wrong code. See PR56270. */ | |
7991 | && !slp) | |
ebfd146a IR |
7992 | { |
7993 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); | |
7994 | return true; | |
7995 | } | |
89fa689a | 7996 | first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info); |
9b999e8c | 7997 | group_gap_adj = 0; |
ebfd146a IR |
7998 | |
7999 | /* VEC_NUM is the number of vect stmts to be created for this group. */ | |
8000 | if (slp) | |
8001 | { | |
0d0293ac | 8002 | grouped_load = false; |
ab7e60ce RS |
8003 | /* If an SLP permutation is from N elements to N elements, |
8004 | and if one vector holds a whole number of N, we can load | |
8005 | the inputs to the permutation in the same way as an | |
8006 | unpermuted sequence. In other cases we need to load the | |
8007 | whole group, not only the number of vector stmts the | |
8008 | permutation result fits in. */ | |
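/* For illustration only: a two-element {1, 0} swap with nunits == 4
   satisfies multiple_p (4, 2), so every loaded vector contains whole
   groups and can feed the permutation directly; a three-element
   permutation with nunits == 4 does not, and then the whole group
   is loaded as described above.  */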
8009 | if (slp_perm | |
8010 | && (group_size != SLP_INSTANCE_GROUP_SIZE (slp_node_instance) | |
8011 | || !multiple_p (nunits, group_size))) | |
b267968e | 8012 | { |
ab7e60ce RS |
8013 | /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for |
8014 | variable VF; see vect_transform_slp_perm_load. */ | |
d9f21f6a | 8015 | unsigned int const_vf = vf.to_constant (); |
4d694b27 RS |
8016 | unsigned int const_nunits = nunits.to_constant (); |
8017 | vec_num = CEIL (group_size * const_vf, const_nunits); | |
b267968e RB |
8018 | group_gap_adj = vf * group_size - nunits * vec_num; |
8019 | } | |
91ff1504 | 8020 | else |
b267968e RB |
8021 | { |
8022 | vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); | |
796bd467 RB |
8023 | group_gap_adj |
8024 | = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance); | |
b267968e | 8025 | } |
a70d6342 | 8026 | } |
ebfd146a | 8027 | else |
9b999e8c | 8028 | vec_num = group_size; |
44fc7854 | 8029 | |
bffb8014 | 8030 | ref_type = get_group_alias_ptr_type (first_stmt_info); |
ebfd146a IR |
8031 | } |
8032 | else | |
8033 | { | |
bffb8014 | 8034 | first_stmt_info = stmt_info; |
89fa689a | 8035 | first_dr_info = dr_info; |
ebfd146a | 8036 | group_size = vec_num = 1; |
9b999e8c | 8037 | group_gap_adj = 0; |
89fa689a | 8038 | ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr)); |
ebfd146a IR |
8039 | } |
8040 | ||
89fa689a RS |
8041 | alignment_support_scheme |
8042 | = vect_supportable_dr_alignment (first_dr_info, false); | |
ebfd146a | 8043 | gcc_assert (alignment_support_scheme); |
70088b95 RS |
8044 | vec_loop_masks *loop_masks |
8045 | = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) | |
8046 | ? &LOOP_VINFO_MASKS (loop_vinfo) | |
8047 | : NULL); | |
7cfb4d93 RS |
8048 | /* Targets with load-lane instructions must not require explicit |
8049 | realignment. vect_supportable_dr_alignment always returns either | |
8050 | dr_aligned or dr_unaligned_supported for masked operations. */ | |
8051 | gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES | |
8052 | && !mask | |
70088b95 | 8053 | && !loop_masks) |
272c6793 RS |
8054 | || alignment_support_scheme == dr_aligned |
8055 | || alignment_support_scheme == dr_unaligned_supported); | |
ebfd146a IR |
8056 | |
8057 | /* In case the vectorization factor (VF) is bigger than the number | |
8058 | of elements that we can fit in a vectype (nunits), we have to generate | |
8059 | more than one vector stmt - i.e., we need to "unroll" the |
ff802fa1 | 8060 | vector stmt by a factor VF/nunits. In doing so, we record a pointer |
ebfd146a | 8061 | from one copy of the vector stmt to the next, in the field |
ff802fa1 | 8062 | STMT_VINFO_RELATED_STMT. This is necessary in order to allow following |
ebfd146a | 8063 | stages to find the correct vector defs to be used when vectorizing |
ff802fa1 IR |
8064 | stmts that use the defs of the current stmt. The example below |
8065 | illustrates the vectorization process when VF=16 and nunits=4 (i.e., we | |
8066 | need to create 4 vectorized stmts): | |
ebfd146a IR |
8067 | |
8068 | before vectorization: | |
8069 | RELATED_STMT VEC_STMT | |
8070 | S1: x = memref - - | |
8071 | S2: z = x + 1 - - | |
8072 | ||
8073 | step 1: vectorize stmt S1: | |
8074 | We first create the vector stmt VS1_0, and, as usual, record a | |
8075 | pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1. | |
8076 | Next, we create the vector stmt VS1_1, and record a pointer to | |
8077 | it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0. | |
ff802fa1 | 8078 | Similarly, for VS1_2 and VS1_3. This is the resulting chain of |
ebfd146a IR |
8079 | stmts and pointers: |
8080 | RELATED_STMT VEC_STMT | |
8081 | VS1_0: vx0 = memref0 VS1_1 - | |
8082 | VS1_1: vx1 = memref1 VS1_2 - | |
8083 | VS1_2: vx2 = memref2 VS1_3 - | |
8084 | VS1_3: vx3 = memref3 - - | |
8085 | S1: x = load - VS1_0 | |
8086 | S2: z = x + 1 - - | |
8087 | ||
b8698a0f L |
8088 | See the documentation of vect_get_vec_def_for_stmt_copy for how the |
8089 | information we recorded in the RELATED_STMT field is used to vectorize |
ebfd146a IR |
8090 | stmt S2. */ |
8091 | ||
0d0293ac | 8092 | /* In case of interleaving (non-unit grouped access): |
ebfd146a IR |
8093 | |
8094 | S1: x2 = &base + 2 | |
8095 | S2: x0 = &base | |
8096 | S3: x1 = &base + 1 | |
8097 | S4: x3 = &base + 3 | |
8098 | ||
b8698a0f | 8099 | Vectorized loads are created in the order of memory accesses |
ebfd146a IR |
8100 | starting from the access of the first stmt of the chain: |
8101 | ||
8102 | VS1: vx0 = &base | |
8103 | VS2: vx1 = &base + vec_size*1 | |
8104 | VS3: vx3 = &base + vec_size*2 | |
8105 | VS4: vx4 = &base + vec_size*3 | |
8106 | ||
8107 | Then permutation statements are generated: | |
8108 | ||
e2c83630 RH |
8109 | VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } > |
8110 | VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } > | |
ebfd146a IR |
8111 | ... |
8112 | ||
8113 | And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts | |
8114 | (the order of the data-refs in the output of vect_permute_load_chain | |
8115 | corresponds to the order of scalar stmts in the interleaving chain - see | |
8116 | the documentation of vect_permute_load_chain()). | |
8117 | The generation of permutation stmts and recording them in | |
0d0293ac | 8118 | STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load(). |
ebfd146a | 8119 | |
b8698a0f | 8120 | In case of both multiple types and interleaving, the vector loads and |
ff802fa1 IR |
8121 | permutation stmts above are created for every copy. The result vector |
8122 | stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the | |
8123 | corresponding STMT_VINFO_RELATED_STMT for the next copies. */ | |
ebfd146a IR |
8124 | |
8125 | /* If the data reference is aligned (dr_aligned) or potentially unaligned | |
8126 | on a target that supports unaligned accesses (dr_unaligned_supported) | |
8127 | we generate the following code: | |
8128 | p = initial_addr; | |
8129 | indx = 0; | |
8130 | loop { | |
8131 | p = p + indx * vectype_size; | |
8132 | vec_dest = *(p); | |
8133 | indx = indx + 1; | |
8134 | } | |
8135 | ||
8136 | Otherwise, the data reference is potentially unaligned on a target that | |
b8698a0f | 8137 | does not support unaligned accesses (dr_explicit_realign_optimized) - |
ebfd146a IR |
8138 | then generate the following code, in which the data in each iteration is |
8139 | obtained by two vector loads, one from the previous iteration, and one | |
8140 | from the current iteration: | |
8141 | p1 = initial_addr; | |
8142 | msq_init = *(floor(p1)) | |
8143 | p2 = initial_addr + VS - 1; | |
8144 | realignment_token = call target_builtin; | |
8145 | indx = 0; | |
8146 | loop { | |
8147 | p2 = p2 + indx * vectype_size | |
8148 | lsq = *(floor(p2)) | |
8149 | vec_dest = realign_load (msq, lsq, realignment_token) | |
8150 | indx = indx + 1; | |
8151 | msq = lsq; | |
8152 | } */ | |
8153 | ||
8154 | /* If the misalignment remains the same throughout the execution of the | |
8155 | loop, we can create the init_addr and permutation mask at the loop | |
ff802fa1 | 8156 | preheader. Otherwise, it needs to be created inside the loop. |
ebfd146a IR |
8157 | This can only occur when vectorizing memory accesses in the inner-loop |
8158 | nested within an outer-loop that is being vectorized. */ | |
8159 | ||
d1e4b493 | 8160 | if (nested_in_vect_loop |
89fa689a | 8161 | && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr), |
cf098191 | 8162 | GET_MODE_SIZE (TYPE_MODE (vectype)))) |
ebfd146a IR |
8163 | { |
8164 | gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); | |
8165 | compute_in_loop = true; | |
8166 | } | |
8167 | ||
8168 | if ((alignment_support_scheme == dr_explicit_realign_optimized | |
8169 | || alignment_support_scheme == dr_explicit_realign) | |
59fd17e3 | 8170 | && !compute_in_loop) |
ebfd146a | 8171 | { |
bffb8014 | 8172 | msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token, |
ebfd146a IR |
8173 | alignment_support_scheme, NULL_TREE, |
8174 | &at_loop); | |
8175 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
8176 | { | |
538dd0b7 | 8177 | phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq)); |
356bbc4c JJ |
8178 | byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype), |
8179 | size_one_node); | |
ebfd146a IR |
8180 | } |
8181 | } | |
8182 | else | |
8183 | at_loop = loop; | |
8184 | ||
62da9e14 | 8185 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
a1e53f3f L |
8186 | offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1); |
8187 | ||
ab2fc782 RS |
8188 | tree bump; |
8189 | tree vec_offset = NULL_TREE; | |
8190 | if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) | |
8191 | { | |
8192 | aggr_type = NULL_TREE; | |
8193 | bump = NULL_TREE; | |
8194 | } | |
8195 | else if (memory_access_type == VMAT_GATHER_SCATTER) | |
8196 | { | |
8197 | aggr_type = elem_type; | |
86a91c0a | 8198 | vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info, |
ab2fc782 RS |
8199 | &bump, &vec_offset); |
8200 | } | |
272c6793 | 8201 | else |
ab2fc782 RS |
8202 | { |
8203 | if (memory_access_type == VMAT_LOAD_STORE_LANES) | |
8204 | aggr_type = build_array_type_nelts (elem_type, vec_num * nunits); | |
8205 | else | |
8206 | aggr_type = vectype; | |
89fa689a RS |
8207 | bump = vect_get_data_ptr_increment (dr_info, aggr_type, |
8208 | memory_access_type); | |
ab2fc782 | 8209 | } |
272c6793 | 8210 | |
c3a8f964 | 8211 | tree vec_mask = NULL_TREE; |
ebfd146a | 8212 | prev_stmt_info = NULL; |
4d694b27 | 8213 | poly_uint64 group_elt = 0; |
ebfd146a | 8214 | for (j = 0; j < ncopies; j++) |
b8698a0f | 8215 | { |
e1bd7296 | 8216 | stmt_vec_info new_stmt_info = NULL; |
272c6793 | 8217 | /* 1. Create the vector or array pointer update chain. */ |
ebfd146a | 8218 | if (j == 0) |
74bf76ed JJ |
8219 | { |
8220 | bool simd_lane_access_p | |
8221 | = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info); | |
8222 | if (simd_lane_access_p | |
89fa689a RS |
8223 | && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR |
8224 | && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0)) | |
8225 | && integer_zerop (DR_OFFSET (first_dr_info->dr)) | |
8226 | && integer_zerop (DR_INIT (first_dr_info->dr)) | |
74bf76ed | 8227 | && alias_sets_conflict_p (get_alias_set (aggr_type), |
44fc7854 | 8228 | get_alias_set (TREE_TYPE (ref_type))) |
74bf76ed JJ |
8229 | && (alignment_support_scheme == dr_aligned |
8230 | || alignment_support_scheme == dr_unaligned_supported)) | |
8231 | { | |
89fa689a | 8232 | dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr)); |
44fc7854 | 8233 | dataref_offset = build_int_cst (ref_type, 0); |
74bf76ed | 8234 | } |
b9787581 | 8235 | else if (first_stmt_info_for_drptr |
bffb8014 | 8236 | && first_stmt_info != first_stmt_info_for_drptr) |
4f0a0218 RB |
8237 | { |
8238 | dataref_ptr | |
b9787581 RS |
8239 | = vect_create_data_ref_ptr (first_stmt_info_for_drptr, |
8240 | aggr_type, at_loop, offset, &dummy, | |
8241 | gsi, &ptr_incr, simd_lane_access_p, | |
2d4bca81 | 8242 | byte_offset, bump); |
4f0a0218 RB |
8243 | /* Adjust the pointer by the difference to first_stmt. */ |
8244 | data_reference_p ptrdr | |
b9787581 | 8245 | = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr); |
89fa689a RS |
8246 | tree diff |
8247 | = fold_convert (sizetype, | |
8248 | size_binop (MINUS_EXPR, | |
8249 | DR_INIT (first_dr_info->dr), | |
8250 | DR_INIT (ptrdr))); | |
4f0a0218 | 8251 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8252 | stmt_info, diff); |
4f0a0218 | 8253 | } |
bfaa08b7 | 8254 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
2d4bca81 RS |
8255 | vect_get_gather_scatter_ops (loop, stmt_info, &gs_info, |
8256 | &dataref_ptr, &vec_offset); | |
74bf76ed JJ |
8257 | else |
8258 | dataref_ptr | |
bffb8014 | 8259 | = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop, |
74bf76ed | 8260 | offset, &dummy, gsi, &ptr_incr, |
2d4bca81 | 8261 | simd_lane_access_p, |
ab2fc782 | 8262 | byte_offset, bump); |
c3a8f964 | 8263 | if (mask) |
86a91c0a | 8264 | vec_mask = vect_get_vec_def_for_operand (mask, stmt_info, |
c3a8f964 | 8265 | mask_vectype); |
74bf76ed | 8266 | } |
ebfd146a | 8267 | else |
c3a8f964 RS |
8268 | { |
8269 | if (dataref_offset) | |
8270 | dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, | |
ab2fc782 | 8271 | bump); |
bfaa08b7 | 8272 | else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) |
e4057a39 | 8273 | vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset); |
c3a8f964 | 8274 | else |
ab2fc782 | 8275 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8276 | stmt_info, bump); |
c3a8f964 | 8277 | if (mask) |
e4057a39 | 8278 | vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask); |
c3a8f964 | 8279 | } |
ebfd146a | 8280 | |
0d0293ac | 8281 | if (grouped_load || slp_perm) |
9771b263 | 8282 | dr_chain.create (vec_num); |
5ce1ee7f | 8283 | |
2de001ee | 8284 | if (memory_access_type == VMAT_LOAD_STORE_LANES) |
ebfd146a | 8285 | { |
272c6793 RS |
8286 | tree vec_array; |
8287 | ||
8288 | vec_array = create_vector_array (vectype, vec_num); | |
8289 | ||
7cfb4d93 | 8290 | tree final_mask = NULL_TREE; |
70088b95 RS |
8291 | if (loop_masks) |
8292 | final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies, | |
8293 | vectype, j); | |
7cfb4d93 RS |
8294 | if (vec_mask) |
8295 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
8296 | vec_mask, gsi); | |
8297 | ||
7e11fc7f | 8298 | gcall *call; |
7cfb4d93 | 8299 | if (final_mask) |
7e11fc7f RS |
8300 | { |
8301 | /* Emit: | |
8302 | VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR, | |
8303 | VEC_MASK). */ | |
8304 | unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype)); | |
8305 | tree alias_ptr = build_int_cst (ref_type, align); | |
8306 | call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3, | |
8307 | dataref_ptr, alias_ptr, | |
7cfb4d93 | 8308 | final_mask); |
7e11fc7f RS |
8309 | } |
8310 | else | |
8311 | { | |
8312 | /* Emit: | |
8313 | VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */ | |
8314 | data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type); | |
8315 | call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref); | |
8316 | } | |
a844293d RS |
8317 | gimple_call_set_lhs (call, vec_array); |
8318 | gimple_call_set_nothrow (call, true); | |
86a91c0a | 8319 | new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi); |
ebfd146a | 8320 | |
272c6793 RS |
8321 | /* Extract each vector into an SSA_NAME. */ |
8322 | for (i = 0; i < vec_num; i++) | |
ebfd146a | 8323 | { |
86a91c0a | 8324 | new_temp = read_vector_array (stmt_info, gsi, scalar_dest, |
272c6793 | 8325 | vec_array, i); |
9771b263 | 8326 | dr_chain.quick_push (new_temp); |
272c6793 RS |
8327 | } |
8328 | ||
8329 | /* Record the mapping between SSA_NAMEs and statements. */ | |
86a91c0a | 8330 | vect_record_grouped_load_vectors (stmt_info, dr_chain); |
3ba4ff41 RS |
8331 | |
8332 | /* Record that VEC_ARRAY is now dead. */ | |
86a91c0a | 8333 | vect_clobber_variable (stmt_info, gsi, vec_array); |
272c6793 RS |
8334 | } |
8335 | else | |
8336 | { | |
8337 | for (i = 0; i < vec_num; i++) | |
8338 | { | |
7cfb4d93 | 8339 | tree final_mask = NULL_TREE; |
70088b95 | 8340 | if (loop_masks |
7cfb4d93 | 8341 | && memory_access_type != VMAT_INVARIANT) |
70088b95 RS |
8342 | final_mask = vect_get_loop_mask (gsi, loop_masks, |
8343 | vec_num * ncopies, | |
7cfb4d93 RS |
8344 | vectype, vec_num * j + i); |
8345 | if (vec_mask) | |
8346 | final_mask = prepare_load_store_mask (mask_vectype, final_mask, | |
8347 | vec_mask, gsi); | |
8348 | ||
272c6793 RS |
8349 | if (i > 0) |
8350 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, | |
86a91c0a | 8351 | stmt_info, bump); |
272c6793 RS |
8352 | |
8353 | /* 2. Create the vector-load in the loop. */ | |
e1bd7296 | 8354 | gimple *new_stmt = NULL; |
272c6793 RS |
8355 | switch (alignment_support_scheme) |
8356 | { | |
8357 | case dr_aligned: | |
8358 | case dr_unaligned_supported: | |
be1ac4ec | 8359 | { |
644ffefd MJ |
8360 | unsigned int align, misalign; |
8361 | ||
bfaa08b7 RS |
8362 | if (memory_access_type == VMAT_GATHER_SCATTER) |
8363 | { | |
8364 | tree scale = size_int (gs_info.scale); | |
8365 | gcall *call; | |
70088b95 | 8366 | if (loop_masks) |
bfaa08b7 RS |
8367 | call = gimple_build_call_internal |
8368 | (IFN_MASK_GATHER_LOAD, 4, dataref_ptr, | |
8369 | vec_offset, scale, final_mask); | |
8370 | else | |
8371 | call = gimple_build_call_internal | |
8372 | (IFN_GATHER_LOAD, 3, dataref_ptr, | |
8373 | vec_offset, scale); | |
8374 | gimple_call_set_nothrow (call, true); | |
8375 | new_stmt = call; | |
8376 | data_ref = NULL_TREE; | |
8377 | break; | |
8378 | } | |
8379 | ||
89fa689a | 8380 | align = DR_TARGET_ALIGNMENT (dr_info); |
272c6793 RS |
8381 | if (alignment_support_scheme == dr_aligned) |
8382 | { | |
89fa689a | 8383 | gcc_assert (aligned_access_p (first_dr_info)); |
644ffefd | 8384 | misalign = 0; |
272c6793 | 8385 | } |
89fa689a | 8386 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
272c6793 | 8387 | { |
89fa689a RS |
8388 | align = dr_alignment |
8389 | (vect_dr_behavior (first_dr_info)); | |
52639a61 | 8390 | misalign = 0; |
272c6793 RS |
8391 | } |
8392 | else | |
89fa689a | 8393 | misalign = DR_MISALIGNMENT (first_dr_info); |
aed93b23 RB |
8394 | if (dataref_offset == NULL_TREE |
8395 | && TREE_CODE (dataref_ptr) == SSA_NAME) | |
74bf76ed JJ |
8396 | set_ptr_info_alignment (get_ptr_info (dataref_ptr), |
8397 | align, misalign); | |
c3a8f964 | 8398 | |
7cfb4d93 | 8399 | if (final_mask) |
c3a8f964 RS |
8400 | { |
8401 | align = least_bit_hwi (misalign | align); | |
8402 | tree ptr = build_int_cst (ref_type, align); | |
8403 | gcall *call | |
8404 | = gimple_build_call_internal (IFN_MASK_LOAD, 3, | |
8405 | dataref_ptr, ptr, | |
7cfb4d93 | 8406 | final_mask); |
c3a8f964 RS |
8407 | gimple_call_set_nothrow (call, true); |
8408 | new_stmt = call; | |
8409 | data_ref = NULL_TREE; | |
8410 | } | |
8411 | else | |
8412 | { | |
8413 | data_ref | |
8414 | = fold_build2 (MEM_REF, vectype, dataref_ptr, | |
8415 | dataref_offset | |
8416 | ? dataref_offset | |
8417 | : build_int_cst (ref_type, 0)); | |
8418 | if (alignment_support_scheme == dr_aligned) | |
8419 | ; | |
89fa689a | 8420 | else if (DR_MISALIGNMENT (first_dr_info) == -1) |
c3a8f964 RS |
8421 | TREE_TYPE (data_ref) |
8422 | = build_aligned_type (TREE_TYPE (data_ref), | |
8423 | align * BITS_PER_UNIT); | |
8424 | else | |
8425 | TREE_TYPE (data_ref) | |
8426 | = build_aligned_type (TREE_TYPE (data_ref), | |
8427 | TYPE_ALIGN (elem_type)); | |
8428 | } | |
272c6793 | 8429 | break; |
be1ac4ec | 8430 | } |
272c6793 | 8431 | case dr_explicit_realign: |
267d3070 | 8432 | { |
272c6793 | 8433 | tree ptr, bump; |
272c6793 | 8434 | |
d88981fc | 8435 | tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype)); |
272c6793 RS |
8436 | |
8437 | if (compute_in_loop) | |
bffb8014 | 8438 | msq = vect_setup_realignment (first_stmt_info, gsi, |
272c6793 RS |
8439 | &realignment_token, |
8440 | dr_explicit_realign, | |
8441 | dataref_ptr, NULL); | |
8442 | ||
aed93b23 RB |
8443 | if (TREE_CODE (dataref_ptr) == SSA_NAME) |
8444 | ptr = copy_ssa_name (dataref_ptr); | |
8445 | else | |
8446 | ptr = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
89fa689a | 8447 | unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info); |
0d0e4a03 JJ |
8448 | new_stmt = gimple_build_assign |
8449 | (ptr, BIT_AND_EXPR, dataref_ptr, | |
272c6793 RS |
8450 | build_int_cst |
8451 | (TREE_TYPE (dataref_ptr), | |
f702e7d4 | 8452 | -(HOST_WIDE_INT) align)); |
86a91c0a | 8453 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8454 | data_ref |
8455 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 8456 | build_int_cst (ref_type, 0)); |
89fa689a | 8457 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
272c6793 RS |
8458 | vec_dest = vect_create_destination_var (scalar_dest, |
8459 | vectype); | |
8460 | new_stmt = gimple_build_assign (vec_dest, data_ref); | |
8461 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
8462 | gimple_assign_set_lhs (new_stmt, new_temp); | |
86a91c0a RS |
8463 | gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt)); |
8464 | gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt)); | |
8465 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); | |
272c6793 RS |
8466 | msq = new_temp; |
8467 | ||
d88981fc | 8468 | bump = size_binop (MULT_EXPR, vs, |
7b7b1813 | 8469 | TYPE_SIZE_UNIT (elem_type)); |
d88981fc | 8470 | bump = size_binop (MINUS_EXPR, bump, size_one_node); |
86a91c0a RS |
8471 | ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, |
8472 | stmt_info, bump); | |
0d0e4a03 JJ |
8473 | new_stmt = gimple_build_assign |
8474 | (NULL_TREE, BIT_AND_EXPR, ptr, | |
272c6793 | 8475 | build_int_cst |
f702e7d4 | 8476 | (TREE_TYPE (ptr), -(HOST_WIDE_INT) align)); |
aed93b23 | 8477 | ptr = copy_ssa_name (ptr, new_stmt); |
272c6793 | 8478 | gimple_assign_set_lhs (new_stmt, ptr); |
86a91c0a | 8479 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8480 | data_ref |
8481 | = build2 (MEM_REF, vectype, ptr, | |
44fc7854 | 8482 | build_int_cst (ref_type, 0)); |
272c6793 | 8483 | break; |
267d3070 | 8484 | } |
272c6793 | 8485 | case dr_explicit_realign_optimized: |
f702e7d4 RS |
8486 | { |
8487 | if (TREE_CODE (dataref_ptr) == SSA_NAME) | |
8488 | new_temp = copy_ssa_name (dataref_ptr); | |
8489 | else | |
8490 | new_temp = make_ssa_name (TREE_TYPE (dataref_ptr)); | |
89fa689a | 8491 | unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info); |
f702e7d4 RS |
8492 | new_stmt = gimple_build_assign |
8493 | (new_temp, BIT_AND_EXPR, dataref_ptr, | |
8494 | build_int_cst (TREE_TYPE (dataref_ptr), | |
8495 | -(HOST_WIDE_INT) align)); | |
86a91c0a | 8496 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
f702e7d4 RS |
8497 | data_ref |
8498 | = build2 (MEM_REF, vectype, new_temp, | |
8499 | build_int_cst (ref_type, 0)); | |
8500 | break; | |
8501 | } | |
272c6793 RS |
8502 | default: |
8503 | gcc_unreachable (); | |
8504 | } | |
ebfd146a | 8505 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
c3a8f964 RS |
8506 | /* DATA_REF is null if we've already built the statement. */ |
8507 | if (data_ref) | |
19986382 | 8508 | { |
89fa689a | 8509 | vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr)); |
19986382 RB |
8510 | new_stmt = gimple_build_assign (vec_dest, data_ref); |
8511 | } | |
ebfd146a | 8512 | new_temp = make_ssa_name (vec_dest, new_stmt); |
c3a8f964 | 8513 | gimple_set_lhs (new_stmt, new_temp); |
e1bd7296 | 8514 | new_stmt_info |
86a91c0a | 8515 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
ebfd146a | 8516 | |
272c6793 RS |
8517 | /* 3. Handle explicit realignment if necessary/supported. |
8518 | Create in loop: | |
8519 | vec_dest = realign_load (msq, lsq, realignment_token) */ | |
8520 | if (alignment_support_scheme == dr_explicit_realign_optimized | |
8521 | || alignment_support_scheme == dr_explicit_realign) | |
ebfd146a | 8522 | { |
272c6793 RS |
8523 | lsq = gimple_assign_lhs (new_stmt); |
8524 | if (!realignment_token) | |
8525 | realignment_token = dataref_ptr; | |
8526 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
0d0e4a03 JJ |
8527 | new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR, |
8528 | msq, lsq, realignment_token); | |
272c6793 RS |
8529 | new_temp = make_ssa_name (vec_dest, new_stmt); |
8530 | gimple_assign_set_lhs (new_stmt, new_temp); | |
e1bd7296 | 8531 | new_stmt_info |
86a91c0a | 8532 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
272c6793 RS |
8533 | |
8534 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
8535 | { | |
8536 | gcc_assert (phi); | |
8537 | if (i == vec_num - 1 && j == ncopies - 1) | |
8538 | add_phi_arg (phi, lsq, | |
8539 | loop_latch_edge (containing_loop), | |
9e227d60 | 8540 | UNKNOWN_LOCATION); |
272c6793 RS |
8541 | msq = lsq; |
8542 | } | |
ebfd146a | 8543 | } |
ebfd146a | 8544 | |
62da9e14 | 8545 | if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) |
272c6793 | 8546 | { |
aec7ae7d JJ |
8547 | tree perm_mask = perm_mask_for_reverse (vectype); |
8548 | new_temp = permute_vec_elements (new_temp, new_temp, | |
86a91c0a | 8549 | perm_mask, stmt_info, gsi); |
e1bd7296 | 8550 | new_stmt_info = vinfo->lookup_def (new_temp); |
ebfd146a | 8551 | } |
267d3070 | 8552 | |
272c6793 | 8553 | /* Collect vector loads and later create their permutation in |
0d0293ac MM |
8554 | vect_transform_grouped_load (). */ |
8555 | if (grouped_load || slp_perm) | |
9771b263 | 8556 | dr_chain.quick_push (new_temp); |
267d3070 | 8557 | |
272c6793 RS |
8558 | /* Store vector loads in the corresponding SLP_NODE. */ |
8559 | if (slp && !slp_perm) | |
e1bd7296 | 8560 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
b267968e RB |
8561 | |
8562 | /* With SLP permutation we load the gaps as well; without it |
8563 | we need to skip the gaps after we manage to fully load |
2c53b149 | 8564 | all elements. group_gap_adj is DR_GROUP_SIZE here. */ |
b267968e | 8565 | group_elt += nunits; |
d9f21f6a RS |
8566 | if (maybe_ne (group_gap_adj, 0U) |
8567 | && !slp_perm | |
8568 | && known_eq (group_elt, group_size - group_gap_adj)) | |
b267968e | 8569 | { |
d9f21f6a RS |
8570 | poly_wide_int bump_val |
8571 | = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) | |
8572 | * group_gap_adj); | |
8e6cdc90 | 8573 | tree bump = wide_int_to_tree (sizetype, bump_val); |
b267968e | 8574 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8575 | stmt_info, bump); |
b267968e RB |
8576 | group_elt = 0; |
8577 | } | |
272c6793 | 8578 | } |
9b999e8c RB |
8579 | /* Bump the vector pointer to account for a gap or for excess |
8580 | elements loaded for a permuted SLP load. */ | |
d9f21f6a | 8581 | if (maybe_ne (group_gap_adj, 0U) && slp_perm) |
a64b9c26 | 8582 | { |
d9f21f6a RS |
8583 | poly_wide_int bump_val |
8584 | = (wi::to_wide (TYPE_SIZE_UNIT (elem_type)) | |
8585 | * group_gap_adj); | |
8e6cdc90 | 8586 | tree bump = wide_int_to_tree (sizetype, bump_val); |
a64b9c26 | 8587 | dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, |
86a91c0a | 8588 | stmt_info, bump); |
a64b9c26 | 8589 | } |
ebfd146a IR |
8590 | } |
8591 | ||
8592 | if (slp && !slp_perm) | |
8593 | continue; | |
8594 | ||
8595 | if (slp_perm) | |
8596 | { | |
29afecdf | 8597 | unsigned n_perms; |
01d8bf07 | 8598 | if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf, |
29afecdf RB |
8599 | slp_node_instance, false, |
8600 | &n_perms)) | |
ebfd146a | 8601 | { |
9771b263 | 8602 | dr_chain.release (); |
ebfd146a IR |
8603 | return false; |
8604 | } | |
8605 | } | |
8606 | else | |
8607 | { | |
0d0293ac | 8608 | if (grouped_load) |
ebfd146a | 8609 | { |
2de001ee | 8610 | if (memory_access_type != VMAT_LOAD_STORE_LANES) |
86a91c0a RS |
8611 | vect_transform_grouped_load (stmt_info, dr_chain, |
8612 | group_size, gsi); | |
ebfd146a | 8613 | *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); |
ebfd146a IR |
8614 | } |
8615 | else | |
8616 | { | |
8617 | if (j == 0) | |
e1bd7296 | 8618 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
ebfd146a | 8619 | else |
e1bd7296 RS |
8620 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
8621 | prev_stmt_info = new_stmt_info; | |
ebfd146a IR |
8622 | } |
8623 | } | |
9771b263 | 8624 | dr_chain.release (); |
ebfd146a IR |
8625 | } |
8626 | ||
ebfd146a IR |
8627 | return true; |
8628 | } | |
8629 | ||
8630 | /* Function vect_is_simple_cond. | |
b8698a0f | 8631 | |
ebfd146a IR |
8632 | Input: |
8633 | LOOP - the loop that is being vectorized. | |
8634 | COND - Condition that is checked for simple use. | |
8635 | ||
e9e1d143 RG |
8636 | Output: |
8637 | *COMP_VECTYPE - the vector type for the comparison. | |
4fc5ebf1 | 8638 | *DTS - the def types for the arguments of the comparison. |
e9e1d143 | 8639 | |
ebfd146a IR |
8640 | Returns whether a COND can be vectorized. Checks whether |
8641 | condition operands are supportable using vect_is_simple_use. */ |
8642 | ||
87aab9b2 | 8643 | static bool |
4fc5ebf1 | 8644 | vect_is_simple_cond (tree cond, vec_info *vinfo, |
8da4c8d8 RB |
8645 | tree *comp_vectype, enum vect_def_type *dts, |
8646 | tree vectype) | |
ebfd146a IR |
8647 | { |
8648 | tree lhs, rhs; | |
e9e1d143 | 8649 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
ebfd146a | 8650 | |
a414c77f IE |
8651 | /* Mask case. */ |
8652 | if (TREE_CODE (cond) == SSA_NAME | |
2568d8a1 | 8653 | && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond))) |
a414c77f | 8654 | { |
894dd753 | 8655 | if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype) |
a414c77f IE |
8656 | || !*comp_vectype |
8657 | || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype)) | |
8658 | return false; | |
8659 | return true; | |
8660 | } | |
8661 | ||
ebfd146a IR |
8662 | if (!COMPARISON_CLASS_P (cond)) |
8663 | return false; | |
8664 | ||
8665 | lhs = TREE_OPERAND (cond, 0); | |
8666 | rhs = TREE_OPERAND (cond, 1); | |
8667 | ||
8668 | if (TREE_CODE (lhs) == SSA_NAME) | |
8669 | { | |
894dd753 | 8670 | if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1)) |
ebfd146a IR |
8671 | return false; |
8672 | } | |
4fc5ebf1 JG |
8673 | else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST |
8674 | || TREE_CODE (lhs) == FIXED_CST) | |
8675 | dts[0] = vect_constant_def; | |
8676 | else | |
ebfd146a IR |
8677 | return false; |
8678 | ||
8679 | if (TREE_CODE (rhs) == SSA_NAME) | |
8680 | { | |
894dd753 | 8681 | if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2)) |
ebfd146a IR |
8682 | return false; |
8683 | } | |
4fc5ebf1 JG |
8684 | else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST |
8685 | || TREE_CODE (rhs) == FIXED_CST) | |
8686 | dts[1] = vect_constant_def; | |
8687 | else | |
ebfd146a IR |
8688 | return false; |
8689 | ||
28b33016 | 8690 | if (vectype1 && vectype2 |
928686b1 RS |
8691 | && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1), |
8692 | TYPE_VECTOR_SUBPARTS (vectype2))) | |
28b33016 IE |
8693 | return false; |
8694 | ||
e9e1d143 | 8695 | *comp_vectype = vectype1 ? vectype1 : vectype2; |
8da4c8d8 | 8696 | /* Invariant comparison. */ |
4515e413 | 8697 | if (! *comp_vectype && vectype) |
8da4c8d8 RB |
8698 | { |
8699 | tree scalar_type = TREE_TYPE (lhs); | |
8700 | /* If we can widen the comparison to match vectype do so. */ | |
8701 | if (INTEGRAL_TYPE_P (scalar_type) | |
8702 | && tree_int_cst_lt (TYPE_SIZE (scalar_type), | |
8703 | TYPE_SIZE (TREE_TYPE (vectype)))) | |
8704 | scalar_type = build_nonstandard_integer_type | |
8705 | (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))), | |
8706 | TYPE_UNSIGNED (scalar_type)); | |
8707 | *comp_vectype = get_vectype_for_scalar_type (scalar_type); | |
8708 | } | |
8709 | ||
ebfd146a IR |
8710 | return true; |
8711 | } | |
8712 | ||
8713 | /* vectorizable_condition. | |
8714 | ||
32e8e429 RS |
8715 | Check if STMT_INFO is a conditional modify expression that can be vectorized. |
8716 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
b8698a0f | 8717 | stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it |
4bbe8262 IR |
8718 | at GSI. |
8719 | ||
32e8e429 RS |
8720 | When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector |
8721 | variable to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, | |
8722 | and in else clause if it is 2). | |
ebfd146a | 8723 | |
32e8e429 | 8724 | Return true if STMT_INFO is vectorizable in this way. */ |
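/* An informal example (names are only illustrative): a scalar statement

     x = a < b ? c : d

   is, when the target provides a suitable vcond pattern, replaced by

     vect_x = VEC_COND_EXPR <vect_a < vect_b, vect_c, vect_d>;  */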
ebfd146a | 8725 | |
4bbe8262 | 8726 | bool |
32e8e429 | 8727 | vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 RS |
8728 | stmt_vec_info *vec_stmt, tree reduc_def, |
8729 | int reduc_index, slp_tree slp_node, | |
8730 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 8731 | { |
e4057a39 | 8732 | vec_info *vinfo = stmt_info->vinfo; |
ebfd146a IR |
8733 | tree scalar_dest = NULL_TREE; |
8734 | tree vec_dest = NULL_TREE; | |
01216d27 JJ |
8735 | tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE; |
8736 | tree then_clause, else_clause; | |
df11cc78 | 8737 | tree comp_vectype = NULL_TREE; |
ff802fa1 IR |
8738 | tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE; |
8739 | tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE; | |
5958f9e2 | 8740 | tree vec_compare; |
ebfd146a IR |
8741 | tree new_temp; |
8742 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
4fc5ebf1 JG |
8743 | enum vect_def_type dts[4] |
8744 | = {vect_unknown_def_type, vect_unknown_def_type, | |
8745 | vect_unknown_def_type, vect_unknown_def_type}; | |
8746 | int ndts = 4; | |
f7e531cf | 8747 | int ncopies; |
01216d27 | 8748 | enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
a855b1b1 | 8749 | stmt_vec_info prev_stmt_info = NULL; |
f7e531cf IR |
8750 | int i, j; |
8751 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
6e1aa848 DN |
8752 | vec<tree> vec_oprnds0 = vNULL; |
8753 | vec<tree> vec_oprnds1 = vNULL; | |
8754 | vec<tree> vec_oprnds2 = vNULL; | |
8755 | vec<tree> vec_oprnds3 = vNULL; | |
74946978 | 8756 | tree vec_cmp_type; |
a414c77f | 8757 | bool masked = false; |
b8698a0f | 8758 | |
f7e531cf IR |
8759 | if (reduc_index && STMT_SLP_TYPE (stmt_info)) |
8760 | return false; | |
8761 | ||
bb6c2b68 RS |
8762 | vect_reduction_type reduction_type |
8763 | = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info); | |
8764 | if (reduction_type == TREE_CODE_REDUCTION) | |
af29617a AH |
8765 | { |
8766 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) | |
8767 | return false; | |
ebfd146a | 8768 | |
af29617a AH |
8769 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
8770 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
8771 | && reduc_def)) | |
8772 | return false; | |
ebfd146a | 8773 | |
af29617a AH |
8774 | /* FORNOW: not yet supported. */ |
8775 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
8776 | { | |
8777 | if (dump_enabled_p ()) | |
8778 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
8779 | "value used after loop.\n"); | |
8780 | return false; | |
8781 | } | |
ebfd146a IR |
8782 | } |
8783 | ||
8784 | /* Is vectorizable conditional operation? */ | |
32e8e429 RS |
8785 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
8786 | if (!stmt) | |
ebfd146a IR |
8787 | return false; |
8788 | ||
8789 | code = gimple_assign_rhs_code (stmt); | |
8790 | ||
8791 | if (code != COND_EXPR) | |
8792 | return false; | |
8793 | ||
465c8c19 | 8794 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); |
2947d3b2 | 8795 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
465c8c19 | 8796 | |
fce57248 | 8797 | if (slp_node) |
465c8c19 JJ |
8798 | ncopies = 1; |
8799 | else | |
e8f142e2 | 8800 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
465c8c19 JJ |
8801 | |
8802 | gcc_assert (ncopies >= 1); | |
8803 | if (reduc_index && ncopies > 1) | |
8804 | return false; /* FORNOW */ | |
8805 | ||
4e71066d RG |
8806 | cond_expr = gimple_assign_rhs1 (stmt); |
8807 | then_clause = gimple_assign_rhs2 (stmt); | |
8808 | else_clause = gimple_assign_rhs3 (stmt); | |
ebfd146a | 8809 | |
4fc5ebf1 | 8810 | if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, |
4515e413 | 8811 | &comp_vectype, &dts[0], slp_node ? NULL : vectype) |
e9e1d143 | 8812 | || !comp_vectype) |
ebfd146a IR |
8813 | return false; |
8814 | ||
894dd753 | 8815 | if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1)) |
2947d3b2 | 8816 | return false; |
894dd753 | 8817 | if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2)) |
ebfd146a | 8818 | return false; |
2947d3b2 IE |
8819 | |
8820 | if (vectype1 && !useless_type_conversion_p (vectype, vectype1)) | |
8821 | return false; | |
8822 | ||
8823 | if (vectype2 && !useless_type_conversion_p (vectype, vectype2)) | |
ebfd146a IR |
8824 | return false; |
8825 | ||
28b33016 IE |
8826 | masked = !COMPARISON_CLASS_P (cond_expr); |
8827 | vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype); | |
8828 | ||
74946978 MP |
8829 | if (vec_cmp_type == NULL_TREE) |
8830 | return false; | |
784fb9b3 | 8831 | |
01216d27 JJ |
8832 | cond_code = TREE_CODE (cond_expr); |
8833 | if (!masked) | |
8834 | { | |
8835 | cond_expr0 = TREE_OPERAND (cond_expr, 0); | |
8836 | cond_expr1 = TREE_OPERAND (cond_expr, 1); | |
8837 | } | |
8838 | ||
8839 | if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype)) | |
8840 | { | |
8841 | /* Boolean values may have another representation in vectors | |
8842 | and therefore we prefer bit operations over comparison for | |
8843 | them (which also works for scalar masks). We store opcodes | |
8844 | to use in bitop1 and bitop2. Statement is vectorized as | |
8845 | BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2) | |
8846 | depending on bitop1 and bitop2 arity. */ | |
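/* Worked through for illustration: on boolean operands a GT_EXPR
   a > b is computed as a & ~b (bitop1 == BIT_NOT_EXPR applied to the
   second operand, then bitop2 == BIT_AND_EXPR), while EQ_EXPR is
   conceptually ~(a ^ b); the code below realizes the trailing
   BIT_NOT_EXPR by swapping the then/else clauses instead.  */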
8847 | switch (cond_code) | |
8848 | { | |
8849 | case GT_EXPR: | |
8850 | bitop1 = BIT_NOT_EXPR; | |
8851 | bitop2 = BIT_AND_EXPR; | |
8852 | break; | |
8853 | case GE_EXPR: | |
8854 | bitop1 = BIT_NOT_EXPR; | |
8855 | bitop2 = BIT_IOR_EXPR; | |
8856 | break; | |
8857 | case LT_EXPR: | |
8858 | bitop1 = BIT_NOT_EXPR; | |
8859 | bitop2 = BIT_AND_EXPR; | |
8860 | std::swap (cond_expr0, cond_expr1); | |
8861 | break; | |
8862 | case LE_EXPR: | |
8863 | bitop1 = BIT_NOT_EXPR; | |
8864 | bitop2 = BIT_IOR_EXPR; | |
8865 | std::swap (cond_expr0, cond_expr1); | |
8866 | break; | |
8867 | case NE_EXPR: | |
8868 | bitop1 = BIT_XOR_EXPR; | |
8869 | break; | |
8870 | case EQ_EXPR: | |
8871 | bitop1 = BIT_XOR_EXPR; | |
8872 | bitop2 = BIT_NOT_EXPR; | |
8873 | break; | |
8874 | default: | |
8875 | return false; | |
8876 | } | |
8877 | cond_code = SSA_NAME; | |
8878 | } | |
8879 | ||
b8698a0f | 8880 | if (!vec_stmt) |
ebfd146a | 8881 | { |
01216d27 JJ |
8882 | if (bitop1 != NOP_EXPR) |
8883 | { | |
8884 | machine_mode mode = TYPE_MODE (comp_vectype); | |
8885 | optab optab; | |
8886 | ||
8887 | optab = optab_for_tree_code (bitop1, comp_vectype, optab_default); | |
8888 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8889 | return false; | |
8890 | ||
8891 | if (bitop2 != NOP_EXPR) | |
8892 | { | |
8893 | optab = optab_for_tree_code (bitop2, comp_vectype, | |
8894 | optab_default); | |
8895 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
8896 | return false; | |
8897 | } | |
8898 | } | |
4fc5ebf1 JG |
8899 | if (expand_vec_cond_expr_p (vectype, comp_vectype, |
8900 | cond_code)) | |
8901 | { | |
68435eb2 RB |
8902 | STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type; |
8903 | vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node, | |
8904 | cost_vec); | |
4fc5ebf1 JG |
8905 | return true; |
8906 | } | |
8907 | return false; | |
ebfd146a IR |
8908 | } |
8909 | ||
f7e531cf IR |
8910 | /* Transform. */ |
8911 | ||
8912 | if (!slp_node) | |
8913 | { | |
9771b263 DN |
8914 | vec_oprnds0.create (1); |
8915 | vec_oprnds1.create (1); | |
8916 | vec_oprnds2.create (1); | |
8917 | vec_oprnds3.create (1); | |
f7e531cf | 8918 | } |
ebfd146a IR |
8919 | |
8920 | /* Handle def. */ | |
8921 | scalar_dest = gimple_assign_lhs (stmt); | |
bb6c2b68 RS |
8922 | if (reduction_type != EXTRACT_LAST_REDUCTION) |
8923 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
ebfd146a IR |
8924 | |
8925 | /* Handle cond expr. */ | |
a855b1b1 MM |
8926 | for (j = 0; j < ncopies; j++) |
8927 | { | |
e1bd7296 | 8928 | stmt_vec_info new_stmt_info = NULL; |
a855b1b1 MM |
8929 | if (j == 0) |
8930 | { | |
f7e531cf IR |
8931 | if (slp_node) |
8932 | { | |
00f96dc9 TS |
8933 | auto_vec<tree, 4> ops; |
8934 | auto_vec<vec<tree>, 4> vec_defs; | |
9771b263 | 8935 | |
a414c77f | 8936 | if (masked) |
01216d27 | 8937 | ops.safe_push (cond_expr); |
a414c77f IE |
8938 | else |
8939 | { | |
01216d27 JJ |
8940 | ops.safe_push (cond_expr0); |
8941 | ops.safe_push (cond_expr1); | |
a414c77f | 8942 | } |
9771b263 DN |
8943 | ops.safe_push (then_clause); |
8944 | ops.safe_push (else_clause); | |
306b0c92 | 8945 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
37b5ec8f JJ |
8946 | vec_oprnds3 = vec_defs.pop (); |
8947 | vec_oprnds2 = vec_defs.pop (); | |
a414c77f IE |
8948 | if (!masked) |
8949 | vec_oprnds1 = vec_defs.pop (); | |
37b5ec8f | 8950 | vec_oprnds0 = vec_defs.pop (); |
f7e531cf IR |
8951 | } |
8952 | else | |
8953 | { | |
a414c77f IE |
8954 | if (masked) |
8955 | { | |
8956 | vec_cond_lhs | |
86a91c0a | 8957 | = vect_get_vec_def_for_operand (cond_expr, stmt_info, |
a414c77f | 8958 | comp_vectype); |
894dd753 | 8959 | vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]); |
a414c77f IE |
8960 | } |
8961 | else | |
8962 | { | |
01216d27 JJ |
8963 | vec_cond_lhs |
8964 | = vect_get_vec_def_for_operand (cond_expr0, | |
86a91c0a | 8965 | stmt_info, comp_vectype); |
894dd753 | 8966 | vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]); |
01216d27 JJ |
8967 | |
8968 | vec_cond_rhs | |
8969 | = vect_get_vec_def_for_operand (cond_expr1, | |
86a91c0a | 8970 | stmt_info, comp_vectype); |
894dd753 | 8971 | vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]); |
a414c77f | 8972 | } |
f7e531cf IR |
8973 | if (reduc_index == 1) |
8974 | vec_then_clause = reduc_def; | |
8975 | else | |
8976 | { | |
8977 | vec_then_clause = vect_get_vec_def_for_operand (then_clause, | |
86a91c0a | 8978 | stmt_info); |
894dd753 | 8979 | vect_is_simple_use (then_clause, loop_vinfo, &dts[2]); |
f7e531cf IR |
8980 | } |
8981 | if (reduc_index == 2) | |
8982 | vec_else_clause = reduc_def; | |
8983 | else | |
8984 | { | |
8985 | vec_else_clause = vect_get_vec_def_for_operand (else_clause, | |
86a91c0a | 8986 | stmt_info); |
894dd753 | 8987 | vect_is_simple_use (else_clause, loop_vinfo, &dts[3]); |
f7e531cf | 8988 | } |
a855b1b1 MM |
8989 | } |
8990 | } | |
8991 | else | |
8992 | { | |
a414c77f | 8993 | vec_cond_lhs |
e4057a39 | 8994 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ()); |
a414c77f IE |
8995 | if (!masked) |
8996 | vec_cond_rhs | |
e4057a39 | 8997 | = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ()); |
a414c77f | 8998 | |
e4057a39 | 8999 | vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 9000 | vec_oprnds2.pop ()); |
e4057a39 | 9001 | vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo, |
9771b263 | 9002 | vec_oprnds3.pop ()); |
f7e531cf IR |
9003 | } |
9004 | ||
9005 | if (!slp_node) | |
9006 | { | |
9771b263 | 9007 | vec_oprnds0.quick_push (vec_cond_lhs); |
a414c77f IE |
9008 | if (!masked) |
9009 | vec_oprnds1.quick_push (vec_cond_rhs); | |
9771b263 DN |
9010 | vec_oprnds2.quick_push (vec_then_clause); |
9011 | vec_oprnds3.quick_push (vec_else_clause); | |
a855b1b1 MM |
9012 | } |
9013 | ||
9dc3f7de | 9014 | /* Arguments are ready. Create the new vector stmt. */ |
9771b263 | 9015 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs) |
f7e531cf | 9016 | { |
9771b263 DN |
9017 | vec_then_clause = vec_oprnds2[i]; |
9018 | vec_else_clause = vec_oprnds3[i]; | |
a855b1b1 | 9019 | |
a414c77f IE |
9020 | if (masked) |
9021 | vec_compare = vec_cond_lhs; | |
9022 | else | |
9023 | { | |
9024 | vec_cond_rhs = vec_oprnds1[i]; | |
01216d27 JJ |
9025 | if (bitop1 == NOP_EXPR) |
9026 | vec_compare = build2 (cond_code, vec_cmp_type, | |
9027 | vec_cond_lhs, vec_cond_rhs); | |
9028 | else | |
9029 | { | |
9030 | new_temp = make_ssa_name (vec_cmp_type); | |
e1bd7296 | 9031 | gassign *new_stmt; |
01216d27 JJ |
9032 | if (bitop1 == BIT_NOT_EXPR) |
9033 | new_stmt = gimple_build_assign (new_temp, bitop1, | |
9034 | vec_cond_rhs); | |
9035 | else | |
9036 | new_stmt | |
9037 | = gimple_build_assign (new_temp, bitop1, vec_cond_lhs, | |
9038 | vec_cond_rhs); | |
86a91c0a | 9039 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
01216d27 JJ |
9040 | if (bitop2 == NOP_EXPR) |
9041 | vec_compare = new_temp; | |
9042 | else if (bitop2 == BIT_NOT_EXPR) | |
9043 | { | |
9044 | /* Instead of doing ~x ? y : z do x ? z : y. */ | |
9045 | vec_compare = new_temp; | |
9046 | std::swap (vec_then_clause, vec_else_clause); | |
9047 | } | |
9048 | else | |
9049 | { | |
9050 | vec_compare = make_ssa_name (vec_cmp_type); | |
9051 | new_stmt | |
9052 | = gimple_build_assign (vec_compare, bitop2, | |
9053 | vec_cond_lhs, new_temp); | |
86a91c0a | 9054 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
01216d27 JJ |
9055 | } |
9056 | } | |
a414c77f | 9057 | } |
bb6c2b68 RS |
9058 | if (reduction_type == EXTRACT_LAST_REDUCTION) |
9059 | { | |
9060 | if (!is_gimple_val (vec_compare)) | |
9061 | { | |
9062 | tree vec_compare_name = make_ssa_name (vec_cmp_type); | |
e1bd7296 RS |
9063 | gassign *new_stmt = gimple_build_assign (vec_compare_name, |
9064 | vec_compare); | |
86a91c0a | 9065 | vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 RS |
9066 | vec_compare = vec_compare_name; |
9067 | } | |
9068 | gcc_assert (reduc_index == 2); | |
e1bd7296 | 9069 | gcall *new_stmt = gimple_build_call_internal |
bb6c2b68 RS |
9070 | (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare, |
9071 | vec_then_clause); | |
9072 | gimple_call_set_lhs (new_stmt, scalar_dest); | |
9073 | SSA_NAME_DEF_STMT (scalar_dest) = new_stmt; | |
86a91c0a RS |
9074 | if (stmt_info->stmt == gsi_stmt (*gsi)) |
9075 | new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt); | |
bb6c2b68 RS |
9076 | else |
9077 | { | |
9078 | /* In this case we're moving the definition to later in the | |
9079 | block. That doesn't matter because the only uses of the | |
9080 | lhs are in phi statements. */ | |
86a91c0a RS |
9081 | gimple_stmt_iterator old_gsi |
9082 | = gsi_for_stmt (stmt_info->stmt); | |
bb6c2b68 | 9083 | gsi_remove (&old_gsi, true); |
e1bd7296 | 9084 | new_stmt_info |
86a91c0a | 9085 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 RS |
9086 | } |
9087 | } | |
9088 | else | |
9089 | { | |
9090 | new_temp = make_ssa_name (vec_dest); | |
e1bd7296 RS |
9091 | gassign *new_stmt |
9092 | = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare, | |
9093 | vec_then_clause, vec_else_clause); | |
9094 | new_stmt_info | |
86a91c0a | 9095 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
bb6c2b68 | 9096 | } |
f7e531cf | 9097 | if (slp_node) |
e1bd7296 | 9098 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
f7e531cf IR |
9099 | } |
9100 | ||
9101 | if (slp_node) | |
9102 | continue; | |
9103 | ||
e1bd7296 RS |
9104 | if (j == 0) |
9105 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; | |
9106 | else | |
9107 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; | |
f7e531cf | 9108 | |
e1bd7296 | 9109 | prev_stmt_info = new_stmt_info; |
a855b1b1 | 9110 | } |
b8698a0f | 9111 | |
9771b263 DN |
9112 | vec_oprnds0.release (); |
9113 | vec_oprnds1.release (); | |
9114 | vec_oprnds2.release (); | |
9115 | vec_oprnds3.release (); | |
f7e531cf | 9116 | |
ebfd146a IR |
9117 | return true; |
9118 | } | |
9119 | ||
42fd8198 IE |
9120 | /* vectorizable_comparison. |
9121 | ||
32e8e429 RS |
9122 | Check if STMT_INFO is a comparison expression that can be vectorized. |
9123 | If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized | |
42fd8198 IE |
9124 | comparison, put it in VEC_STMT, and insert it at GSI. |
9125 | ||
32e8e429 | 9126 | Return true if STMT_INFO is vectorizable in this way. */ |
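/* An informal example (illustrative only): a scalar mask definition

     b = x < y

   is vectorized as

     vect_b = vect_x < vect_y;

   where VECT_B has a vector boolean (mask) type, ready to feed a
   masked load/store or a VEC_COND_EXPR.  */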
42fd8198 | 9127 | |
fce57248 | 9128 | static bool |
32e8e429 | 9129 | vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 9130 | stmt_vec_info *vec_stmt, tree reduc_def, |
68435eb2 | 9131 | slp_tree slp_node, stmt_vector_for_cost *cost_vec) |
42fd8198 | 9132 | { |
e4057a39 | 9133 | vec_info *vinfo = stmt_info->vinfo; |
42fd8198 | 9134 | tree lhs, rhs1, rhs2; |
42fd8198 IE |
9135 | tree vectype1 = NULL_TREE, vectype2 = NULL_TREE; |
9136 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
9137 | tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE; | |
9138 | tree new_temp; | |
9139 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
9140 | enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type}; | |
4fc5ebf1 | 9141 | int ndts = 2; |
928686b1 | 9142 | poly_uint64 nunits; |
42fd8198 | 9143 | int ncopies; |
49e76ff1 | 9144 | enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR; |
42fd8198 IE |
9145 | stmt_vec_info prev_stmt_info = NULL; |
9146 | int i, j; | |
9147 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); | |
9148 | vec<tree> vec_oprnds0 = vNULL; | |
9149 | vec<tree> vec_oprnds1 = vNULL; | |
42fd8198 IE |
9150 | tree mask_type; |
9151 | tree mask; | |
9152 | ||
c245362b IE |
9153 | if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo) |
9154 | return false; | |
9155 | ||
30480bcd | 9156 | if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype)) |
42fd8198 IE |
9157 | return false; |
9158 | ||
9159 | mask_type = vectype; | |
9160 | nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
9161 | ||
fce57248 | 9162 | if (slp_node) |
42fd8198 IE |
9163 | ncopies = 1; |
9164 | else | |
e8f142e2 | 9165 | ncopies = vect_get_num_copies (loop_vinfo, vectype); |
42fd8198 IE |
9166 | |
9167 | gcc_assert (ncopies >= 1); | |
42fd8198 IE |
9168 | if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def |
9169 | && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle | |
9170 | && reduc_def)) | |
9171 | return false; | |
9172 | ||
9173 | if (STMT_VINFO_LIVE_P (stmt_info)) | |
9174 | { | |
9175 | if (dump_enabled_p ()) | |
9176 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
9177 | "value used after loop.\n"); | |
9178 | return false; | |
9179 | } | |
9180 | ||
32e8e429 RS |
9181 | gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt); |
9182 | if (!stmt) | |
42fd8198 IE |
9183 | return false; |
9184 | ||
9185 | code = gimple_assign_rhs_code (stmt); | |
9186 | ||
9187 | if (TREE_CODE_CLASS (code) != tcc_comparison) | |
9188 | return false; | |
9189 | ||
9190 | rhs1 = gimple_assign_rhs1 (stmt); | |
9191 | rhs2 = gimple_assign_rhs2 (stmt); | |
9192 | ||
894dd753 | 9193 | if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1)) |
42fd8198 IE |
9194 | return false; |
9195 | ||
894dd753 | 9196 | if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2)) |
42fd8198 IE |
9197 | return false; |
9198 | ||
9199 | if (vectype1 && vectype2 | |
928686b1 RS |
9200 | && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1), |
9201 | TYPE_VECTOR_SUBPARTS (vectype2))) | |
42fd8198 IE |
9202 | return false; |
9203 | ||
9204 | vectype = vectype1 ? vectype1 : vectype2; | |
9205 | ||
9206 | /* Invariant comparison. */ | |
9207 | if (!vectype) | |
9208 | { | |
69a9a66f | 9209 | vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1)); |
928686b1 | 9210 | if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits)) |
42fd8198 IE |
9211 | return false; |
9212 | } | |
928686b1 | 9213 | else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype))) |
42fd8198 IE |
9214 | return false; |
9215 | ||
49e76ff1 IE |
9216 | /* Can't compare mask and non-mask types. */ |
9217 | if (vectype1 && vectype2 | |
9218 | && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2))) | |
9219 | return false; | |
9220 | ||
9221 | /* Boolean values may have another representation in vectors | |
9222 | and therefore we prefer bit operations over comparison for | |
9223 | them (which also works for scalar masks). We store opcodes | |
9224 | to use in bitop1 and bitop2. The statement is vectorized as |
9225 | BITOP2 (rhs1 BITOP1 rhs2) or | |
9226 | rhs1 BITOP2 (BITOP1 rhs2) | |
9227 | depending on bitop1 and bitop2 arity. */ | |
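/* A purely illustrative sketch (operand names are invented for the
   example, not taken from any testcase): with boolean operands a
   GT_EXPR "r = a > b" is emitted by the code below as
       tmp = ~b;  r = a & tmp;    (bitop1 = BIT_NOT_EXPR, bitop2 = BIT_AND_EXPR)
   while an EQ_EXPR "r = a == b" becomes
       tmp = a ^ b;  r = ~tmp;    (bitop1 = BIT_XOR_EXPR, bitop2 = BIT_NOT_EXPR)
   both of which agree with the comparison on 0/1 mask elements.  */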
9228 | if (VECTOR_BOOLEAN_TYPE_P (vectype)) | |
9229 | { | |
9230 | if (code == GT_EXPR) | |
9231 | { | |
9232 | bitop1 = BIT_NOT_EXPR; | |
9233 | bitop2 = BIT_AND_EXPR; | |
9234 | } | |
9235 | else if (code == GE_EXPR) | |
9236 | { | |
9237 | bitop1 = BIT_NOT_EXPR; | |
9238 | bitop2 = BIT_IOR_EXPR; | |
9239 | } | |
9240 | else if (code == LT_EXPR) | |
9241 | { | |
9242 | bitop1 = BIT_NOT_EXPR; | |
9243 | bitop2 = BIT_AND_EXPR; | |
9244 | std::swap (rhs1, rhs2); | |
264d951a | 9245 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
9246 | } |
9247 | else if (code == LE_EXPR) | |
9248 | { | |
9249 | bitop1 = BIT_NOT_EXPR; | |
9250 | bitop2 = BIT_IOR_EXPR; | |
9251 | std::swap (rhs1, rhs2); | |
264d951a | 9252 | std::swap (dts[0], dts[1]); |
49e76ff1 IE |
9253 | } |
9254 | else | |
9255 | { | |
9256 | bitop1 = BIT_XOR_EXPR; | |
9257 | if (code == EQ_EXPR) | |
9258 | bitop2 = BIT_NOT_EXPR; | |
9259 | } | |
9260 | } | |
9261 | ||
42fd8198 IE |
9262 | if (!vec_stmt) |
9263 | { | |
49e76ff1 | 9264 | if (bitop1 == NOP_EXPR) |
68435eb2 RB |
9265 | { |
9266 | if (!expand_vec_cmp_expr_p (vectype, mask_type, code)) | |
9267 | return false; | |
9268 | } | |
49e76ff1 IE |
9269 | else |
9270 | { | |
9271 | machine_mode mode = TYPE_MODE (vectype); | |
9272 | optab optab; | |
9273 | ||
9274 | optab = optab_for_tree_code (bitop1, vectype, optab_default); | |
9275 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
9276 | return false; | |
9277 | ||
9278 | if (bitop2 != NOP_EXPR) | |
9279 | { | |
9280 | optab = optab_for_tree_code (bitop2, vectype, optab_default); | |
9281 | if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing) | |
9282 | return false; | |
9283 | } | |
49e76ff1 | 9284 | } |
68435eb2 RB |
9285 | |
9286 | STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type; | |
9287 | vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)), | |
9288 | dts, ndts, slp_node, cost_vec); | |
9289 | return true; | |
42fd8198 IE |
9290 | } |
9291 | ||
9292 | /* Transform. */ | |
9293 | if (!slp_node) | |
9294 | { | |
9295 | vec_oprnds0.create (1); | |
9296 | vec_oprnds1.create (1); | |
9297 | } | |
9298 | ||
9299 | /* Handle def. */ | |
9300 | lhs = gimple_assign_lhs (stmt); | |
9301 | mask = vect_create_destination_var (lhs, mask_type); | |
9302 | ||
9303 | /* Handle cmp expr. */ | |
9304 | for (j = 0; j < ncopies; j++) | |
9305 | { | |
e1bd7296 | 9306 | stmt_vec_info new_stmt_info = NULL; |
42fd8198 IE |
9307 | if (j == 0) |
9308 | { | |
9309 | if (slp_node) | |
9310 | { | |
9311 | auto_vec<tree, 2> ops; | |
9312 | auto_vec<vec<tree>, 2> vec_defs; | |
9313 | ||
9314 | ops.safe_push (rhs1); | |
9315 | ops.safe_push (rhs2); | |
306b0c92 | 9316 | vect_get_slp_defs (ops, slp_node, &vec_defs); |
42fd8198 IE |
9317 | vec_oprnds1 = vec_defs.pop (); |
9318 | vec_oprnds0 = vec_defs.pop (); | |
9319 | } | |
9320 | else | |
9321 | { | |
86a91c0a RS |
9322 | vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info, |
9323 | vectype); | |
9324 | vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info, | |
9325 | vectype); | |
42fd8198 IE |
9326 | } |
9327 | } | |
9328 | else | |
9329 | { | |
e4057a39 | 9330 | vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo, |
42fd8198 | 9331 | vec_oprnds0.pop ()); |
e4057a39 | 9332 | vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo, |
42fd8198 IE |
9333 | vec_oprnds1.pop ()); |
9334 | } | |
9335 | ||
9336 | if (!slp_node) | |
9337 | { | |
9338 | vec_oprnds0.quick_push (vec_rhs1); | |
9339 | vec_oprnds1.quick_push (vec_rhs2); | |
9340 | } | |
9341 | ||
9342 | /* Arguments are ready. Create the new vector stmt. */ | |
9343 | FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1) | |
9344 | { | |
9345 | vec_rhs2 = vec_oprnds1[i]; | |
9346 | ||
9347 | new_temp = make_ssa_name (mask); | |
49e76ff1 IE |
9348 | if (bitop1 == NOP_EXPR) |
9349 | { | |
e1bd7296 RS |
9350 | gassign *new_stmt = gimple_build_assign (new_temp, code, |
9351 | vec_rhs1, vec_rhs2); | |
9352 | new_stmt_info | |
86a91c0a | 9353 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
49e76ff1 IE |
9354 | } |
9355 | else | |
9356 | { | |
e1bd7296 | 9357 | gassign *new_stmt; |
49e76ff1 IE |
9358 | if (bitop1 == BIT_NOT_EXPR) |
9359 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2); | |
9360 | else | |
9361 | new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1, | |
9362 | vec_rhs2); | |
e1bd7296 | 9363 | new_stmt_info |
86a91c0a | 9364 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
49e76ff1 IE |
9365 | if (bitop2 != NOP_EXPR) |
9366 | { | |
9367 | tree res = make_ssa_name (mask); | |
9368 | if (bitop2 == BIT_NOT_EXPR) | |
9369 | new_stmt = gimple_build_assign (res, bitop2, new_temp); | |
9370 | else | |
9371 | new_stmt = gimple_build_assign (res, bitop2, vec_rhs1, | |
9372 | new_temp); | |
e1bd7296 | 9373 | new_stmt_info |
86a91c0a | 9374 | = vect_finish_stmt_generation (stmt_info, new_stmt, gsi); |
49e76ff1 IE |
9375 | } |
9376 | } | |
42fd8198 | 9377 | if (slp_node) |
e1bd7296 | 9378 | SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info); |
42fd8198 IE |
9379 | } |
9380 | ||
9381 | if (slp_node) | |
9382 | continue; | |
9383 | ||
9384 | if (j == 0) | |
e1bd7296 | 9385 | STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info; |
42fd8198 | 9386 | else |
e1bd7296 | 9387 | STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info; |
42fd8198 | 9388 | |
e1bd7296 | 9389 | prev_stmt_info = new_stmt_info; |
42fd8198 IE |
9390 | } |
9391 | ||
9392 | vec_oprnds0.release (); | |
9393 | vec_oprnds1.release (); | |
9394 | ||
9395 | return true; | |
9396 | } | |
ebfd146a | 9397 | |
68a0f2ff RS |
9398 | /* If SLP_NODE is nonnull, return true if vectorizable_live_operation |
9399 | can handle all live statements in the node. Otherwise return true | |
82570274 | 9400 | if STMT_INFO is not live or if vectorizable_live_operation can handle it. |
68a0f2ff RS |
9401 | GSI and VEC_STMT are as for vectorizable_live_operation. */ |
9402 | ||
9403 | static bool | |
82570274 | 9404 | can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
1eede195 | 9405 | slp_tree slp_node, stmt_vec_info *vec_stmt, |
68435eb2 | 9406 | stmt_vector_for_cost *cost_vec) |
68a0f2ff RS |
9407 | { |
9408 | if (slp_node) | |
9409 | { | |
b9787581 | 9410 | stmt_vec_info slp_stmt_info; |
68a0f2ff | 9411 | unsigned int i; |
b9787581 | 9412 | FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info) |
68a0f2ff | 9413 | { |
68a0f2ff | 9414 | if (STMT_VINFO_LIVE_P (slp_stmt_info) |
b9787581 | 9415 | && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i, |
68435eb2 | 9416 | vec_stmt, cost_vec)) |
68a0f2ff RS |
9417 | return false; |
9418 | } | |
9419 | } | |
82570274 RS |
9420 | else if (STMT_VINFO_LIVE_P (stmt_info) |
9421 | && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1, | |
9422 | vec_stmt, cost_vec)) | |
68a0f2ff RS |
9423 | return false; |
9424 | ||
9425 | return true; | |
9426 | } | |
9427 | ||
8644a673 | 9428 | /* Make sure the statement is vectorizable. */ |
ebfd146a IR |
9429 | |
9430 | bool | |
32e8e429 RS |
9431 | vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize, |
9432 | slp_tree node, slp_instance node_instance, | |
9433 | stmt_vector_for_cost *cost_vec) | |
ebfd146a | 9434 | { |
6585ff8f | 9435 | vec_info *vinfo = stmt_info->vinfo; |
a70d6342 | 9436 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
b8698a0f | 9437 | enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info); |
ebfd146a | 9438 | bool ok; |
363477c0 | 9439 | gimple_seq pattern_def_seq; |
ebfd146a | 9440 | |
73fbfcad | 9441 | if (dump_enabled_p ()) |
ebfd146a | 9442 | { |
78c60e3d | 9443 | dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); |
86a91c0a | 9444 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); |
8644a673 | 9445 | } |
ebfd146a | 9446 | |
86a91c0a | 9447 | if (gimple_has_volatile_ops (stmt_info->stmt)) |
b8698a0f | 9448 | { |
73fbfcad | 9449 | if (dump_enabled_p ()) |
78c60e3d | 9450 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9451 | "not vectorized: stmt has volatile operands\n"); |
1825a1f3 IR |
9452 | |
9453 | return false; | |
9454 | } | |
b8698a0f | 9455 | |
d54a098e RS |
9456 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
9457 | && node == NULL | |
9458 | && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info))) | |
9459 | { | |
9460 | gimple_stmt_iterator si; | |
9461 | ||
9462 | for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si)) | |
9463 | { | |
6585ff8f RS |
9464 | stmt_vec_info pattern_def_stmt_info |
9465 | = vinfo->lookup_stmt (gsi_stmt (si)); | |
9466 | if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) | |
9467 | || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) | |
d54a098e RS |
9468 | { |
9469 | /* Analyze def stmt of STMT if it's a pattern stmt. */ | |
9470 | if (dump_enabled_p ()) | |
9471 | { | |
9472 | dump_printf_loc (MSG_NOTE, vect_location, | |
9473 | "==> examining pattern def statement: "); | |
86a91c0a RS |
9474 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, |
9475 | pattern_def_stmt_info->stmt, 0); | |
d54a098e RS |
9476 | } |
9477 | ||
86a91c0a | 9478 | if (!vect_analyze_stmt (pattern_def_stmt_info, |
d54a098e RS |
9479 | need_to_vectorize, node, node_instance, |
9480 | cost_vec)) | |
9481 | return false; | |
9482 | } | |
9483 | } | |
9484 | } | |
9485 | ||
b8698a0f | 9486 | /* Skip stmts that do not need to be vectorized. In loops this is expected |
8644a673 IR |
9487 | to include: |
9488 | - the COND_EXPR which is the loop exit condition | |
9489 | - any LABEL_EXPRs in the loop | |
b8698a0f | 9490 | - computations that are used only for array indexing or loop control. |
8644a673 | 9491 | In basic blocks we only analyze statements that are a part of some SLP |
83197f37 | 9492 | instance, therefore, all the statements are relevant. |
ebfd146a | 9493 | |
d092494c | 9494 | A pattern statement needs to be analyzed instead of the original statement |
83197f37 | 9495 | if the original statement is not relevant. Otherwise, we analyze both |
079c527f JJ |
9496 | statements. In basic blocks we are called from some SLP instance |
9497 | traversal; we don't analyze pattern stmts instead, because the pattern |
9498 | stmts will already be part of an SLP instance. */ |
83197f37 | 9499 | |
10681ce8 | 9500 | stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info); |
b8698a0f | 9501 | if (!STMT_VINFO_RELEVANT_P (stmt_info) |
8644a673 | 9502 | && !STMT_VINFO_LIVE_P (stmt_info)) |
ebfd146a | 9503 | { |
9d5e7640 | 9504 | if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
10681ce8 RS |
9505 | && pattern_stmt_info |
9506 | && (STMT_VINFO_RELEVANT_P (pattern_stmt_info) | |
9507 | || STMT_VINFO_LIVE_P (pattern_stmt_info))) | |
9d5e7640 | 9508 | { |
83197f37 | 9509 | /* Analyze PATTERN_STMT instead of the original stmt. */ |
10681ce8 | 9510 | stmt_info = pattern_stmt_info; |
73fbfcad | 9511 | if (dump_enabled_p ()) |
9d5e7640 | 9512 | { |
78c60e3d SS |
9513 | dump_printf_loc (MSG_NOTE, vect_location, |
9514 | "==> examining pattern statement: "); | |
86a91c0a | 9515 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0); |
9d5e7640 IR |
9516 | } |
9517 | } | |
9518 | else | |
9519 | { | |
73fbfcad | 9520 | if (dump_enabled_p ()) |
e645e942 | 9521 | dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n"); |
ebfd146a | 9522 | |
9d5e7640 IR |
9523 | return true; |
9524 | } | |
8644a673 | 9525 | } |
83197f37 | 9526 | else if (STMT_VINFO_IN_PATTERN_P (stmt_info) |
079c527f | 9527 | && node == NULL |
10681ce8 RS |
9528 | && pattern_stmt_info |
9529 | && (STMT_VINFO_RELEVANT_P (pattern_stmt_info) | |
9530 | || STMT_VINFO_LIVE_P (pattern_stmt_info))) | |
83197f37 IR |
9531 | { |
9532 | /* Analyze PATTERN_STMT too. */ | |
73fbfcad | 9533 | if (dump_enabled_p ()) |
83197f37 | 9534 | { |
78c60e3d SS |
9535 | dump_printf_loc (MSG_NOTE, vect_location, |
9536 | "==> examining pattern statement: "); | |
86a91c0a | 9537 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0); |
83197f37 IR |
9538 | } |
9539 | ||
10681ce8 | 9540 | if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node, |
68435eb2 | 9541 | node_instance, cost_vec)) |
83197f37 IR |
9542 | return false; |
9543 | } | |
ebfd146a | 9544 | |
8644a673 IR |
9545 | switch (STMT_VINFO_DEF_TYPE (stmt_info)) |
9546 | { | |
9547 | case vect_internal_def: | |
9548 | break; | |
ebfd146a | 9549 | |
8644a673 | 9550 | case vect_reduction_def: |
7c5222ff | 9551 | case vect_nested_cycle: |
14a61437 RB |
9552 | gcc_assert (!bb_vinfo |
9553 | && (relevance == vect_used_in_outer | |
9554 | || relevance == vect_used_in_outer_by_reduction | |
9555 | || relevance == vect_used_by_reduction | |
b28ead45 AH |
9556 | || relevance == vect_unused_in_scope |
9557 | || relevance == vect_used_only_live)); | |
8644a673 IR |
9558 | break; |
9559 | ||
9560 | case vect_induction_def: | |
e7baeb39 RB |
9561 | gcc_assert (!bb_vinfo); |
9562 | break; | |
9563 | ||
8644a673 IR |
9564 | case vect_constant_def: |
9565 | case vect_external_def: | |
9566 | case vect_unknown_def_type: | |
9567 | default: | |
9568 | gcc_unreachable (); | |
9569 | } | |
ebfd146a | 9570 | |
8644a673 | 9571 | if (STMT_VINFO_RELEVANT_P (stmt_info)) |
ebfd146a | 9572 | { |
86a91c0a RS |
9573 | tree type = gimple_expr_type (stmt_info->stmt); |
9574 | gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type))); | |
9575 | gcall *call = dyn_cast <gcall *> (stmt_info->stmt); | |
0136f8f0 | 9576 | gcc_assert (STMT_VINFO_VECTYPE (stmt_info) |
beb456c3 | 9577 | || (call && gimple_call_lhs (call) == NULL_TREE)); |
8644a673 | 9578 | *need_to_vectorize = true; |
ebfd146a IR |
9579 | } |
9580 | ||
b1af7da6 RB |
9581 | if (PURE_SLP_STMT (stmt_info) && !node) |
9582 | { | |
9583 | dump_printf_loc (MSG_NOTE, vect_location, | |
9584 | "handled only by SLP analysis\n"); | |
9585 | return true; | |
9586 | } | |
9587 | ||
9588 | ok = true; | |
9589 | if (!bb_vinfo | |
9590 | && (STMT_VINFO_RELEVANT_P (stmt_info) | |
9591 | || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)) | |
86a91c0a RS |
9592 | ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec) |
9593 | || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec) | |
9594 | || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec) | |
9595 | || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec) | |
9596 | || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec) | |
9597 | || vectorizable_load (stmt_info, NULL, NULL, node, node_instance, | |
9598 | cost_vec) | |
9599 | || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec) | |
9600 | || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec) | |
9601 | || vectorizable_reduction (stmt_info, NULL, NULL, node, | |
9602 | node_instance, cost_vec) | |
9603 | || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec) | |
9604 | || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node, | |
68435eb2 | 9605 | cost_vec) |
86a91c0a RS |
9606 | || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node, |
9607 | cost_vec)); | |
b1af7da6 RB |
9608 | else |
9609 | { | |
9610 | if (bb_vinfo) | |
86a91c0a RS |
9611 | ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, |
9612 | cost_vec) | |
9613 | || vectorizable_conversion (stmt_info, NULL, NULL, node, | |
9614 | cost_vec) | |
9615 | || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec) | |
9616 | || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec) | |
9617 | || vectorizable_assignment (stmt_info, NULL, NULL, node, | |
9618 | cost_vec) | |
9619 | || vectorizable_load (stmt_info, NULL, NULL, node, node_instance, | |
68435eb2 | 9620 | cost_vec) |
86a91c0a RS |
9621 | || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec) |
9622 | || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec) | |
9623 | || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node, | |
68435eb2 | 9624 | cost_vec) |
86a91c0a | 9625 | || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node, |
68435eb2 | 9626 | cost_vec)); |
b1af7da6 | 9627 | } |
8644a673 IR |
9628 | |
9629 | if (!ok) | |
ebfd146a | 9630 | { |
73fbfcad | 9631 | if (dump_enabled_p ()) |
8644a673 | 9632 | { |
78c60e3d SS |
9633 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
9634 | "not vectorized: relevant stmt not "); | |
9635 | dump_printf (MSG_MISSED_OPTIMIZATION, "supported: "); | |
86a91c0a RS |
9636 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
9637 | stmt_info->stmt, 0); | |
8644a673 | 9638 | } |
b8698a0f | 9639 | |
ebfd146a IR |
9640 | return false; |
9641 | } | |
9642 | ||
8644a673 IR |
9643 | /* Stmts that are (also) "live" (i.e. - that are used out of the loop) |
9644 | need extra handling, except for vectorizable reductions. */ | |
68435eb2 RB |
9645 | if (!bb_vinfo |
9646 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type | |
86a91c0a | 9647 | && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec)) |
ebfd146a | 9648 | { |
73fbfcad | 9649 | if (dump_enabled_p ()) |
8644a673 | 9650 | { |
78c60e3d | 9651 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
68a0f2ff | 9652 | "not vectorized: live stmt not supported: "); |
86a91c0a RS |
9653 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
9654 | stmt_info->stmt, 0); | |
8644a673 | 9655 | } |
b8698a0f | 9656 | |
8644a673 | 9657 | return false; |
ebfd146a IR |
9658 | } |
9659 | ||
ebfd146a IR |
9660 | return true; |
9661 | } | |
9662 | ||
9663 | ||
9664 | /* Function vect_transform_stmt. | |
9665 | ||
32e8e429 | 9666 | Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. */ |
ebfd146a IR |
9667 | |
9668 | bool | |
32e8e429 | 9669 | vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi, |
b0b45e58 | 9670 | slp_tree slp_node, slp_instance slp_node_instance) |
ebfd146a | 9671 | { |
6585ff8f | 9672 | vec_info *vinfo = stmt_info->vinfo; |
ebfd146a | 9673 | bool is_store = false; |
1eede195 | 9674 | stmt_vec_info vec_stmt = NULL; |
ebfd146a | 9675 | bool done; |
ebfd146a | 9676 | |
fce57248 | 9677 | gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info)); |
1eede195 | 9678 | stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info); |
225ce44b | 9679 | |
e57d9a82 RB |
9680 | bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info) |
9681 | && nested_in_vect_loop_p | |
9682 | (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)), | |
86a91c0a | 9683 | stmt_info)); |
e57d9a82 | 9684 | |
32e8e429 | 9685 | gimple *stmt = stmt_info->stmt; |
ebfd146a IR |
9686 | switch (STMT_VINFO_TYPE (stmt_info)) |
9687 | { | |
9688 | case type_demotion_vec_info_type: | |
ebfd146a | 9689 | case type_promotion_vec_info_type: |
ebfd146a | 9690 | case type_conversion_vec_info_type: |
86a91c0a RS |
9691 | done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node, |
9692 | NULL); | |
ebfd146a IR |
9693 | gcc_assert (done); |
9694 | break; | |
9695 | ||
9696 | case induc_vec_info_type: | |
86a91c0a RS |
9697 | done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node, |
9698 | NULL); | |
ebfd146a IR |
9699 | gcc_assert (done); |
9700 | break; | |
9701 | ||
9dc3f7de | 9702 | case shift_vec_info_type: |
86a91c0a | 9703 | done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL); |
9dc3f7de IR |
9704 | gcc_assert (done); |
9705 | break; | |
9706 | ||
ebfd146a | 9707 | case op_vec_info_type: |
86a91c0a RS |
9708 | done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node, |
9709 | NULL); | |
ebfd146a IR |
9710 | gcc_assert (done); |
9711 | break; | |
9712 | ||
9713 | case assignment_vec_info_type: | |
86a91c0a RS |
9714 | done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node, |
9715 | NULL); | |
ebfd146a IR |
9716 | gcc_assert (done); |
9717 | break; | |
9718 | ||
9719 | case load_vec_info_type: | |
86a91c0a | 9720 | done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node, |
68435eb2 | 9721 | slp_node_instance, NULL); |
ebfd146a IR |
9722 | gcc_assert (done); |
9723 | break; | |
9724 | ||
9725 | case store_vec_info_type: | |
86a91c0a | 9726 | done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL); |
ebfd146a | 9727 | gcc_assert (done); |
0d0293ac | 9728 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node) |
ebfd146a IR |
9729 | { |
9730 | /* In case of interleaving, the whole chain is vectorized when the | |
ff802fa1 | 9731 | last store in the chain is reached. Store stmts before the last |
ebfd146a IR |
9732 | one are skipped, and their vec_stmt_info shouldn't be freed |
9733 | meanwhile. */ | |
bffb8014 | 9734 | stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info); |
2c53b149 | 9735 | if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info)) |
ebfd146a | 9736 | is_store = true; |
f307441a | 9737 | } |
ebfd146a IR |
9738 | else |
9739 | is_store = true; | |
9740 | break; | |
9741 | ||
9742 | case condition_vec_info_type: | |
86a91c0a RS |
9743 | done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0, |
9744 | slp_node, NULL); | |
ebfd146a IR |
9745 | gcc_assert (done); |
9746 | break; | |
9747 | ||
42fd8198 | 9748 | case comparison_vec_info_type: |
86a91c0a RS |
9749 | done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL, |
9750 | slp_node, NULL); | |
42fd8198 IE |
9751 | gcc_assert (done); |
9752 | break; | |
9753 | ||
ebfd146a | 9754 | case call_vec_info_type: |
86a91c0a | 9755 | done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL); |
039d9ea1 | 9756 | stmt = gsi_stmt (*gsi); |
ebfd146a IR |
9757 | break; |
9758 | ||
0136f8f0 | 9759 | case call_simd_clone_vec_info_type: |
86a91c0a RS |
9760 | done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt, |
9761 | slp_node, NULL); | |
0136f8f0 AH |
9762 | stmt = gsi_stmt (*gsi); |
9763 | break; | |
9764 | ||
ebfd146a | 9765 | case reduc_vec_info_type: |
86a91c0a | 9766 | done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node, |
68435eb2 | 9767 | slp_node_instance, NULL); |
ebfd146a IR |
9768 | gcc_assert (done); |
9769 | break; | |
9770 | ||
9771 | default: | |
9772 | if (!STMT_VINFO_LIVE_P (stmt_info)) | |
9773 | { | |
73fbfcad | 9774 | if (dump_enabled_p ()) |
78c60e3d | 9775 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 9776 | "stmt not supported.\n"); |
ebfd146a IR |
9777 | gcc_unreachable (); |
9778 | } | |
9779 | } | |
9780 | ||
225ce44b RB |
9781 | /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT. |
9782 | This would break hybrid SLP vectorization. */ | |
9783 | if (slp_node) | |
d90f8440 | 9784 | gcc_assert (!vec_stmt |
1eede195 | 9785 | && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info); |
225ce44b | 9786 | |
ebfd146a IR |
9787 | /* Handle inner-loop stmts whose DEF is used in the loop-nest that |
9788 | is being vectorized, but outside the immediately enclosing loop. */ | |
9789 | if (vec_stmt | |
e57d9a82 | 9790 | && nested_p |
ebfd146a IR |
9791 | && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type |
9792 | && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer | |
b8698a0f | 9793 | || STMT_VINFO_RELEVANT (stmt_info) == |
a70d6342 | 9794 | vect_used_in_outer_by_reduction)) |
ebfd146a | 9795 | { |
a70d6342 IR |
9796 | struct loop *innerloop = LOOP_VINFO_LOOP ( |
9797 | STMT_VINFO_LOOP_VINFO (stmt_info))->inner; | |
ebfd146a IR |
9798 | imm_use_iterator imm_iter; |
9799 | use_operand_p use_p; | |
9800 | tree scalar_dest; | |
ebfd146a | 9801 | |
73fbfcad | 9802 | if (dump_enabled_p ()) |
78c60e3d | 9803 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 9804 | "Record the vdef for outer-loop vectorization.\n"); |
ebfd146a IR |
9805 | |
9806 | /* Find the relevant loop-exit phi-node, and record the vec_stmt there |
9807 | (to be used when vectorizing outer-loop stmts that use the DEF of | |
9808 | STMT). */ | |
9809 | if (gimple_code (stmt) == GIMPLE_PHI) | |
9810 | scalar_dest = PHI_RESULT (stmt); | |
9811 | else | |
4beb6642 | 9812 | scalar_dest = gimple_get_lhs (stmt); |
ebfd146a IR |
9813 | |
9814 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) | |
6585ff8f RS |
9815 | if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p)))) |
9816 | { | |
9817 | stmt_vec_info exit_phi_info | |
9818 | = vinfo->lookup_stmt (USE_STMT (use_p)); | |
9819 | STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt; | |
9820 | } | |
ebfd146a IR |
9821 | } |
9822 | ||
9823 | /* Handle stmts whose DEF is used outside the loop-nest that is | |
9824 | being vectorized. */ | |
68a0f2ff | 9825 | if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type) |
ebfd146a | 9826 | { |
86a91c0a RS |
9827 | done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt, |
9828 | NULL); | |
ebfd146a IR |
9829 | gcc_assert (done); |
9830 | } | |
9831 | ||
9832 | if (vec_stmt) | |
83197f37 | 9833 | STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt; |
ebfd146a | 9834 | |
b8698a0f | 9835 | return is_store; |
ebfd146a IR |
9836 | } |
9837 | ||
9838 | ||
b8698a0f | 9839 | /* Remove a group of stores (for SLP or interleaving), free their |
ebfd146a IR |
9840 | stmt_vec_info. */ |
9841 | ||
9842 | void | |
32e8e429 | 9843 | vect_remove_stores (stmt_vec_info first_stmt_info) |
ebfd146a | 9844 | { |
b5b56c2a | 9845 | vec_info *vinfo = first_stmt_info->vinfo; |
32e8e429 | 9846 | stmt_vec_info next_stmt_info = first_stmt_info; |
ebfd146a | 9847 | |
a1824cfd | 9848 | while (next_stmt_info) |
ebfd146a | 9849 | { |
a1824cfd | 9850 | stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info); |
211cd1e2 | 9851 | next_stmt_info = vect_orig_stmt (next_stmt_info); |
ebfd146a | 9852 | /* Free the attached stmt_vec_info and remove the stmt. */ |
b5b56c2a | 9853 | vinfo->remove_stmt (next_stmt_info); |
a1824cfd | 9854 | next_stmt_info = tmp; |
ebfd146a IR |
9855 | } |
9856 | } | |
9857 | ||
bb67d9c7 | 9858 | /* Function get_vectype_for_scalar_type_and_size. |
ebfd146a | 9859 | |
bb67d9c7 | 9860 | Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported |
ebfd146a IR |
9861 | by the target. */ |
9862 | ||
c803b2a9 | 9863 | tree |
86e36728 | 9864 | get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size) |
ebfd146a | 9865 | { |
c7d97b28 | 9866 | tree orig_scalar_type = scalar_type; |
3bd8f481 | 9867 | scalar_mode inner_mode; |
ef4bddc2 | 9868 | machine_mode simd_mode; |
86e36728 | 9869 | poly_uint64 nunits; |
ebfd146a IR |
9870 | tree vectype; |
9871 | ||
3bd8f481 RS |
9872 | if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode) |
9873 | && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode)) | |
ebfd146a IR |
9874 | return NULL_TREE; |
9875 | ||
3bd8f481 | 9876 | unsigned int nbytes = GET_MODE_SIZE (inner_mode); |
48f2e373 | 9877 | |
7b7b1813 RG |
9878 | /* For vector types of elements whose mode precision doesn't |
9879 | match their type's precision we use an element type of mode |
9880 | precision. The vectorization routines will have to make sure | |
48f2e373 RB |
9881 | they support the proper result truncation/extension. |
9882 | We also make sure to build vector types with INTEGER_TYPE | |
9883 | component type only. */ | |
6d7971b8 | 9884 | if (INTEGRAL_TYPE_P (scalar_type) |
48f2e373 RB |
9885 | && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type) |
9886 | || TREE_CODE (scalar_type) != INTEGER_TYPE)) | |
7b7b1813 RG |
9887 | scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode), |
9888 | TYPE_UNSIGNED (scalar_type)); | |
6d7971b8 | 9889 | |
ccbf5bb4 RG |
9890 | /* We shouldn't end up building VECTOR_TYPEs of non-scalar components. |
9891 | When the component mode passes the above test, simply use a type |
9892 | corresponding to that mode. The theory is that any use that | |
9893 | would cause problems with this will disable vectorization anyway. */ | |
dfc2e2ac | 9894 | else if (!SCALAR_FLOAT_TYPE_P (scalar_type) |
e67f39f7 | 9895 | && !INTEGRAL_TYPE_P (scalar_type)) |
60b95d28 RB |
9896 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1); |
9897 | ||
9898 | /* We can't build a vector type of elements with alignment bigger than | |
9899 | their size. */ | |
dfc2e2ac | 9900 | else if (nbytes < TYPE_ALIGN_UNIT (scalar_type)) |
aca43c6c JJ |
9901 | scalar_type = lang_hooks.types.type_for_mode (inner_mode, |
9902 | TYPE_UNSIGNED (scalar_type)); | |
ccbf5bb4 | 9903 | |
dfc2e2ac RB |
9904 | /* If we felt back to using the mode fail if there was |
9905 | no scalar type for it. */ | |
9906 | if (scalar_type == NULL_TREE) | |
9907 | return NULL_TREE; | |
9908 | ||
bb67d9c7 RG |
9909 | /* If no size was supplied, use the mode the target prefers. Otherwise |
9910 | look up a vector mode of the specified size. */ |
86e36728 | 9911 | if (known_eq (size, 0U)) |
bb67d9c7 | 9912 | simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode); |
86e36728 RS |
9913 | else if (!multiple_p (size, nbytes, &nunits) |
9914 | || !mode_for_vector (inner_mode, nunits).exists (&simd_mode)) | |
9da15d40 | 9915 | return NULL_TREE; |
4c8fd8ac | 9916 | /* NOTE: nunits == 1 is allowed to support single element vector types. */ |
86e36728 | 9917 | if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits)) |
cc4b5170 | 9918 | return NULL_TREE; |
ebfd146a IR |
9919 | |
9920 | vectype = build_vector_type (scalar_type, nunits); | |
ebfd146a IR |
9921 | |
9922 | if (!VECTOR_MODE_P (TYPE_MODE (vectype)) | |
9923 | && !INTEGRAL_MODE_P (TYPE_MODE (vectype))) | |
451dabda | 9924 | return NULL_TREE; |
ebfd146a | 9925 | |
c7d97b28 RB |
9926 | /* Re-attach the address-space qualifier if we canonicalized the scalar |
9927 | type. */ | |
9928 | if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype)) | |
9929 | return build_qualified_type | |
9930 | (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type))); | |
9931 | ||
ebfd146a IR |
9932 | return vectype; |
9933 | } | |
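/* A worked example under assumed parameters (only a sketch, not a
   statement about any particular port): for SCALAR_TYPE short int and
   SIZE 16 bytes, inner_mode is HImode and nbytes is 2, so the multiple_p
   checks yield nunits = 8 and the function returns an 8 x short vector
   type, provided the target really provides a 16-byte HImode vector mode.
   With SIZE 0 the element count is instead derived from
   targetm.vectorize.preferred_simd_mode (HImode).  */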
9934 | ||
86e36728 | 9935 | poly_uint64 current_vector_size; |
bb67d9c7 RG |
9936 | |
9937 | /* Function get_vectype_for_scalar_type. | |
9938 | ||
9939 | Returns the vector type corresponding to SCALAR_TYPE as supported | |
9940 | by the target. */ | |
9941 | ||
9942 | tree | |
9943 | get_vectype_for_scalar_type (tree scalar_type) | |
9944 | { | |
9945 | tree vectype; | |
9946 | vectype = get_vectype_for_scalar_type_and_size (scalar_type, | |
9947 | current_vector_size); | |
9948 | if (vectype | |
86e36728 | 9949 | && known_eq (current_vector_size, 0U)) |
bb67d9c7 RG |
9950 | current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype)); |
9951 | return vectype; | |
9952 | } | |
9953 | ||
42fd8198 IE |
9954 | /* Function get_mask_type_for_scalar_type. |
9955 | ||
9956 | Returns the mask type corresponding to a result of comparison | |
9957 | of vectors of specified SCALAR_TYPE as supported by target. */ | |
9958 | ||
9959 | tree | |
9960 | get_mask_type_for_scalar_type (tree scalar_type) | |
9961 | { | |
9962 | tree vectype = get_vectype_for_scalar_type (scalar_type); | |
9963 | ||
9964 | if (!vectype) | |
9965 | return NULL; | |
9966 | ||
9967 | return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), | |
9968 | current_vector_size); | |
9969 | } | |
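/* A worked example, under assumed (not target-specific) parameters: if
   current_vector_size is 16 bytes and SCALAR_TYPE is a 32-bit int, the
   data vector type has 4 elements, so the mask type returned here is the
   4-element boolean vector type built by build_truth_vector_type; whether
   that is a 4 x 32-bit data-like vector or a scalar mask mode is decided
   by the target.  */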
9970 | ||
b690cc0f RG |
9971 | /* Function get_same_sized_vectype |
9972 | ||
9973 | Returns a vector type corresponding to SCALAR_TYPE of size | |
9974 | VECTOR_TYPE if supported by the target. */ | |
9975 | ||
9976 | tree | |
bb67d9c7 | 9977 | get_same_sized_vectype (tree scalar_type, tree vector_type) |
b690cc0f | 9978 | { |
2568d8a1 | 9979 | if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)) |
9f47c7e5 IE |
9980 | return build_same_sized_truth_vector_type (vector_type); |
9981 | ||
bb67d9c7 RG |
9982 | return get_vectype_for_scalar_type_and_size |
9983 | (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type))); | |
b690cc0f RG |
9984 | } |
9985 | ||
ebfd146a IR |
9986 | /* Function vect_is_simple_use. |
9987 | ||
9988 | Input: | |
81c40241 RB |
9989 | VINFO - the vect info of the loop or basic block that is being vectorized. |
9990 | OPERAND - operand in the loop or bb. | |
9991 | Output: | |
fef96d8e RS |
9992 | DEF_STMT_INFO_OUT (optional) - information about the defining stmt in |
9993 | case OPERAND is an SSA_NAME that is defined in the vectorizable region | |
9994 | DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME; | |
9995 | the definition could be anywhere in the function | |
81c40241 | 9996 | DT - the type of definition |
ebfd146a IR |
9997 | |
9998 | Returns whether a stmt with OPERAND can be vectorized. | |
b8698a0f | 9999 | For loops, supportable operands are constants, loop invariants, and operands |
ff802fa1 | 10000 | that are defined by the current iteration of the loop. Unsupportable |
b8698a0f | 10001 | operands are those that are defined by a previous iteration of the loop (as |
a70d6342 IR |
10002 | is the case in reduction/induction computations). |
10003 | For basic blocks, supportable operands are constants and bb invariants. | |
10004 | For now, operands defined outside the basic block are not supported. */ | |
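/* An illustrative classification (example loop invented for exposition):
   when vectorizing a loop body a[i] = b[i] * x + 4, the SSA name holding
   the loaded b[i] is a vect_internal_def, the loop-invariant x is a
   vect_external_def, the literal 4 is a vect_constant_def, and the
   induction variable i would be reported as a vect_induction_def.  */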
ebfd146a IR |
10005 | |
10006 | bool | |
894dd753 | 10007 | vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt, |
fef96d8e | 10008 | stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out) |
b8698a0f | 10009 | { |
fef96d8e RS |
10010 | if (def_stmt_info_out) |
10011 | *def_stmt_info_out = NULL; | |
894dd753 RS |
10012 | if (def_stmt_out) |
10013 | *def_stmt_out = NULL; | |
3fc356dc | 10014 | *dt = vect_unknown_def_type; |
b8698a0f | 10015 | |
73fbfcad | 10016 | if (dump_enabled_p ()) |
ebfd146a | 10017 | { |
78c60e3d SS |
10018 | dump_printf_loc (MSG_NOTE, vect_location, |
10019 | "vect_is_simple_use: operand "); | |
30f502ed RB |
10020 | if (TREE_CODE (operand) == SSA_NAME |
10021 | && !SSA_NAME_IS_DEFAULT_DEF (operand)) | |
10022 | dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0); | |
10023 | else | |
10024 | dump_generic_expr (MSG_NOTE, TDF_SLIM, operand); | |
ebfd146a | 10025 | } |
b8698a0f | 10026 | |
b758f602 | 10027 | if (CONSTANT_CLASS_P (operand)) |
30f502ed RB |
10028 | *dt = vect_constant_def; |
10029 | else if (is_gimple_min_invariant (operand)) | |
10030 | *dt = vect_external_def; | |
10031 | else if (TREE_CODE (operand) != SSA_NAME) | |
10032 | *dt = vect_unknown_def_type; | |
10033 | else if (SSA_NAME_IS_DEFAULT_DEF (operand)) | |
8644a673 | 10034 | *dt = vect_external_def; |
ebfd146a IR |
10035 | else |
10036 | { | |
30f502ed | 10037 | gimple *def_stmt = SSA_NAME_DEF_STMT (operand); |
c98d0595 RS |
10038 | stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand); |
10039 | if (!stmt_vinfo) | |
30f502ed RB |
10040 | *dt = vect_external_def; |
10041 | else | |
0f8c840c | 10042 | { |
6e6b18e5 RS |
10043 | stmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo); |
10044 | def_stmt = stmt_vinfo->stmt; | |
30f502ed RB |
10045 | switch (gimple_code (def_stmt)) |
10046 | { | |
10047 | case GIMPLE_PHI: | |
10048 | case GIMPLE_ASSIGN: | |
10049 | case GIMPLE_CALL: | |
10050 | *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo); | |
10051 | break; | |
10052 | default: | |
10053 | *dt = vect_unknown_def_type; | |
10054 | break; | |
10055 | } | |
fef96d8e RS |
10056 | if (def_stmt_info_out) |
10057 | *def_stmt_info_out = stmt_vinfo; | |
0f8c840c | 10058 | } |
30f502ed RB |
10059 | if (def_stmt_out) |
10060 | *def_stmt_out = def_stmt; | |
ebfd146a IR |
10061 | } |
10062 | ||
2e8ab70c RB |
10063 | if (dump_enabled_p ()) |
10064 | { | |
30f502ed | 10065 | dump_printf (MSG_NOTE, ", type of def: "); |
2e8ab70c RB |
10066 | switch (*dt) |
10067 | { | |
10068 | case vect_uninitialized_def: | |
10069 | dump_printf (MSG_NOTE, "uninitialized\n"); | |
10070 | break; | |
10071 | case vect_constant_def: | |
10072 | dump_printf (MSG_NOTE, "constant\n"); | |
10073 | break; | |
10074 | case vect_external_def: | |
10075 | dump_printf (MSG_NOTE, "external\n"); | |
10076 | break; | |
10077 | case vect_internal_def: | |
10078 | dump_printf (MSG_NOTE, "internal\n"); | |
10079 | break; | |
10080 | case vect_induction_def: | |
10081 | dump_printf (MSG_NOTE, "induction\n"); | |
10082 | break; | |
10083 | case vect_reduction_def: | |
10084 | dump_printf (MSG_NOTE, "reduction\n"); | |
10085 | break; | |
10086 | case vect_double_reduction_def: | |
10087 | dump_printf (MSG_NOTE, "double reduction\n"); | |
10088 | break; | |
10089 | case vect_nested_cycle: | |
10090 | dump_printf (MSG_NOTE, "nested cycle\n"); | |
10091 | break; | |
10092 | case vect_unknown_def_type: | |
10093 | dump_printf (MSG_NOTE, "unknown\n"); | |
10094 | break; | |
10095 | } | |
10096 | } | |
10097 | ||
81c40241 | 10098 | if (*dt == vect_unknown_def_type) |
ebfd146a | 10099 | { |
73fbfcad | 10100 | if (dump_enabled_p ()) |
78c60e3d | 10101 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 10102 | "Unsupported pattern.\n"); |
ebfd146a IR |
10103 | return false; |
10104 | } | |
10105 | ||
ebfd146a IR |
10106 | return true; |
10107 | } | |
10108 | ||
81c40241 | 10109 | /* Function vect_is_simple_use. |
b690cc0f | 10110 | |
81c40241 | 10111 | Same as vect_is_simple_use but also determines the vector operand |
b690cc0f RG |
10112 | type of OPERAND and stores it to *VECTYPE. If the definition of |
10113 | OPERAND is vect_uninitialized_def, vect_constant_def or | |
10114 | vect_external_def *VECTYPE will be set to NULL_TREE and the caller | |
10115 | is responsible to compute the best suited vector type for the | |
10116 | scalar operand. */ | |
10117 | ||
10118 | bool | |
894dd753 | 10119 | vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt, |
fef96d8e RS |
10120 | tree *vectype, stmt_vec_info *def_stmt_info_out, |
10121 | gimple **def_stmt_out) | |
b690cc0f | 10122 | { |
fef96d8e | 10123 | stmt_vec_info def_stmt_info; |
894dd753 | 10124 | gimple *def_stmt; |
fef96d8e | 10125 | if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt)) |
b690cc0f RG |
10126 | return false; |
10127 | ||
894dd753 RS |
10128 | if (def_stmt_out) |
10129 | *def_stmt_out = def_stmt; | |
fef96d8e RS |
10130 | if (def_stmt_info_out) |
10131 | *def_stmt_info_out = def_stmt_info; | |
894dd753 | 10132 | |
b690cc0f RG |
10133 | /* Now get a vector type if the def is internal, otherwise supply |
10134 | NULL_TREE and leave it up to the caller to figure out a proper | |
10135 | type for the use stmt. */ | |
10136 | if (*dt == vect_internal_def | |
10137 | || *dt == vect_induction_def | |
10138 | || *dt == vect_reduction_def | |
10139 | || *dt == vect_double_reduction_def | |
10140 | || *dt == vect_nested_cycle) | |
10141 | { | |
fef96d8e | 10142 | *vectype = STMT_VINFO_VECTYPE (def_stmt_info); |
b690cc0f | 10143 | gcc_assert (*vectype != NULL_TREE); |
30f502ed RB |
10144 | if (dump_enabled_p ()) |
10145 | { | |
10146 | dump_printf_loc (MSG_NOTE, vect_location, | |
10147 | "vect_is_simple_use: vectype "); | |
10148 | dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype); | |
10149 | dump_printf (MSG_NOTE, "\n"); | |
10150 | } | |
b690cc0f RG |
10151 | } |
10152 | else if (*dt == vect_uninitialized_def | |
10153 | || *dt == vect_constant_def | |
10154 | || *dt == vect_external_def) | |
10155 | *vectype = NULL_TREE; | |
10156 | else | |
10157 | gcc_unreachable (); | |
10158 | ||
10159 | return true; | |
10160 | } | |
10161 | ||
ebfd146a IR |
10162 | |
10163 | /* Function supportable_widening_operation | |
10164 | ||
b8698a0f L |
10165 | Check whether an operation represented by the code CODE is a |
10166 | widening operation that is supported by the target platform in | |
b690cc0f RG |
10167 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
10168 | producing a result of type VECTYPE_OUT). | |
b8698a0f | 10169 | |
1bda738b JJ |
10170 | Widening operations we currently support are NOP (CONVERT), FLOAT, |
10171 | FIX_TRUNC and WIDEN_MULT. This function checks if these operations | |
10172 | are supported by the target platform either directly (via vector | |
10173 | tree-codes), or via target builtins. | |
ebfd146a IR |
10174 | |
10175 | Output: | |
b8698a0f L |
10176 | - CODE1 and CODE2 are codes of vector operations to be used when |
10177 | vectorizing the operation, if available. | |
ebfd146a IR |
10178 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
10179 | case of multi-step conversion (like char->short->int - in that case | |
10180 | MULTI_STEP_CVT will be 1). | |
b8698a0f L |
10181 | - INTERM_TYPES contains the intermediate type required to perform the |
10182 | widening operation (short in the above example). */ | |
ebfd146a IR |
10183 | |
10184 | bool | |
32e8e429 | 10185 | supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info, |
b690cc0f | 10186 | tree vectype_out, tree vectype_in, |
ebfd146a IR |
10187 | enum tree_code *code1, enum tree_code *code2, |
10188 | int *multi_step_cvt, | |
9771b263 | 10189 | vec<tree> *interm_types) |
ebfd146a | 10190 | { |
ebfd146a | 10191 | loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); |
4ef69dfc | 10192 | struct loop *vect_loop = NULL; |
ef4bddc2 | 10193 | machine_mode vec_mode; |
81f40b79 | 10194 | enum insn_code icode1, icode2; |
ebfd146a | 10195 | optab optab1, optab2; |
b690cc0f RG |
10196 | tree vectype = vectype_in; |
10197 | tree wide_vectype = vectype_out; | |
ebfd146a | 10198 | enum tree_code c1, c2; |
4a00c761 JJ |
10199 | int i; |
10200 | tree prev_type, intermediate_type; | |
ef4bddc2 | 10201 | machine_mode intermediate_mode, prev_mode; |
4a00c761 | 10202 | optab optab3, optab4; |
ebfd146a | 10203 | |
4a00c761 | 10204 | *multi_step_cvt = 0; |
4ef69dfc IR |
10205 | if (loop_info) |
10206 | vect_loop = LOOP_VINFO_LOOP (loop_info); | |
10207 | ||
ebfd146a IR |
10208 | switch (code) |
10209 | { | |
10210 | case WIDEN_MULT_EXPR: | |
6ae6116f RH |
10211 | /* The result of a vectorized widening operation usually requires |
10212 | two vectors (because the widened results do not fit into one vector). | |
10213 | The generated vector results would normally be expected to be | |
10214 | generated in the same order as in the original scalar computation, | |
10215 | i.e. if 8 results are generated in each vector iteration, they are | |
10216 | to be organized as follows: | |
10217 | vect1: [res1,res2,res3,res4], | |
10218 | vect2: [res5,res6,res7,res8]. | |
10219 | ||
10220 | However, in the special case that the result of the widening | |
10221 | operation is used in a reduction computation only, the order doesn't | |
10222 | matter (because when vectorizing a reduction we change the order of | |
10223 | the computation). Some targets can take advantage of this and | |
10224 | generate more efficient code. For example, targets like Altivec, | |
10225 | that support widen_mult using a sequence of {mult_even,mult_odd} | |
10226 | generate the following vectors: | |
10227 | vect1: [res1,res3,res5,res7], | |
10228 | vect2: [res2,res4,res6,res8]. | |
10229 | ||
10230 | When vectorizing outer-loops, we execute the inner-loop sequentially | |
10231 | (each vectorized inner-loop iteration contributes to VF outer-loop | |
10232 | iterations in parallel). We therefore don't allow to change the | |
10233 | order of the computation in the inner-loop during outer-loop | |
10234 | vectorization. */ | |
10235 | /* TODO: Another case in which order doesn't *really* matter is when we | |
10236 | widen and then contract again, e.g. (short)((int)x * y >> 8). | |
10237 | Normally, pack_trunc performs an even/odd permute, whereas the | |
10238 | repack from an even/odd expansion would be an interleave, which | |
10239 | would be significantly simpler for e.g. AVX2. */ | |
10240 | /* In any case, in order to avoid duplicating the code below, recurse | |
10241 | on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values | |
10242 | are properly set up for the caller. If we fail, we'll continue with | |
10243 | a VEC_WIDEN_MULT_LO/HI_EXPR check. */ | |
10244 | if (vect_loop | |
10245 | && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction | |
86a91c0a | 10246 | && !nested_in_vect_loop_p (vect_loop, stmt_info) |
6ae6116f | 10247 | && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR, |
86a91c0a RS |
10248 | stmt_info, vectype_out, |
10249 | vectype_in, code1, code2, | |
10250 | multi_step_cvt, interm_types)) | |
ebc047a2 CH |
10251 | { |
10252 | /* Elements in a vector with vect_used_by_reduction property cannot | |
10253 | be reordered if the use chain with this property does not have the | |
10254 | same operation. One such example is s += a * b, where elements |
10255 | in a and b cannot be reordered. Here we check if the vector defined | |
10256 | by STMT is only directly used in the reduction statement. */ | |
86a91c0a | 10257 | tree lhs = gimple_assign_lhs (stmt_info->stmt); |
0d0a4e20 RS |
10258 | stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs); |
10259 | if (use_stmt_info | |
10260 | && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def) | |
10261 | return true; | |
ebc047a2 | 10262 | } |
4a00c761 JJ |
10263 | c1 = VEC_WIDEN_MULT_LO_EXPR; |
10264 | c2 = VEC_WIDEN_MULT_HI_EXPR; | |
ebfd146a IR |
10265 | break; |
10266 | ||
81c40241 RB |
10267 | case DOT_PROD_EXPR: |
10268 | c1 = DOT_PROD_EXPR; | |
10269 | c2 = DOT_PROD_EXPR; | |
10270 | break; | |
10271 | ||
10272 | case SAD_EXPR: | |
10273 | c1 = SAD_EXPR; | |
10274 | c2 = SAD_EXPR; | |
10275 | break; | |
10276 | ||
6ae6116f RH |
10277 | case VEC_WIDEN_MULT_EVEN_EXPR: |
10278 | /* Support the recursion induced just above. */ | |
10279 | c1 = VEC_WIDEN_MULT_EVEN_EXPR; | |
10280 | c2 = VEC_WIDEN_MULT_ODD_EXPR; | |
10281 | break; | |
10282 | ||
36ba4aae | 10283 | case WIDEN_LSHIFT_EXPR: |
4a00c761 JJ |
10284 | c1 = VEC_WIDEN_LSHIFT_LO_EXPR; |
10285 | c2 = VEC_WIDEN_LSHIFT_HI_EXPR; | |
36ba4aae IR |
10286 | break; |
10287 | ||
ebfd146a | 10288 | CASE_CONVERT: |
4a00c761 JJ |
10289 | c1 = VEC_UNPACK_LO_EXPR; |
10290 | c2 = VEC_UNPACK_HI_EXPR; | |
ebfd146a IR |
10291 | break; |
10292 | ||
10293 | case FLOAT_EXPR: | |
4a00c761 JJ |
10294 | c1 = VEC_UNPACK_FLOAT_LO_EXPR; |
10295 | c2 = VEC_UNPACK_FLOAT_HI_EXPR; | |
ebfd146a IR |
10296 | break; |
10297 | ||
10298 | case FIX_TRUNC_EXPR: | |
1bda738b JJ |
10299 | c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR; |
10300 | c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR; | |
10301 | break; | |
ebfd146a IR |
10302 | |
10303 | default: | |
10304 | gcc_unreachable (); | |
10305 | } | |
10306 | ||
6ae6116f | 10307 | if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR) |
6b4db501 | 10308 | std::swap (c1, c2); |
4a00c761 | 10309 | |
ebfd146a IR |
10310 | if (code == FIX_TRUNC_EXPR) |
10311 | { | |
10312 | /* The signedness is determined from output operand. */ | |
b690cc0f RG |
10313 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
10314 | optab2 = optab_for_tree_code (c2, vectype_out, optab_default); | |
ebfd146a IR |
10315 | } |
10316 | else | |
10317 | { | |
10318 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
10319 | optab2 = optab_for_tree_code (c2, vectype, optab_default); | |
10320 | } | |
10321 | ||
10322 | if (!optab1 || !optab2) | |
10323 | return false; | |
10324 | ||
10325 | vec_mode = TYPE_MODE (vectype); | |
947131ba RS |
10326 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing |
10327 | || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing) | |
ebfd146a IR |
10328 | return false; |
10329 | ||
4a00c761 JJ |
10330 | *code1 = c1; |
10331 | *code2 = c2; | |
10332 | ||
10333 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
10334 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff IE |
10335 | /* For scalar masks we may have different boolean |
10336 | vector types having the same QImode. Thus we | |
10337 | add an additional check on the number of elements. */ |
10338 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
928686b1 RS |
10339 | || known_eq (TYPE_VECTOR_SUBPARTS (vectype), |
10340 | TYPE_VECTOR_SUBPARTS (wide_vectype) * 2)); | |
4a00c761 | 10341 | |
b8698a0f | 10342 | /* Check if it's a multi-step conversion that can be done using intermediate |
ebfd146a | 10343 | types. */ |
ebfd146a | 10344 | |
4a00c761 JJ |
10345 | prev_type = vectype; |
10346 | prev_mode = vec_mode; | |
b8698a0f | 10347 | |
4a00c761 JJ |
10348 | if (!CONVERT_EXPR_CODE_P (code)) |
10349 | return false; | |
b8698a0f | 10350 | |
4a00c761 JJ |
10351 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
10352 | intermediate steps in the promotion sequence. We try |
10353 | MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do |
10354 | not. */ | |
9771b263 | 10355 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
10356 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
10357 | { | |
10358 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
10359 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
10360 | { | |
7cfb4d93 | 10361 | intermediate_type = vect_halve_mask_nunits (prev_type); |
3ae0661a IE |
10362 | if (intermediate_mode != TYPE_MODE (intermediate_type)) |
10363 | return false; | |
10364 | } | |
10365 | else | |
10366 | intermediate_type | |
10367 | = lang_hooks.types.type_for_mode (intermediate_mode, | |
10368 | TYPE_UNSIGNED (prev_type)); | |
10369 | ||
4a00c761 JJ |
10370 | optab3 = optab_for_tree_code (c1, intermediate_type, optab_default); |
10371 | optab4 = optab_for_tree_code (c2, intermediate_type, optab_default); | |
10372 | ||
10373 | if (!optab3 || !optab4 | |
10374 | || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing | |
10375 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
10376 | || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing | |
10377 | || insn_data[icode2].operand[0].mode != intermediate_mode | |
10378 | || ((icode1 = optab_handler (optab3, intermediate_mode)) | |
10379 | == CODE_FOR_nothing) | |
10380 | || ((icode2 = optab_handler (optab4, intermediate_mode)) | |
10381 | == CODE_FOR_nothing)) | |
10382 | break; | |
ebfd146a | 10383 | |
9771b263 | 10384 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
10385 | (*multi_step_cvt)++; |
10386 | ||
10387 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype) | |
10388 | && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype)) | |
5e8d6dff | 10389 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
928686b1 RS |
10390 | || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type), |
10391 | TYPE_VECTOR_SUBPARTS (wide_vectype) * 2)); | |
4a00c761 JJ |
10392 | |
10393 | prev_type = intermediate_type; | |
10394 | prev_mode = intermediate_mode; | |
ebfd146a IR |
10395 | } |
10396 | ||
9771b263 | 10397 | interm_types->release (); |
4a00c761 | 10398 | return false; |
ebfd146a IR |
10399 | } |
10400 | ||
10401 | ||
10402 | /* Function supportable_narrowing_operation | |
10403 | ||
b8698a0f L |
10404 | Check whether an operation represented by the code CODE is a |
10405 | narrowing operation that is supported by the target platform in | |
b690cc0f RG |
10406 | vector form (i.e., when operating on arguments of type VECTYPE_IN |
10407 | and producing a result of type VECTYPE_OUT). | |
b8698a0f | 10408 | |
1bda738b JJ |
10409 | Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC |
10410 | and FLOAT. This function checks if these operations are supported by | |
ebfd146a IR |
10411 | the target platform directly via vector tree-codes. |
10412 | ||
10413 | Output: | |
b8698a0f L |
10414 | - CODE1 is the code of a vector operation to be used when |
10415 | vectorizing the operation, if available. | |
ebfd146a IR |
10416 | - MULTI_STEP_CVT determines the number of required intermediate steps in |
10417 | case of multi-step conversion (like int->short->char - in that case | |
10418 | MULTI_STEP_CVT will be 1). | |
10419 | - INTERM_TYPES contains the intermediate type required to perform the | |
b8698a0f | 10420 | narrowing operation (short in the above example). */ |
ebfd146a IR |
10421 | |
10422 | bool | |
10423 | supportable_narrowing_operation (enum tree_code code, | |
b690cc0f | 10424 | tree vectype_out, tree vectype_in, |
ebfd146a | 10425 | enum tree_code *code1, int *multi_step_cvt, |
9771b263 | 10426 | vec<tree> *interm_types) |
ebfd146a | 10427 | { |
ef4bddc2 | 10428 | machine_mode vec_mode; |
ebfd146a IR |
10429 | enum insn_code icode1; |
10430 | optab optab1, interm_optab; | |
b690cc0f RG |
10431 | tree vectype = vectype_in; |
10432 | tree narrow_vectype = vectype_out; | |
ebfd146a | 10433 | enum tree_code c1; |
3ae0661a | 10434 | tree intermediate_type, prev_type; |
ef4bddc2 | 10435 | machine_mode intermediate_mode, prev_mode; |
ebfd146a | 10436 | int i; |
4a00c761 | 10437 | bool uns; |
ebfd146a | 10438 | |
4a00c761 | 10439 | *multi_step_cvt = 0; |
ebfd146a IR |
10440 | switch (code) |
10441 | { | |
10442 | CASE_CONVERT: | |
10443 | c1 = VEC_PACK_TRUNC_EXPR; | |
10444 | break; | |
10445 | ||
10446 | case FIX_TRUNC_EXPR: | |
10447 | c1 = VEC_PACK_FIX_TRUNC_EXPR; | |
10448 | break; | |
10449 | ||
10450 | case FLOAT_EXPR: | |
1bda738b JJ |
10451 | c1 = VEC_PACK_FLOAT_EXPR; |
10452 | break; | |
ebfd146a IR |
10453 | |
10454 | default: | |
10455 | gcc_unreachable (); | |
10456 | } | |
10457 | ||
10458 | if (code == FIX_TRUNC_EXPR) | |
10459 | /* The signedness is determined from output operand. */ | |
b690cc0f | 10460 | optab1 = optab_for_tree_code (c1, vectype_out, optab_default); |
ebfd146a IR |
10461 | else |
10462 | optab1 = optab_for_tree_code (c1, vectype, optab_default); | |
10463 | ||
10464 | if (!optab1) | |
10465 | return false; | |
10466 | ||
10467 | vec_mode = TYPE_MODE (vectype); | |
947131ba | 10468 | if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing) |
ebfd146a IR |
10469 | return false; |
10470 | ||
4a00c761 JJ |
10471 | *code1 = c1; |
10472 | ||
10473 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff IE |
10474 | /* For scalar masks we may have different boolean |
10475 | vector types having the same QImode. Thus we | |
10476 | add an additional check on the number of elements. */ |
10477 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) | |
928686b1 RS |
10478 | || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2, |
10479 | TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 | 10480 | |
1bda738b JJ |
10481 | if (code == FLOAT_EXPR) |
10482 | return false; | |
10483 | ||
ebfd146a IR |
10484 | /* Check if it's a multi-step conversion that can be done using intermediate |
10485 | types. */ | |
4a00c761 | 10486 | prev_mode = vec_mode; |
3ae0661a | 10487 | prev_type = vectype; |
4a00c761 JJ |
10488 | if (code == FIX_TRUNC_EXPR) |
10489 | uns = TYPE_UNSIGNED (vectype_out); | |
10490 | else | |
10491 | uns = TYPE_UNSIGNED (vectype); | |
10492 | ||
10493 | /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer | |
10494 | conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more | |
10495 | costly than signed. */ | |
10496 | if (code == FIX_TRUNC_EXPR && uns) | |
10497 | { | |
10498 | enum insn_code icode2; | |
10499 | ||
10500 | intermediate_type | |
10501 | = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0); | |
10502 | interm_optab | |
10503 | = optab_for_tree_code (c1, intermediate_type, optab_default); | |
2225b9f2 | 10504 | if (interm_optab != unknown_optab |
4a00c761 JJ |
10505 | && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing |
10506 | && insn_data[icode1].operand[0].mode | |
10507 | == insn_data[icode2].operand[0].mode) | |
10508 | { | |
10509 | uns = false; | |
10510 | optab1 = interm_optab; | |
10511 | icode1 = icode2; | |
10512 | } | |
10513 | } | |
ebfd146a | 10514 | |
4a00c761 JJ |
10515 | /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS |
10516 | intermediate steps in the narrowing sequence. We try |
10517 | MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */ | |
9771b263 | 10518 | interm_types->create (MAX_INTERM_CVT_STEPS); |
4a00c761 JJ |
10519 | for (i = 0; i < MAX_INTERM_CVT_STEPS; i++) |
10520 | { | |
10521 | intermediate_mode = insn_data[icode1].operand[0].mode; | |
3ae0661a IE |
10522 | if (VECTOR_BOOLEAN_TYPE_P (prev_type)) |
10523 | { | |
7cfb4d93 | 10524 | intermediate_type = vect_double_mask_nunits (prev_type); |
3ae0661a | 10525 | if (intermediate_mode != TYPE_MODE (intermediate_type)) |
7cfb4d93 | 10526 | return false; |
3ae0661a IE |
10527 | } |
10528 | else | |
10529 | intermediate_type | |
10530 | = lang_hooks.types.type_for_mode (intermediate_mode, uns); | |
4a00c761 JJ |
10531 | interm_optab |
10532 | = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type, | |
10533 | optab_default); | |
10534 | if (!interm_optab | |
10535 | || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing) | |
10536 | || insn_data[icode1].operand[0].mode != intermediate_mode | |
10537 | || ((icode1 = optab_handler (interm_optab, intermediate_mode)) | |
10538 | == CODE_FOR_nothing)) | |
10539 | break; | |
10540 | ||
9771b263 | 10541 | interm_types->quick_push (intermediate_type); |
4a00c761 JJ |
10542 | (*multi_step_cvt)++; |
10543 | ||
10544 | if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype)) | |
5e8d6dff | 10545 | return (!VECTOR_BOOLEAN_TYPE_P (vectype) |
928686b1 RS |
10546 | || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2, |
10547 | TYPE_VECTOR_SUBPARTS (narrow_vectype))); | |
4a00c761 JJ |
10548 | |
10549 | prev_mode = intermediate_mode; | |
3ae0661a | 10550 | prev_type = intermediate_type; |
4a00c761 | 10551 | optab1 = interm_optab; |
ebfd146a IR |
10552 | } |
10553 | ||
9771b263 | 10554 | interm_types->release (); |
4a00c761 | 10555 | return false; |
ebfd146a | 10556 | } |
7cfb4d93 RS |
10557 | |
10558 | /* Generate and return a statement that sets vector mask MASK such that | |
10559 | MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */ | |
10560 | ||
10561 | gcall * | |
10562 | vect_gen_while (tree mask, tree start_index, tree end_index) | |
10563 | { | |
10564 | tree cmp_type = TREE_TYPE (start_index); | |
10565 | tree mask_type = TREE_TYPE (mask); | |
10566 | gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT, | |
10567 | cmp_type, mask_type, | |
10568 | OPTIMIZE_FOR_SPEED)); | |
10569 | gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3, | |
10570 | start_index, end_index, | |
10571 | build_zero_cst (mask_type)); | |
10572 | gimple_call_set_lhs (call, mask); | |
10573 | return call; | |
10574 | } | |
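/* Illustrative usage sketch (editorial addition): the call built above
   would appear in gimple dumps roughly as

     mask_5 = .WHILE_ULT (start_3, end_4, { 0, ... });

   where mask_5[I] is true for exactly those leading elements I with
   START_INDEX + I < END_INDEX.  The SSA names are hypothetical.  */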
535e7c11 RS |
10575 | |
10576 | /* Generate a vector mask of type MASK_TYPE for which index I is false iff | |
10577 | J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */ | |
10578 | ||
10579 | tree | |
10580 | vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index, | |
10581 | tree end_index) | |
10582 | { | |
10583 | tree tmp = make_ssa_name (mask_type); | |
10584 | gcall *call = vect_gen_while (tmp, start_index, end_index); | |
10585 | gimple_seq_add_stmt (seq, call); | |
10586 | return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp); | |
10587 | } | |
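/* Illustrative sketch (editorial addition): the statements added to SEQ
   are roughly

     tmp_1 = .WHILE_ULT (start, end, { 0, ... });
     not_2 = ~tmp_1;

   so the returned value is the complement of the vect_gen_while mask;
   the SSA names are hypothetical.  */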
1f3cb663 RS |
10588 | |
10589 | /* Try to compute the vector types required to vectorize STMT_INFO, | |
10590 | returning true on success and false if vectorization isn't possible. | |
10591 | ||
10592 | On success: | |
10593 | ||
10594 | - Set *STMT_VECTYPE_OUT to: | |
10595 | - NULL_TREE if the statement doesn't need to be vectorized; | |
10596 | - boolean_type_node if the statement is a boolean operation whose | |
10597 | vector type can only be determined once all the other vector types | |
10598 | are known; and | |
10599 | - the equivalent of STMT_VINFO_VECTYPE otherwise. | |
10600 | ||
10601 | - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum | |
10602 | number of units needed to vectorize STMT_INFO, or NULL_TREE if the | |
10603 | statement does not help to determine the overall number of units. */ | |
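/* Illustrative example (editorial addition, assuming a target with
   128-bit vectors): for a conversion statement such as

     int_7 = (int) short_8;

   *STMT_VECTYPE_OUT would be V4SI (derived from the int result), while
   *NUNITS_VECTYPE_OUT would be V8HI, because the number of units comes
   from the smallest scalar type (short) used by the statement.  The SSA
   names are hypothetical.  */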
10604 | ||
10605 | bool | |
10606 | vect_get_vector_types_for_stmt (stmt_vec_info stmt_info, | |
10607 | tree *stmt_vectype_out, | |
10608 | tree *nunits_vectype_out) | |
10609 | { | |
10610 | gimple *stmt = stmt_info->stmt; | |
10611 | ||
10612 | *stmt_vectype_out = NULL_TREE; | |
10613 | *nunits_vectype_out = NULL_TREE; | |
10614 | ||
10615 | if (gimple_get_lhs (stmt) == NULL_TREE | |
10616 | /* MASK_STORE has no lhs, but is ok. */ | |
10617 | && !gimple_call_internal_p (stmt, IFN_MASK_STORE)) | |
10618 | { | |
10619 | if (is_a <gcall *> (stmt)) | |
10620 | { | |
10621 | /* Ignore calls with no lhs. These must be calls to |
10622 | #pragma omp simd functions, and the vectorization factor |
10623 | they really need can't be determined until |
10624 | vectorizable_simd_clone_call. */ |
10625 | if (dump_enabled_p ()) | |
10626 | dump_printf_loc (MSG_NOTE, vect_location, | |
10627 | "defer to SIMD clone analysis.\n"); | |
10628 | return true; | |
10629 | } | |
10630 | ||
10631 | if (dump_enabled_p ()) | |
10632 | { | |
10633 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10634 | "not vectorized: irregular stmt."); | |
10635 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10636 | } | |
10637 | return false; | |
10638 | } | |
10639 | ||
10640 | if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) | |
10641 | { | |
10642 | if (dump_enabled_p ()) | |
10643 | { | |
10644 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10645 | "not vectorized: vector stmt in loop:"); | |
10646 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10647 | } | |
10648 | return false; | |
10649 | } | |
10650 | ||
10651 | tree vectype; | |
10652 | tree scalar_type = NULL_TREE; | |
10653 | if (STMT_VINFO_VECTYPE (stmt_info)) | |
10654 | *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info); | |
10655 | else | |
10656 | { | |
10657 | gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); | |
10658 | if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) | |
10659 | scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); | |
10660 | else | |
10661 | scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); | |
10662 | ||
10663 | /* Pure bool ops don't participate in the number-of-units computation. |
10664 | For comparisons, use the types being compared. */ |
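      /* Illustrative example (editorial addition): for

	   b_1 = x_2 > y_3;

	 with int operands, *STMT_VECTYPE_OUT becomes boolean_type_node
	 and SCALAR_TYPE is taken from x_2 rather than from bool, so the
	 comparison still contributes to the number-of-units choice.
	 The SSA names are hypothetical.  */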
10665 | if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type) | |
10666 | && is_gimple_assign (stmt) | |
10667 | && gimple_assign_rhs_code (stmt) != COND_EXPR) | |
10668 | { | |
10669 | *stmt_vectype_out = boolean_type_node; | |
10670 | ||
10671 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
10672 | if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison | |
10673 | && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1))) | |
10674 | scalar_type = TREE_TYPE (rhs1); | |
10675 | else | |
10676 | { | |
10677 | if (dump_enabled_p ()) | |
10678 | dump_printf_loc (MSG_NOTE, vect_location, | |
10679 | "pure bool operation.\n"); | |
10680 | return true; | |
10681 | } | |
10682 | } | |
10683 | ||
10684 | if (dump_enabled_p ()) | |
10685 | { | |
10686 | dump_printf_loc (MSG_NOTE, vect_location, | |
10687 | "get vectype for scalar type: "); | |
10688 | dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); | |
10689 | dump_printf (MSG_NOTE, "\n"); | |
10690 | } | |
10691 | vectype = get_vectype_for_scalar_type (scalar_type); | |
10692 | if (!vectype) | |
10693 | { | |
10694 | if (dump_enabled_p ()) | |
10695 | { | |
10696 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10697 | "not vectorized: unsupported data-type "); | |
10698 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10699 | scalar_type); | |
10700 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10701 | } | |
10702 | return false; | |
10703 | } | |
10704 | ||
10705 | if (!*stmt_vectype_out) | |
10706 | *stmt_vectype_out = vectype; | |
10707 | ||
10708 | if (dump_enabled_p ()) | |
10709 | { | |
10710 | dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); | |
10711 | dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); | |
10712 | dump_printf (MSG_NOTE, "\n"); | |
10713 | } | |
10714 | } | |
10715 | ||
10716 | /* Don't try to compute scalar types if the stmt produces a boolean | |
10717 | vector; use the existing vector type instead. */ | |
10718 | tree nunits_vectype; | |
10719 | if (VECTOR_BOOLEAN_TYPE_P (vectype)) | |
10720 | nunits_vectype = vectype; | |
10721 | else | |
10722 | { | |
10723 | /* The number of units is set according to the smallest scalar | |
10724 | type (or the largest vector size, but we only support one | |
10725 | vector size per vectorization). */ | |
10726 | if (*stmt_vectype_out != boolean_type_node) | |
10727 | { | |
10728 | HOST_WIDE_INT dummy; | |
86a91c0a RS |
10729 | scalar_type = vect_get_smallest_scalar_type (stmt_info, |
10730 | &dummy, &dummy); | |
1f3cb663 RS |
10731 | } |
10732 | if (dump_enabled_p ()) | |
10733 | { | |
10734 | dump_printf_loc (MSG_NOTE, vect_location, | |
10735 | "get vectype for scalar type: "); | |
10736 | dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); | |
10737 | dump_printf (MSG_NOTE, "\n"); | |
10738 | } | |
10739 | nunits_vectype = get_vectype_for_scalar_type (scalar_type); | |
10740 | } | |
10741 | if (!nunits_vectype) | |
10742 | { | |
10743 | if (dump_enabled_p ()) | |
10744 | { | |
10745 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10746 | "not vectorized: unsupported data-type "); | |
10747 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); | |
10748 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10749 | } | |
10750 | return false; | |
10751 | } | |
10752 | ||
10753 | if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), | |
10754 | GET_MODE_SIZE (TYPE_MODE (nunits_vectype)))) | |
10755 | { | |
10756 | if (dump_enabled_p ()) | |
10757 | { | |
10758 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10759 | "not vectorized: different sized vector " | |
10760 | "types in statement, "); | |
10761 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); | |
10762 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10763 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype); | |
10764 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10765 | } | |
10766 | return false; | |
10767 | } | |
10768 | ||
10769 | if (dump_enabled_p ()) | |
10770 | { | |
10771 | dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); | |
10772 | dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype); | |
10773 | dump_printf (MSG_NOTE, "\n"); | |
10774 | ||
10775 | dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); | |
10776 | dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype)); | |
10777 | dump_printf (MSG_NOTE, "\n"); | |
10778 | } | |
10779 | ||
10780 | *nunits_vectype_out = nunits_vectype; | |
10781 | return true; | |
10782 | } | |
10783 | ||
10784 | /* Try to determine the correct vector type for STMT_INFO, which is a | |
10785 | statement that produces a scalar boolean result. Return the vector | |
10786 | type on success, otherwise return NULL_TREE. */ | |
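/* Illustrative example (editorial addition, assuming a target with
   128-bit vectors): for a scalar-boolean comparison such as

     b_1 = int_2 < int_3;

   the mask type comes from get_mask_type_for_scalar_type applied to the
   comparison operands' type, i.e. a boolean vector with as many elements
   as V4SI; the exact layout of that boolean vector is target-dependent.
   The SSA names are hypothetical.  */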
10787 | ||
10788 | tree | |
10789 | vect_get_mask_type_for_stmt (stmt_vec_info stmt_info) | |
10790 | { | |
10791 | gimple *stmt = stmt_info->stmt; | |
10792 | tree mask_type = NULL; | |
10793 | tree vectype, scalar_type; | |
10794 | ||
10795 | if (is_gimple_assign (stmt) | |
10796 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison | |
10797 | && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) | |
10798 | { | |
10799 | scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); | |
10800 | mask_type = get_mask_type_for_scalar_type (scalar_type); | |
10801 | ||
10802 | if (!mask_type) | |
10803 | { | |
10804 | if (dump_enabled_p ()) | |
10805 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10806 | "not vectorized: unsupported mask\n"); | |
10807 | return NULL_TREE; | |
10808 | } | |
10809 | } | |
10810 | else | |
10811 | { | |
10812 | tree rhs; | |
10813 | ssa_op_iter iter; | |
1f3cb663 RS |
10814 | enum vect_def_type dt; |
10815 | ||
10816 | FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE) | |
10817 | { | |
894dd753 | 10818 | if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype)) |
1f3cb663 RS |
10819 | { |
10820 | if (dump_enabled_p ()) | |
10821 | { | |
10822 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10823 | "not vectorized: can't compute mask type " | |
10824 | "for statement, "); | |
10825 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, | |
10826 | 0); | |
10827 | } | |
10828 | return NULL_TREE; | |
10829 | } | |
10830 | ||
10831 | /* No vectype probably means an external definition. |
10832 | Allow it, since another operand may still |
10833 | determine the mask type. */ |
10834 | if (!vectype) | |
10835 | continue; | |
10836 | ||
10837 | if (!mask_type) | |
10838 | mask_type = vectype; | |
10839 | else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), | |
10840 | TYPE_VECTOR_SUBPARTS (vectype))) | |
10841 | { | |
10842 | if (dump_enabled_p ()) | |
10843 | { | |
10844 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10845 | "not vectorized: different sized masks " | |
10846 | "types in statement, "); | |
10847 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10848 | mask_type); | |
10849 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10850 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10851 | vectype); | |
10852 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10853 | } | |
10854 | return NULL_TREE; | |
10855 | } | |
10856 | else if (VECTOR_BOOLEAN_TYPE_P (mask_type) | |
10857 | != VECTOR_BOOLEAN_TYPE_P (vectype)) | |
10858 | { | |
10859 | if (dump_enabled_p ()) | |
10860 | { | |
10861 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10862 | "not vectorized: mixed mask and " | |
10863 | "nonmask vector types in statement, "); | |
10864 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10865 | mask_type); | |
10866 | dump_printf (MSG_MISSED_OPTIMIZATION, " and "); | |
10867 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
10868 | vectype); | |
10869 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
10870 | } | |
10871 | return NULL_TREE; | |
10872 | } | |
10873 | } | |
10874 | ||
10875 | /* We may compare a boolean value loaded as a vector of integers. |
10876 | Fix mask_type in that case. */ |
10877 | if (mask_type | |
10878 | && !VECTOR_BOOLEAN_TYPE_P (mask_type) | |
10879 | && gimple_code (stmt) == GIMPLE_ASSIGN | |
10880 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) | |
10881 | mask_type = build_same_sized_truth_vector_type (mask_type); | |
10882 | } | |
10883 | ||
10884 | /* No mask_type should mean a loop-invariant predicate. |
10885 | This is probably a candidate for optimization in if-conversion. */ |
10886 | if (!mask_type && dump_enabled_p ()) | |
10887 | { | |
10888 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
10889 | "not vectorized: can't compute mask type " | |
10890 | "for statement, "); | |
10891 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
10892 | } | |
10893 | return mask_type; | |
10894 | } |