1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53
54 /* For lang_hooks.types.type_for_mode. */
55 #include "langhooks.h"
56
57 /* Says whether a statement is a load, a store of a vectorized statement
58 result, or a store of an invariant value. */
59 enum vec_load_store_type {
60 VLS_LOAD,
61 VLS_STORE,
62 VLS_STORE_INVARIANT
63 };
64
65 /* Return the vectorized type for the given statement. */
66
67 tree
68 stmt_vectype (struct _stmt_vec_info *stmt_info)
69 {
70 return STMT_VINFO_VECTYPE (stmt_info);
71 }
72
73 /* Return TRUE iff the given statement is in an inner loop relative to
74 the loop being vectorized. */
75 bool
76 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
77 {
78 gimple *stmt = STMT_VINFO_STMT (stmt_info);
79 basic_block bb = gimple_bb (stmt);
80 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
81 struct loop* loop;
82
83 if (!loop_vinfo)
84 return false;
85
86 loop = LOOP_VINFO_LOOP (loop_vinfo);
87
88 return (bb->loop_father == loop->inner);
89 }
90
91 /* Record the cost of a statement, either by directly informing the
92 target model or by saving it in a vector for later processing.
93 Return a preliminary estimate of the statement's cost. */
94
95 unsigned
96 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
97 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
98 int misalign, enum vect_cost_model_location where)
99 {
100 if ((kind == vector_load || kind == unaligned_load)
101 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
102 kind = vector_gather_load;
103 if ((kind == vector_store || kind == unaligned_store)
104 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
105 kind = vector_scatter_store;
106 if (body_cost_vec)
107 {
108 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
109 stmt_info_for_cost si = { count, kind,
110 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
111 misalign };
112 body_cost_vec->safe_push (si);
113 return (unsigned)
114 (builtin_vectorization_cost (kind, vectype, misalign) * count);
115 }
116 else
117 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
118 count, kind, stmt_info, misalign, where);
119 }
120
121 /* Return a variable of type ELEM_TYPE[NELEMS]. */
122
123 static tree
124 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
125 {
126 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
127 "vect_array");
128 }
129
130 /* ARRAY is an array of vectors created by create_vector_array.
131 Return an SSA_NAME for the vector at index N. The reference
132 is part of the vectorization of STMT and the vector is associated
133 with scalar destination SCALAR_DEST. */
134
135 static tree
136 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
137 tree array, unsigned HOST_WIDE_INT n)
138 {
139 tree vect_type, vect, vect_name, array_ref;
140 gimple *new_stmt;
141
142 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
143 vect_type = TREE_TYPE (TREE_TYPE (array));
144 vect = vect_create_destination_var (scalar_dest, vect_type);
145 array_ref = build4 (ARRAY_REF, vect_type, array,
146 build_int_cst (size_type_node, n),
147 NULL_TREE, NULL_TREE);
148
149 new_stmt = gimple_build_assign (vect, array_ref);
150 vect_name = make_ssa_name (vect, new_stmt);
151 gimple_assign_set_lhs (new_stmt, vect_name);
152 vect_finish_stmt_generation (stmt, new_stmt, gsi);
153
154 return vect_name;
155 }
156
157 /* ARRAY is an array of vectors created by create_vector_array.
158 Emit code to store SSA_NAME VECT in index N of the array.
159 The store is part of the vectorization of STMT. */
160
161 static void
162 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
163 tree array, unsigned HOST_WIDE_INT n)
164 {
165 tree array_ref;
166 gimple *new_stmt;
167
168 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
169 build_int_cst (size_type_node, n),
170 NULL_TREE, NULL_TREE);
171
172 new_stmt = gimple_build_assign (array_ref, vect);
173 vect_finish_stmt_generation (stmt, new_stmt, gsi);
174 }
175
176 /* PTR is a pointer to an array of type TYPE. Return a representation
177 of *PTR, using ALIAS_PTR_TYPE to carry the alias information
178 of the access. */
179
180 static tree
181 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
182 {
183 tree mem_ref;
184
185 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
186 /* Arrays have the same alignment as their type. */
187 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
188 return mem_ref;
189 }
190
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
192
193 /* Function vect_mark_relevant.
194
195 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
196
197 static void
198 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
199 enum vect_relevant relevant, bool live_p)
200 {
201 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
202 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
203 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 gimple *pattern_stmt;
205
206 if (dump_enabled_p ())
207 {
208 dump_printf_loc (MSG_NOTE, vect_location,
209 "mark relevant %d, live %d: ", relevant, live_p);
210 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
211 }
212
213 /* If this stmt is an original stmt in a pattern, we might need to mark its
214 related pattern stmt instead of the original stmt. However, such stmts
215 may have their own uses that are not in any pattern; in such cases the
216 stmt itself should be marked. */
217 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
218 {
219 /* This is the last stmt in a sequence that was detected as a
220 pattern that can potentially be vectorized. Don't mark the stmt
221 as relevant/live because it's not going to be vectorized.
222 Instead mark the pattern-stmt that replaces it. */
223
224 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
225
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_NOTE, vect_location,
228 "last stmt in pattern. don't mark"
229 " relevant/live.\n");
230 stmt_info = vinfo_for_stmt (pattern_stmt);
231 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
232 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
233 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
234 stmt = pattern_stmt;
235 }
236
237 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
238 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
239 STMT_VINFO_RELEVANT (stmt_info) = relevant;
240
241 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
242 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
243 {
244 if (dump_enabled_p ())
245 dump_printf_loc (MSG_NOTE, vect_location,
246 "already marked relevant/live.\n");
247 return;
248 }
249
250 worklist->safe_push (stmt);
251 }
252
253
254 /* Function is_simple_and_all_uses_invariant
255
256 Return true if STMT is simple and all uses of it are invariant. */
257
258 bool
259 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
260 {
261 tree op;
262 gimple *def_stmt;
263 ssa_op_iter iter;
264
265 if (!is_gimple_assign (stmt))
266 return false;
267
268 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
269 {
270 enum vect_def_type dt = vect_uninitialized_def;
271
272 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
273 {
274 if (dump_enabled_p ())
275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
276 "use not simple.\n");
277 return false;
278 }
279
280 if (dt != vect_external_def && dt != vect_constant_def)
281 return false;
282 }
283 return true;
284 }
285
286 /* Function vect_stmt_relevant_p.
287
288 Return true if STMT in loop that is represented by LOOP_VINFO is
289 "relevant for vectorization".
290
291 A stmt is considered "relevant for vectorization" if:
292 - it has uses outside the loop.
293 - it has vdefs (it alters memory).
294 - it is a control stmt in the loop (other than the exit condition).
295
296 CHECKME: what other side effects would the vectorizer allow? */
297
298 static bool
299 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
300 enum vect_relevant *relevant, bool *live_p)
301 {
302 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
303 ssa_op_iter op_iter;
304 imm_use_iterator imm_iter;
305 use_operand_p use_p;
306 def_operand_p def_p;
307
308 *relevant = vect_unused_in_scope;
309 *live_p = false;
310
311 /* cond stmt other than loop exit cond. */
312 if (is_ctrl_stmt (stmt)
313 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
314 != loop_exit_ctrl_vec_info_type)
315 *relevant = vect_used_in_scope;
316
317 /* changing memory. */
318 if (gimple_code (stmt) != GIMPLE_PHI)
319 if (gimple_vdef (stmt)
320 && !gimple_clobber_p (stmt))
321 {
322 if (dump_enabled_p ())
323 dump_printf_loc (MSG_NOTE, vect_location,
324 "vec_stmt_relevant_p: stmt has vdefs.\n");
325 *relevant = vect_used_in_scope;
326 }
327
328 /* uses outside the loop. */
329 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
330 {
331 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
332 {
333 basic_block bb = gimple_bb (USE_STMT (use_p));
334 if (!flow_bb_inside_loop_p (loop, bb))
335 {
336 if (dump_enabled_p ())
337 dump_printf_loc (MSG_NOTE, vect_location,
338 "vec_stmt_relevant_p: used out of loop.\n");
339
340 if (is_gimple_debug (USE_STMT (use_p)))
341 continue;
342
343 /* We expect all such uses to be in the loop exit phis
344 (because of loop closed form) */
345 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
346 gcc_assert (bb == single_exit (loop)->dest);
347
348 *live_p = true;
349 }
350 }
351 }
352
353 if (*live_p && *relevant == vect_unused_in_scope
354 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
355 {
356 if (dump_enabled_p ())
357 dump_printf_loc (MSG_NOTE, vect_location,
358 "vec_stmt_relevant_p: stmt live but not relevant.\n");
359 *relevant = vect_used_only_live;
360 }
361
362 return (*live_p || *relevant);
363 }
364
365
366 /* Function exist_non_indexing_operands_for_use_p
367
368 USE is one of the uses attached to STMT. Check if USE is
369 used in STMT for anything other than indexing an array. */
370
371 static bool
372 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
373 {
374 tree operand;
375 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
376
377 /* USE corresponds to some operand in STMT. If there is no data
378 reference in STMT, then any operand that corresponds to USE
379 is not indexing an array. */
380 if (!STMT_VINFO_DATA_REF (stmt_info))
381 return true;
382
383 /* STMT has a data_ref. FORNOW this means that it has one of
384 the following forms:
385 -1- ARRAY_REF = var
386 -2- var = ARRAY_REF
387 (This should have been verified in analyze_data_refs).
388
389 'var' in the second case corresponds to a def, not a use,
390 so USE cannot correspond to any operands that are not used
391 for array indexing.
392
393 Therefore, all we need to check is if STMT falls into the
394 first case, and whether var corresponds to USE. */
395
396 if (!gimple_assign_copy_p (stmt))
397 {
398 if (is_gimple_call (stmt)
399 && gimple_call_internal_p (stmt))
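/* For these internal calls the leading arguments describe the access
   itself, so the only candidates for a non-indexing use are the mask
   (call argument 2) and, for IFN_MASK_STORE, the stored value
   (call argument 3), which are checked below.  */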
400 switch (gimple_call_internal_fn (stmt))
401 {
402 case IFN_MASK_STORE:
403 operand = gimple_call_arg (stmt, 3);
404 if (operand == use)
405 return true;
406 /* FALLTHRU */
407 case IFN_MASK_LOAD:
408 operand = gimple_call_arg (stmt, 2);
409 if (operand == use)
410 return true;
411 break;
412 default:
413 break;
414 }
415 return false;
416 }
417
418 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
419 return false;
420 operand = gimple_assign_rhs1 (stmt);
421 if (TREE_CODE (operand) != SSA_NAME)
422 return false;
423
424 if (operand == use)
425 return true;
426
427 return false;
428 }
429
430
431 /*
432 Function process_use.
433
434 Inputs:
435 - a USE in STMT in a loop represented by LOOP_VINFO
436 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
437 that defined USE. This is done by calling mark_relevant and passing it
438 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
439 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
440 be performed.
441
442 Outputs:
443 Generally, LIVE_P and RELEVANT are used to define the liveness and
444 relevance info of the DEF_STMT of this USE:
445 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
446 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
447 Exceptions:
448 - case 1: If USE is used only for address computations (e.g. array indexing),
449 which does not need to be directly vectorized, then the liveness/relevance
450 of the respective DEF_STMT is left unchanged.
451 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
452 skip DEF_STMT because it has already been processed.
453 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
454 be modified accordingly.
455
456 Return true if everything is as expected. Return false otherwise. */
457
458 static bool
459 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
460 enum vect_relevant relevant, vec<gimple *> *worklist,
461 bool force)
462 {
463 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
464 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
465 stmt_vec_info dstmt_vinfo;
466 basic_block bb, def_bb;
467 gimple *def_stmt;
468 enum vect_def_type dt;
469
470 /* case 1: we are only interested in uses that need to be vectorized. Uses
471 that are used for address computation are not considered relevant. */
472 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
473 return true;
474
475 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
476 {
477 if (dump_enabled_p ())
478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
479 "not vectorized: unsupported use in stmt.\n");
480 return false;
481 }
482
483 if (!def_stmt || gimple_nop_p (def_stmt))
484 return true;
485
486 def_bb = gimple_bb (def_stmt);
487 if (!flow_bb_inside_loop_p (loop, def_bb))
488 {
489 if (dump_enabled_p ())
490 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
491 return true;
492 }
493
494 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
495 DEF_STMT must have already been processed, because this should be the
496 only way that STMT, which is a reduction-phi, was put in the worklist,
497 as there should be no other uses for DEF_STMT in the loop. So we just
498 check that everything is as expected, and we are done. */
499 dstmt_vinfo = vinfo_for_stmt (def_stmt);
500 bb = gimple_bb (stmt);
501 if (gimple_code (stmt) == GIMPLE_PHI
502 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
503 && gimple_code (def_stmt) != GIMPLE_PHI
504 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
505 && bb->loop_father == def_bb->loop_father)
506 {
507 if (dump_enabled_p ())
508 dump_printf_loc (MSG_NOTE, vect_location,
509 "reduc-stmt defining reduc-phi in the same nest.\n");
510 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
511 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
512 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
513 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
514 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
515 return true;
516 }
517
518 /* case 3a: outer-loop stmt defining an inner-loop stmt:
519 outer-loop-header-bb:
520 d = def_stmt
521 inner-loop:
522 stmt # use (d)
523 outer-loop-tail-bb:
524 ... */
525 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
526 {
527 if (dump_enabled_p ())
528 dump_printf_loc (MSG_NOTE, vect_location,
529 "outer-loop def-stmt defining inner-loop stmt.\n");
530
531 switch (relevant)
532 {
533 case vect_unused_in_scope:
534 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
535 vect_used_in_scope : vect_unused_in_scope;
536 break;
537
538 case vect_used_in_outer_by_reduction:
539 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
540 relevant = vect_used_by_reduction;
541 break;
542
543 case vect_used_in_outer:
544 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
545 relevant = vect_used_in_scope;
546 break;
547
548 case vect_used_in_scope:
549 break;
550
551 default:
552 gcc_unreachable ();
553 }
554 }
555
556 /* case 3b: inner-loop stmt defining an outer-loop stmt:
557 outer-loop-header-bb:
558 ...
559 inner-loop:
560 d = def_stmt
561 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
562 stmt # use (d) */
563 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
564 {
565 if (dump_enabled_p ())
566 dump_printf_loc (MSG_NOTE, vect_location,
567 "inner-loop def-stmt defining outer-loop stmt.\n");
568
569 switch (relevant)
570 {
571 case vect_unused_in_scope:
572 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
573 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
574 vect_used_in_outer_by_reduction : vect_unused_in_scope;
575 break;
576
577 case vect_used_by_reduction:
578 case vect_used_only_live:
579 relevant = vect_used_in_outer_by_reduction;
580 break;
581
582 case vect_used_in_scope:
583 relevant = vect_used_in_outer;
584 break;
585
586 default:
587 gcc_unreachable ();
588 }
589 }
590 /* We are also not interested in uses on loop PHI backedges that are
591 inductions. Otherwise we'll needlessly vectorize the IV increment
592 and cause hybrid SLP for SLP inductions. Unless the PHI is live
593 of course. */
594 else if (gimple_code (stmt) == GIMPLE_PHI
595 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
596 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
597 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
598 == use))
599 {
600 if (dump_enabled_p ())
601 dump_printf_loc (MSG_NOTE, vect_location,
602 "induction value on backedge.\n");
603 return true;
604 }
605
606
607 vect_mark_relevant (worklist, def_stmt, relevant, false);
608 return true;
609 }
610
611
612 /* Function vect_mark_stmts_to_be_vectorized.
613
614 Not all stmts in the loop need to be vectorized. For example:
615
616 for i...
617 for j...
618 1. T0 = i + j
619 2. T1 = a[T0]
620
621 3. j = j + 1
622
623 Stmts 1 and 3 do not need to be vectorized, because loop control and
624 addressing of vectorized data-refs are handled differently.
625
626 This pass detects such stmts. */
627
628 bool
629 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
630 {
631 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
632 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
633 unsigned int nbbs = loop->num_nodes;
634 gimple_stmt_iterator si;
635 gimple *stmt;
636 unsigned int i;
637 stmt_vec_info stmt_vinfo;
638 basic_block bb;
639 gimple *phi;
640 bool live_p;
641 enum vect_relevant relevant;
642
643 if (dump_enabled_p ())
644 dump_printf_loc (MSG_NOTE, vect_location,
645 "=== vect_mark_stmts_to_be_vectorized ===\n");
646
647 auto_vec<gimple *, 64> worklist;
648
649 /* 1. Init worklist. */
650 for (i = 0; i < nbbs; i++)
651 {
652 bb = bbs[i];
653 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
654 {
655 phi = gsi_stmt (si);
656 if (dump_enabled_p ())
657 {
658 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
660 }
661
662 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
663 vect_mark_relevant (&worklist, phi, relevant, live_p);
664 }
665 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
666 {
667 stmt = gsi_stmt (si);
668 if (dump_enabled_p ())
669 {
670 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
672 }
673
674 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
675 vect_mark_relevant (&worklist, stmt, relevant, live_p);
676 }
677 }
678
679 /* 2. Process_worklist */
680 while (worklist.length () > 0)
681 {
682 use_operand_p use_p;
683 ssa_op_iter iter;
684
685 stmt = worklist.pop ();
686 if (dump_enabled_p ())
687 {
688 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
689 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
690 }
691
692 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
693 (DEF_STMT) as relevant/irrelevant according to the relevance property
694 of STMT. */
695 stmt_vinfo = vinfo_for_stmt (stmt);
696 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
697
698 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
699 propagated as is to the DEF_STMTs of its USEs.
700
701 One exception is when STMT has been identified as defining a reduction
702 variable; in this case we set the relevance to vect_used_by_reduction.
703 This is because we distinguish between two kinds of relevant stmts -
704 those that are used by a reduction computation, and those that are
705 (also) used by a regular computation. This allows us later on to
706 identify stmts that are used solely by a reduction, and therefore the
707 order of the results that they produce does not have to be kept. */
708
709 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
710 {
711 case vect_reduction_def:
712 gcc_assert (relevant != vect_unused_in_scope);
713 if (relevant != vect_unused_in_scope
714 && relevant != vect_used_in_scope
715 && relevant != vect_used_by_reduction
716 && relevant != vect_used_only_live)
717 {
718 if (dump_enabled_p ())
719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
720 "unsupported use of reduction.\n");
721 return false;
722 }
723 break;
724
725 case vect_nested_cycle:
726 if (relevant != vect_unused_in_scope
727 && relevant != vect_used_in_outer_by_reduction
728 && relevant != vect_used_in_outer)
729 {
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "unsupported use of nested cycle.\n");
733
734 return false;
735 }
736 break;
737
738 case vect_double_reduction_def:
739 if (relevant != vect_unused_in_scope
740 && relevant != vect_used_by_reduction
741 && relevant != vect_used_only_live)
742 {
743 if (dump_enabled_p ())
744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
745 "unsupported use of double reduction.\n");
746
747 return false;
748 }
749 break;
750
751 default:
752 break;
753 }
754
755 if (is_pattern_stmt_p (stmt_vinfo))
756 {
757 /* Pattern statements are not inserted into the code, so
758 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
759 have to scan the RHS or function arguments instead. */
760 if (is_gimple_assign (stmt))
761 {
762 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
763 tree op = gimple_assign_rhs1 (stmt);
764
765 i = 1;
766 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
767 {
768 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
769 relevant, &worklist, false)
770 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
771 relevant, &worklist, false))
772 return false;
773 i = 2;
774 }
775 for (; i < gimple_num_ops (stmt); i++)
776 {
777 op = gimple_op (stmt, i);
778 if (TREE_CODE (op) == SSA_NAME
779 && !process_use (stmt, op, loop_vinfo, relevant,
780 &worklist, false))
781 return false;
782 }
783 }
784 else if (is_gimple_call (stmt))
785 {
786 for (i = 0; i < gimple_call_num_args (stmt); i++)
787 {
788 tree arg = gimple_call_arg (stmt, i);
789 if (!process_use (stmt, arg, loop_vinfo, relevant,
790 &worklist, false))
791 return false;
792 }
793 }
794 }
795 else
796 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
797 {
798 tree op = USE_FROM_PTR (use_p);
799 if (!process_use (stmt, op, loop_vinfo, relevant,
800 &worklist, false))
801 return false;
802 }
803
804 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
805 {
806 gather_scatter_info gs_info;
807 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
808 gcc_unreachable ();
809 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
810 &worklist, true))
811 return false;
812 }
813 } /* while worklist */
814
815 return true;
816 }
817
818
819 /* Function vect_model_simple_cost.
820
821 Models cost for simple operations, i.e. those that only emit ncopies of a
822 single op. Right now, this does not account for multiple insns that could
823 be generated for the single vector op. We will handle that shortly. */
824
825 void
826 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
827 enum vect_def_type *dt,
828 int ndts,
829 stmt_vector_for_cost *prologue_cost_vec,
830 stmt_vector_for_cost *body_cost_vec)
831 {
832 int i;
833 int inside_cost = 0, prologue_cost = 0;
834
835 /* The SLP costs were already calculated during SLP tree build. */
836 if (PURE_SLP_STMT (stmt_info))
837 return;
838
839 /* Cost the "broadcast" of a scalar operand into a vector operand.
840 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
841 cost model. */
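/* For example, a vector operation with one loop-invariant operand is
   costed as a single scalar_to_vec in the prologue plus NCOPIES
   vector_stmt costs in the loop body.  */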
842 for (i = 0; i < ndts; i++)
843 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
844 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
845 stmt_info, 0, vect_prologue);
846
847 /* Pass the inside-of-loop statements to the target-specific cost model. */
848 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
849 stmt_info, 0, vect_body);
850
851 if (dump_enabled_p ())
852 dump_printf_loc (MSG_NOTE, vect_location,
853 "vect_model_simple_cost: inside_cost = %d, "
854 "prologue_cost = %d .\n", inside_cost, prologue_cost);
855 }
856
857
858 /* Model cost for type demotion and promotion operations. PWR is normally
859 zero for single-step promotions and demotions. It will be one if
860 two-step promotion/demotion is required, and so on. Each additional
861 step doubles the number of instructions required. */
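/* For example, a two-step promotion (PWR == 1) is costed as
   vect_pow2 (1) + vect_pow2 (2) == 6 vec_promote_demote stmts,
   while a two-step demotion is costed as
   vect_pow2 (0) + vect_pow2 (1) == 3.  */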
862
863 static void
864 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
865 enum vect_def_type *dt, int pwr)
866 {
867 int i, tmp;
868 int inside_cost = 0, prologue_cost = 0;
869 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
870 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
871 void *target_cost_data;
872
873 /* The SLP costs were already calculated during SLP tree build. */
874 if (PURE_SLP_STMT (stmt_info))
875 return;
876
877 if (loop_vinfo)
878 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
879 else
880 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
881
882 for (i = 0; i < pwr + 1; i++)
883 {
884 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
885 (i + 1) : i;
886 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
887 vec_promote_demote, stmt_info, 0,
888 vect_body);
889 }
890
891 /* FORNOW: Assuming a maximum of 2 args per stmt. */
892 for (i = 0; i < 2; i++)
893 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
894 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
895 stmt_info, 0, vect_prologue);
896
897 if (dump_enabled_p ())
898 dump_printf_loc (MSG_NOTE, vect_location,
899 "vect_model_promotion_demotion_cost: inside_cost = %d, "
900 "prologue_cost = %d .\n", inside_cost, prologue_cost);
901 }
902
903 /* Function vect_model_store_cost
904
905 Models cost for stores. In the case of grouped accesses, one access
906 has the overhead of the grouped access attributed to it. */
907
908 void
909 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
910 vect_memory_access_type memory_access_type,
911 enum vect_def_type dt, slp_tree slp_node,
912 stmt_vector_for_cost *prologue_cost_vec,
913 stmt_vector_for_cost *body_cost_vec)
914 {
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
917 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
918 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
919
920 if (dt == vect_constant_def || dt == vect_external_def)
921 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
922 stmt_info, 0, vect_prologue);
923
924 /* Grouped stores update all elements in the group at once,
925 so we want the DR for the first statement. */
926 if (!slp_node && grouped_access_p)
927 {
928 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
929 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
930 }
931
932 /* True if we should include any once-per-group costs as well as
933 the cost of the statement itself. For SLP we only get called
934 once per group anyhow. */
935 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
936
937 /* We assume that the cost of a single store-lanes instruction is
938 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
939 access is instead being provided by a permute-and-store operation,
940 include the cost of the permutes. */
941 if (first_stmt_p
942 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
943 {
944 /* Uses high and low interleave or shuffle operations for each
945 needed permute. */
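/* For example, a group of 4 stores needs ceil_log2 (4) * 4 == 8
   permute statements for each vector copy.  */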
946 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
947 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
948 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
949 stmt_info, 0, vect_body);
950
951 if (dump_enabled_p ())
952 dump_printf_loc (MSG_NOTE, vect_location,
953 "vect_model_store_cost: strided group_size = %d .\n",
954 group_size);
955 }
956
957 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
958 /* Costs of the stores. */
959 if (memory_access_type == VMAT_ELEMENTWISE
960 || memory_access_type == VMAT_GATHER_SCATTER)
961 {
962 /* N scalar stores plus extracting the elements. */
963 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
964 inside_cost += record_stmt_cost (body_cost_vec,
965 ncopies * assumed_nunits,
966 scalar_store, stmt_info, 0, vect_body);
967 }
968 else
969 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
970
971 if (memory_access_type == VMAT_ELEMENTWISE
972 || memory_access_type == VMAT_STRIDED_SLP)
973 {
974 /* Extracting the elements to be stored: one vec_to_scalar per element. */
975 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
976 inside_cost += record_stmt_cost (body_cost_vec,
977 ncopies * assumed_nunits,
978 vec_to_scalar, stmt_info, 0, vect_body);
979 }
980
981 if (dump_enabled_p ())
982 dump_printf_loc (MSG_NOTE, vect_location,
983 "vect_model_store_cost: inside_cost = %d, "
984 "prologue_cost = %d .\n", inside_cost, prologue_cost);
985 }
986
987
988 /* Calculate cost of DR's memory access. */
989 void
990 vect_get_store_cost (struct data_reference *dr, int ncopies,
991 unsigned int *inside_cost,
992 stmt_vector_for_cost *body_cost_vec)
993 {
994 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
995 gimple *stmt = DR_STMT (dr);
996 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
997
998 switch (alignment_support_scheme)
999 {
1000 case dr_aligned:
1001 {
1002 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1003 vector_store, stmt_info, 0,
1004 vect_body);
1005
1006 if (dump_enabled_p ())
1007 dump_printf_loc (MSG_NOTE, vect_location,
1008 "vect_model_store_cost: aligned.\n");
1009 break;
1010 }
1011
1012 case dr_unaligned_supported:
1013 {
1014 /* Here, we assign an additional cost for the unaligned store. */
1015 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1016 unaligned_store, stmt_info,
1017 DR_MISALIGNMENT (dr), vect_body);
1018 if (dump_enabled_p ())
1019 dump_printf_loc (MSG_NOTE, vect_location,
1020 "vect_model_store_cost: unaligned supported by "
1021 "hardware.\n");
1022 break;
1023 }
1024
1025 case dr_unaligned_unsupported:
1026 {
1027 *inside_cost = VECT_MAX_COST;
1028
1029 if (dump_enabled_p ())
1030 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1031 "vect_model_store_cost: unsupported access.\n");
1032 break;
1033 }
1034
1035 default:
1036 gcc_unreachable ();
1037 }
1038 }
1039
1040
1041 /* Function vect_model_load_cost
1042
1043 Models cost for loads. In the case of grouped accesses, one access has
1044 the overhead of the grouped access attributed to it. Since unaligned
1045 accesses are supported for loads, we also account for the costs of the
1046 access scheme chosen. */
1047
1048 void
1049 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1050 vect_memory_access_type memory_access_type,
1051 slp_tree slp_node,
1052 stmt_vector_for_cost *prologue_cost_vec,
1053 stmt_vector_for_cost *body_cost_vec)
1054 {
1055 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1056 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1057 unsigned int inside_cost = 0, prologue_cost = 0;
1058 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1059
1060 /* Grouped loads read all elements in the group at once,
1061 so we want the DR for the first statement. */
1062 if (!slp_node && grouped_access_p)
1063 {
1064 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1065 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1066 }
1067
1068 /* True if we should include any once-per-group costs as well as
1069 the cost of the statement itself. For SLP we only get called
1070 once per group anyhow. */
1071 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1072
1073 /* We assume that the cost of a single load-lanes instruction is
1074 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1075 access is instead being provided by a load-and-permute operation,
1076 include the cost of the permutes. */
1077 if (first_stmt_p
1078 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1079 {
1080 /* Uses even and odd extract operations or shuffle operations
1081 for each needed permute. */
1082 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1083 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1084 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1085 stmt_info, 0, vect_body);
1086
1087 if (dump_enabled_p ())
1088 dump_printf_loc (MSG_NOTE, vect_location,
1089 "vect_model_load_cost: strided group_size = %d .\n",
1090 group_size);
1091 }
1092
1093 /* The loads themselves. */
1094 if (memory_access_type == VMAT_ELEMENTWISE
1095 || memory_access_type == VMAT_GATHER_SCATTER)
1096 {
1097 /* N scalar loads plus gathering them into a vector. */
1098 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1099 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1100 inside_cost += record_stmt_cost (body_cost_vec,
1101 ncopies * assumed_nunits,
1102 scalar_load, stmt_info, 0, vect_body);
1103 }
1104 else
1105 vect_get_load_cost (dr, ncopies, first_stmt_p,
1106 &inside_cost, &prologue_cost,
1107 prologue_cost_vec, body_cost_vec, true);
1108 if (memory_access_type == VMAT_ELEMENTWISE
1109 || memory_access_type == VMAT_STRIDED_SLP)
1110 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1111 stmt_info, 0, vect_body);
1112
1113 if (dump_enabled_p ())
1114 dump_printf_loc (MSG_NOTE, vect_location,
1115 "vect_model_load_cost: inside_cost = %d, "
1116 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1117 }
1118
1119
1120 /* Calculate cost of DR's memory access. */
1121 void
1122 vect_get_load_cost (struct data_reference *dr, int ncopies,
1123 bool add_realign_cost, unsigned int *inside_cost,
1124 unsigned int *prologue_cost,
1125 stmt_vector_for_cost *prologue_cost_vec,
1126 stmt_vector_for_cost *body_cost_vec,
1127 bool record_prologue_costs)
1128 {
1129 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1130 gimple *stmt = DR_STMT (dr);
1131 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1132
1133 switch (alignment_support_scheme)
1134 {
1135 case dr_aligned:
1136 {
1137 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1138 stmt_info, 0, vect_body);
1139
1140 if (dump_enabled_p ())
1141 dump_printf_loc (MSG_NOTE, vect_location,
1142 "vect_model_load_cost: aligned.\n");
1143
1144 break;
1145 }
1146 case dr_unaligned_supported:
1147 {
1148 /* Here, we assign an additional cost for the unaligned load. */
1149 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1150 unaligned_load, stmt_info,
1151 DR_MISALIGNMENT (dr), vect_body);
1152
1153 if (dump_enabled_p ())
1154 dump_printf_loc (MSG_NOTE, vect_location,
1155 "vect_model_load_cost: unaligned supported by "
1156 "hardware.\n");
1157
1158 break;
1159 }
1160 case dr_explicit_realign:
1161 {
1162 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1163 vector_load, stmt_info, 0, vect_body);
1164 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1165 vec_perm, stmt_info, 0, vect_body);
1166
1167 /* FIXME: If the misalignment remains fixed across the iterations of
1168 the containing loop, the following cost should be added to the
1169 prologue costs. */
1170 if (targetm.vectorize.builtin_mask_for_load)
1171 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1172 stmt_info, 0, vect_body);
1173
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_NOTE, vect_location,
1176 "vect_model_load_cost: explicit realign\n");
1177
1178 break;
1179 }
1180 case dr_explicit_realign_optimized:
1181 {
1182 if (dump_enabled_p ())
1183 dump_printf_loc (MSG_NOTE, vect_location,
1184 "vect_model_load_cost: unaligned software "
1185 "pipelined.\n");
1186
1187 /* Unaligned software pipeline has a load of an address, an initial
1188 load, and possibly a mask operation to "prime" the loop. However,
1189 if this is an access in a group of loads, which provide grouped
1190 access, then the above cost should only be considered for one
1191 access in the group. Inside the loop, there is a load op
1192 and a realignment op. */
1193
1194 if (add_realign_cost && record_prologue_costs)
1195 {
1196 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1197 vector_stmt, stmt_info,
1198 0, vect_prologue);
1199 if (targetm.vectorize.builtin_mask_for_load)
1200 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1201 vector_stmt, stmt_info,
1202 0, vect_prologue);
1203 }
1204
1205 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1206 stmt_info, 0, vect_body);
1207 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1208 stmt_info, 0, vect_body);
1209
1210 if (dump_enabled_p ())
1211 dump_printf_loc (MSG_NOTE, vect_location,
1212 "vect_model_load_cost: explicit realign optimized"
1213 "\n");
1214
1215 break;
1216 }
1217
1218 case dr_unaligned_unsupported:
1219 {
1220 *inside_cost = VECT_MAX_COST;
1221
1222 if (dump_enabled_p ())
1223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1224 "vect_model_load_cost: unsupported access.\n");
1225 break;
1226 }
1227
1228 default:
1229 gcc_unreachable ();
1230 }
1231 }
1232
1233 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1234 the loop preheader for the vectorized stmt STMT. */
1235
1236 static void
1237 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1238 {
1239 if (gsi)
1240 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1241 else
1242 {
1243 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1244 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1245
1246 if (loop_vinfo)
1247 {
1248 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1249 basic_block new_bb;
1250 edge pe;
1251
1252 if (nested_in_vect_loop_p (loop, stmt))
1253 loop = loop->inner;
1254
1255 pe = loop_preheader_edge (loop);
1256 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1257 gcc_assert (!new_bb);
1258 }
1259 else
1260 {
1261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1262 basic_block bb;
1263 gimple_stmt_iterator gsi_bb_start;
1264
1265 gcc_assert (bb_vinfo);
1266 bb = BB_VINFO_BB (bb_vinfo);
1267 gsi_bb_start = gsi_after_labels (bb);
1268 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1269 }
1270 }
1271
1272 if (dump_enabled_p ())
1273 {
1274 dump_printf_loc (MSG_NOTE, vect_location,
1275 "created new init_stmt: ");
1276 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1277 }
1278 }
1279
1280 /* Function vect_init_vector.
1281
1282 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1283 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1284 vector type, a vector with all elements equal to VAL is created first.
1285 Place the initialization at GSI if it is not NULL. Otherwise, place the
1286 initialization at the loop preheader.
1287 Return the DEF of INIT_STMT.
1288 It will be used in the vectorization of STMT. */
1289
1290 tree
1291 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1292 {
1293 gimple *init_stmt;
1294 tree new_temp;
1295
1296 /* We abuse this function to push something to an SSA name with initial value 'val'. */
1297 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1298 {
1299 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1300 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1301 {
1302 /* Scalar boolean value should be transformed into
1303 all zeros or all ones value before building a vector. */
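/* For example, a nonzero scalar truth value is canonicalized to an
   all-ones element so that the vector built from it is a valid
   boolean vector mask.  */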
1304 if (VECTOR_BOOLEAN_TYPE_P (type))
1305 {
1306 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1307 tree false_val = build_zero_cst (TREE_TYPE (type));
1308
1309 if (CONSTANT_CLASS_P (val))
1310 val = integer_zerop (val) ? false_val : true_val;
1311 else
1312 {
1313 new_temp = make_ssa_name (TREE_TYPE (type));
1314 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1315 val, true_val, false_val);
1316 vect_init_vector_1 (stmt, init_stmt, gsi);
1317 val = new_temp;
1318 }
1319 }
1320 else if (CONSTANT_CLASS_P (val))
1321 val = fold_convert (TREE_TYPE (type), val);
1322 else
1323 {
1324 new_temp = make_ssa_name (TREE_TYPE (type));
1325 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1326 init_stmt = gimple_build_assign (new_temp,
1327 fold_build1 (VIEW_CONVERT_EXPR,
1328 TREE_TYPE (type),
1329 val));
1330 else
1331 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1332 vect_init_vector_1 (stmt, init_stmt, gsi);
1333 val = new_temp;
1334 }
1335 }
1336 val = build_vector_from_val (type, val);
1337 }
1338
1339 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1340 init_stmt = gimple_build_assign (new_temp, val);
1341 vect_init_vector_1 (stmt, init_stmt, gsi);
1342 return new_temp;
1343 }
1344
1345 /* Function vect_get_vec_def_for_operand_1.
1346
1347 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1348 DT that will be used in the vectorized stmt. */
1349
1350 tree
1351 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1352 {
1353 tree vec_oprnd;
1354 gimple *vec_stmt;
1355 stmt_vec_info def_stmt_info = NULL;
1356
1357 switch (dt)
1358 {
1359 /* operand is a constant or a loop invariant. */
1360 case vect_constant_def:
1361 case vect_external_def:
1362 /* Code should use vect_get_vec_def_for_operand. */
1363 gcc_unreachable ();
1364
1365 /* operand is defined inside the loop. */
1366 case vect_internal_def:
1367 {
1368 /* Get the def from the vectorized stmt. */
1369 def_stmt_info = vinfo_for_stmt (def_stmt);
1370
1371 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1372 /* Get vectorized pattern statement. */
1373 if (!vec_stmt
1374 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1375 && !STMT_VINFO_RELEVANT (def_stmt_info))
1376 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1377 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1378 gcc_assert (vec_stmt);
1379 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1380 vec_oprnd = PHI_RESULT (vec_stmt);
1381 else if (is_gimple_call (vec_stmt))
1382 vec_oprnd = gimple_call_lhs (vec_stmt);
1383 else
1384 vec_oprnd = gimple_assign_lhs (vec_stmt);
1385 return vec_oprnd;
1386 }
1387
1388 /* operand is defined by a loop header phi. */
1389 case vect_reduction_def:
1390 case vect_double_reduction_def:
1391 case vect_nested_cycle:
1392 case vect_induction_def:
1393 {
1394 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1395
1396 /* Get the def from the vectorized stmt. */
1397 def_stmt_info = vinfo_for_stmt (def_stmt);
1398 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1399 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1400 vec_oprnd = PHI_RESULT (vec_stmt);
1401 else
1402 vec_oprnd = gimple_get_lhs (vec_stmt);
1403 return vec_oprnd;
1404 }
1405
1406 default:
1407 gcc_unreachable ();
1408 }
1409 }
1410
1411
1412 /* Function vect_get_vec_def_for_operand.
1413
1414 OP is an operand in STMT. This function returns a (vector) def that will be
1415 used in the vectorized stmt for STMT.
1416
1417 In the case that OP is an SSA_NAME which is defined in the loop, then
1418 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1419
1420 In case OP is an invariant or constant, a new stmt that creates a vector def
1421 needs to be introduced. VECTYPE may be used to specify a required type for
1422 the vector invariant. */
1423
1424 tree
1425 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1426 {
1427 gimple *def_stmt;
1428 enum vect_def_type dt;
1429 bool is_simple_use;
1430 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1431 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1432
1433 if (dump_enabled_p ())
1434 {
1435 dump_printf_loc (MSG_NOTE, vect_location,
1436 "vect_get_vec_def_for_operand: ");
1437 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1438 dump_printf (MSG_NOTE, "\n");
1439 }
1440
1441 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1442 gcc_assert (is_simple_use);
1443 if (def_stmt && dump_enabled_p ())
1444 {
1445 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1446 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1447 }
1448
1449 if (dt == vect_constant_def || dt == vect_external_def)
1450 {
1451 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1452 tree vector_type;
1453
1454 if (vectype)
1455 vector_type = vectype;
1456 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1457 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1458 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1459 else
1460 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1461
1462 gcc_assert (vector_type);
1463 return vect_init_vector (stmt, op, vector_type, NULL);
1464 }
1465 else
1466 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1467 }
1468
1469
1470 /* Function vect_get_vec_def_for_stmt_copy
1471
1472 Return a vector-def for an operand. This function is used when the
1473 vectorized stmt to be created (by the caller to this function) is a "copy"
1474 created in case the vectorized result cannot fit in one vector, and several
1475 copies of the vector-stmt are required. In this case the vector-def is
1476 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1477 of the stmt that defines VEC_OPRND.
1478 DT is the type of the vector def VEC_OPRND.
1479
1480 Context:
1481 In case the vectorization factor (VF) is bigger than the number
1482 of elements that can fit in a vectype (nunits), we have to generate
1483 more than one vector stmt to vectorize the scalar stmt. This situation
1484 arises when there are multiple data-types operated upon in the loop; the
1485 smallest data-type determines the VF, and as a result, when vectorizing
1486 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1487 vector stmt (each computing a vector of 'nunits' results, and together
1488 computing 'VF' results in each iteration). This function is called when
1489 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1490 which VF=16 and nunits=4, so the number of copies required is 4):
1491
1492 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1493
1494 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1495 VS1.1: vx.1 = memref1 VS1.2
1496 VS1.2: vx.2 = memref2 VS1.3
1497 VS1.3: vx.3 = memref3
1498
1499 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1500 VSnew.1: vz1 = vx.1 + ... VSnew.2
1501 VSnew.2: vz2 = vx.2 + ... VSnew.3
1502 VSnew.3: vz3 = vx.3 + ...
1503
1504 The vectorization of S1 is explained in vectorizable_load.
1505 The vectorization of S2:
1506 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1507 the function 'vect_get_vec_def_for_operand' is called to
1508 get the relevant vector-def for each operand of S2. For operand x it
1509 returns the vector-def 'vx.0'.
1510
1511 To create the remaining copies of the vector-stmt (VSnew.j), this
1512 function is called to get the relevant vector-def for each operand. It is
1513 obtained from the respective VS1.j stmt, which is recorded in the
1514 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1515
1516 For example, to obtain the vector-def 'vx.1' in order to create the
1517 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1518 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1519 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1520 and return its def ('vx.1').
1521 Overall, to create the above sequence this function will be called 3 times:
1522 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1523 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1524 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1525
1526 tree
1527 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1528 {
1529 gimple *vec_stmt_for_operand;
1530 stmt_vec_info def_stmt_info;
1531
1532 /* Do nothing; can reuse same def. */
1533 if (dt == vect_external_def || dt == vect_constant_def)
1534 return vec_oprnd;
1535
1536 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1537 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1538 gcc_assert (def_stmt_info);
1539 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1540 gcc_assert (vec_stmt_for_operand);
1541 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1542 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1543 else
1544 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1545 return vec_oprnd;
1546 }
1547
1548
1549 /* Get vectorized definitions for the operands to create a copy of an original
1550 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1551
1552 void
1553 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1554 vec<tree> *vec_oprnds0,
1555 vec<tree> *vec_oprnds1)
1556 {
1557 tree vec_oprnd = vec_oprnds0->pop ();
1558
1559 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1560 vec_oprnds0->quick_push (vec_oprnd);
1561
1562 if (vec_oprnds1 && vec_oprnds1->length ())
1563 {
1564 vec_oprnd = vec_oprnds1->pop ();
1565 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1566 vec_oprnds1->quick_push (vec_oprnd);
1567 }
1568 }
1569
1570
1571 /* Get vectorized definitions for OP0 and OP1. */
1572
1573 void
1574 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1575 vec<tree> *vec_oprnds0,
1576 vec<tree> *vec_oprnds1,
1577 slp_tree slp_node)
1578 {
1579 if (slp_node)
1580 {
1581 int nops = (op1 == NULL_TREE) ? 1 : 2;
1582 auto_vec<tree> ops (nops);
1583 auto_vec<vec<tree> > vec_defs (nops);
1584
1585 ops.quick_push (op0);
1586 if (op1)
1587 ops.quick_push (op1);
1588
1589 vect_get_slp_defs (ops, slp_node, &vec_defs);
1590
1591 *vec_oprnds0 = vec_defs[0];
1592 if (op1)
1593 *vec_oprnds1 = vec_defs[1];
1594 }
1595 else
1596 {
1597 tree vec_oprnd;
1598
1599 vec_oprnds0->create (1);
1600 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1601 vec_oprnds0->quick_push (vec_oprnd);
1602
1603 if (op1)
1604 {
1605 vec_oprnds1->create (1);
1606 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1607 vec_oprnds1->quick_push (vec_oprnd);
1608 }
1609 }
1610 }
1611
1612
1613 /* Function vect_finish_stmt_generation.
1614
1615 Insert a new stmt. */
1616
1617 void
1618 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1619 gimple_stmt_iterator *gsi)
1620 {
1621 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1622 vec_info *vinfo = stmt_info->vinfo;
1623
1624 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1625
1626 if (!gsi_end_p (*gsi)
1627 && gimple_has_mem_ops (vec_stmt))
1628 {
1629 gimple *at_stmt = gsi_stmt (*gsi);
1630 tree vuse = gimple_vuse (at_stmt);
1631 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1632 {
1633 tree vdef = gimple_vdef (at_stmt);
1634 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1635 /* If we have an SSA vuse and insert a store, update virtual
1636 SSA form to avoid triggering the renamer. Do so only
1637 if we can easily see all uses - which is what almost always
1638 happens with the way vectorized stmts are inserted. */
1639 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1640 && ((is_gimple_assign (vec_stmt)
1641 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1642 || (is_gimple_call (vec_stmt)
1643 && !(gimple_call_flags (vec_stmt)
1644 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1645 {
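/* Give VEC_STMT its own virtual definition and make the old stmt at
   *GSI use it, keeping the virtual use-def chain valid without
   running the SSA renamer.  */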
1646 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1647 gimple_set_vdef (vec_stmt, new_vdef);
1648 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1649 }
1650 }
1651 }
1652 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1653
1654 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1655
1656 if (dump_enabled_p ())
1657 {
1658 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1660 }
1661
1662 gimple_set_location (vec_stmt, gimple_location (stmt));
1663
1664 /* While EH edges will generally prevent vectorization, stmt might
1665 e.g. be in a must-not-throw region. Ensure newly created stmts
1666 that could throw are part of the same region. */
1667 int lp_nr = lookup_stmt_eh_lp (stmt);
1668 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1669 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1670 }
1671
1672 /* We want to vectorize a call to combined function CFN with function
1673 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1674 as the types of all inputs. Check whether this is possible using
1675 an internal function, returning its code if so or IFN_LAST if not. */
1676
1677 static internal_fn
1678 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1679 tree vectype_out, tree vectype_in)
1680 {
1681 internal_fn ifn;
1682 if (internal_fn_p (cfn))
1683 ifn = as_internal_fn (cfn);
1684 else
1685 ifn = associated_internal_fn (fndecl);
1686 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1687 {
1688 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1689 if (info.vectorizable)
1690 {
1691 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1692 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1693 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1694 OPTIMIZE_FOR_SPEED))
1695 return ifn;
1696 }
1697 }
1698 return IFN_LAST;
1699 }
1700
1701
1702 static tree permute_vec_elements (tree, tree, tree, gimple *,
1703 gimple_stmt_iterator *);
1704
1705 /* STMT is a non-strided load or store, meaning that it accesses
1706 elements with a known constant step. Return -1 if that step
1707 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1708
1709 static int
1710 compare_step_with_zero (gimple *stmt)
1711 {
1712 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1713 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1714 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
1715 size_zero_node);
1716 }
1717
1718 /* If the target supports a permute mask that reverses the elements in
1719 a vector of type VECTYPE, return that mask, otherwise return null. */
1720
1721 static tree
1722 perm_mask_for_reverse (tree vectype)
1723 {
1724 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1725
1726 /* The encoding has a single stepped pattern. */
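       /* For example, with eight elements the pushed values are { 7, 6, 5 };
          the series continues downwards, so the full selector is
          { 7, 6, 5, 4, 3, 2, 1, 0 }.  */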
1727 vec_perm_builder sel (nunits, 1, 3);
1728 for (int i = 0; i < 3; ++i)
1729 sel.quick_push (nunits - 1 - i);
1730
1731 vec_perm_indices indices (sel, 1, nunits);
1732 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
1733 return NULL_TREE;
1734 return vect_gen_perm_mask_checked (vectype, indices);
1735 }
1736
1737 /* A subroutine of get_load_store_type, with a subset of the same
1738 arguments. Handle the case where STMT is part of a grouped load
1739 or store.
1740
1741 For stores, the statements in the group are all consecutive
1742 and there is no gap at the end. For loads, the statements in the
1743 group might not be consecutive; there can be gaps between statements
1744 as well as at the end. */
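   /* For example, the stores "a[2*i] = x; a[2*i+1] = y;" form a group of
      two consecutive accesses with no gap, whereas loads of only a[3*i]
      and a[3*i+1] leave a one-element gap at the end of each group.  */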
1745
1746 static bool
1747 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
1748 vec_load_store_type vls_type,
1749 vect_memory_access_type *memory_access_type)
1750 {
1751 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1752 vec_info *vinfo = stmt_info->vinfo;
1753 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1754 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1755 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1756 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1757 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1758 bool single_element_p = (stmt == first_stmt
1759 && !GROUP_NEXT_ELEMENT (stmt_info));
1760 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
1761 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1762
1763 /* True if the vectorized statements would access beyond the last
1764 statement in the group. */
1765 bool overrun_p = false;
1766
1767 /* True if we can cope with such overrun by peeling for gaps, so that
1768 there is at least one final scalar iteration after the vector loop. */
1769 bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);
1770
1771 /* There can only be a gap at the end of the group if the stride is
1772 known at compile time. */
1773 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
1774
1775 /* Stores can't yet have gaps. */
1776 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
1777
1778 if (slp)
1779 {
1780 if (STMT_VINFO_STRIDED_P (stmt_info))
1781 {
1782 /* Try to use consecutive accesses of GROUP_SIZE elements,
1783 separated by the stride, until we have a complete vector.
1784 Fall back to scalar accesses if that isn't possible. */
1785 if (multiple_p (nunits, group_size))
1786 *memory_access_type = VMAT_STRIDED_SLP;
1787 else
1788 *memory_access_type = VMAT_ELEMENTWISE;
1789 }
1790 else
1791 {
1792 overrun_p = loop_vinfo && gap != 0;
1793 if (overrun_p && vls_type != VLS_LOAD)
1794 {
1795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1796 "Grouped store with gaps requires"
1797 " non-consecutive accesses\n");
1798 return false;
1799 }
1800 /* An overrun is fine if the trailing elements are smaller
1801 than the alignment boundary B. Every vector access will
1802 be a multiple of B and so we are guaranteed to access a
1803 non-gap element in the same B-sized block. */
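              /* For example, with a known alignment of 16 bytes and 4-byte
                 elements, a gap of up to three trailing elements keeps the
                 overrunning part of the access inside the 16-byte block
                 that contains the last real group element.  */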
1804 if (overrun_p
1805 && gap < (vect_known_alignment_in_bytes (first_dr)
1806 / vect_get_scalar_dr_size (first_dr)))
1807 overrun_p = false;
1808 if (overrun_p && !can_overrun_p)
1809 {
1810 if (dump_enabled_p ())
1811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1812 "Peeling for outer loop is not supported\n");
1813 return false;
1814 }
1815 *memory_access_type = VMAT_CONTIGUOUS;
1816 }
1817 }
1818 else
1819 {
1820 /* We can always handle this case using elementwise accesses,
1821 but see if something more efficient is available. */
1822 *memory_access_type = VMAT_ELEMENTWISE;
1823
1824 /* If there is a gap at the end of the group then these optimizations
1825 would access excess elements in the last iteration. */
1826 bool would_overrun_p = (gap != 0);
1827 /* An overrun is fine if the trailing elements are smaller than the
1828 alignment boundary B. Every vector access will be a multiple of B
1829 and so we are guaranteed to access a non-gap element in the
1830 same B-sized block. */
1831 if (would_overrun_p
1832 && gap < (vect_known_alignment_in_bytes (first_dr)
1833 / vect_get_scalar_dr_size (first_dr)))
1834 would_overrun_p = false;
1835
1836 if (!STMT_VINFO_STRIDED_P (stmt_info)
1837 && (can_overrun_p || !would_overrun_p)
1838 && compare_step_with_zero (stmt) > 0)
1839 {
1840 /* First try using LOAD/STORE_LANES. */
1841 if (vls_type == VLS_LOAD
1842 ? vect_load_lanes_supported (vectype, group_size)
1843 : vect_store_lanes_supported (vectype, group_size))
1844 {
1845 *memory_access_type = VMAT_LOAD_STORE_LANES;
1846 overrun_p = would_overrun_p;
1847 }
1848
1849 /* If that fails, try using permuting loads. */
1850 if (*memory_access_type == VMAT_ELEMENTWISE
1851 && (vls_type == VLS_LOAD
1852 ? vect_grouped_load_supported (vectype, single_element_p,
1853 group_size)
1854 : vect_grouped_store_supported (vectype, group_size)))
1855 {
1856 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1857 overrun_p = would_overrun_p;
1858 }
1859 }
1860 }
1861
1862 if (vls_type != VLS_LOAD && first_stmt == stmt)
1863 {
1864 /* STMT is the leader of the group. Check the operands of all the
1865 stmts of the group. */
1866 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1867 while (next_stmt)
1868 {
1869 gcc_assert (gimple_assign_single_p (next_stmt));
1870 tree op = gimple_assign_rhs1 (next_stmt);
1871 gimple *def_stmt;
1872 enum vect_def_type dt;
1873 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1874 {
1875 if (dump_enabled_p ())
1876 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1877 "use not simple.\n");
1878 return false;
1879 }
1880 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1881 }
1882 }
1883
1884 if (overrun_p)
1885 {
1886 gcc_assert (can_overrun_p);
1887 if (dump_enabled_p ())
1888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1889 "Data access with gaps requires scalar "
1890 "epilogue loop\n");
1891 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1892 }
1893
1894 return true;
1895 }
1896
1897 /* A subroutine of get_load_store_type, with a subset of the same
1898 arguments. Handle the case where STMT is a load or store that
1899 accesses consecutive elements with a negative step. */
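/* For example, in

     for (i = n - 1; i >= 0; --i)
       a[i] = b[i];

   both data references step backwards through memory by one element per
   iteration; if the target can reverse a vector we can still use
   contiguous accesses (VMAT_CONTIGUOUS_REVERSE), otherwise we fall back
   to elementwise accesses.  */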
1900
1901 static vect_memory_access_type
1902 get_negative_load_store_type (gimple *stmt, tree vectype,
1903 vec_load_store_type vls_type,
1904 unsigned int ncopies)
1905 {
1906 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1907 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1908 dr_alignment_support alignment_support_scheme;
1909
1910 if (ncopies > 1)
1911 {
1912 if (dump_enabled_p ())
1913 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1914 "multiple types with negative step.\n");
1915 return VMAT_ELEMENTWISE;
1916 }
1917
1918 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1919 if (alignment_support_scheme != dr_aligned
1920 && alignment_support_scheme != dr_unaligned_supported)
1921 {
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "negative step but alignment required.\n");
1925 return VMAT_ELEMENTWISE;
1926 }
1927
1928 if (vls_type == VLS_STORE_INVARIANT)
1929 {
1930 if (dump_enabled_p ())
1931 dump_printf_loc (MSG_NOTE, vect_location,
1932 "negative step with invariant source;"
1933 " no permute needed.\n");
1934 return VMAT_CONTIGUOUS_DOWN;
1935 }
1936
1937 if (!perm_mask_for_reverse (vectype))
1938 {
1939 if (dump_enabled_p ())
1940 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1941 "negative step and reversing not supported.\n");
1942 return VMAT_ELEMENTWISE;
1943 }
1944
1945 return VMAT_CONTIGUOUS_REVERSE;
1946 }
1947
1948 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1949 if there is a memory access type that the vectorized form can use,
1950 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1951 or scatters, fill in GS_INFO accordingly.
1952
1953 SLP says whether we're performing SLP rather than loop vectorization.
1954 VECTYPE is the vector type that the vectorized statements will use.
1955 NCOPIES is the number of vector statements that will be needed. */
1956
1957 static bool
1958 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1959 vec_load_store_type vls_type, unsigned int ncopies,
1960 vect_memory_access_type *memory_access_type,
1961 gather_scatter_info *gs_info)
1962 {
1963 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1964 vec_info *vinfo = stmt_info->vinfo;
1965 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1966 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1967 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1968 {
1969 *memory_access_type = VMAT_GATHER_SCATTER;
1970 gimple *def_stmt;
1971 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1972 gcc_unreachable ();
1973 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1974 &gs_info->offset_dt,
1975 &gs_info->offset_vectype))
1976 {
1977 if (dump_enabled_p ())
1978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1979 "%s index use not simple.\n",
1980 vls_type == VLS_LOAD ? "gather" : "scatter");
1981 return false;
1982 }
1983 }
1984 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1985 {
1986 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1987 memory_access_type))
1988 return false;
1989 }
1990 else if (STMT_VINFO_STRIDED_P (stmt_info))
1991 {
1992 gcc_assert (!slp);
1993 *memory_access_type = VMAT_ELEMENTWISE;
1994 }
1995 else
1996 {
1997 int cmp = compare_step_with_zero (stmt);
1998 if (cmp < 0)
1999 *memory_access_type = get_negative_load_store_type
2000 (stmt, vectype, vls_type, ncopies);
2001 else if (cmp == 0)
2002 {
2003 gcc_assert (vls_type == VLS_LOAD);
2004 *memory_access_type = VMAT_INVARIANT;
2005 }
2006 else
2007 *memory_access_type = VMAT_CONTIGUOUS;
2008 }
2009
2010 if ((*memory_access_type == VMAT_ELEMENTWISE
2011 || *memory_access_type == VMAT_STRIDED_SLP)
2012 && !nunits.is_constant ())
2013 {
2014 if (dump_enabled_p ())
2015 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2016 "Not using elementwise accesses due to variable "
2017 "vectorization factor.\n");
2018 return false;
2019 }
2020
2021 /* FIXME: At the moment the cost model seems to underestimate the
2022 cost of using elementwise accesses. This check preserves the
2023 traditional behavior until that can be fixed. */
2024 if (*memory_access_type == VMAT_ELEMENTWISE
2025 && !STMT_VINFO_STRIDED_P (stmt_info))
2026 {
2027 if (dump_enabled_p ())
2028 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2029 "not falling back to elementwise accesses\n");
2030 return false;
2031 }
2032 return true;
2033 }
2034
2035 /* Function vectorizable_mask_load_store.
2036
2037 Check if STMT performs a conditional load or store that can be vectorized.
2038 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2039 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2040 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2041
2042 static bool
2043 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2044 gimple **vec_stmt, slp_tree slp_node)
2045 {
2046 tree vec_dest = NULL;
2047 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2048 stmt_vec_info prev_stmt_info;
2049 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2050 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2051 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2052 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2053 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2054 tree rhs_vectype = NULL_TREE;
2055 tree mask_vectype;
2056 tree elem_type;
2057 gimple *new_stmt;
2058 tree dummy;
2059 tree dataref_ptr = NULL_TREE;
2060 gimple *ptr_incr;
2061 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2062 int ncopies;
2063 int i, j;
2064 bool inv_p;
2065 gather_scatter_info gs_info;
2066 vec_load_store_type vls_type;
2067 tree mask;
2068 gimple *def_stmt;
2069 enum vect_def_type dt;
2070
2071 if (slp_node != NULL)
2072 return false;
2073
2074 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2075 gcc_assert (ncopies >= 1);
2076
2077 mask = gimple_call_arg (stmt, 2);
2078
2079 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2080 return false;
2081
2082 /* FORNOW. This restriction should be relaxed. */
2083 if (nested_in_vect_loop && ncopies > 1)
2084 {
2085 if (dump_enabled_p ())
2086 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2087 "multiple types in nested loop.");
2088 return false;
2089 }
2090
2091 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2092 return false;
2093
2094 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2095 && ! vec_stmt)
2096 return false;
2097
2098 if (!STMT_VINFO_DATA_REF (stmt_info))
2099 return false;
2100
2101 elem_type = TREE_TYPE (vectype);
2102
2103 if (TREE_CODE (mask) != SSA_NAME)
2104 return false;
2105
2106 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2107 return false;
2108
2109 if (!mask_vectype)
2110 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2111
2112 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2113 || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2114 TYPE_VECTOR_SUBPARTS (vectype)))
2115 return false;
2116
2117 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2118 {
2119 tree rhs = gimple_call_arg (stmt, 3);
2120 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2121 return false;
2122 if (dt == vect_constant_def || dt == vect_external_def)
2123 vls_type = VLS_STORE_INVARIANT;
2124 else
2125 vls_type = VLS_STORE;
2126 }
2127 else
2128 vls_type = VLS_LOAD;
2129
2130 vect_memory_access_type memory_access_type;
2131 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2132 &memory_access_type, &gs_info))
2133 return false;
2134
2135 if (memory_access_type == VMAT_GATHER_SCATTER)
2136 {
2137 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2138 tree masktype
2139 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2140 if (TREE_CODE (masktype) == INTEGER_TYPE)
2141 {
2142 if (dump_enabled_p ())
2143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2144 "masked gather with integer mask not supported.");
2145 return false;
2146 }
2147 }
2148 else if (memory_access_type != VMAT_CONTIGUOUS)
2149 {
2150 if (dump_enabled_p ())
2151 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2152 "unsupported access type for masked %s.\n",
2153 vls_type == VLS_LOAD ? "load" : "store");
2154 return false;
2155 }
2156 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2157 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2158 TYPE_MODE (mask_vectype),
2159 vls_type == VLS_LOAD)
2160 || (rhs_vectype
2161 && !useless_type_conversion_p (vectype, rhs_vectype)))
2162 return false;
2163
2164 if (!vec_stmt) /* transformation not required. */
2165 {
2166 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2167 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2168 if (vls_type == VLS_LOAD)
2169 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2170 NULL, NULL, NULL);
2171 else
2172 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2173 dt, NULL, NULL, NULL);
2174 return true;
2175 }
2176 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2177
2178 /* Transform. */
2179
2180 if (memory_access_type == VMAT_GATHER_SCATTER)
2181 {
2182 tree vec_oprnd0 = NULL_TREE, op;
2183 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2184 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2185 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2186 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2187 tree mask_perm_mask = NULL_TREE;
2188 edge pe = loop_preheader_edge (loop);
2189 gimple_seq seq;
2190 basic_block new_bb;
2191 enum { NARROW, NONE, WIDEN } modifier;
2192 poly_uint64 gather_off_nunits
2193 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2194
2195 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2196 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2197 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2198 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2199 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2200 scaletype = TREE_VALUE (arglist);
2201 gcc_checking_assert (types_compatible_p (srctype, rettype)
2202 && types_compatible_p (srctype, masktype));
2203
2204 if (known_eq (nunits, gather_off_nunits))
2205 modifier = NONE;
2206 else if (known_eq (nunits * 2, gather_off_nunits))
2207 {
2208 modifier = WIDEN;
2209
2210 /* Currently widening gathers and scatters are only supported for
2211 fixed-length vectors. */
2212 int count = gather_off_nunits.to_constant ();
2213 vec_perm_builder sel (count, count, 1);
2214 for (i = 0; i < count; ++i)
2215 sel.quick_push (i | (count / 2));
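          /* For example, with COUNT == 4 the selector is { 2, 3, 2, 3 },
             which moves the high half of the offset vector into the low
             half for the odd-numbered copies.  */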
2216
2217 vec_perm_indices indices (sel, 1, count);
2218 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2219 indices);
2220 }
2221 else if (known_eq (nunits, gather_off_nunits * 2))
2222 {
2223 modifier = NARROW;
2224
2225 /* Currently narrowing gathers and scatters are only supported for
2226 fixed-length vectors. */
2227 int count = nunits.to_constant ();
2228 vec_perm_builder sel (count, count, 1);
2229 sel.quick_grow (count);
2230 for (i = 0; i < count; ++i)
2231 sel[i] = i < count / 2 ? i : i + count / 2;
2232 vec_perm_indices indices (sel, 2, count);
2233 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2234
2235 ncopies *= 2;
2236 for (i = 0; i < count; ++i)
2237 sel[i] = i | (count / 2);
2238 indices.new_vector (sel, 2, count);
2239 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2240 }
2241 else
2242 gcc_unreachable ();
2243
2244 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2245
2246 ptr = fold_convert (ptrtype, gs_info.base);
2247 if (!is_gimple_min_invariant (ptr))
2248 {
2249 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2250 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2251 gcc_assert (!new_bb);
2252 }
2253
2254 scale = build_int_cst (scaletype, gs_info.scale);
2255
2256 prev_stmt_info = NULL;
2257 for (j = 0; j < ncopies; ++j)
2258 {
2259 if (modifier == WIDEN && (j & 1))
2260 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2261 perm_mask, stmt, gsi);
2262 else if (j == 0)
2263 op = vec_oprnd0
2264 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2265 else
2266 op = vec_oprnd0
2267 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2268
2269 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2270 {
2271 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2272 TYPE_VECTOR_SUBPARTS (idxtype)));
2273 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2274 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2275 new_stmt
2276 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2277 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2278 op = var;
2279 }
2280
2281 if (mask_perm_mask && (j & 1))
2282 mask_op = permute_vec_elements (mask_op, mask_op,
2283 mask_perm_mask, stmt, gsi);
2284 else
2285 {
2286 if (j == 0)
2287 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2288 else
2289 {
2290 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2291 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2292 }
2293
2294 mask_op = vec_mask;
2295 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2296 {
2297 gcc_assert
2298 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2299 TYPE_VECTOR_SUBPARTS (masktype)));
2300 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2301 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2302 new_stmt
2303 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2305 mask_op = var;
2306 }
2307 }
2308
2309 new_stmt
2310 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2311 scale);
2312
2313 if (!useless_type_conversion_p (vectype, rettype))
2314 {
2315 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2316 TYPE_VECTOR_SUBPARTS (rettype)));
2317 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2318 gimple_call_set_lhs (new_stmt, op);
2319 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2320 var = make_ssa_name (vec_dest);
2321 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2322 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2323 }
2324 else
2325 {
2326 var = make_ssa_name (vec_dest, new_stmt);
2327 gimple_call_set_lhs (new_stmt, var);
2328 }
2329
2330 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2331
2332 if (modifier == NARROW)
2333 {
2334 if ((j & 1) == 0)
2335 {
2336 prev_res = var;
2337 continue;
2338 }
2339 var = permute_vec_elements (prev_res, var,
2340 perm_mask, stmt, gsi);
2341 new_stmt = SSA_NAME_DEF_STMT (var);
2342 }
2343
2344 if (prev_stmt_info == NULL)
2345 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2346 else
2347 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2348 prev_stmt_info = vinfo_for_stmt (new_stmt);
2349 }
2350
2351 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2352 from the IL. */
2353 if (STMT_VINFO_RELATED_STMT (stmt_info))
2354 {
2355 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2356 stmt_info = vinfo_for_stmt (stmt);
2357 }
2358 tree lhs = gimple_call_lhs (stmt);
2359 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2360 set_vinfo_for_stmt (new_stmt, stmt_info);
2361 set_vinfo_for_stmt (stmt, NULL);
2362 STMT_VINFO_STMT (stmt_info) = new_stmt;
2363 gsi_replace (gsi, new_stmt, true);
2364 return true;
2365 }
2366 else if (vls_type != VLS_LOAD)
2367 {
2368 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2369 prev_stmt_info = NULL;
2370 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2371 for (i = 0; i < ncopies; i++)
2372 {
2373 unsigned align, misalign;
2374
2375 if (i == 0)
2376 {
2377 tree rhs = gimple_call_arg (stmt, 3);
2378 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2379 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2380 mask_vectype);
2381 /* We should have caught mismatched types earlier. */
2382 gcc_assert (useless_type_conversion_p (vectype,
2383 TREE_TYPE (vec_rhs)));
2384 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2385 NULL_TREE, &dummy, gsi,
2386 &ptr_incr, false, &inv_p);
2387 gcc_assert (!inv_p);
2388 }
2389 else
2390 {
2391 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2392 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2393 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2394 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2395 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2396 TYPE_SIZE_UNIT (vectype));
2397 }
2398
2399 align = DR_TARGET_ALIGNMENT (dr);
2400 if (aligned_access_p (dr))
2401 misalign = 0;
2402 else if (DR_MISALIGNMENT (dr) == -1)
2403 {
2404 align = TYPE_ALIGN_UNIT (elem_type);
2405 misalign = 0;
2406 }
2407 else
2408 misalign = DR_MISALIGNMENT (dr);
2409 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2410 misalign);
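          /* The second argument of the masked access describes its
             alignment: the full target alignment when the access is known
             to be aligned, otherwise the largest power of two dividing the
             known misalignment (or the element alignment when the
             misalignment is unknown).  */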
2411 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2412 misalign ? least_bit_hwi (misalign) : align);
2413 gcall *call
2414 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2415 ptr, vec_mask, vec_rhs);
2416 gimple_call_set_nothrow (call, true);
2417 new_stmt = call;
2418 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2419 if (i == 0)
2420 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2421 else
2422 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2423 prev_stmt_info = vinfo_for_stmt (new_stmt);
2424 }
2425 }
2426 else
2427 {
2428 tree vec_mask = NULL_TREE;
2429 prev_stmt_info = NULL;
2430 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2431 for (i = 0; i < ncopies; i++)
2432 {
2433 unsigned align, misalign;
2434
2435 if (i == 0)
2436 {
2437 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2438 mask_vectype);
2439 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2440 NULL_TREE, &dummy, gsi,
2441 &ptr_incr, false, &inv_p);
2442 gcc_assert (!inv_p);
2443 }
2444 else
2445 {
2446 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2447 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2448 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2449 TYPE_SIZE_UNIT (vectype));
2450 }
2451
2452 align = DR_TARGET_ALIGNMENT (dr);
2453 if (aligned_access_p (dr))
2454 misalign = 0;
2455 else if (DR_MISALIGNMENT (dr) == -1)
2456 {
2457 align = TYPE_ALIGN_UNIT (elem_type);
2458 misalign = 0;
2459 }
2460 else
2461 misalign = DR_MISALIGNMENT (dr);
2462 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2463 misalign);
2464 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2465 misalign ? least_bit_hwi (misalign) : align);
2466 gcall *call
2467 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2468 ptr, vec_mask);
2469 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2470 gimple_call_set_nothrow (call, true);
2471 vect_finish_stmt_generation (stmt, call, gsi);
2472 if (i == 0)
2473 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2474 else
2475 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2476 prev_stmt_info = vinfo_for_stmt (call);
2477 }
2478 }
2479
2480 if (vls_type == VLS_LOAD)
2481 {
2482 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2483 from the IL. */
2484 if (STMT_VINFO_RELATED_STMT (stmt_info))
2485 {
2486 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2487 stmt_info = vinfo_for_stmt (stmt);
2488 }
2489 tree lhs = gimple_call_lhs (stmt);
2490 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2491 set_vinfo_for_stmt (new_stmt, stmt_info);
2492 set_vinfo_for_stmt (stmt, NULL);
2493 STMT_VINFO_STMT (stmt_info) = new_stmt;
2494 gsi_replace (gsi, new_stmt, true);
2495 }
2496
2497 return true;
2498 }
2499
2500 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2501
2502 static bool
2503 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2504 gimple **vec_stmt, slp_tree slp_node,
2505 tree vectype_in, enum vect_def_type *dt)
2506 {
2507 tree op, vectype;
2508 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2509 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2510 unsigned ncopies;
2511 unsigned HOST_WIDE_INT nunits, num_bytes;
2512
2513 op = gimple_call_arg (stmt, 0);
2514 vectype = STMT_VINFO_VECTYPE (stmt_info);
2515
2516 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2517 return false;
2518
2519 /* Multiple types in SLP are handled by creating the appropriate number of
2520 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2521 case of SLP. */
2522 if (slp_node)
2523 ncopies = 1;
2524 else
2525 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2526
2527 gcc_assert (ncopies >= 1);
2528
2529 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2530 if (! char_vectype)
2531 return false;
2532
2533 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2534 return false;
2535
2536 unsigned word_bytes = num_bytes / nunits;
2537
2538 /* The encoding uses one stepped pattern for each byte in the word. */
2539 vec_perm_builder elts (num_bytes, word_bytes, 3);
2540 for (unsigned i = 0; i < 3; ++i)
2541 for (unsigned j = 0; j < word_bytes; ++j)
2542 elts.quick_push ((i + 1) * word_bytes - j - 1);
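  /* For example, for a 32-bit bswap on a 16-byte vector (WORD_BYTES == 4,
     NUM_BYTES == 16) the selector starts { 3, 2, 1, 0, 7, 6, 5, 4, ... },
     reversing the bytes within each word.  */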
2543
2544 vec_perm_indices indices (elts, 1, num_bytes);
2545 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2546 return false;
2547
2548 if (! vec_stmt)
2549 {
2550 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2551 if (dump_enabled_p ())
2552 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2553 "\n");
2554 if (! PURE_SLP_STMT (stmt_info))
2555 {
2556 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2557 1, vector_stmt, stmt_info, 0, vect_prologue);
2558 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2559 ncopies, vec_perm, stmt_info, 0, vect_body);
2560 }
2561 return true;
2562 }
2563
2564 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2565
2566 /* Transform. */
2567 vec<tree> vec_oprnds = vNULL;
2568 gimple *new_stmt = NULL;
2569 stmt_vec_info prev_stmt_info = NULL;
2570 for (unsigned j = 0; j < ncopies; j++)
2571 {
2572 /* Handle uses. */
2573 if (j == 0)
2574 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2575 else
2576 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2577
2578 /* Arguments are ready. create the new vector stmt. */
2579 unsigned i;
2580 tree vop;
2581 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2582 {
2583 tree tem = make_ssa_name (char_vectype);
2584 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2585 char_vectype, vop));
2586 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2587 tree tem2 = make_ssa_name (char_vectype);
2588 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2589 tem, tem, bswap_vconst);
2590 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2591 tem = make_ssa_name (vectype);
2592 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2593 vectype, tem2));
2594 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2595 if (slp_node)
2596 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2597 }
2598
2599 if (slp_node)
2600 continue;
2601
2602 if (j == 0)
2603 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2604 else
2605 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2606
2607 prev_stmt_info = vinfo_for_stmt (new_stmt);
2608 }
2609
2610 vec_oprnds.release ();
2611 return true;
2612 }
2613
2614 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2615 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2616 in a single step. On success, store the binary pack code in
2617 *CONVERT_CODE. */
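/* For example, a V4SI result can be narrowed to V8HI in one step by
   packing two V4SI input vectors into a single V8HI vector (typically
   via VEC_PACK_TRUNC_EXPR).  */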
2618
2619 static bool
2620 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2621 tree_code *convert_code)
2622 {
2623 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2624 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2625 return false;
2626
2627 tree_code code;
2628 int multi_step_cvt = 0;
2629 auto_vec <tree, 8> interm_types;
2630 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2631 &code, &multi_step_cvt,
2632 &interm_types)
2633 || multi_step_cvt)
2634 return false;
2635
2636 *convert_code = code;
2637 return true;
2638 }
2639
2640 /* Function vectorizable_call.
2641
2642 Check if GS performs a function call that can be vectorized.
2643 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2644 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2645 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2646
2647 static bool
2648 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2649 slp_tree slp_node)
2650 {
2651 gcall *stmt;
2652 tree vec_dest;
2653 tree scalar_dest;
2654 tree op, type;
2655 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2656 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2657 tree vectype_out, vectype_in;
2658 poly_uint64 nunits_in;
2659 poly_uint64 nunits_out;
2660 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2661 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2662 vec_info *vinfo = stmt_info->vinfo;
2663 tree fndecl, new_temp, rhs_type;
2664 gimple *def_stmt;
2665 enum vect_def_type dt[3]
2666 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2667 int ndts = 3;
2668 gimple *new_stmt = NULL;
2669 int ncopies, j;
2670 vec<tree> vargs = vNULL;
2671 enum { NARROW, NONE, WIDEN } modifier;
2672 size_t i, nargs;
2673 tree lhs;
2674
2675 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2676 return false;
2677
2678 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2679 && ! vec_stmt)
2680 return false;
2681
2682 /* Is GS a vectorizable call? */
2683 stmt = dyn_cast <gcall *> (gs);
2684 if (!stmt)
2685 return false;
2686
2687 if (gimple_call_internal_p (stmt)
2688 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2689 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2690 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2691 slp_node);
2692
2693 if (gimple_call_lhs (stmt) == NULL_TREE
2694 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2695 return false;
2696
2697 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2698
2699 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2700
2701 /* Process function arguments. */
2702 rhs_type = NULL_TREE;
2703 vectype_in = NULL_TREE;
2704 nargs = gimple_call_num_args (stmt);
2705
2706 /* Bail out if the function has more than three arguments; we do not have
2707 interesting builtin functions to vectorize with more than two arguments
2708 except for fma. A call with no arguments is not vectorizable either. */
2709 if (nargs == 0 || nargs > 3)
2710 return false;
2711
2712 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2713 if (gimple_call_internal_p (stmt)
2714 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2715 {
2716 nargs = 0;
2717 rhs_type = unsigned_type_node;
2718 }
2719
2720 for (i = 0; i < nargs; i++)
2721 {
2722 tree opvectype;
2723
2724 op = gimple_call_arg (stmt, i);
2725
2726 /* We can only handle calls with arguments of the same type. */
2727 if (rhs_type
2728 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2729 {
2730 if (dump_enabled_p ())
2731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2732 "argument types differ.\n");
2733 return false;
2734 }
2735 if (!rhs_type)
2736 rhs_type = TREE_TYPE (op);
2737
2738 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2739 {
2740 if (dump_enabled_p ())
2741 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2742 "use not simple.\n");
2743 return false;
2744 }
2745
2746 if (!vectype_in)
2747 vectype_in = opvectype;
2748 else if (opvectype
2749 && opvectype != vectype_in)
2750 {
2751 if (dump_enabled_p ())
2752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2753 "argument vector types differ.\n");
2754 return false;
2755 }
2756 }
2757 /* If all arguments are external or constant defs use a vector type with
2758 the same size as the output vector type. */
2759 if (!vectype_in)
2760 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2761 if (vec_stmt)
2762 gcc_assert (vectype_in);
2763 if (!vectype_in)
2764 {
2765 if (dump_enabled_p ())
2766 {
2767 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2768 "no vectype for scalar type ");
2769 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2770 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2771 }
2772
2773 return false;
2774 }
2775
2776 /* FORNOW */
2777 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2778 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2779 if (known_eq (nunits_in * 2, nunits_out))
2780 modifier = NARROW;
2781 else if (known_eq (nunits_out, nunits_in))
2782 modifier = NONE;
2783 else if (known_eq (nunits_out * 2, nunits_in))
2784 modifier = WIDEN;
2785 else
2786 return false;
2787
2788 /* We only handle functions that do not read or clobber memory. */
2789 if (gimple_vuse (stmt))
2790 {
2791 if (dump_enabled_p ())
2792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2793 "function reads from or writes to memory.\n");
2794 return false;
2795 }
2796
2797 /* For now, we only vectorize functions if a target specific builtin
2798 is available. TODO -- in some cases, it might be profitable to
2799 insert the calls for pieces of the vector, in order to be able
2800 to vectorize other operations in the loop. */
2801 fndecl = NULL_TREE;
2802 internal_fn ifn = IFN_LAST;
2803 combined_fn cfn = gimple_call_combined_fn (stmt);
2804 tree callee = gimple_call_fndecl (stmt);
2805
2806 /* First try using an internal function. */
2807 tree_code convert_code = ERROR_MARK;
2808 if (cfn != CFN_LAST
2809 && (modifier == NONE
2810 || (modifier == NARROW
2811 && simple_integer_narrowing (vectype_out, vectype_in,
2812 &convert_code))))
2813 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2814 vectype_in);
2815
2816 /* If that fails, try asking for a target-specific built-in function. */
2817 if (ifn == IFN_LAST)
2818 {
2819 if (cfn != CFN_LAST)
2820 fndecl = targetm.vectorize.builtin_vectorized_function
2821 (cfn, vectype_out, vectype_in);
2822 else
2823 fndecl = targetm.vectorize.builtin_md_vectorized_function
2824 (callee, vectype_out, vectype_in);
2825 }
2826
2827 if (ifn == IFN_LAST && !fndecl)
2828 {
2829 if (cfn == CFN_GOMP_SIMD_LANE
2830 && !slp_node
2831 && loop_vinfo
2832 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2833 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2834 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2835 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2836 {
2837 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2838 { 0, 1, 2, ... vf - 1 } vector. */
2839 gcc_assert (nargs == 0);
2840 }
2841 else if (modifier == NONE
2842 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2843 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2844 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2845 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2846 vectype_in, dt);
2847 else
2848 {
2849 if (dump_enabled_p ())
2850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2851 "function is not vectorizable.\n");
2852 return false;
2853 }
2854 }
2855
2856 if (slp_node)
2857 ncopies = 1;
2858 else if (modifier == NARROW && ifn == IFN_LAST)
2859 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2860 else
2861 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2862
2863 /* Sanity check: make sure that at least one copy of the vectorized stmt
2864 needs to be generated. */
2865 gcc_assert (ncopies >= 1);
2866
2867 if (!vec_stmt) /* transformation not required. */
2868 {
2869 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2870 if (dump_enabled_p ())
2871 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2872 "\n");
2873 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2874 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2875 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2876 vec_promote_demote, stmt_info, 0, vect_body);
2877
2878 return true;
2879 }
2880
2881 /* Transform. */
2882
2883 if (dump_enabled_p ())
2884 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2885
2886 /* Handle def. */
2887 scalar_dest = gimple_call_lhs (stmt);
2888 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2889
2890 prev_stmt_info = NULL;
2891 if (modifier == NONE || ifn != IFN_LAST)
2892 {
2893 tree prev_res = NULL_TREE;
2894 for (j = 0; j < ncopies; ++j)
2895 {
2896 /* Build argument list for the vectorized call. */
2897 if (j == 0)
2898 vargs.create (nargs);
2899 else
2900 vargs.truncate (0);
2901
2902 if (slp_node)
2903 {
2904 auto_vec<vec<tree> > vec_defs (nargs);
2905 vec<tree> vec_oprnds0;
2906
2907 for (i = 0; i < nargs; i++)
2908 vargs.quick_push (gimple_call_arg (stmt, i));
2909 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2910 vec_oprnds0 = vec_defs[0];
2911
2912 /* Arguments are ready. Create the new vector stmt. */
2913 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2914 {
2915 size_t k;
2916 for (k = 0; k < nargs; k++)
2917 {
2918 vec<tree> vec_oprndsk = vec_defs[k];
2919 vargs[k] = vec_oprndsk[i];
2920 }
2921 if (modifier == NARROW)
2922 {
2923 tree half_res = make_ssa_name (vectype_in);
2924 gcall *call
2925 = gimple_build_call_internal_vec (ifn, vargs);
2926 gimple_call_set_lhs (call, half_res);
2927 gimple_call_set_nothrow (call, true);
2928 new_stmt = call;
2929 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2930 if ((i & 1) == 0)
2931 {
2932 prev_res = half_res;
2933 continue;
2934 }
2935 new_temp = make_ssa_name (vec_dest);
2936 new_stmt = gimple_build_assign (new_temp, convert_code,
2937 prev_res, half_res);
2938 }
2939 else
2940 {
2941 gcall *call;
2942 if (ifn != IFN_LAST)
2943 call = gimple_build_call_internal_vec (ifn, vargs);
2944 else
2945 call = gimple_build_call_vec (fndecl, vargs);
2946 new_temp = make_ssa_name (vec_dest, call);
2947 gimple_call_set_lhs (call, new_temp);
2948 gimple_call_set_nothrow (call, true);
2949 new_stmt = call;
2950 }
2951 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2952 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2953 }
2954
2955 for (i = 0; i < nargs; i++)
2956 {
2957 vec<tree> vec_oprndsi = vec_defs[i];
2958 vec_oprndsi.release ();
2959 }
2960 continue;
2961 }
2962
2963 for (i = 0; i < nargs; i++)
2964 {
2965 op = gimple_call_arg (stmt, i);
2966 if (j == 0)
2967 vec_oprnd0
2968 = vect_get_vec_def_for_operand (op, stmt);
2969 else
2970 {
2971 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2972 vec_oprnd0
2973 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2974 }
2975
2976 vargs.quick_push (vec_oprnd0);
2977 }
2978
2979 if (gimple_call_internal_p (stmt)
2980 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2981 {
2982 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
2983 tree new_var
2984 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2985 gimple *init_stmt = gimple_build_assign (new_var, cst);
2986 vect_init_vector_1 (stmt, init_stmt, NULL);
2987 new_temp = make_ssa_name (vec_dest);
2988 new_stmt = gimple_build_assign (new_temp, new_var);
2989 }
2990 else if (modifier == NARROW)
2991 {
2992 tree half_res = make_ssa_name (vectype_in);
2993 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2994 gimple_call_set_lhs (call, half_res);
2995 gimple_call_set_nothrow (call, true);
2996 new_stmt = call;
2997 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2998 if ((j & 1) == 0)
2999 {
3000 prev_res = half_res;
3001 continue;
3002 }
3003 new_temp = make_ssa_name (vec_dest);
3004 new_stmt = gimple_build_assign (new_temp, convert_code,
3005 prev_res, half_res);
3006 }
3007 else
3008 {
3009 gcall *call;
3010 if (ifn != IFN_LAST)
3011 call = gimple_build_call_internal_vec (ifn, vargs);
3012 else
3013 call = gimple_build_call_vec (fndecl, vargs);
3014 new_temp = make_ssa_name (vec_dest, new_stmt);
3015 gimple_call_set_lhs (call, new_temp);
3016 gimple_call_set_nothrow (call, true);
3017 new_stmt = call;
3018 }
3019 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3020
3021 if (j == (modifier == NARROW ? 1 : 0))
3022 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3023 else
3024 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3025
3026 prev_stmt_info = vinfo_for_stmt (new_stmt);
3027 }
3028 }
3029 else if (modifier == NARROW)
3030 {
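      /* Here the output vector has twice as many elements as each input
         vector, so every vectorized call consumes two vector definitions
         per scalar argument (hence the NARGS * 2 operands below) and
         produces a single narrower result vector.  */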
3031 for (j = 0; j < ncopies; ++j)
3032 {
3033 /* Build argument list for the vectorized call. */
3034 if (j == 0)
3035 vargs.create (nargs * 2);
3036 else
3037 vargs.truncate (0);
3038
3039 if (slp_node)
3040 {
3041 auto_vec<vec<tree> > vec_defs (nargs);
3042 vec<tree> vec_oprnds0;
3043
3044 for (i = 0; i < nargs; i++)
3045 vargs.quick_push (gimple_call_arg (stmt, i));
3046 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3047 vec_oprnds0 = vec_defs[0];
3048
3049 /* Arguments are ready. Create the new vector stmt. */
3050 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3051 {
3052 size_t k;
3053 vargs.truncate (0);
3054 for (k = 0; k < nargs; k++)
3055 {
3056 vec<tree> vec_oprndsk = vec_defs[k];
3057 vargs.quick_push (vec_oprndsk[i]);
3058 vargs.quick_push (vec_oprndsk[i + 1]);
3059 }
3060 gcall *call;
3061 if (ifn != IFN_LAST)
3062 call = gimple_build_call_internal_vec (ifn, vargs);
3063 else
3064 call = gimple_build_call_vec (fndecl, vargs);
3065 new_temp = make_ssa_name (vec_dest, call);
3066 gimple_call_set_lhs (call, new_temp);
3067 gimple_call_set_nothrow (call, true);
3068 new_stmt = call;
3069 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3070 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3071 }
3072
3073 for (i = 0; i < nargs; i++)
3074 {
3075 vec<tree> vec_oprndsi = vec_defs[i];
3076 vec_oprndsi.release ();
3077 }
3078 continue;
3079 }
3080
3081 for (i = 0; i < nargs; i++)
3082 {
3083 op = gimple_call_arg (stmt, i);
3084 if (j == 0)
3085 {
3086 vec_oprnd0
3087 = vect_get_vec_def_for_operand (op, stmt);
3088 vec_oprnd1
3089 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3090 }
3091 else
3092 {
3093 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3094 vec_oprnd0
3095 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3096 vec_oprnd1
3097 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3098 }
3099
3100 vargs.quick_push (vec_oprnd0);
3101 vargs.quick_push (vec_oprnd1);
3102 }
3103
3104 new_stmt = gimple_build_call_vec (fndecl, vargs);
3105 new_temp = make_ssa_name (vec_dest, new_stmt);
3106 gimple_call_set_lhs (new_stmt, new_temp);
3107 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3108
3109 if (j == 0)
3110 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3111 else
3112 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3113
3114 prev_stmt_info = vinfo_for_stmt (new_stmt);
3115 }
3116
3117 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3118 }
3119 else
3120 /* No current target implements this case. */
3121 return false;
3122
3123 vargs.release ();
3124
3125 /* The call in STMT might prevent it from being removed in dce.
3126 We cannot remove it here, however, because of the way the ssa name
3127 it defines is mapped to the new definition. So just replace
3128 the rhs of the statement with something harmless. */
3129
3130 if (slp_node)
3131 return true;
3132
3133 type = TREE_TYPE (scalar_dest);
3134 if (is_pattern_stmt_p (stmt_info))
3135 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3136 else
3137 lhs = gimple_call_lhs (stmt);
3138
3139 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3140 set_vinfo_for_stmt (new_stmt, stmt_info);
3141 set_vinfo_for_stmt (stmt, NULL);
3142 STMT_VINFO_STMT (stmt_info) = new_stmt;
3143 gsi_replace (gsi, new_stmt, false);
3144
3145 return true;
3146 }
3147
3148
3149 struct simd_call_arg_info
3150 {
3151 tree vectype;
3152 tree op;
3153 HOST_WIDE_INT linear_step;
3154 enum vect_def_type dt;
3155 unsigned int align;
3156 bool simd_lane_linear;
3157 };
3158
3159 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3160 is linear within simd lane (but not within whole loop), note it in
3161 *ARGINFO. */
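/* For example, if OP is defined as

     _1 = GOMP_SIMD_LANE (simduid);
     _2 = _1 * 4;
     op = base p+ _2;

   with an invariant BASE, then OP is linear within the simd lane with
   base BASE and linear step 4.  */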
3162
3163 static void
3164 vect_simd_lane_linear (tree op, struct loop *loop,
3165 struct simd_call_arg_info *arginfo)
3166 {
3167 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3168
3169 if (!is_gimple_assign (def_stmt)
3170 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3171 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3172 return;
3173
3174 tree base = gimple_assign_rhs1 (def_stmt);
3175 HOST_WIDE_INT linear_step = 0;
3176 tree v = gimple_assign_rhs2 (def_stmt);
3177 while (TREE_CODE (v) == SSA_NAME)
3178 {
3179 tree t;
3180 def_stmt = SSA_NAME_DEF_STMT (v);
3181 if (is_gimple_assign (def_stmt))
3182 switch (gimple_assign_rhs_code (def_stmt))
3183 {
3184 case PLUS_EXPR:
3185 t = gimple_assign_rhs2 (def_stmt);
3186 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3187 return;
3188 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3189 v = gimple_assign_rhs1 (def_stmt);
3190 continue;
3191 case MULT_EXPR:
3192 t = gimple_assign_rhs2 (def_stmt);
3193 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3194 return;
3195 linear_step = tree_to_shwi (t);
3196 v = gimple_assign_rhs1 (def_stmt);
3197 continue;
3198 CASE_CONVERT:
3199 t = gimple_assign_rhs1 (def_stmt);
3200 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3201 || (TYPE_PRECISION (TREE_TYPE (v))
3202 < TYPE_PRECISION (TREE_TYPE (t))))
3203 return;
3204 if (!linear_step)
3205 linear_step = 1;
3206 v = t;
3207 continue;
3208 default:
3209 return;
3210 }
3211 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3212 && loop->simduid
3213 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3214 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3215 == loop->simduid))
3216 {
3217 if (!linear_step)
3218 linear_step = 1;
3219 arginfo->linear_step = linear_step;
3220 arginfo->op = base;
3221 arginfo->simd_lane_linear = true;
3222 return;
3223 }
3224 }
3225 }
3226
3227 /* Return the number of elements in vector type VECTYPE, which is associated
3228 with a SIMD clone. At present these vectors always have a constant
3229 length. */
3230
3231 static unsigned HOST_WIDE_INT
3232 simd_clone_subparts (tree vectype)
3233 {
3234 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3235 }
3236
3237 /* Function vectorizable_simd_clone_call.
3238
3239 Check if STMT performs a function call that can be vectorized
3240 by calling a simd clone of the function.
3241 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3242 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3243 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3244
3245 static bool
3246 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3247 gimple **vec_stmt, slp_tree slp_node)
3248 {
3249 tree vec_dest;
3250 tree scalar_dest;
3251 tree op, type;
3252 tree vec_oprnd0 = NULL_TREE;
3253 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3254 tree vectype;
3255 unsigned int nunits;
3256 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3257 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3258 vec_info *vinfo = stmt_info->vinfo;
3259 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3260 tree fndecl, new_temp;
3261 gimple *def_stmt;
3262 gimple *new_stmt = NULL;
3263 int ncopies, j;
3264 auto_vec<simd_call_arg_info> arginfo;
3265 vec<tree> vargs = vNULL;
3266 size_t i, nargs;
3267 tree lhs, rtype, ratype;
3268 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3269
3270 /* Is STMT a vectorizable call? */
3271 if (!is_gimple_call (stmt))
3272 return false;
3273
3274 fndecl = gimple_call_fndecl (stmt);
3275 if (fndecl == NULL_TREE)
3276 return false;
3277
3278 struct cgraph_node *node = cgraph_node::get (fndecl);
3279 if (node == NULL || node->simd_clones == NULL)
3280 return false;
3281
3282 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3283 return false;
3284
3285 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3286 && ! vec_stmt)
3287 return false;
3288
3289 if (gimple_call_lhs (stmt)
3290 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3291 return false;
3292
3293 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3294
3295 vectype = STMT_VINFO_VECTYPE (stmt_info);
3296
3297 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3298 return false;
3299
3300 /* FORNOW */
3301 if (slp_node)
3302 return false;
3303
3304 /* Process function arguments. */
3305 nargs = gimple_call_num_args (stmt);
3306
3307 /* Bail out if the function has zero arguments. */
3308 if (nargs == 0)
3309 return false;
3310
3311 arginfo.reserve (nargs, true);
3312
3313 for (i = 0; i < nargs; i++)
3314 {
3315 simd_call_arg_info thisarginfo;
3316 affine_iv iv;
3317
3318 thisarginfo.linear_step = 0;
3319 thisarginfo.align = 0;
3320 thisarginfo.op = NULL_TREE;
3321 thisarginfo.simd_lane_linear = false;
3322
3323 op = gimple_call_arg (stmt, i);
3324 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3325 &thisarginfo.vectype)
3326 || thisarginfo.dt == vect_uninitialized_def)
3327 {
3328 if (dump_enabled_p ())
3329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3330 "use not simple.\n");
3331 return false;
3332 }
3333
3334 if (thisarginfo.dt == vect_constant_def
3335 || thisarginfo.dt == vect_external_def)
3336 gcc_assert (thisarginfo.vectype == NULL_TREE);
3337 else
3338 gcc_assert (thisarginfo.vectype != NULL_TREE);
3339
3340 /* For linear arguments, the analyze phase should have saved
3341 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3342 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3343 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3344 {
3345 gcc_assert (vec_stmt);
3346 thisarginfo.linear_step
3347 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3348 thisarginfo.op
3349 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3350 thisarginfo.simd_lane_linear
3351 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3352 == boolean_true_node);
3353 /* If loop has been peeled for alignment, we need to adjust it. */
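          /* N1 - N2 is the number of iterations peeled off in the
             prologue, so the start value of the linear argument must be
             advanced by that many steps.  */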
3354 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3355 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3356 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3357 {
3358 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3359 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3360 tree opt = TREE_TYPE (thisarginfo.op);
3361 bias = fold_convert (TREE_TYPE (step), bias);
3362 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3363 thisarginfo.op
3364 = fold_build2 (POINTER_TYPE_P (opt)
3365 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3366 thisarginfo.op, bias);
3367 }
3368 }
3369 else if (!vec_stmt
3370 && thisarginfo.dt != vect_constant_def
3371 && thisarginfo.dt != vect_external_def
3372 && loop_vinfo
3373 && TREE_CODE (op) == SSA_NAME
3374 && simple_iv (loop, loop_containing_stmt (stmt), op,
3375 &iv, false)
3376 && tree_fits_shwi_p (iv.step))
3377 {
3378 thisarginfo.linear_step = tree_to_shwi (iv.step);
3379 thisarginfo.op = iv.base;
3380 }
3381 else if ((thisarginfo.dt == vect_constant_def
3382 || thisarginfo.dt == vect_external_def)
3383 && POINTER_TYPE_P (TREE_TYPE (op)))
3384 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3385 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3386 linear too. */
3387 if (POINTER_TYPE_P (TREE_TYPE (op))
3388 && !thisarginfo.linear_step
3389 && !vec_stmt
3390 && thisarginfo.dt != vect_constant_def
3391 && thisarginfo.dt != vect_external_def
3392 && loop_vinfo
3393 && !slp_node
3394 && TREE_CODE (op) == SSA_NAME)
3395 vect_simd_lane_linear (op, loop, &thisarginfo);
3396
3397 arginfo.quick_push (thisarginfo);
3398 }
3399
3400 unsigned HOST_WIDE_INT vf;
3401 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3402 {
3403 if (dump_enabled_p ())
3404 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3405 "not considering SIMD clones; not yet supported"
3406 " for variable-width vectors.\n");
3407 return false;
3408 }
3409
3410 unsigned int badness = 0;
3411 struct cgraph_node *bestn = NULL;
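  /* Choose the best clone (unless one was already recorded during
     analysis).  Clones whose simdlen exceeds the vectorization factor,
     whose arguments cannot be matched, or which are inbranch (FORNOW)
     are skipped; a simdlen smaller than the vectorization factor,
     target-specific penalties and under-aligned arguments all increase
     the badness.  */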
3412 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3413 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3414 else
3415 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3416 n = n->simdclone->next_clone)
3417 {
3418 unsigned int this_badness = 0;
3419 if (n->simdclone->simdlen > vf
3420 || n->simdclone->nargs != nargs)
3421 continue;
3422 if (n->simdclone->simdlen < vf)
3423 this_badness += (exact_log2 (vf)
3424 - exact_log2 (n->simdclone->simdlen)) * 1024;
3425 if (n->simdclone->inbranch)
3426 this_badness += 2048;
3427 int target_badness = targetm.simd_clone.usable (n);
3428 if (target_badness < 0)
3429 continue;
3430 this_badness += target_badness * 512;
3431 /* FORNOW: Have to add code to add the mask argument. */
3432 if (n->simdclone->inbranch)
3433 continue;
3434 for (i = 0; i < nargs; i++)
3435 {
3436 switch (n->simdclone->args[i].arg_type)
3437 {
3438 case SIMD_CLONE_ARG_TYPE_VECTOR:
3439 if (!useless_type_conversion_p
3440 (n->simdclone->args[i].orig_type,
3441 TREE_TYPE (gimple_call_arg (stmt, i))))
3442 i = -1;
3443 else if (arginfo[i].dt == vect_constant_def
3444 || arginfo[i].dt == vect_external_def
3445 || arginfo[i].linear_step)
3446 this_badness += 64;
3447 break;
3448 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3449 if (arginfo[i].dt != vect_constant_def
3450 && arginfo[i].dt != vect_external_def)
3451 i = -1;
3452 break;
3453 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3454 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3455 if (arginfo[i].dt == vect_constant_def
3456 || arginfo[i].dt == vect_external_def
3457 || (arginfo[i].linear_step
3458 != n->simdclone->args[i].linear_step))
3459 i = -1;
3460 break;
3461 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3462 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3463 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3464 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3465 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3466 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3467 /* FORNOW */
3468 i = -1;
3469 break;
3470 case SIMD_CLONE_ARG_TYPE_MASK:
3471 gcc_unreachable ();
3472 }
3473 if (i == (size_t) -1)
3474 break;
3475 if (n->simdclone->args[i].alignment > arginfo[i].align)
3476 {
3477 i = -1;
3478 break;
3479 }
3480 if (arginfo[i].align)
3481 this_badness += (exact_log2 (arginfo[i].align)
3482 - exact_log2 (n->simdclone->args[i].alignment));
3483 }
3484 if (i == (size_t) -1)
3485 continue;
3486 if (bestn == NULL || this_badness < badness)
3487 {
3488 bestn = n;
3489 badness = this_badness;
3490 }
3491 }
3492
3493 if (bestn == NULL)
3494 return false;
3495
3496 for (i = 0; i < nargs; i++)
3497 if ((arginfo[i].dt == vect_constant_def
3498 || arginfo[i].dt == vect_external_def)
3499 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3500 {
3501 arginfo[i].vectype
3502 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3503 i)));
3504 if (arginfo[i].vectype == NULL
3505 || (simd_clone_subparts (arginfo[i].vectype)
3506 > bestn->simdclone->simdlen))
3507 return false;
3508 }
3509
3510 fndecl = bestn->decl;
3511 nunits = bestn->simdclone->simdlen;
3512 ncopies = vf / nunits;
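/* E.g. (hypothetical figures) with vf == 8 and a chosen clone whose
   simdlen == 4, ncopies == 2, i.e. two calls to the clone are emitted
   per vectorized loop iteration.  */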
3513
3514 /* If the function isn't const, only allow it in simd loops where the
3515 user has asserted that at least nunits consecutive iterations can be
3516 performed using SIMD instructions. */
3517 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3518 && gimple_vuse (stmt))
3519 return false;
3520
3521 /* Sanity check: make sure that at least one copy of the vectorized stmt
3522 needs to be generated. */
3523 gcc_assert (ncopies >= 1);
3524
3525 if (!vec_stmt) /* transformation not required. */
3526 {
3527 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3528 for (i = 0; i < nargs; i++)
3529 if ((bestn->simdclone->args[i].arg_type
3530 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3531 || (bestn->simdclone->args[i].arg_type
3532 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3533 {
3534 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3535 + 1);
3536 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3537 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3538 ? size_type_node : TREE_TYPE (arginfo[i].op);
3539 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3540 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3541 tree sll = arginfo[i].simd_lane_linear
3542 ? boolean_true_node : boolean_false_node;
3543 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3544 }
3545 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3546 if (dump_enabled_p ())
3547 dump_printf_loc (MSG_NOTE, vect_location,
3548 "=== vectorizable_simd_clone_call ===\n");
3549 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3550 return true;
3551 }
3552
3553 /* Transform. */
3554
3555 if (dump_enabled_p ())
3556 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3557
3558 /* Handle def. */
3559 scalar_dest = gimple_call_lhs (stmt);
3560 vec_dest = NULL_TREE;
3561 rtype = NULL_TREE;
3562 ratype = NULL_TREE;
3563 if (scalar_dest)
3564 {
3565 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3566 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3567 if (TREE_CODE (rtype) == ARRAY_TYPE)
3568 {
3569 ratype = rtype;
3570 rtype = TREE_TYPE (ratype);
3571 }
3572 }
3573
3574 prev_stmt_info = NULL;
3575 for (j = 0; j < ncopies; ++j)
3576 {
3577 /* Build argument list for the vectorized call. */
3578 if (j == 0)
3579 vargs.create (nargs);
3580 else
3581 vargs.truncate (0);
3582
3583 for (i = 0; i < nargs; i++)
3584 {
3585 unsigned int k, l, m, o;
3586 tree atype;
3587 op = gimple_call_arg (stmt, i);
3588 switch (bestn->simdclone->args[i].arg_type)
3589 {
3590 case SIMD_CLONE_ARG_TYPE_VECTOR:
3591 atype = bestn->simdclone->args[i].vector_type;
3592 o = nunits / simd_clone_subparts (atype);
3593 for (m = j * o; m < (j + 1) * o; m++)
3594 {
3595 if (simd_clone_subparts (atype)
3596 < simd_clone_subparts (arginfo[i].vectype))
3597 {
3598 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3599 k = (simd_clone_subparts (arginfo[i].vectype)
3600 / simd_clone_subparts (atype));
3601 gcc_assert ((k & (k - 1)) == 0);
3602 if (m == 0)
3603 vec_oprnd0
3604 = vect_get_vec_def_for_operand (op, stmt);
3605 else
3606 {
3607 vec_oprnd0 = arginfo[i].op;
3608 if ((m & (k - 1)) == 0)
3609 vec_oprnd0
3610 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3611 vec_oprnd0);
3612 }
3613 arginfo[i].op = vec_oprnd0;
3614 vec_oprnd0
3615 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3616 bitsize_int (prec),
3617 bitsize_int ((m & (k - 1)) * prec));
3618 new_stmt
3619 = gimple_build_assign (make_ssa_name (atype),
3620 vec_oprnd0);
3621 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3622 vargs.safe_push (gimple_assign_lhs (new_stmt));
3623 }
3624 else
3625 {
3626 k = (simd_clone_subparts (atype)
3627 / simd_clone_subparts (arginfo[i].vectype));
3628 gcc_assert ((k & (k - 1)) == 0);
3629 vec<constructor_elt, va_gc> *ctor_elts;
3630 if (k != 1)
3631 vec_alloc (ctor_elts, k);
3632 else
3633 ctor_elts = NULL;
3634 for (l = 0; l < k; l++)
3635 {
3636 if (m == 0 && l == 0)
3637 vec_oprnd0
3638 = vect_get_vec_def_for_operand (op, stmt);
3639 else
3640 vec_oprnd0
3641 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3642 arginfo[i].op);
3643 arginfo[i].op = vec_oprnd0;
3644 if (k == 1)
3645 break;
3646 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3647 vec_oprnd0);
3648 }
3649 if (k == 1)
3650 vargs.safe_push (vec_oprnd0);
3651 else
3652 {
3653 vec_oprnd0 = build_constructor (atype, ctor_elts);
3654 new_stmt
3655 = gimple_build_assign (make_ssa_name (atype),
3656 vec_oprnd0);
3657 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3658 vargs.safe_push (gimple_assign_lhs (new_stmt));
3659 }
3660 }
3661 }
3662 break;
3663 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3664 vargs.safe_push (op);
3665 break;
3666 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3667 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
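/* A linear argument is passed as its value at the first lane of each
   clone call; e.g. (hypothetical) with linear_step == 1, nunits == 4
   and ncopies == 2, the two calls in one vector iteration receive P
   and P + 4, and the PHI built below advances P by 8 on the loop
   latch.  */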
3668 if (j == 0)
3669 {
3670 gimple_seq stmts;
3671 arginfo[i].op
3672 = force_gimple_operand (arginfo[i].op, &stmts, true,
3673 NULL_TREE);
3674 if (stmts != NULL)
3675 {
3676 basic_block new_bb;
3677 edge pe = loop_preheader_edge (loop);
3678 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3679 gcc_assert (!new_bb);
3680 }
3681 if (arginfo[i].simd_lane_linear)
3682 {
3683 vargs.safe_push (arginfo[i].op);
3684 break;
3685 }
3686 tree phi_res = copy_ssa_name (op);
3687 gphi *new_phi = create_phi_node (phi_res, loop->header);
3688 set_vinfo_for_stmt (new_phi,
3689 new_stmt_vec_info (new_phi, loop_vinfo));
3690 add_phi_arg (new_phi, arginfo[i].op,
3691 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3692 enum tree_code code
3693 = POINTER_TYPE_P (TREE_TYPE (op))
3694 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3695 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3696 ? sizetype : TREE_TYPE (op);
3697 widest_int cst
3698 = wi::mul (bestn->simdclone->args[i].linear_step,
3699 ncopies * nunits);
3700 tree tcst = wide_int_to_tree (type, cst);
3701 tree phi_arg = copy_ssa_name (op);
3702 new_stmt
3703 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3704 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3705 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3706 set_vinfo_for_stmt (new_stmt,
3707 new_stmt_vec_info (new_stmt, loop_vinfo));
3708 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3709 UNKNOWN_LOCATION);
3710 arginfo[i].op = phi_res;
3711 vargs.safe_push (phi_res);
3712 }
3713 else
3714 {
3715 enum tree_code code
3716 = POINTER_TYPE_P (TREE_TYPE (op))
3717 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3718 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3719 ? sizetype : TREE_TYPE (op);
3720 widest_int cst
3721 = wi::mul (bestn->simdclone->args[i].linear_step,
3722 j * nunits);
3723 tree tcst = wide_int_to_tree (type, cst);
3724 new_temp = make_ssa_name (TREE_TYPE (op));
3725 new_stmt = gimple_build_assign (new_temp, code,
3726 arginfo[i].op, tcst);
3727 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3728 vargs.safe_push (new_temp);
3729 }
3730 break;
3731 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3732 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3733 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3734 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3735 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3736 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3737 default:
3738 gcc_unreachable ();
3739 }
3740 }
3741
3742 new_stmt = gimple_build_call_vec (fndecl, vargs);
3743 if (vec_dest)
3744 {
3745 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
3746 if (ratype)
3747 new_temp = create_tmp_var (ratype);
3748 else if (simd_clone_subparts (vectype)
3749 == simd_clone_subparts (rtype))
3750 new_temp = make_ssa_name (vec_dest, new_stmt);
3751 else
3752 new_temp = make_ssa_name (rtype, new_stmt);
3753 gimple_call_set_lhs (new_stmt, new_temp);
3754 }
3755 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3756
3757 if (vec_dest)
3758 {
3759 if (simd_clone_subparts (vectype) < nunits)
3760 {
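/* The clone returns more elements than fit in VECTYPE, so split its
   result into K pieces; e.g. (hypothetical) a clone returning eight
   ints while VECTYPE holds four gives k == 2, extracted below either
   through MEM_REFs into the returned array or through BIT_FIELD_REFs
   from the wide return vector.  */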
3761 unsigned int k, l;
3762 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3763 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
3764 k = nunits / simd_clone_subparts (vectype);
3765 gcc_assert ((k & (k - 1)) == 0);
3766 for (l = 0; l < k; l++)
3767 {
3768 tree t;
3769 if (ratype)
3770 {
3771 t = build_fold_addr_expr (new_temp);
3772 t = build2 (MEM_REF, vectype, t,
3773 build_int_cst (TREE_TYPE (t), l * bytes));
3774 }
3775 else
3776 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3777 bitsize_int (prec), bitsize_int (l * prec));
3778 new_stmt
3779 = gimple_build_assign (make_ssa_name (vectype), t);
3780 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3781 if (j == 0 && l == 0)
3782 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3783 else
3784 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3785
3786 prev_stmt_info = vinfo_for_stmt (new_stmt);
3787 }
3788
3789 if (ratype)
3790 {
3791 tree clobber = build_constructor (ratype, NULL);
3792 TREE_THIS_VOLATILE (clobber) = 1;
3793 new_stmt = gimple_build_assign (new_temp, clobber);
3794 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3795 }
3796 continue;
3797 }
3798 else if (simd_clone_subparts (vectype) > nunits)
3799 {
3800 unsigned int k = (simd_clone_subparts (vectype)
3801 / simd_clone_subparts (rtype));
3802 gcc_assert ((k & (k - 1)) == 0);
3803 if ((j & (k - 1)) == 0)
3804 vec_alloc (ret_ctor_elts, k);
3805 if (ratype)
3806 {
3807 unsigned int m, o = nunits / simd_clone_subparts (rtype);
3808 for (m = 0; m < o; m++)
3809 {
3810 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3811 size_int (m), NULL_TREE, NULL_TREE);
3812 new_stmt
3813 = gimple_build_assign (make_ssa_name (rtype), tem);
3814 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3815 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3816 gimple_assign_lhs (new_stmt));
3817 }
3818 tree clobber = build_constructor (ratype, NULL);
3819 TREE_THIS_VOLATILE (clobber) = 1;
3820 new_stmt = gimple_build_assign (new_temp, clobber);
3821 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3822 }
3823 else
3824 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3825 if ((j & (k - 1)) != k - 1)
3826 continue;
3827 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3828 new_stmt
3829 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3830 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3831
3832 if ((unsigned) j == k - 1)
3833 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3834 else
3835 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3836
3837 prev_stmt_info = vinfo_for_stmt (new_stmt);
3838 continue;
3839 }
3840 else if (ratype)
3841 {
3842 tree t = build_fold_addr_expr (new_temp);
3843 t = build2 (MEM_REF, vectype, t,
3844 build_int_cst (TREE_TYPE (t), 0));
3845 new_stmt
3846 = gimple_build_assign (make_ssa_name (vec_dest), t);
3847 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3848 tree clobber = build_constructor (ratype, NULL);
3849 TREE_THIS_VOLATILE (clobber) = 1;
3850 vect_finish_stmt_generation (stmt,
3851 gimple_build_assign (new_temp,
3852 clobber), gsi);
3853 }
3854 }
3855
3856 if (j == 0)
3857 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3858 else
3859 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3860
3861 prev_stmt_info = vinfo_for_stmt (new_stmt);
3862 }
3863
3864 vargs.release ();
3865
3866 /* The call in STMT might prevent it from being removed in DCE.
3867 We however cannot remove it here, due to the way the SSA name
3868 it defines is mapped to the new definition. So just replace the
3869 rhs of the statement with something harmless. */
3870
3871 if (slp_node)
3872 return true;
3873
3874 if (scalar_dest)
3875 {
3876 type = TREE_TYPE (scalar_dest);
3877 if (is_pattern_stmt_p (stmt_info))
3878 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3879 else
3880 lhs = gimple_call_lhs (stmt);
3881 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3882 }
3883 else
3884 new_stmt = gimple_build_nop ();
3885 set_vinfo_for_stmt (new_stmt, stmt_info);
3886 set_vinfo_for_stmt (stmt, NULL);
3887 STMT_VINFO_STMT (stmt_info) = new_stmt;
3888 gsi_replace (gsi, new_stmt, true);
3889 unlink_stmt_vdef (stmt);
3890
3891 return true;
3892 }
3893
3894
3895 /* Function vect_gen_widened_results_half
3896
3897 Create a vector stmt whose code, operand type and result variable are
3898 CODE, OP_TYPE and VEC_DEST, and whose arguments are VEC_OPRND0 and
3899 VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3900 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3901 needs to be created (DECL is a function-decl of a target-builtin).
3902 STMT is the original scalar stmt that we are vectorizing. */
3903
3904 static gimple *
3905 vect_gen_widened_results_half (enum tree_code code,
3906 tree decl,
3907 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3908 tree vec_dest, gimple_stmt_iterator *gsi,
3909 gimple *stmt)
3910 {
3911 gimple *new_stmt;
3912 tree new_temp;
3913
3914 /* Generate half of the widened result: */
3915 if (code == CALL_EXPR)
3916 {
3917 /* Target specific support */
3918 if (op_type == binary_op)
3919 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3920 else
3921 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3922 new_temp = make_ssa_name (vec_dest, new_stmt);
3923 gimple_call_set_lhs (new_stmt, new_temp);
3924 }
3925 else
3926 {
3927 /* Generic support */
3928 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3929 if (op_type != binary_op)
3930 vec_oprnd1 = NULL;
3931 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3932 new_temp = make_ssa_name (vec_dest, new_stmt);
3933 gimple_assign_set_lhs (new_stmt, new_temp);
3934 }
3935 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3936
3937 return new_stmt;
3938 }
3939
3940
3941 /* Get vectorized definitions for loop-based vectorization. For the first
3942 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3943 scalar operand), and for the rest we get a copy with
3944 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3945 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3946 The vectors are collected into VEC_OPRNDS. */
3947
3948 static void
3949 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3950 vec<tree> *vec_oprnds, int multi_step_cvt)
3951 {
3952 tree vec_oprnd;
3953
3954 /* Get first vector operand. */
3955 /* All the vector operands except the very first one (that is the scalar
3956 oprnd) are stmt copies. */
3957 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3958 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3959 else
3960 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3961
3962 vec_oprnds->quick_push (vec_oprnd);
3963
3964 /* Get second vector operand. */
3965 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3966 vec_oprnds->quick_push (vec_oprnd);
3967
3968 *oprnd = vec_oprnd;
3969
3970 /* For conversion in multiple steps, continue to get operands
3971 recursively. */
3972 if (multi_step_cvt)
3973 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3974 }
3975
3976
3977 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3978 For multi-step conversions store the resulting vectors and call the function
3979 recursively. */
3980
3981 static void
3982 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3983 int multi_step_cvt, gimple *stmt,
3984 vec<tree> vec_dsts,
3985 gimple_stmt_iterator *gsi,
3986 slp_tree slp_node, enum tree_code code,
3987 stmt_vec_info *prev_stmt_info)
3988 {
3989 unsigned int i;
3990 tree vop0, vop1, new_tmp, vec_dest;
3991 gimple *new_stmt;
3992 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3993
3994 vec_dest = vec_dsts.pop ();
3995
3996 for (i = 0; i < vec_oprnds->length (); i += 2)
3997 {
3998 /* Create demotion operation. */
3999 vop0 = (*vec_oprnds)[i];
4000 vop1 = (*vec_oprnds)[i + 1];
4001 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4002 new_tmp = make_ssa_name (vec_dest, new_stmt);
4003 gimple_assign_set_lhs (new_stmt, new_tmp);
4004 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4005
4006 if (multi_step_cvt)
4007 /* Store the resulting vector for next recursive call. */
4008 (*vec_oprnds)[i/2] = new_tmp;
4009 else
4010 {
4011 /* This is the last step of the conversion sequence. Store the
4012 vectors in SLP_NODE or in vector info of the scalar statement
4013 (or in STMT_VINFO_RELATED_STMT chain). */
4014 if (slp_node)
4015 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4016 else
4017 {
4018 if (!*prev_stmt_info)
4019 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4020 else
4021 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4022
4023 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4024 }
4025 }
4026 }
4027
4028 /* For multi-step demotion operations we first generate demotion operations
4029 from the source type to the intermediate types, and then combine the
4030 results (stored in VEC_OPRNDS) in a demotion operation to the
4031 destination type. */
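/* E.g. (hypothetical 128-bit vectors) for an int -> char conversion
   the first level packs pairs of V4SI operands into V8HI vectors and
   the recursive call then packs pairs of those V8HI results into the
   final V16QI vectors using VEC_PACK_TRUNC_EXPR.  */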
4032 if (multi_step_cvt)
4033 {
4034 /* At each level of recursion we have half of the operands we had at the
4035 previous level. */
4036 vec_oprnds->truncate ((i+1)/2);
4037 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4038 stmt, vec_dsts, gsi, slp_node,
4039 VEC_PACK_TRUNC_EXPR,
4040 prev_stmt_info);
4041 }
4042
4043 vec_dsts.quick_push (vec_dest);
4044 }
4045
4046
4047 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4048 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4049 the resulting vectors and call the function recursively. */
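/* E.g. (hypothetical) widening a V16QI operand produces two half-width
   results, one from its low and one from its high half (say via a
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair), so every input vector
   contributes two entries to the result vector.  */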
4050
4051 static void
4052 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4053 vec<tree> *vec_oprnds1,
4054 gimple *stmt, tree vec_dest,
4055 gimple_stmt_iterator *gsi,
4056 enum tree_code code1,
4057 enum tree_code code2, tree decl1,
4058 tree decl2, int op_type)
4059 {
4060 int i;
4061 tree vop0, vop1, new_tmp1, new_tmp2;
4062 gimple *new_stmt1, *new_stmt2;
4063 vec<tree> vec_tmp = vNULL;
4064
4065 vec_tmp.create (vec_oprnds0->length () * 2);
4066 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4067 {
4068 if (op_type == binary_op)
4069 vop1 = (*vec_oprnds1)[i];
4070 else
4071 vop1 = NULL_TREE;
4072
4073 /* Generate the two halves of promotion operation. */
4074 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4075 op_type, vec_dest, gsi, stmt);
4076 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4077 op_type, vec_dest, gsi, stmt);
4078 if (is_gimple_call (new_stmt1))
4079 {
4080 new_tmp1 = gimple_call_lhs (new_stmt1);
4081 new_tmp2 = gimple_call_lhs (new_stmt2);
4082 }
4083 else
4084 {
4085 new_tmp1 = gimple_assign_lhs (new_stmt1);
4086 new_tmp2 = gimple_assign_lhs (new_stmt2);
4087 }
4088
4089 /* Store the results for the next step. */
4090 vec_tmp.quick_push (new_tmp1);
4091 vec_tmp.quick_push (new_tmp2);
4092 }
4093
4094 vec_oprnds0->release ();
4095 *vec_oprnds0 = vec_tmp;
4096 }
4097
4098
4099 /* Check if STMT performs a conversion operation that can be vectorized.
4100 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4101 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4102 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4103
4104 static bool
4105 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4106 gimple **vec_stmt, slp_tree slp_node)
4107 {
4108 tree vec_dest;
4109 tree scalar_dest;
4110 tree op0, op1 = NULL_TREE;
4111 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4112 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4113 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4114 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4115 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4116 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4117 tree new_temp;
4118 gimple *def_stmt;
4119 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4120 int ndts = 2;
4121 gimple *new_stmt = NULL;
4122 stmt_vec_info prev_stmt_info;
4123 poly_uint64 nunits_in;
4124 poly_uint64 nunits_out;
4125 tree vectype_out, vectype_in;
4126 int ncopies, i, j;
4127 tree lhs_type, rhs_type;
4128 enum { NARROW, NONE, WIDEN } modifier;
4129 vec<tree> vec_oprnds0 = vNULL;
4130 vec<tree> vec_oprnds1 = vNULL;
4131 tree vop0;
4132 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4133 vec_info *vinfo = stmt_info->vinfo;
4134 int multi_step_cvt = 0;
4135 vec<tree> interm_types = vNULL;
4136 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4137 int op_type;
4138 unsigned short fltsz;
4139
4140 /* Is STMT a vectorizable conversion? */
4141
4142 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4143 return false;
4144
4145 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4146 && ! vec_stmt)
4147 return false;
4148
4149 if (!is_gimple_assign (stmt))
4150 return false;
4151
4152 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4153 return false;
4154
4155 code = gimple_assign_rhs_code (stmt);
4156 if (!CONVERT_EXPR_CODE_P (code)
4157 && code != FIX_TRUNC_EXPR
4158 && code != FLOAT_EXPR
4159 && code != WIDEN_MULT_EXPR
4160 && code != WIDEN_LSHIFT_EXPR)
4161 return false;
4162
4163 op_type = TREE_CODE_LENGTH (code);
4164
4165 /* Check types of lhs and rhs. */
4166 scalar_dest = gimple_assign_lhs (stmt);
4167 lhs_type = TREE_TYPE (scalar_dest);
4168 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4169
4170 op0 = gimple_assign_rhs1 (stmt);
4171 rhs_type = TREE_TYPE (op0);
4172
4173 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4174 && !((INTEGRAL_TYPE_P (lhs_type)
4175 && INTEGRAL_TYPE_P (rhs_type))
4176 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4177 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4178 return false;
4179
4180 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4181 && ((INTEGRAL_TYPE_P (lhs_type)
4182 && !type_has_mode_precision_p (lhs_type))
4183 || (INTEGRAL_TYPE_P (rhs_type)
4184 && !type_has_mode_precision_p (rhs_type))))
4185 {
4186 if (dump_enabled_p ())
4187 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4188 "type conversion to/from bit-precision unsupported."
4189 "\n");
4190 return false;
4191 }
4192
4193 /* Check the operands of the operation. */
4194 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4195 {
4196 if (dump_enabled_p ())
4197 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4198 "use not simple.\n");
4199 return false;
4200 }
4201 if (op_type == binary_op)
4202 {
4203 bool ok;
4204
4205 op1 = gimple_assign_rhs2 (stmt);
4206 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4207 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4208 OP1. */
4209 if (CONSTANT_CLASS_P (op0))
4210 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4211 else
4212 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4213
4214 if (!ok)
4215 {
4216 if (dump_enabled_p ())
4217 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4218 "use not simple.\n");
4219 return false;
4220 }
4221 }
4222
4223 /* If op0 is an external or constant def use a vector type of
4224 the same size as the output vector type. */
4225 if (!vectype_in)
4226 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4227 if (vec_stmt)
4228 gcc_assert (vectype_in);
4229 if (!vectype_in)
4230 {
4231 if (dump_enabled_p ())
4232 {
4233 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4234 "no vectype for scalar type ");
4235 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4236 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4237 }
4238
4239 return false;
4240 }
4241
4242 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4243 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4244 {
4245 if (dump_enabled_p ())
4246 {
4247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4248 "can't convert between boolean and non "
4249 "boolean vectors");
4250 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4251 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4252 }
4253
4254 return false;
4255 }
4256
4257 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4258 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4259 if (known_eq (nunits_out, nunits_in))
4260 modifier = NONE;
4261 else if (multiple_p (nunits_out, nunits_in))
4262 modifier = NARROW;
4263 else
4264 {
4265 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4266 modifier = WIDEN;
4267 }
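/* E.g. with 128-bit vectors: int -> float keeps 4 lanes in and 4 out
   (NONE); int -> short goes from 4 lanes in to 8 lanes out (NARROW);
   short -> int goes from 8 lanes in to 4 lanes out (WIDEN).  */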
4268
4269 /* Multiple types in SLP are handled by creating the appropriate number of
4270 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4271 case of SLP. */
4272 if (slp_node)
4273 ncopies = 1;
4274 else if (modifier == NARROW)
4275 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4276 else
4277 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4278
4279 /* Sanity check: make sure that at least one copy of the vectorized stmt
4280 needs to be generated. */
4281 gcc_assert (ncopies >= 1);
4282
4283 bool found_mode = false;
4284 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4285 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4286 opt_scalar_mode rhs_mode_iter;
4287
4288 /* Supportable by target? */
4289 switch (modifier)
4290 {
4291 case NONE:
4292 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4293 return false;
4294 if (supportable_convert_operation (code, vectype_out, vectype_in,
4295 &decl1, &code1))
4296 break;
4297 /* FALLTHRU */
4298 unsupported:
4299 if (dump_enabled_p ())
4300 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4301 "conversion not supported by target.\n");
4302 return false;
4303
4304 case WIDEN:
4305 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4306 &code1, &code2, &multi_step_cvt,
4307 &interm_types))
4308 {
4309 /* Binary widening operation can only be supported directly by the
4310 architecture. */
4311 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4312 break;
4313 }
4314
4315 if (code != FLOAT_EXPR
4316 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4317 goto unsupported;
4318
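/* E.g. (hypothetical target) a short -> double conversion without
   direct widening support can be done by first widening the shorts
   to ints (NOP_EXPR, possibly multi-step) and then converting those
   ints to doubles; the loop below searches for such an intermediate
   integer mode.  */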
4319 fltsz = GET_MODE_SIZE (lhs_mode);
4320 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4321 {
4322 rhs_mode = rhs_mode_iter.require ();
4323 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4324 break;
4325
4326 cvt_type
4327 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4328 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4329 if (cvt_type == NULL_TREE)
4330 goto unsupported;
4331
4332 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4333 {
4334 if (!supportable_convert_operation (code, vectype_out,
4335 cvt_type, &decl1, &codecvt1))
4336 goto unsupported;
4337 }
4338 else if (!supportable_widening_operation (code, stmt, vectype_out,
4339 cvt_type, &codecvt1,
4340 &codecvt2, &multi_step_cvt,
4341 &interm_types))
4342 continue;
4343 else
4344 gcc_assert (multi_step_cvt == 0);
4345
4346 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4347 vectype_in, &code1, &code2,
4348 &multi_step_cvt, &interm_types))
4349 {
4350 found_mode = true;
4351 break;
4352 }
4353 }
4354
4355 if (!found_mode)
4356 goto unsupported;
4357
4358 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4359 codecvt2 = ERROR_MARK;
4360 else
4361 {
4362 multi_step_cvt++;
4363 interm_types.safe_push (cvt_type);
4364 cvt_type = NULL_TREE;
4365 }
4366 break;
4367
4368 case NARROW:
4369 gcc_assert (op_type == unary_op);
4370 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4371 &code1, &multi_step_cvt,
4372 &interm_types))
4373 break;
4374
4375 if (code != FIX_TRUNC_EXPR
4376 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4377 goto unsupported;
4378
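/* E.g. (hypothetical target) a double -> short conversion without
   direct support can be done by first truncating the doubles to
   same-width integers (FIX_TRUNC_EXPR) and then narrowing those
   integers to shorts (NOP_EXPR, possibly in several steps).  */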
4379 cvt_type
4380 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4381 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4382 if (cvt_type == NULL_TREE)
4383 goto unsupported;
4384 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4385 &decl1, &codecvt1))
4386 goto unsupported;
4387 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4388 &code1, &multi_step_cvt,
4389 &interm_types))
4390 break;
4391 goto unsupported;
4392
4393 default:
4394 gcc_unreachable ();
4395 }
4396
4397 if (!vec_stmt) /* transformation not required. */
4398 {
4399 if (dump_enabled_p ())
4400 dump_printf_loc (MSG_NOTE, vect_location,
4401 "=== vectorizable_conversion ===\n");
4402 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4403 {
4404 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4405 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4406 }
4407 else if (modifier == NARROW)
4408 {
4409 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4410 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4411 }
4412 else
4413 {
4414 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4415 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4416 }
4417 interm_types.release ();
4418 return true;
4419 }
4420
4421 /* Transform. */
4422 if (dump_enabled_p ())
4423 dump_printf_loc (MSG_NOTE, vect_location,
4424 "transform conversion. ncopies = %d.\n", ncopies);
4425
4426 if (op_type == binary_op)
4427 {
4428 if (CONSTANT_CLASS_P (op0))
4429 op0 = fold_convert (TREE_TYPE (op1), op0);
4430 else if (CONSTANT_CLASS_P (op1))
4431 op1 = fold_convert (TREE_TYPE (op0), op1);
4432 }
4433
4434 /* In case of multi-step conversion, we first generate conversion operations
4435 to the intermediate types, and then from those types to the final one.
4436 We create vector destinations for the intermediate type (TYPES) received
4437 from supportable_*_operation, and store them in the correct order
4438 for future use in vect_create_vectorized_*_stmts (). */
4439 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4440 vec_dest = vect_create_destination_var (scalar_dest,
4441 (cvt_type && modifier == WIDEN)
4442 ? cvt_type : vectype_out);
4443 vec_dsts.quick_push (vec_dest);
4444
4445 if (multi_step_cvt)
4446 {
4447 for (i = interm_types.length () - 1;
4448 interm_types.iterate (i, &intermediate_type); i--)
4449 {
4450 vec_dest = vect_create_destination_var (scalar_dest,
4451 intermediate_type);
4452 vec_dsts.quick_push (vec_dest);
4453 }
4454 }
4455
4456 if (cvt_type)
4457 vec_dest = vect_create_destination_var (scalar_dest,
4458 modifier == WIDEN
4459 ? vectype_out : cvt_type);
4460
4461 if (!slp_node)
4462 {
4463 if (modifier == WIDEN)
4464 {
4465 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4466 if (op_type == binary_op)
4467 vec_oprnds1.create (1);
4468 }
4469 else if (modifier == NARROW)
4470 vec_oprnds0.create (
4471 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4472 }
4473 else if (code == WIDEN_LSHIFT_EXPR)
4474 vec_oprnds1.create (slp_node->vec_stmts_size);
4475
4476 last_oprnd = op0;
4477 prev_stmt_info = NULL;
4478 switch (modifier)
4479 {
4480 case NONE:
4481 for (j = 0; j < ncopies; j++)
4482 {
4483 if (j == 0)
4484 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4485 else
4486 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4487
4488 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4489 {
4490 /* Arguments are ready, create the new vector stmt. */
4491 if (code1 == CALL_EXPR)
4492 {
4493 new_stmt = gimple_build_call (decl1, 1, vop0);
4494 new_temp = make_ssa_name (vec_dest, new_stmt);
4495 gimple_call_set_lhs (new_stmt, new_temp);
4496 }
4497 else
4498 {
4499 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4500 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4501 new_temp = make_ssa_name (vec_dest, new_stmt);
4502 gimple_assign_set_lhs (new_stmt, new_temp);
4503 }
4504
4505 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4506 if (slp_node)
4507 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4508 else
4509 {
4510 if (!prev_stmt_info)
4511 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4512 else
4513 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4514 prev_stmt_info = vinfo_for_stmt (new_stmt);
4515 }
4516 }
4517 }
4518 break;
4519
4520 case WIDEN:
4521 /* In case the vectorization factor (VF) is bigger than the number
4522 of elements that we can fit in a vectype (nunits), we have to
4523 generate more than one vector stmt, i.e. we need to "unroll"
4524 the vector stmt by a factor of VF/nunits. */
4525 for (j = 0; j < ncopies; j++)
4526 {
4527 /* Handle uses. */
4528 if (j == 0)
4529 {
4530 if (slp_node)
4531 {
4532 if (code == WIDEN_LSHIFT_EXPR)
4533 {
4534 unsigned int k;
4535
4536 vec_oprnd1 = op1;
4537 /* Store vec_oprnd1 for every vector stmt to be created
4538 for SLP_NODE. We check during the analysis that all
4539 the shift arguments are the same. */
4540 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4541 vec_oprnds1.quick_push (vec_oprnd1);
4542
4543 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4544 slp_node);
4545 }
4546 else
4547 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4548 &vec_oprnds1, slp_node);
4549 }
4550 else
4551 {
4552 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4553 vec_oprnds0.quick_push (vec_oprnd0);
4554 if (op_type == binary_op)
4555 {
4556 if (code == WIDEN_LSHIFT_EXPR)
4557 vec_oprnd1 = op1;
4558 else
4559 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4560 vec_oprnds1.quick_push (vec_oprnd1);
4561 }
4562 }
4563 }
4564 else
4565 {
4566 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4567 vec_oprnds0.truncate (0);
4568 vec_oprnds0.quick_push (vec_oprnd0);
4569 if (op_type == binary_op)
4570 {
4571 if (code == WIDEN_LSHIFT_EXPR)
4572 vec_oprnd1 = op1;
4573 else
4574 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4575 vec_oprnd1);
4576 vec_oprnds1.truncate (0);
4577 vec_oprnds1.quick_push (vec_oprnd1);
4578 }
4579 }
4580
4581 /* Arguments are ready. Create the new vector stmts. */
4582 for (i = multi_step_cvt; i >= 0; i--)
4583 {
4584 tree this_dest = vec_dsts[i];
4585 enum tree_code c1 = code1, c2 = code2;
4586 if (i == 0 && codecvt2 != ERROR_MARK)
4587 {
4588 c1 = codecvt1;
4589 c2 = codecvt2;
4590 }
4591 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4592 &vec_oprnds1,
4593 stmt, this_dest, gsi,
4594 c1, c2, decl1, decl2,
4595 op_type);
4596 }
4597
4598 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4599 {
4600 if (cvt_type)
4601 {
4602 if (codecvt1 == CALL_EXPR)
4603 {
4604 new_stmt = gimple_build_call (decl1, 1, vop0);
4605 new_temp = make_ssa_name (vec_dest, new_stmt);
4606 gimple_call_set_lhs (new_stmt, new_temp);
4607 }
4608 else
4609 {
4610 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4611 new_temp = make_ssa_name (vec_dest);
4612 new_stmt = gimple_build_assign (new_temp, codecvt1,
4613 vop0);
4614 }
4615
4616 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4617 }
4618 else
4619 new_stmt = SSA_NAME_DEF_STMT (vop0);
4620
4621 if (slp_node)
4622 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4623 else
4624 {
4625 if (!prev_stmt_info)
4626 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4627 else
4628 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4629 prev_stmt_info = vinfo_for_stmt (new_stmt);
4630 }
4631 }
4632 }
4633
4634 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4635 break;
4636
4637 case NARROW:
4638 /* In case the vectorization factor (VF) is bigger than the number
4639 of elements that we can fit in a vectype (nunits), we have to
4640 generate more than one vector stmt, i.e. we need to "unroll"
4641 the vector stmt by a factor of VF/nunits. */
4642 for (j = 0; j < ncopies; j++)
4643 {
4644 /* Handle uses. */
4645 if (slp_node)
4646 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4647 slp_node);
4648 else
4649 {
4650 vec_oprnds0.truncate (0);
4651 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4652 vect_pow2 (multi_step_cvt) - 1);
4653 }
4654
4655 /* Arguments are ready. Create the new vector stmts. */
4656 if (cvt_type)
4657 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4658 {
4659 if (codecvt1 == CALL_EXPR)
4660 {
4661 new_stmt = gimple_build_call (decl1, 1, vop0);
4662 new_temp = make_ssa_name (vec_dest, new_stmt);
4663 gimple_call_set_lhs (new_stmt, new_temp);
4664 }
4665 else
4666 {
4667 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4668 new_temp = make_ssa_name (vec_dest);
4669 new_stmt = gimple_build_assign (new_temp, codecvt1,
4670 vop0);
4671 }
4672
4673 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4674 vec_oprnds0[i] = new_temp;
4675 }
4676
4677 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4678 stmt, vec_dsts, gsi,
4679 slp_node, code1,
4680 &prev_stmt_info);
4681 }
4682
4683 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4684 break;
4685 }
4686
4687 vec_oprnds0.release ();
4688 vec_oprnds1.release ();
4689 interm_types.release ();
4690
4691 return true;
4692 }
4693
4694
4695 /* Function vectorizable_assignment.
4696
4697 Check if STMT performs an assignment (copy) that can be vectorized.
4698 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4699 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4700 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4701
4702 static bool
4703 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4704 gimple **vec_stmt, slp_tree slp_node)
4705 {
4706 tree vec_dest;
4707 tree scalar_dest;
4708 tree op;
4709 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4710 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4711 tree new_temp;
4712 gimple *def_stmt;
4713 enum vect_def_type dt[1] = {vect_unknown_def_type};
4714 int ndts = 1;
4715 int ncopies;
4716 int i, j;
4717 vec<tree> vec_oprnds = vNULL;
4718 tree vop;
4719 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4720 vec_info *vinfo = stmt_info->vinfo;
4721 gimple *new_stmt = NULL;
4722 stmt_vec_info prev_stmt_info = NULL;
4723 enum tree_code code;
4724 tree vectype_in;
4725
4726 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4727 return false;
4728
4729 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4730 && ! vec_stmt)
4731 return false;
4732
4733 /* Is vectorizable assignment? */
4734 if (!is_gimple_assign (stmt))
4735 return false;
4736
4737 scalar_dest = gimple_assign_lhs (stmt);
4738 if (TREE_CODE (scalar_dest) != SSA_NAME)
4739 return false;
4740
4741 code = gimple_assign_rhs_code (stmt);
4742 if (gimple_assign_single_p (stmt)
4743 || code == PAREN_EXPR
4744 || CONVERT_EXPR_CODE_P (code))
4745 op = gimple_assign_rhs1 (stmt);
4746 else
4747 return false;
4748
4749 if (code == VIEW_CONVERT_EXPR)
4750 op = TREE_OPERAND (op, 0);
4751
4752 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4753 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4754
4755 /* Multiple types in SLP are handled by creating the appropriate number of
4756 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4757 case of SLP. */
4758 if (slp_node)
4759 ncopies = 1;
4760 else
4761 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4762
4763 gcc_assert (ncopies >= 1);
4764
4765 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4766 {
4767 if (dump_enabled_p ())
4768 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4769 "use not simple.\n");
4770 return false;
4771 }
4772
4773 /* We can handle NOP_EXPR conversions that do not change the number
4774 of elements or the vector size. */
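/* E.g. a signed int -> unsigned int conversion keeps both the element
   count and the vector size and is handled below as a plain vector
   copy through a VIEW_CONVERT_EXPR, whereas an int -> short conversion
   changes the element count and is rejected here.  */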
4775 if ((CONVERT_EXPR_CODE_P (code)
4776 || code == VIEW_CONVERT_EXPR)
4777 && (!vectype_in
4778 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
4779 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
4780 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4781 return false;
4782
4783 /* We do not handle bit-precision changes. */
4784 if ((CONVERT_EXPR_CODE_P (code)
4785 || code == VIEW_CONVERT_EXPR)
4786 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4787 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4788 || !type_has_mode_precision_p (TREE_TYPE (op)))
4789 /* But a conversion that does not change the bit-pattern is ok. */
4790 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4791 > TYPE_PRECISION (TREE_TYPE (op)))
4792 && TYPE_UNSIGNED (TREE_TYPE (op)))
4793 /* Conversion between boolean types of different sizes is
4794 a simple assignment in case their vectypes are the same
4795 boolean vectors. */
4796 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4797 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4798 {
4799 if (dump_enabled_p ())
4800 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4801 "type conversion to/from bit-precision "
4802 "unsupported.\n");
4803 return false;
4804 }
4805
4806 if (!vec_stmt) /* transformation not required. */
4807 {
4808 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4809 if (dump_enabled_p ())
4810 dump_printf_loc (MSG_NOTE, vect_location,
4811 "=== vectorizable_assignment ===\n");
4812 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4813 return true;
4814 }
4815
4816 /* Transform. */
4817 if (dump_enabled_p ())
4818 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4819
4820 /* Handle def. */
4821 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4822
4823 /* Handle use. */
4824 for (j = 0; j < ncopies; j++)
4825 {
4826 /* Handle uses. */
4827 if (j == 0)
4828 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4829 else
4830 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4831
4832 /* Arguments are ready. Create the new vector stmt. */
4833 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4834 {
4835 if (CONVERT_EXPR_CODE_P (code)
4836 || code == VIEW_CONVERT_EXPR)
4837 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4838 new_stmt = gimple_build_assign (vec_dest, vop);
4839 new_temp = make_ssa_name (vec_dest, new_stmt);
4840 gimple_assign_set_lhs (new_stmt, new_temp);
4841 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4842 if (slp_node)
4843 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4844 }
4845
4846 if (slp_node)
4847 continue;
4848
4849 if (j == 0)
4850 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4851 else
4852 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4853
4854 prev_stmt_info = vinfo_for_stmt (new_stmt);
4855 }
4856
4857 vec_oprnds.release ();
4858 return true;
4859 }
4860
4861
4862 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4863 either as a shift by a scalar or by a vector. */
4864
4865 bool
4866 vect_supportable_shift (enum tree_code code, tree scalar_type)
4867 {
4868
4869 machine_mode vec_mode;
4870 optab optab;
4871 int icode;
4872 tree vectype;
4873
4874 vectype = get_vectype_for_scalar_type (scalar_type);
4875 if (!vectype)
4876 return false;
4877
4878 optab = optab_for_tree_code (code, vectype, optab_scalar);
4879 if (!optab
4880 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4881 {
4882 optab = optab_for_tree_code (code, vectype, optab_vector);
4883 if (!optab
4884 || (optab_handler (optab, TYPE_MODE (vectype))
4885 == CODE_FOR_nothing))
4886 return false;
4887 }
4888
4889 vec_mode = TYPE_MODE (vectype);
4890 icode = (int) optab_handler (optab, vec_mode);
4891 if (icode == CODE_FOR_nothing)
4892 return false;
4893
4894 return true;
4895 }
4896
4897
4898 /* Function vectorizable_shift.
4899
4900 Check if STMT performs a shift operation that can be vectorized.
4901 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4902 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4903 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4904
4905 static bool
4906 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4907 gimple **vec_stmt, slp_tree slp_node)
4908 {
4909 tree vec_dest;
4910 tree scalar_dest;
4911 tree op0, op1 = NULL;
4912 tree vec_oprnd1 = NULL_TREE;
4913 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4914 tree vectype;
4915 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4916 enum tree_code code;
4917 machine_mode vec_mode;
4918 tree new_temp;
4919 optab optab;
4920 int icode;
4921 machine_mode optab_op2_mode;
4922 gimple *def_stmt;
4923 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4924 int ndts = 2;
4925 gimple *new_stmt = NULL;
4926 stmt_vec_info prev_stmt_info;
4927 poly_uint64 nunits_in;
4928 poly_uint64 nunits_out;
4929 tree vectype_out;
4930 tree op1_vectype;
4931 int ncopies;
4932 int j, i;
4933 vec<tree> vec_oprnds0 = vNULL;
4934 vec<tree> vec_oprnds1 = vNULL;
4935 tree vop0, vop1;
4936 unsigned int k;
4937 bool scalar_shift_arg = true;
4938 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4939 vec_info *vinfo = stmt_info->vinfo;
4940
4941 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4942 return false;
4943
4944 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4945 && ! vec_stmt)
4946 return false;
4947
4948 /* Is STMT a vectorizable binary/unary operation? */
4949 if (!is_gimple_assign (stmt))
4950 return false;
4951
4952 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4953 return false;
4954
4955 code = gimple_assign_rhs_code (stmt);
4956
4957 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4958 || code == RROTATE_EXPR))
4959 return false;
4960
4961 scalar_dest = gimple_assign_lhs (stmt);
4962 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4963 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4964 {
4965 if (dump_enabled_p ())
4966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4967 "bit-precision shifts not supported.\n");
4968 return false;
4969 }
4970
4971 op0 = gimple_assign_rhs1 (stmt);
4972 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4973 {
4974 if (dump_enabled_p ())
4975 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4976 "use not simple.\n");
4977 return false;
4978 }
4979 /* If op0 is an external or constant def use a vector type with
4980 the same size as the output vector type. */
4981 if (!vectype)
4982 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4983 if (vec_stmt)
4984 gcc_assert (vectype);
4985 if (!vectype)
4986 {
4987 if (dump_enabled_p ())
4988 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4989 "no vectype for scalar type\n");
4990 return false;
4991 }
4992
4993 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4994 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4995 if (maybe_ne (nunits_out, nunits_in))
4996 return false;
4997
4998 op1 = gimple_assign_rhs2 (stmt);
4999 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
5000 {
5001 if (dump_enabled_p ())
5002 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5003 "use not simple.\n");
5004 return false;
5005 }
5006
5007 /* Multiple types in SLP are handled by creating the appropriate number of
5008 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5009 case of SLP. */
5010 if (slp_node)
5011 ncopies = 1;
5012 else
5013 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5014
5015 gcc_assert (ncopies >= 1);
5016
5017 /* Determine whether the shift amount is a vector or a scalar. If the
5018 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5019
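/* E.g. in "x[i] << 3", or "x[i] << n" with loop-invariant n, the shift
   amount is a scalar and the vector/scalar shift optab can be used,
   whereas "x[i] << y[i]" needs the vector/vector shift optab with one
   shift amount per element.  */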
5020 if ((dt[1] == vect_internal_def
5021 || dt[1] == vect_induction_def)
5022 && !slp_node)
5023 scalar_shift_arg = false;
5024 else if (dt[1] == vect_constant_def
5025 || dt[1] == vect_external_def
5026 || dt[1] == vect_internal_def)
5027 {
5028 /* In SLP, we need to check whether the shift count is the same;
5029 in loops, if it is a constant or invariant, it is always
5030 a scalar shift. */
5031 if (slp_node)
5032 {
5033 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5034 gimple *slpstmt;
5035
5036 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5037 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5038 scalar_shift_arg = false;
5039 }
5040
5041 /* If the shift amount is computed by a pattern stmt, we cannot
5042 use the scalar amount directly, so give up and use a vector
5043 shift. */
5044 if (dt[1] == vect_internal_def)
5045 {
5046 gimple *def = SSA_NAME_DEF_STMT (op1);
5047 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5048 scalar_shift_arg = false;
5049 }
5050 }
5051 else
5052 {
5053 if (dump_enabled_p ())
5054 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5055 "operand mode requires invariant argument.\n");
5056 return false;
5057 }
5058
5059 /* Vector shifted by vector. */
5060 if (!scalar_shift_arg)
5061 {
5062 optab = optab_for_tree_code (code, vectype, optab_vector);
5063 if (dump_enabled_p ())
5064 dump_printf_loc (MSG_NOTE, vect_location,
5065 "vector/vector shift/rotate found.\n");
5066
5067 if (!op1_vectype)
5068 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5069 if (op1_vectype == NULL_TREE
5070 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5071 {
5072 if (dump_enabled_p ())
5073 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5074 "unusable type for last operand in"
5075 " vector/vector shift/rotate.\n");
5076 return false;
5077 }
5078 }
5079 /* See if the machine has a vector shifted by scalar insn and if not
5080 then see if it has a vector shifted by vector insn. */
5081 else
5082 {
5083 optab = optab_for_tree_code (code, vectype, optab_scalar);
5084 if (optab
5085 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5086 {
5087 if (dump_enabled_p ())
5088 dump_printf_loc (MSG_NOTE, vect_location,
5089 "vector/scalar shift/rotate found.\n");
5090 }
5091 else
5092 {
5093 optab = optab_for_tree_code (code, vectype, optab_vector);
5094 if (optab
5095 && (optab_handler (optab, TYPE_MODE (vectype))
5096 != CODE_FOR_nothing))
5097 {
5098 scalar_shift_arg = false;
5099
5100 if (dump_enabled_p ())
5101 dump_printf_loc (MSG_NOTE, vect_location,
5102 "vector/vector shift/rotate found.\n");
5103
5104 /* Unlike the other binary operators, shifts/rotates have
5105 an int rhs instead of the same type as the lhs,
5106 so make sure the scalar is the right type if we are
5107 dealing with vectors of long long/long/short/char. */
5108 if (dt[1] == vect_constant_def)
5109 op1 = fold_convert (TREE_TYPE (vectype), op1);
5110 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5111 TREE_TYPE (op1)))
5112 {
5113 if (slp_node
5114 && TYPE_MODE (TREE_TYPE (vectype))
5115 != TYPE_MODE (TREE_TYPE (op1)))
5116 {
5117 if (dump_enabled_p ())
5118 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5119 "unusable type for last operand in"
5120 " vector/vector shift/rotate.\n");
5121 return false;
5122 }
5123 if (vec_stmt && !slp_node)
5124 {
5125 op1 = fold_convert (TREE_TYPE (vectype), op1);
5126 op1 = vect_init_vector (stmt, op1,
5127 TREE_TYPE (vectype), NULL);
5128 }
5129 }
5130 }
5131 }
5132 }
5133
5134 /* Supportable by target? */
5135 if (!optab)
5136 {
5137 if (dump_enabled_p ())
5138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5139 "no optab.\n");
5140 return false;
5141 }
5142 vec_mode = TYPE_MODE (vectype);
5143 icode = (int) optab_handler (optab, vec_mode);
5144 if (icode == CODE_FOR_nothing)
5145 {
5146 if (dump_enabled_p ())
5147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5148 "op not supported by target.\n");
5149 /* Check only during analysis. */
5150 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5151 || (!vec_stmt
5152 && !vect_worthwhile_without_simd_p (vinfo, code)))
5153 return false;
5154 if (dump_enabled_p ())
5155 dump_printf_loc (MSG_NOTE, vect_location,
5156 "proceeding using word mode.\n");
5157 }
5158
5159 /* Worthwhile without SIMD support? Check only during analysis. */
5160 if (!vec_stmt
5161 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5162 && !vect_worthwhile_without_simd_p (vinfo, code))
5163 {
5164 if (dump_enabled_p ())
5165 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5166 "not worthwhile without SIMD support.\n");
5167 return false;
5168 }
5169
5170 if (!vec_stmt) /* transformation not required. */
5171 {
5172 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5173 if (dump_enabled_p ())
5174 dump_printf_loc (MSG_NOTE, vect_location,
5175 "=== vectorizable_shift ===\n");
5176 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5177 return true;
5178 }
5179
5180 /* Transform. */
5181
5182 if (dump_enabled_p ())
5183 dump_printf_loc (MSG_NOTE, vect_location,
5184 "transform binary/unary operation.\n");
5185
5186 /* Handle def. */
5187 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5188
5189 prev_stmt_info = NULL;
5190 for (j = 0; j < ncopies; j++)
5191 {
5192 /* Handle uses. */
5193 if (j == 0)
5194 {
5195 if (scalar_shift_arg)
5196 {
5197 /* Vector shl and shr insn patterns can be defined with scalar
5198 operand 2 (shift operand). In this case, use constant or loop
5199 invariant op1 directly, without extending it to vector mode
5200 first. */
5201 optab_op2_mode = insn_data[icode].operand[2].mode;
5202 if (!VECTOR_MODE_P (optab_op2_mode))
5203 {
5204 if (dump_enabled_p ())
5205 dump_printf_loc (MSG_NOTE, vect_location,
5206 "operand 1 using scalar mode.\n");
5207 vec_oprnd1 = op1;
5208 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5209 vec_oprnds1.quick_push (vec_oprnd1);
5210 if (slp_node)
5211 {
5212 /* Store vec_oprnd1 for every vector stmt to be created
5213 for SLP_NODE. We check during the analysis that all
5214 the shift arguments are the same.
5215 TODO: Allow different constants for different vector
5216 stmts generated for an SLP instance. */
5217 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5218 vec_oprnds1.quick_push (vec_oprnd1);
5219 }
5220 }
5221 }
5222
5223 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5224 (a special case for certain kinds of vector shifts); otherwise,
5225 operand 1 should be of a vector type (the usual case). */
5226 if (vec_oprnd1)
5227 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5228 slp_node);
5229 else
5230 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5231 slp_node);
5232 }
5233 else
5234 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5235
5236 /* Arguments are ready. Create the new vector stmt. */
5237 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5238 {
5239 vop1 = vec_oprnds1[i];
5240 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5241 new_temp = make_ssa_name (vec_dest, new_stmt);
5242 gimple_assign_set_lhs (new_stmt, new_temp);
5243 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5244 if (slp_node)
5245 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5246 }
5247
5248 if (slp_node)
5249 continue;
5250
5251 if (j == 0)
5252 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5253 else
5254 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5255 prev_stmt_info = vinfo_for_stmt (new_stmt);
5256 }
5257
5258 vec_oprnds0.release ();
5259 vec_oprnds1.release ();
5260
5261 return true;
5262 }
5263
5264
5265 /* Function vectorizable_operation.
5266
5267 Check if STMT performs a binary, unary or ternary operation that can
5268 be vectorized.
5269 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5270 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5271 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5272
5273 static bool
5274 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5275 gimple **vec_stmt, slp_tree slp_node)
5276 {
5277 tree vec_dest;
5278 tree scalar_dest;
5279 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5280 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5281 tree vectype;
5282 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5283 enum tree_code code, orig_code;
5284 machine_mode vec_mode;
5285 tree new_temp;
5286 int op_type;
5287 optab optab;
5288 bool target_support_p;
5289 gimple *def_stmt;
5290 enum vect_def_type dt[3]
5291 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5292 int ndts = 3;
5293 gimple *new_stmt = NULL;
5294 stmt_vec_info prev_stmt_info;
5295 poly_uint64 nunits_in;
5296 poly_uint64 nunits_out;
5297 tree vectype_out;
5298 int ncopies;
5299 int j, i;
5300 vec<tree> vec_oprnds0 = vNULL;
5301 vec<tree> vec_oprnds1 = vNULL;
5302 vec<tree> vec_oprnds2 = vNULL;
5303 tree vop0, vop1, vop2;
5304 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5305 vec_info *vinfo = stmt_info->vinfo;
5306
5307 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5308 return false;
5309
5310 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5311 && ! vec_stmt)
5312 return false;
5313
5314 /* Is STMT a vectorizable binary/unary operation? */
5315 if (!is_gimple_assign (stmt))
5316 return false;
5317
5318 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5319 return false;
5320
5321 orig_code = code = gimple_assign_rhs_code (stmt);
5322
5323 /* For pointer addition and subtraction, we should use the normal
5324 plus and minus for the vector operation. */
5325 if (code == POINTER_PLUS_EXPR)
5326 code = PLUS_EXPR;
5327 if (code == POINTER_DIFF_EXPR)
5328 code = MINUS_EXPR;
5329
5330 /* Support only unary, binary or ternary operations. */
5331 op_type = TREE_CODE_LENGTH (code);
5332 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5333 {
5334 if (dump_enabled_p ())
5335 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5336 "num. args = %d (not unary/binary/ternary op).\n",
5337 op_type);
5338 return false;
5339 }
5340
5341 scalar_dest = gimple_assign_lhs (stmt);
5342 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5343
5344 /* Most operations cannot handle bit-precision types without extra
5345 truncations. */
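/* For example (hypothetical): arithmetic on a 3-bit bit-field value
   would need an extra truncation back to 3 bits after every vector
   operation, so it is rejected below; bitwise AND/IOR/XOR are safe
   because they never produce bits outside the operands' precision.  */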
5346 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5347 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5348 /* Exceptions are bitwise binary operations. */
5349 && code != BIT_IOR_EXPR
5350 && code != BIT_XOR_EXPR
5351 && code != BIT_AND_EXPR)
5352 {
5353 if (dump_enabled_p ())
5354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5355 "bit-precision arithmetic not supported.\n");
5356 return false;
5357 }
5358
5359 op0 = gimple_assign_rhs1 (stmt);
5360 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5361 {
5362 if (dump_enabled_p ())
5363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5364 "use not simple.\n");
5365 return false;
5366 }
5367 /* If op0 is an external or constant def, use a vector type with
5368 the same size as the output vector type. */
5369 if (!vectype)
5370 {
5371 /* For a boolean type we cannot determine the vectype from an
5372 invariant value (we don't know whether it is a vector
5373 of booleans or a vector of integers). We use the output
5374 vectype because operations on booleans don't change the
5375 type. */
5376 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5377 {
5378 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5379 {
5380 if (dump_enabled_p ())
5381 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5382 "not supported operation on bool value.\n");
5383 return false;
5384 }
5385 vectype = vectype_out;
5386 }
5387 else
5388 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5389 }
5390 if (vec_stmt)
5391 gcc_assert (vectype);
5392 if (!vectype)
5393 {
5394 if (dump_enabled_p ())
5395 {
5396 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5397 "no vectype for scalar type ");
5398 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5399 TREE_TYPE (op0));
5400 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5401 }
5402
5403 return false;
5404 }
5405
5406 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5407 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5408 if (maybe_ne (nunits_out, nunits_in))
5409 return false;
5410
5411 if (op_type == binary_op || op_type == ternary_op)
5412 {
5413 op1 = gimple_assign_rhs2 (stmt);
5414 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5415 {
5416 if (dump_enabled_p ())
5417 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5418 "use not simple.\n");
5419 return false;
5420 }
5421 }
5422 if (op_type == ternary_op)
5423 {
5424 op2 = gimple_assign_rhs3 (stmt);
5425 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5426 {
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5429 "use not simple.\n");
5430 return false;
5431 }
5432 }
5433
5434 /* Multiple types in SLP are handled by creating the appropriate number of
5435 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5436 case of SLP. */
5437 if (slp_node)
5438 ncopies = 1;
5439 else
5440 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5441
5442 gcc_assert (ncopies >= 1);
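/* For instance (illustrative), with a vectorization factor of 8 and a
   4-element vectype such as V4SI, NCOPIES is 2: each scalar stmt is
   replaced by two vector stmts per vectorized-loop iteration.  */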
5443
5444 /* Shifts are handled in vectorizable_shift (). */
5445 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5446 || code == RROTATE_EXPR)
5447 return false;
5448
5449 /* Supportable by target? */
5450
5451 vec_mode = TYPE_MODE (vectype);
5452 if (code == MULT_HIGHPART_EXPR)
5453 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5454 else
5455 {
5456 optab = optab_for_tree_code (code, vectype, optab_default);
5457 if (!optab)
5458 {
5459 if (dump_enabled_p ())
5460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5461 "no optab.\n");
5462 return false;
5463 }
5464 target_support_p = (optab_handler (optab, vec_mode)
5465 != CODE_FOR_nothing);
5466 }
5467
5468 if (!target_support_p)
5469 {
5470 if (dump_enabled_p ())
5471 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5472 "op not supported by target.\n");
5473 /* Check only during analysis. */
5474 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5475 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5476 return false;
5477 if (dump_enabled_p ())
5478 dump_printf_loc (MSG_NOTE, vect_location,
5479 "proceeding using word mode.\n");
5480 }
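/* The word-mode fallback above is only taken when the vector mode is
   exactly one word wide; e.g. (illustrative) bitwise operations on a
   64-bit-wide vector type can still be done as a single word-mode
   operation on a 64-bit target without SIMD support.  */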
5481
5482 /* Worthwhile without SIMD support? Check only during analysis. */
5483 if (!VECTOR_MODE_P (vec_mode)
5484 && !vec_stmt
5485 && !vect_worthwhile_without_simd_p (vinfo, code))
5486 {
5487 if (dump_enabled_p ())
5488 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5489 "not worthwhile without SIMD support.\n");
5490 return false;
5491 }
5492
5493 if (!vec_stmt) /* transformation not required. */
5494 {
5495 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5496 if (dump_enabled_p ())
5497 dump_printf_loc (MSG_NOTE, vect_location,
5498 "=== vectorizable_operation ===\n");
5499 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5500 return true;
5501 }
5502
5503 /* Transform. */
5504
5505 if (dump_enabled_p ())
5506 dump_printf_loc (MSG_NOTE, vect_location,
5507 "transform binary/unary operation.\n");
5508
5509 /* Handle def. */
5510 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5511
5512 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5513 vectors with unsigned elements, but the result is signed. So, we
5514 need to compute the MINUS_EXPR into vectype temporary and
5515 VIEW_CONVERT_EXPR it into the final vectype_out result. */
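/* For example (illustrative): for
     ptrdiff_t d = p - q;
   the operand vectors have unsigned pointer-sized elements, so the
   MINUS_EXPR is computed in that unsigned vectype and the result is
   then VIEW_CONVERTed to the signed vectype of D.  */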
5516 tree vec_cvt_dest = NULL_TREE;
5517 if (orig_code == POINTER_DIFF_EXPR)
5518 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5519
5520 /* In case the vectorization factor (VF) is bigger than the number
5521 of elements that we can fit in a vectype (nunits), we have to generate
5522 more than one vector stmt - i.e. we need to "unroll" the
5523 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5524 from one copy of the vector stmt to the next, in the field
5525 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5526 stages to find the correct vector defs to be used when vectorizing
5527 stmts that use the defs of the current stmt. The example below
5528 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5529 we need to create 4 vectorized stmts):
5530
5531 before vectorization:
5532 RELATED_STMT VEC_STMT
5533 S1: x = memref - -
5534 S2: z = x + 1 - -
5535
5536 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5537 there):
5538 RELATED_STMT VEC_STMT
5539 VS1_0: vx0 = memref0 VS1_1 -
5540 VS1_1: vx1 = memref1 VS1_2 -
5541 VS1_2: vx2 = memref2 VS1_3 -
5542 VS1_3: vx3 = memref3 - -
5543 S1: x = load - VS1_0
5544 S2: z = x + 1 - -
5545
5546 step 2: vectorize stmt S2 (done here):
5547 To vectorize stmt S2 we first need to find the relevant vector
5548 def for the first operand 'x'. This is, as usual, obtained from
5549 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5550 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5551 relevant vector def 'vx0'. Having found 'vx0' we can generate
5552 the vector stmt VS2_0, and as usual, record it in the
5553 STMT_VINFO_VEC_STMT of stmt S2.
5554 When creating the second copy (VS2_1), we obtain the relevant vector
5555 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5556 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5557 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5558 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5559 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5560 chain of stmts and pointers:
5561 RELATED_STMT VEC_STMT
5562 VS1_0: vx0 = memref0 VS1_1 -
5563 VS1_1: vx1 = memref1 VS1_2 -
5564 VS1_2: vx2 = memref2 VS1_3 -
5565 VS1_3: vx3 = memref3 - -
5566 S1: x = load - VS1_0
5567 VS2_0: vz0 = vx0 + v1 VS2_1 -
5568 VS2_1: vz1 = vx1 + v1 VS2_2 -
5569 VS2_2: vz2 = vx2 + v1 VS2_3 -
5570 VS2_3: vz3 = vx3 + v1 - -
5571 S2: z = x + 1 - VS2_0 */
5572
5573 prev_stmt_info = NULL;
5574 for (j = 0; j < ncopies; j++)
5575 {
5576 /* Handle uses. */
5577 if (j == 0)
5578 {
5579 if (op_type == binary_op || op_type == ternary_op)
5580 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5581 slp_node);
5582 else
5583 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5584 slp_node);
5585 if (op_type == ternary_op)
5586 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5587 slp_node);
5588 }
5589 else
5590 {
5591 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5592 if (op_type == ternary_op)
5593 {
5594 tree vec_oprnd = vec_oprnds2.pop ();
5595 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5596 vec_oprnd));
5597 }
5598 }
5599
5600 /* Arguments are ready. Create the new vector stmt. */
5601 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5602 {
5603 vop1 = ((op_type == binary_op || op_type == ternary_op)
5604 ? vec_oprnds1[i] : NULL_TREE);
5605 vop2 = ((op_type == ternary_op)
5606 ? vec_oprnds2[i] : NULL_TREE);
5607 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5608 new_temp = make_ssa_name (vec_dest, new_stmt);
5609 gimple_assign_set_lhs (new_stmt, new_temp);
5610 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5611 if (vec_cvt_dest)
5612 {
5613 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5614 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5615 new_temp);
5616 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5617 gimple_assign_set_lhs (new_stmt, new_temp);
5618 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5619 }
5620 if (slp_node)
5621 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5622 }
5623
5624 if (slp_node)
5625 continue;
5626
5627 if (j == 0)
5628 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5629 else
5630 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5631 prev_stmt_info = vinfo_for_stmt (new_stmt);
5632 }
5633
5634 vec_oprnds0.release ();
5635 vec_oprnds1.release ();
5636 vec_oprnds2.release ();
5637
5638 return true;
5639 }
5640
5641 /* A helper function to ensure data reference DR's base alignment. */
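/* For example (illustrative): if the vectorizer relies on aligned vector
   accesses to a local array whose declared alignment is smaller than
   DR_TARGET_ALIGNMENT, this bumps the alignment of the base decl (in
   bits) so that those accesses are valid.  */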
5642
5643 static void
5644 ensure_base_align (struct data_reference *dr)
5645 {
5646 if (!dr->aux)
5647 return;
5648
5649 if (DR_VECT_AUX (dr)->base_misaligned)
5650 {
5651 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5652
5653 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5654
5655 if (decl_in_symtab_p (base_decl))
5656 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5657 else
5658 {
5659 SET_DECL_ALIGN (base_decl, align_base_to);
5660 DECL_USER_ALIGN (base_decl) = 1;
5661 }
5662 DR_VECT_AUX (dr)->base_misaligned = false;
5663 }
5664 }
5665
5666
5667 /* Function get_group_alias_ptr_type.
5668
5669 Return the alias type for the group starting at FIRST_STMT. */
5670
5671 static tree
5672 get_group_alias_ptr_type (gimple *first_stmt)
5673 {
5674 struct data_reference *first_dr, *next_dr;
5675 gimple *next_stmt;
5676
5677 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5678 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5679 while (next_stmt)
5680 {
5681 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5682 if (get_alias_set (DR_REF (first_dr))
5683 != get_alias_set (DR_REF (next_dr)))
5684 {
5685 if (dump_enabled_p ())
5686 dump_printf_loc (MSG_NOTE, vect_location,
5687 "conflicting alias set types.\n");
5688 return ptr_type_node;
5689 }
5690 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5691 }
5692 return reference_alias_ptr_type (DR_REF (first_dr));
5693 }
5694
5695
5696 /* Function vectorizable_store.
5697
5698 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5699 can be vectorized.
5700 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5701 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5702 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5703
5704 static bool
5705 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5706 slp_tree slp_node)
5707 {
5708 tree scalar_dest;
5709 tree data_ref;
5710 tree op;
5711 tree vec_oprnd = NULL_TREE;
5712 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5713 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5714 tree elem_type;
5715 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5716 struct loop *loop = NULL;
5717 machine_mode vec_mode;
5718 tree dummy;
5719 enum dr_alignment_support alignment_support_scheme;
5720 gimple *def_stmt;
5721 enum vect_def_type dt;
5722 stmt_vec_info prev_stmt_info = NULL;
5723 tree dataref_ptr = NULL_TREE;
5724 tree dataref_offset = NULL_TREE;
5725 gimple *ptr_incr = NULL;
5726 int ncopies;
5727 int j;
5728 gimple *next_stmt, *first_stmt;
5729 bool grouped_store;
5730 unsigned int group_size, i;
5731 vec<tree> oprnds = vNULL;
5732 vec<tree> result_chain = vNULL;
5733 bool inv_p;
5734 tree offset = NULL_TREE;
5735 vec<tree> vec_oprnds = vNULL;
5736 bool slp = (slp_node != NULL);
5737 unsigned int vec_num;
5738 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5739 vec_info *vinfo = stmt_info->vinfo;
5740 tree aggr_type;
5741 gather_scatter_info gs_info;
5742 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5743 gimple *new_stmt;
5744 poly_uint64 vf;
5745 vec_load_store_type vls_type;
5746 tree ref_type;
5747
5748 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5749 return false;
5750
5751 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5752 && ! vec_stmt)
5753 return false;
5754
5755 /* Is vectorizable store? */
5756
5757 if (!is_gimple_assign (stmt))
5758 return false;
5759
5760 scalar_dest = gimple_assign_lhs (stmt);
5761 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5762 && is_pattern_stmt_p (stmt_info))
5763 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5764 if (TREE_CODE (scalar_dest) != ARRAY_REF
5765 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5766 && TREE_CODE (scalar_dest) != INDIRECT_REF
5767 && TREE_CODE (scalar_dest) != COMPONENT_REF
5768 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5769 && TREE_CODE (scalar_dest) != REALPART_EXPR
5770 && TREE_CODE (scalar_dest) != MEM_REF)
5771 return false;
5772
5773 /* Cannot have hybrid store SLP -- that would mean storing to the
5774 same location twice. */
5775 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5776
5777 gcc_assert (gimple_assign_single_p (stmt));
5778
5779 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5780 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5781
5782 if (loop_vinfo)
5783 {
5784 loop = LOOP_VINFO_LOOP (loop_vinfo);
5785 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5786 }
5787 else
5788 vf = 1;
5789
5790 /* Multiple types in SLP are handled by creating the appropriate number of
5791 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5792 case of SLP. */
5793 if (slp)
5794 ncopies = 1;
5795 else
5796 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5797
5798 gcc_assert (ncopies >= 1);
5799
5800 /* FORNOW. This restriction should be relaxed. */
5801 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5802 {
5803 if (dump_enabled_p ())
5804 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5805 "multiple types in nested loop.\n");
5806 return false;
5807 }
5808
5809 op = gimple_assign_rhs1 (stmt);
5810
5811 /* In case this is a store from a constant, make sure
5812 native_encode_expr can handle it. */
5813 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5814 return false;
5815
5816 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5817 {
5818 if (dump_enabled_p ())
5819 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5820 "use not simple.\n");
5821 return false;
5822 }
5823
5824 if (dt == vect_constant_def || dt == vect_external_def)
5825 vls_type = VLS_STORE_INVARIANT;
5826 else
5827 vls_type = VLS_STORE;
5828
5829 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5830 return false;
5831
5832 elem_type = TREE_TYPE (vectype);
5833 vec_mode = TYPE_MODE (vectype);
5834
5835 /* FORNOW. In some cases we can vectorize even if the data type is not
5836 supported (e.g. array initialization with 0). */
5837 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5838 return false;
5839
5840 if (!STMT_VINFO_DATA_REF (stmt_info))
5841 return false;
5842
5843 vect_memory_access_type memory_access_type;
5844 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5845 &memory_access_type, &gs_info))
5846 return false;
5847
5848 if (!vec_stmt) /* transformation not required. */
5849 {
5850 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5851 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5852 /* The SLP costs are calculated during SLP analysis. */
5853 if (!PURE_SLP_STMT (stmt_info))
5854 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5855 NULL, NULL, NULL);
5856 return true;
5857 }
5858 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5859
5860 /* Transform. */
5861
5862 ensure_base_align (dr);
5863
5864 if (memory_access_type == VMAT_GATHER_SCATTER)
5865 {
5866 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5867 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5868 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5869 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5870 edge pe = loop_preheader_edge (loop);
5871 gimple_seq seq;
5872 basic_block new_bb;
5873 enum { NARROW, NONE, WIDEN } modifier;
5874 poly_uint64 scatter_off_nunits
5875 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5876
5877 if (known_eq (nunits, scatter_off_nunits))
5878 modifier = NONE;
5879 else if (known_eq (nunits * 2, scatter_off_nunits))
5880 {
5881 modifier = WIDEN;
5882
5883 /* Currently gathers and scatters are only supported for
5884 fixed-length vectors. */
5885 unsigned int count = scatter_off_nunits.to_constant ();
5886 vec_perm_builder sel (count, count, 1);
5887 for (i = 0; i < (unsigned int) count; ++i)
5888 sel.quick_push (i | (count / 2));
5889
5890 vec_perm_indices indices (sel, 1, count);
5891 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5892 indices);
5893 gcc_assert (perm_mask != NULL_TREE);
5894 }
5895 else if (known_eq (nunits, scatter_off_nunits * 2))
5896 {
5897 modifier = NARROW;
5898
5899 /* Currently gathers and scatters are only supported for
5900 fixed-length vectors. */
5901 unsigned int count = nunits.to_constant ();
5902 vec_perm_builder sel (count, count, 1);
5903 for (i = 0; i < (unsigned int) count; ++i)
5904 sel.quick_push (i | (count / 2));
5905
5906 vec_perm_indices indices (sel, 2, count);
5907 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5908 gcc_assert (perm_mask != NULL_TREE);
5909 ncopies *= 2;
5910 }
5911 else
5912 gcc_unreachable ();
5913
5914 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5915 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5916 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5917 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5918 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5919 scaletype = TREE_VALUE (arglist);
5920
5921 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5922 && TREE_CODE (rettype) == VOID_TYPE);
5923
5924 ptr = fold_convert (ptrtype, gs_info.base);
5925 if (!is_gimple_min_invariant (ptr))
5926 {
5927 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5928 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5929 gcc_assert (!new_bb);
5930 }
5931
5932 /* Currently we support only unconditional scatter stores,
5933 so mask should be all ones. */
5934 mask = build_int_cst (masktype, -1);
5935 mask = vect_init_vector (stmt, mask, masktype, NULL);
5936
5937 scale = build_int_cst (scaletype, gs_info.scale);
5938
5939 prev_stmt_info = NULL;
5940 for (j = 0; j < ncopies; ++j)
5941 {
5942 if (j == 0)
5943 {
5944 src = vec_oprnd1
5945 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5946 op = vec_oprnd0
5947 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5948 }
5949 else if (modifier != NONE && (j & 1))
5950 {
5951 if (modifier == WIDEN)
5952 {
5953 src = vec_oprnd1
5954 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5955 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5956 stmt, gsi);
5957 }
5958 else if (modifier == NARROW)
5959 {
5960 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5961 stmt, gsi);
5962 op = vec_oprnd0
5963 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5964 vec_oprnd0);
5965 }
5966 else
5967 gcc_unreachable ();
5968 }
5969 else
5970 {
5971 src = vec_oprnd1
5972 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5973 op = vec_oprnd0
5974 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5975 vec_oprnd0);
5976 }
5977
5978 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5979 {
5980 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
5981 TYPE_VECTOR_SUBPARTS (srctype)));
5982 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5983 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5984 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5985 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5986 src = var;
5987 }
5988
5989 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5990 {
5991 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
5992 TYPE_VECTOR_SUBPARTS (idxtype)));
5993 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5994 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5995 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5996 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5997 op = var;
5998 }
5999
6000 new_stmt
6001 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6002
6003 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6004
6005 if (prev_stmt_info == NULL)
6006 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6007 else
6008 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6009 prev_stmt_info = vinfo_for_stmt (new_stmt);
6010 }
6011 return true;
6012 }
6013
6014 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
6015 if (grouped_store)
6016 {
6017 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6018 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6019 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6020
6021 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
6022
6023 /* FORNOW */
6024 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
6025
6026 /* We vectorize all the stmts of the interleaving group when we
6027 reach the last stmt in the group. */
6028 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
6029 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
6030 && !slp)
6031 {
6032 *vec_stmt = NULL;
6033 return true;
6034 }
6035
6036 if (slp)
6037 {
6038 grouped_store = false;
6039 /* VEC_NUM is the number of vect stmts to be created for this
6040 group. */
6041 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6042 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6043 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6044 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6045 op = gimple_assign_rhs1 (first_stmt);
6046 }
6047 else
6048 /* VEC_NUM is the number of vect stmts to be created for this
6049 group. */
6050 vec_num = group_size;
6051
6052 ref_type = get_group_alias_ptr_type (first_stmt);
6053 }
6054 else
6055 {
6056 first_stmt = stmt;
6057 first_dr = dr;
6058 group_size = vec_num = 1;
6059 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6060 }
6061
6062 if (dump_enabled_p ())
6063 dump_printf_loc (MSG_NOTE, vect_location,
6064 "transform store. ncopies = %d\n", ncopies);
6065
6066 if (memory_access_type == VMAT_ELEMENTWISE
6067 || memory_access_type == VMAT_STRIDED_SLP)
6068 {
6069 gimple_stmt_iterator incr_gsi;
6070 bool insert_after;
6071 gimple *incr;
6072 tree offvar;
6073 tree ivstep;
6074 tree running_off;
6075 gimple_seq stmts = NULL;
6076 tree stride_base, stride_step, alias_off;
6077 tree vec_oprnd;
6078 unsigned int g;
6079 /* Checked by get_load_store_type. */
6080 unsigned int const_nunits = nunits.to_constant ();
6081
6082 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6083
6084 stride_base
6085 = fold_build_pointer_plus
6086 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6087 size_binop (PLUS_EXPR,
6088 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6089 convert_to_ptrofftype (DR_INIT (first_dr))));
6090 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6091
6092 /* For a store with loop-invariant (but other than power-of-2)
6093 stride (i.e. not a grouped access) like so:
6094
6095 for (i = 0; i < n; i += stride)
6096 array[i] = ...;
6097
6098 we generate a new induction variable and new stores from
6099 the components of the (vectorized) rhs:
6100
6101 for (j = 0; ; j += VF*stride)
6102 vectemp = ...;
6103 tmp1 = vectemp[0];
6104 array[j] = tmp1;
6105 tmp2 = vectemp[1];
6106 array[j + stride] = tmp2;
6107 ...
6108 */
6109
6110 unsigned nstores = const_nunits;
6111 unsigned lnel = 1;
6112 tree ltype = elem_type;
6113 tree lvectype = vectype;
6114 if (slp)
6115 {
6116 if (group_size < const_nunits
6117 && const_nunits % group_size == 0)
6118 {
6119 nstores = const_nunits / group_size;
6120 lnel = group_size;
6121 ltype = build_vector_type (elem_type, group_size);
6122 lvectype = vectype;
6123
6124 /* First check whether the vec_extract optab supports extraction
6125 of the vector elts directly; if not, try the fallbacks below. */
6126 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6127 machine_mode vmode;
6128 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6129 || !VECTOR_MODE_P (vmode)
6130 || (convert_optab_handler (vec_extract_optab,
6131 TYPE_MODE (vectype), vmode)
6132 == CODE_FOR_nothing))
6133 {
6134 /* Try to avoid emitting an extract of vector elements
6135 by performing the extracts using an integer type of the
6136 same size, extracting from a vector of those and then
6137 re-interpreting it as the original vector type if
6138 supported. */
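/* Hypothetical illustration: storing groups of two HImode elements out
   of a V8HI vector gives LSIZE == 32, so the V8HI is viewed as V4SI and
   SImode chunks are extracted, resulting in 4 rather than 8 scalar
   stores per vector.  */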
6139 unsigned lsize
6140 = group_size * GET_MODE_BITSIZE (elmode);
6141 elmode = int_mode_for_size (lsize, 0).require ();
6142 unsigned int lnunits = const_nunits / group_size;
6143 /* If we can't construct such a vector fall back to
6144 element extracts from the original vector type and
6145 element size stores. */
6146 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6147 && VECTOR_MODE_P (vmode)
6148 && (convert_optab_handler (vec_extract_optab,
6149 vmode, elmode)
6150 != CODE_FOR_nothing))
6151 {
6152 nstores = lnunits;
6153 lnel = group_size;
6154 ltype = build_nonstandard_integer_type (lsize, 1);
6155 lvectype = build_vector_type (ltype, nstores);
6156 }
6157 /* Else fall back to vector extraction anyway.
6158 Fewer stores are more important than avoiding spilling
6159 of the vector we extract from. Compared to the
6160 construction case in vectorizable_load no store-forwarding
6161 issue exists here for reasonable archs. */
6162 }
6163 }
6164 else if (group_size >= const_nunits
6165 && group_size % const_nunits == 0)
6166 {
6167 nstores = 1;
6168 lnel = const_nunits;
6169 ltype = vectype;
6170 lvectype = vectype;
6171 }
6172 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6173 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6174 }
6175
6176 ivstep = stride_step;
6177 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6178 build_int_cst (TREE_TYPE (ivstep), vf));
6179
6180 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6181
6182 create_iv (stride_base, ivstep, NULL,
6183 loop, &incr_gsi, insert_after,
6184 &offvar, NULL);
6185 incr = gsi_stmt (incr_gsi);
6186 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6187
6188 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6189 if (stmts)
6190 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6191
6192 prev_stmt_info = NULL;
6193 alias_off = build_int_cst (ref_type, 0);
6194 next_stmt = first_stmt;
6195 for (g = 0; g < group_size; g++)
6196 {
6197 running_off = offvar;
6198 if (g)
6199 {
6200 tree size = TYPE_SIZE_UNIT (ltype);
6201 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6202 size);
6203 tree newoff = copy_ssa_name (running_off, NULL);
6204 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6205 running_off, pos);
6206 vect_finish_stmt_generation (stmt, incr, gsi);
6207 running_off = newoff;
6208 }
6209 unsigned int group_el = 0;
6210 unsigned HOST_WIDE_INT
6211 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6212 for (j = 0; j < ncopies; j++)
6213 {
6214 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6215 and first_stmt == stmt. */
6216 if (j == 0)
6217 {
6218 if (slp)
6219 {
6220 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6221 slp_node);
6222 vec_oprnd = vec_oprnds[0];
6223 }
6224 else
6225 {
6226 gcc_assert (gimple_assign_single_p (next_stmt));
6227 op = gimple_assign_rhs1 (next_stmt);
6228 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6229 }
6230 }
6231 else
6232 {
6233 if (slp)
6234 vec_oprnd = vec_oprnds[j];
6235 else
6236 {
6237 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6238 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6239 }
6240 }
6241 /* Pun the vector to extract from if necessary. */
6242 if (lvectype != vectype)
6243 {
6244 tree tem = make_ssa_name (lvectype);
6245 gimple *pun
6246 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6247 lvectype, vec_oprnd));
6248 vect_finish_stmt_generation (stmt, pun, gsi);
6249 vec_oprnd = tem;
6250 }
6251 for (i = 0; i < nstores; i++)
6252 {
6253 tree newref, newoff;
6254 gimple *incr, *assign;
6255 tree size = TYPE_SIZE (ltype);
6256 /* Extract the i'th component. */
6257 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6258 bitsize_int (i), size);
6259 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6260 size, pos);
6261
6262 elem = force_gimple_operand_gsi (gsi, elem, true,
6263 NULL_TREE, true,
6264 GSI_SAME_STMT);
6265
6266 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6267 group_el * elsz);
6268 newref = build2 (MEM_REF, ltype,
6269 running_off, this_off);
6270
6271 /* And store it to *running_off. */
6272 assign = gimple_build_assign (newref, elem);
6273 vect_finish_stmt_generation (stmt, assign, gsi);
6274
6275 group_el += lnel;
6276 if (! slp
6277 || group_el == group_size)
6278 {
6279 newoff = copy_ssa_name (running_off, NULL);
6280 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6281 running_off, stride_step);
6282 vect_finish_stmt_generation (stmt, incr, gsi);
6283
6284 running_off = newoff;
6285 group_el = 0;
6286 }
6287 if (g == group_size - 1
6288 && !slp)
6289 {
6290 if (j == 0 && i == 0)
6291 STMT_VINFO_VEC_STMT (stmt_info)
6292 = *vec_stmt = assign;
6293 else
6294 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6295 prev_stmt_info = vinfo_for_stmt (assign);
6296 }
6297 }
6298 }
6299 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6300 if (slp)
6301 break;
6302 }
6303
6304 vec_oprnds.release ();
6305 return true;
6306 }
6307
6308 auto_vec<tree> dr_chain (group_size);
6309 oprnds.create (group_size);
6310
6311 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6312 gcc_assert (alignment_support_scheme);
6313 /* Targets with store-lane instructions must not require explicit
6314 realignment. */
6315 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6316 || alignment_support_scheme == dr_aligned
6317 || alignment_support_scheme == dr_unaligned_supported);
6318
6319 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6320 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6321 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6322
6323 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6324 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6325 else
6326 aggr_type = vectype;
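/* E.g. (illustrative) for an interleaved group of three V4SI stores,
   AGGR_TYPE is a 12-element integer array: IFN_STORE_LANES consumes the
   whole group as a single array argument (an st3-style instruction on
   targets that provide one).  */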
6327
6328 /* In case the vectorization factor (VF) is bigger than the number
6329 of elements that we can fit in a vectype (nunits), we have to generate
6330 more than one vector stmt - i.e. we need to "unroll" the
6331 vector stmt by a factor VF/nunits. For more details see documentation in
6332 vect_get_vec_def_for_stmt_copy. */
6333
6334 /* In case of interleaving (non-unit grouped access):
6335
6336 S1: &base + 2 = x2
6337 S2: &base = x0
6338 S3: &base + 1 = x1
6339 S4: &base + 3 = x3
6340
6341 We create vectorized stores starting from the base address (the access of
6342 the first stmt in the chain - S2 in the above example), when the last
6343 store stmt of the chain (S4) is reached:
6344
6345 VS1: &base = vx2
6346 VS2: &base + vec_size*1 = vx0
6347 VS3: &base + vec_size*2 = vx1
6348 VS4: &base + vec_size*3 = vx3
6349
6350 Then permutation statements are generated:
6351
6352 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6353 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6354 ...
6355
6356 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6357 (the order of the data-refs in the output of vect_permute_store_chain
6358 corresponds to the order of scalar stmts in the interleaving chain - see
6359 the documentation of vect_permute_store_chain()).
6360
6361 In case of both multiple types and interleaving, the above vector stores and
6362 permutation stmts are created for every copy. The result vector stmts are
6363 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6364 STMT_VINFO_RELATED_STMT for the next copies.
6365 */
6366
6367 prev_stmt_info = NULL;
6368 for (j = 0; j < ncopies; j++)
6369 {
6370
6371 if (j == 0)
6372 {
6373 if (slp)
6374 {
6375 /* Get vectorized arguments for SLP_NODE. */
6376 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6377 NULL, slp_node);
6378
6379 vec_oprnd = vec_oprnds[0];
6380 }
6381 else
6382 {
6383 /* For interleaved stores we collect vectorized defs for all the
6384 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6385 used as an input to vect_permute_store_chain(), and OPRNDS as
6386 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6387
6388 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6389 OPRNDS are of size 1. */
6390 next_stmt = first_stmt;
6391 for (i = 0; i < group_size; i++)
6392 {
6393 /* Since gaps are not supported for interleaved stores,
6394 GROUP_SIZE is the exact number of stmts in the chain.
6395 Therefore, NEXT_STMT can't be NULL_TREE. In case
6396 there is no interleaving, GROUP_SIZE is 1, and only one
6397 iteration of the loop will be executed. */
6398 gcc_assert (next_stmt
6399 && gimple_assign_single_p (next_stmt));
6400 op = gimple_assign_rhs1 (next_stmt);
6401
6402 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6403 dr_chain.quick_push (vec_oprnd);
6404 oprnds.quick_push (vec_oprnd);
6405 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6406 }
6407 }
6408
6409 /* We should have caught mismatched types earlier. */
6410 gcc_assert (useless_type_conversion_p (vectype,
6411 TREE_TYPE (vec_oprnd)));
6412 bool simd_lane_access_p
6413 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6414 if (simd_lane_access_p
6415 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6416 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6417 && integer_zerop (DR_OFFSET (first_dr))
6418 && integer_zerop (DR_INIT (first_dr))
6419 && alias_sets_conflict_p (get_alias_set (aggr_type),
6420 get_alias_set (TREE_TYPE (ref_type))))
6421 {
6422 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6423 dataref_offset = build_int_cst (ref_type, 0);
6424 inv_p = false;
6425 }
6426 else
6427 dataref_ptr
6428 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6429 simd_lane_access_p ? loop : NULL,
6430 offset, &dummy, gsi, &ptr_incr,
6431 simd_lane_access_p, &inv_p);
6432 gcc_assert (bb_vinfo || !inv_p);
6433 }
6434 else
6435 {
6436 /* For interleaved stores we created vectorized defs for all the
6437 defs stored in OPRNDS in the previous iteration (previous copy).
6438 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6439 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6440 next copy.
6441 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6442 OPRNDS are of size 1. */
6443 for (i = 0; i < group_size; i++)
6444 {
6445 op = oprnds[i];
6446 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6447 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6448 dr_chain[i] = vec_oprnd;
6449 oprnds[i] = vec_oprnd;
6450 }
6451 if (dataref_offset)
6452 dataref_offset
6453 = int_const_binop (PLUS_EXPR, dataref_offset,
6454 TYPE_SIZE_UNIT (aggr_type));
6455 else
6456 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6457 TYPE_SIZE_UNIT (aggr_type));
6458 }
6459
6460 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6461 {
6462 tree vec_array;
6463
6464 /* Combine all the vectors into an array. */
6465 vec_array = create_vector_array (vectype, vec_num);
6466 for (i = 0; i < vec_num; i++)
6467 {
6468 vec_oprnd = dr_chain[i];
6469 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6470 }
6471
6472 /* Emit:
6473 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6474 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6475 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6476 vec_array);
6477 gimple_call_set_lhs (call, data_ref);
6478 gimple_call_set_nothrow (call, true);
6479 new_stmt = call;
6480 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6481 }
6482 else
6483 {
6484 new_stmt = NULL;
6485 if (grouped_store)
6486 {
6487 if (j == 0)
6488 result_chain.create (group_size);
6489 /* Permute. */
6490 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6491 &result_chain);
6492 }
6493
6494 next_stmt = first_stmt;
6495 for (i = 0; i < vec_num; i++)
6496 {
6497 unsigned align, misalign;
6498
6499 if (i > 0)
6500 /* Bump the vector pointer. */
6501 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6502 stmt, NULL_TREE);
6503
6504 if (slp)
6505 vec_oprnd = vec_oprnds[i];
6506 else if (grouped_store)
6507 /* For grouped stores vectorized defs are interleaved in
6508 vect_permute_store_chain(). */
6509 vec_oprnd = result_chain[i];
6510
6511 data_ref = fold_build2 (MEM_REF, vectype,
6512 dataref_ptr,
6513 dataref_offset
6514 ? dataref_offset
6515 : build_int_cst (ref_type, 0));
6516 align = DR_TARGET_ALIGNMENT (first_dr);
6517 if (aligned_access_p (first_dr))
6518 misalign = 0;
6519 else if (DR_MISALIGNMENT (first_dr) == -1)
6520 {
6521 align = dr_alignment (vect_dr_behavior (first_dr));
6522 misalign = 0;
6523 TREE_TYPE (data_ref)
6524 = build_aligned_type (TREE_TYPE (data_ref),
6525 align * BITS_PER_UNIT);
6526 }
6527 else
6528 {
6529 TREE_TYPE (data_ref)
6530 = build_aligned_type (TREE_TYPE (data_ref),
6531 TYPE_ALIGN (elem_type));
6532 misalign = DR_MISALIGNMENT (first_dr);
6533 }
6534 if (dataref_offset == NULL_TREE
6535 && TREE_CODE (dataref_ptr) == SSA_NAME)
6536 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6537 misalign);
6538
6539 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6540 {
6541 tree perm_mask = perm_mask_for_reverse (vectype);
6542 tree perm_dest
6543 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6544 vectype);
6545 tree new_temp = make_ssa_name (perm_dest);
6546
6547 /* Generate the permute statement. */
6548 gimple *perm_stmt
6549 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6550 vec_oprnd, perm_mask);
6551 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6552
6553 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6554 vec_oprnd = new_temp;
6555 }
6556
6557 /* Arguments are ready. Create the new vector stmt. */
6558 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6559 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6560
6561 if (slp)
6562 continue;
6563
6564 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6565 if (!next_stmt)
6566 break;
6567 }
6568 }
6569 if (!slp)
6570 {
6571 if (j == 0)
6572 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6573 else
6574 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6575 prev_stmt_info = vinfo_for_stmt (new_stmt);
6576 }
6577 }
6578
6579 oprnds.release ();
6580 result_chain.release ();
6581 vec_oprnds.release ();
6582
6583 return true;
6584 }
6585
6586 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6587 VECTOR_CST mask. No checks are made that the target platform supports the
6588 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6589 vect_gen_perm_mask_checked. */
6590
6591 tree
6592 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6593 {
6594 tree mask_type;
6595
6596 poly_uint64 nunits = sel.length ();
6597 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
6598
6599 mask_type = build_vector_type (ssizetype, nunits);
6600 return vec_perm_indices_to_tree (mask_type, sel);
6601 }
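/* For instance (illustrative), reversing a V4SI vector would use
   SEL = {3, 2, 1, 0}; vect_gen_perm_mask_any then returns the matching
   VECTOR_CST of ssizetype elements, suitable as the third operand of a
   VEC_PERM_EXPR.  */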
6602
6603 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6604 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6605
6606 tree
6607 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6608 {
6609 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6610 return vect_gen_perm_mask_any (vectype, sel);
6611 }
6612
6613 /* Given vector variables X and Y that were generated for the scalar
6614 STMT, generate instructions to permute the vector elements of X and Y
6615 using permutation mask MASK_VEC, insert them at *GSI and return the
6616 permuted vector variable. */
6617
6618 static tree
6619 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6620 gimple_stmt_iterator *gsi)
6621 {
6622 tree vectype = TREE_TYPE (x);
6623 tree perm_dest, data_ref;
6624 gimple *perm_stmt;
6625
6626 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6627 data_ref = make_ssa_name (perm_dest);
6628
6629 /* Generate the permute statement. */
6630 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6631 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6632
6633 return data_ref;
6634 }
6635
6636 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6637 inserting them on the loop's preheader edge. Returns true if we
6638 were successful in doing so (and thus STMT can then be moved),
6639 otherwise returns false. */
6640
6641 static bool
6642 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6643 {
6644 ssa_op_iter i;
6645 tree op;
6646 bool any = false;
6647
6648 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6649 {
6650 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6651 if (!gimple_nop_p (def_stmt)
6652 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6653 {
6654 /* Make sure we don't need to recurse. While we could do
6655 so in simple cases, when there are more complex use webs
6656 we don't have an easy way to preserve stmt order to fulfil
6657 dependencies within them. */
6658 tree op2;
6659 ssa_op_iter i2;
6660 if (gimple_code (def_stmt) == GIMPLE_PHI)
6661 return false;
6662 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6663 {
6664 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6665 if (!gimple_nop_p (def_stmt2)
6666 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6667 return false;
6668 }
6669 any = true;
6670 }
6671 }
6672
6673 if (!any)
6674 return true;
6675
6676 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6677 {
6678 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6679 if (!gimple_nop_p (def_stmt)
6680 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6681 {
6682 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6683 gsi_remove (&gsi, false);
6684 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6685 }
6686 }
6687
6688 return true;
6689 }
6690
6691 /* vectorizable_load.
6692
6693 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6694 can be vectorized.
6695 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6696 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6697 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6698
6699 static bool
6700 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6701 slp_tree slp_node, slp_instance slp_node_instance)
6702 {
6703 tree scalar_dest;
6704 tree vec_dest = NULL;
6705 tree data_ref = NULL;
6706 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6707 stmt_vec_info prev_stmt_info;
6708 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6709 struct loop *loop = NULL;
6710 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6711 bool nested_in_vect_loop = false;
6712 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6713 tree elem_type;
6714 tree new_temp;
6715 machine_mode mode;
6716 gimple *new_stmt = NULL;
6717 tree dummy;
6718 enum dr_alignment_support alignment_support_scheme;
6719 tree dataref_ptr = NULL_TREE;
6720 tree dataref_offset = NULL_TREE;
6721 gimple *ptr_incr = NULL;
6722 int ncopies;
6723 int i, j;
6724 unsigned int group_size;
6725 poly_uint64 group_gap_adj;
6726 tree msq = NULL_TREE, lsq;
6727 tree offset = NULL_TREE;
6728 tree byte_offset = NULL_TREE;
6729 tree realignment_token = NULL_TREE;
6730 gphi *phi = NULL;
6731 vec<tree> dr_chain = vNULL;
6732 bool grouped_load = false;
6733 gimple *first_stmt;
6734 gimple *first_stmt_for_drptr = NULL;
6735 bool inv_p;
6736 bool compute_in_loop = false;
6737 struct loop *at_loop;
6738 int vec_num;
6739 bool slp = (slp_node != NULL);
6740 bool slp_perm = false;
6741 enum tree_code code;
6742 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6743 poly_uint64 vf;
6744 tree aggr_type;
6745 gather_scatter_info gs_info;
6746 vec_info *vinfo = stmt_info->vinfo;
6747 tree ref_type;
6748
6749 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6750 return false;
6751
6752 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6753 && ! vec_stmt)
6754 return false;
6755
6756 /* Is vectorizable load? */
6757 if (!is_gimple_assign (stmt))
6758 return false;
6759
6760 scalar_dest = gimple_assign_lhs (stmt);
6761 if (TREE_CODE (scalar_dest) != SSA_NAME)
6762 return false;
6763
6764 code = gimple_assign_rhs_code (stmt);
6765 if (code != ARRAY_REF
6766 && code != BIT_FIELD_REF
6767 && code != INDIRECT_REF
6768 && code != COMPONENT_REF
6769 && code != IMAGPART_EXPR
6770 && code != REALPART_EXPR
6771 && code != MEM_REF
6772 && TREE_CODE_CLASS (code) != tcc_declaration)
6773 return false;
6774
6775 if (!STMT_VINFO_DATA_REF (stmt_info))
6776 return false;
6777
6778 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6779 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6780
6781 if (loop_vinfo)
6782 {
6783 loop = LOOP_VINFO_LOOP (loop_vinfo);
6784 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6785 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6786 }
6787 else
6788 vf = 1;
6789
6790 /* Multiple types in SLP are handled by creating the appropriate number of
6791 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6792 case of SLP. */
6793 if (slp)
6794 ncopies = 1;
6795 else
6796 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6797
6798 gcc_assert (ncopies >= 1);
6799
6800 /* FORNOW. This restriction should be relaxed. */
6801 if (nested_in_vect_loop && ncopies > 1)
6802 {
6803 if (dump_enabled_p ())
6804 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6805 "multiple types in nested loop.\n");
6806 return false;
6807 }
6808
6809 /* Invalidate assumptions made by dependence analysis when vectorization
6810 on the unrolled body effectively re-orders stmts. */
6811 if (ncopies > 1
6812 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6813 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6814 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6815 {
6816 if (dump_enabled_p ())
6817 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6818 "cannot perform implicit CSE when unrolling "
6819 "with negative dependence distance\n");
6820 return false;
6821 }
6822
6823 elem_type = TREE_TYPE (vectype);
6824 mode = TYPE_MODE (vectype);
6825
6826 /* FORNOW. In some cases we can vectorize even if the data type is not
6827 supported (e.g. data copies). */
6828 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6829 {
6830 if (dump_enabled_p ())
6831 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6832 "Aligned load, but unsupported type.\n");
6833 return false;
6834 }
6835
6836 /* Check if the load is a part of an interleaving chain. */
6837 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6838 {
6839 grouped_load = true;
6840 /* FORNOW */
6841 gcc_assert (!nested_in_vect_loop);
6842 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6843
6844 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6845 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6846
6847 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6848 slp_perm = true;
6849
6850 /* Invalidate assumptions made by dependence analysis when vectorization
6851 on the unrolled body effectively re-orders stmts. */
6852 if (!PURE_SLP_STMT (stmt_info)
6853 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6854 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6855 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6856 {
6857 if (dump_enabled_p ())
6858 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6859 "cannot perform implicit CSE when performing "
6860 "group loads with negative dependence distance\n");
6861 return false;
6862 }
6863
6864 /* Similarly, when the stmt is a load that is both part of an SLP
6865 instance and a loop-vectorized stmt via the same-dr mechanism,
6866 we have to give up. */
6867 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6868 && (STMT_SLP_TYPE (stmt_info)
6869 != STMT_SLP_TYPE (vinfo_for_stmt
6870 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6871 {
6872 if (dump_enabled_p ())
6873 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6874 "conflicting SLP types for CSEd load\n");
6875 return false;
6876 }
6877 }
6878
6879 vect_memory_access_type memory_access_type;
6880 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6881 &memory_access_type, &gs_info))
6882 return false;
6883
6884 if (!vec_stmt) /* transformation not required. */
6885 {
6886 if (!slp)
6887 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6888 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6889 /* The SLP costs are calculated during SLP analysis. */
6890 if (!PURE_SLP_STMT (stmt_info))
6891 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6892 NULL, NULL, NULL);
6893 return true;
6894 }
6895
6896 if (!slp)
6897 gcc_assert (memory_access_type
6898 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6899
6900 if (dump_enabled_p ())
6901 dump_printf_loc (MSG_NOTE, vect_location,
6902 "transform load. ncopies = %d\n", ncopies);
6903
6904 /* Transform. */
6905
6906 ensure_base_align (dr);
6907
6908 if (memory_access_type == VMAT_GATHER_SCATTER)
6909 {
6910 tree vec_oprnd0 = NULL_TREE, op;
6911 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6912 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6913 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6914 edge pe = loop_preheader_edge (loop);
6915 gimple_seq seq;
6916 basic_block new_bb;
6917 enum { NARROW, NONE, WIDEN } modifier;
6918 poly_uint64 gather_off_nunits
6919 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6920
6921 if (known_eq (nunits, gather_off_nunits))
6922 modifier = NONE;
6923 else if (known_eq (nunits * 2, gather_off_nunits))
6924 {
6925 modifier = WIDEN;
6926
6927 /* Currently widening gathers are only supported for
6928 fixed-length vectors. */
6929 int count = gather_off_nunits.to_constant ();
6930 vec_perm_builder sel (count, count, 1);
6931 for (i = 0; i < count; ++i)
6932 sel.quick_push (i | (count / 2));
6933
6934 vec_perm_indices indices (sel, 1, count);
6935 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6936 indices);
6937 }
6938 else if (known_eq (nunits, gather_off_nunits * 2))
6939 {
6940 modifier = NARROW;
6941
6942 /* Currently narrowing gathers are only supported for
6943 fixed-length vectors. */
6944 int count = nunits.to_constant ();
6945 vec_perm_builder sel (count, count, 1);
6946 for (i = 0; i < count; ++i)
6947 sel.quick_push (i < count / 2 ? i : i + count / 2);
6948
6949 vec_perm_indices indices (sel, 2, count);
6950 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6951 ncopies *= 2;
6952 }
6953 else
6954 gcc_unreachable ();
6955
6956 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6957 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6958 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6959 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6960 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6961 scaletype = TREE_VALUE (arglist);
6962 gcc_checking_assert (types_compatible_p (srctype, rettype));
6963
6964 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6965
6966 ptr = fold_convert (ptrtype, gs_info.base);
6967 if (!is_gimple_min_invariant (ptr))
6968 {
6969 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6970 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6971 gcc_assert (!new_bb);
6972 }
6973
6974 /* Currently we support only unconditional gather loads,
6975 so mask should be all ones. */
6976 if (TREE_CODE (masktype) == INTEGER_TYPE)
6977 mask = build_int_cst (masktype, -1);
6978 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6979 {
6980 mask = build_int_cst (TREE_TYPE (masktype), -1);
6981 mask = build_vector_from_val (masktype, mask);
6982 mask = vect_init_vector (stmt, mask, masktype, NULL);
6983 }
6984 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6985 {
6986 REAL_VALUE_TYPE r;
6987 long tmp[6];
6988 for (j = 0; j < 6; ++j)
6989 tmp[j] = -1;
6990 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6991 mask = build_real (TREE_TYPE (masktype), r);
6992 mask = build_vector_from_val (masktype, mask);
6993 mask = vect_init_vector (stmt, mask, masktype, NULL);
6994 }
6995 else
6996 gcc_unreachable ();
6997
6998 scale = build_int_cst (scaletype, gs_info.scale);
6999
7000 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
7001 merge = build_int_cst (TREE_TYPE (rettype), 0);
7002 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
7003 {
7004 REAL_VALUE_TYPE r;
7005 long tmp[6];
7006 for (j = 0; j < 6; ++j)
7007 tmp[j] = 0;
7008 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
7009 merge = build_real (TREE_TYPE (rettype), r);
7010 }
7011 else
7012 gcc_unreachable ();
7013 merge = build_vector_from_val (rettype, merge);
7014 merge = vect_init_vector (stmt, merge, rettype, NULL);
7015
7016 prev_stmt_info = NULL;
7017 for (j = 0; j < ncopies; ++j)
7018 {
7019 if (modifier == WIDEN && (j & 1))
7020 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
7021 perm_mask, stmt, gsi);
7022 else if (j == 0)
7023 op = vec_oprnd0
7024 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
7025 else
7026 op = vec_oprnd0
7027 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
7028
7029 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
7030 {
7031 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
7032 TYPE_VECTOR_SUBPARTS (idxtype)));
7033 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
7034 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
7035 new_stmt
7036 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7037 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7038 op = var;
7039 }
7040
7041 new_stmt
7042 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
7043
7044 if (!useless_type_conversion_p (vectype, rettype))
7045 {
7046 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
7047 TYPE_VECTOR_SUBPARTS (rettype)));
7048 op = vect_get_new_ssa_name (rettype, vect_simple_var);
7049 gimple_call_set_lhs (new_stmt, op);
7050 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7051 var = make_ssa_name (vec_dest);
7052 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
7053 new_stmt
7054 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7055 }
7056 else
7057 {
7058 var = make_ssa_name (vec_dest, new_stmt);
7059 gimple_call_set_lhs (new_stmt, var);
7060 }
7061
7062 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7063
7064 if (modifier == NARROW)
7065 {
7066 if ((j & 1) == 0)
7067 {
7068 prev_res = var;
7069 continue;
7070 }
7071 var = permute_vec_elements (prev_res, var,
7072 perm_mask, stmt, gsi);
7073 new_stmt = SSA_NAME_DEF_STMT (var);
7074 }
7075
7076 if (prev_stmt_info == NULL)
7077 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7078 else
7079 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7080 prev_stmt_info = vinfo_for_stmt (new_stmt);
7081 }
7082 return true;
7083 }
7084
7085 if (memory_access_type == VMAT_ELEMENTWISE
7086 || memory_access_type == VMAT_STRIDED_SLP)
7087 {
7088 gimple_stmt_iterator incr_gsi;
7089 bool insert_after;
7090 gimple *incr;
7091 tree offvar;
7092 tree ivstep;
7093 tree running_off;
7094 vec<constructor_elt, va_gc> *v = NULL;
7095 gimple_seq stmts = NULL;
7096 tree stride_base, stride_step, alias_off;
7097 /* Checked by get_load_store_type. */
7098 unsigned int const_nunits = nunits.to_constant ();
7099
7100 gcc_assert (!nested_in_vect_loop);
7101
7102 if (slp && grouped_load)
7103 {
7104 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7105 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7106 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7107 ref_type = get_group_alias_ptr_type (first_stmt);
7108 }
7109 else
7110 {
7111 first_stmt = stmt;
7112 first_dr = dr;
7113 group_size = 1;
7114 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7115 }
7116
7117 stride_base
7118 = fold_build_pointer_plus
7119 (DR_BASE_ADDRESS (first_dr),
7120 size_binop (PLUS_EXPR,
7121 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7122 convert_to_ptrofftype (DR_INIT (first_dr))));
7123 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7124
7125 /* For a load with loop-invariant (but other than power-of-2)
7126 stride (i.e. not a grouped access) like so:
7127
7128 for (i = 0; i < n; i += stride)
7129 ... = array[i];
7130
7131 we generate a new induction variable and new accesses to
7132 form a new vector (or vectors, depending on ncopies):
7133
7134 for (j = 0; ; j += VF*stride)
7135 tmp1 = array[j];
7136 tmp2 = array[j + stride];
7137 ...
7138 vectemp = {tmp1, tmp2, ...}
7139 */
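      /* For instance (illustration only), with VF == 4 the induction
	 variable created below advances by 4 * DR_STEP bytes per vector
	 iteration, and each vector is assembled from four scalar loads at
	 running_off, running_off + DR_STEP, running_off + 2 * DR_STEP, ...  */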
7140
7141 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7142 build_int_cst (TREE_TYPE (stride_step), vf));
7143
7144 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7145
7146 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7147 loop, &incr_gsi, insert_after,
7148 &offvar, NULL);
7149 incr = gsi_stmt (incr_gsi);
7150 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7151
7152 stride_step = force_gimple_operand (unshare_expr (stride_step),
7153 &stmts, true, NULL_TREE);
7154 if (stmts)
7155 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7156
7157 prev_stmt_info = NULL;
7158 running_off = offvar;
7159 alias_off = build_int_cst (ref_type, 0);
7160 int nloads = const_nunits;
7161 int lnel = 1;
7162 tree ltype = TREE_TYPE (vectype);
7163 tree lvectype = vectype;
7164 auto_vec<tree> dr_chain;
7165 if (memory_access_type == VMAT_STRIDED_SLP)
7166 {
7167 if (group_size < const_nunits)
7168 {
7169 /* First check if vec_init optab supports construction from
7170 vector elts directly. */
7171 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7172 machine_mode vmode;
7173 if (mode_for_vector (elmode, group_size).exists (&vmode)
7174 && VECTOR_MODE_P (vmode)
7175 && (convert_optab_handler (vec_init_optab,
7176 TYPE_MODE (vectype), vmode)
7177 != CODE_FOR_nothing))
7178 {
7179 nloads = const_nunits / group_size;
7180 lnel = group_size;
7181 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7182 }
7183 else
7184 {
7185 /* Otherwise avoid emitting a constructor of vector elements
7186 by performing the loads using an integer type of the same
7187 size, constructing a vector of those and then
7188 re-interpreting it as the original vector type.
7189 This avoids a huge runtime penalty due to the general
7190 inability to perform store forwarding from smaller stores
7191 to a larger load. */
7192 unsigned lsize
7193 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7194 elmode = int_mode_for_size (lsize, 0).require ();
7195 unsigned int lnunits = const_nunits / group_size;
7196 /* If we can't construct such a vector fall back to
7197 element loads of the original vector type. */
7198 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7199 && VECTOR_MODE_P (vmode)
7200 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7201 != CODE_FOR_nothing))
7202 {
7203 nloads = lnunits;
7204 lnel = group_size;
7205 ltype = build_nonstandard_integer_type (lsize, 1);
7206 lvectype = build_vector_type (ltype, nloads);
7207 }
7208 }
7209 }
7210 else
7211 {
7212 nloads = 1;
7213 lnel = const_nunits;
7214 ltype = vectype;
7215 }
7216 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7217 }
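	  /* Worked example (illustrative): for a V4SI vectype and a group of
	     two elements, the first branch gives nloads = 2, lnel = 2 and
	     ltype = a 2-element vector, so each copy is built from two V2SI
	     loads; the fallback instead loads the two 32-bit elements as one
	     64-bit integer, builds a 2-element integer vector of those and
	     VIEW_CONVERTs it back to V4SI.  */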
7218 if (slp)
7219 {
7220 /* For SLP permutation support we need to load the whole group,
7221 not only the number of vector stmts the permutation result
7222 fits in. */
7223 if (slp_perm)
7224 {
7225 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7226 variable VF. */
7227 unsigned int const_vf = vf.to_constant ();
7228 ncopies = CEIL (group_size * const_vf, const_nunits);
7229 dr_chain.create (ncopies);
7230 }
7231 else
7232 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7233 }
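      /* E.g. (illustration only): group_size == 3 with a constant VF of 4
	 and 4-element vectors gives ncopies = CEIL (3 * 4, 4) == 3 for a
	 permuted SLP load, so the whole group is covered.  */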
7234 unsigned int group_el = 0;
7235 unsigned HOST_WIDE_INT
7236 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7237 for (j = 0; j < ncopies; j++)
7238 {
7239 if (nloads > 1)
7240 vec_alloc (v, nloads);
7241 for (i = 0; i < nloads; i++)
7242 {
7243 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7244 group_el * elsz);
7245 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7246 build2 (MEM_REF, ltype,
7247 running_off, this_off));
7248 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7249 if (nloads > 1)
7250 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7251 gimple_assign_lhs (new_stmt));
7252
7253 group_el += lnel;
7254 if (! slp
7255 || group_el == group_size)
7256 {
7257 tree newoff = copy_ssa_name (running_off);
7258 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7259 running_off, stride_step);
7260 vect_finish_stmt_generation (stmt, incr, gsi);
7261
7262 running_off = newoff;
7263 group_el = 0;
7264 }
7265 }
7266 if (nloads > 1)
7267 {
7268 tree vec_inv = build_constructor (lvectype, v);
7269 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7270 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7271 if (lvectype != vectype)
7272 {
7273 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7274 VIEW_CONVERT_EXPR,
7275 build1 (VIEW_CONVERT_EXPR,
7276 vectype, new_temp));
7277 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7278 }
7279 }
7280
7281 if (slp)
7282 {
7283 if (slp_perm)
7284 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7285 else
7286 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7287 }
7288 else
7289 {
7290 if (j == 0)
7291 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7292 else
7293 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7294 prev_stmt_info = vinfo_for_stmt (new_stmt);
7295 }
7296 }
7297 if (slp_perm)
7298 {
7299 unsigned n_perms;
7300 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7301 slp_node_instance, false, &n_perms);
7302 }
7303 return true;
7304 }
7305
7306 if (grouped_load)
7307 {
7308 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7309 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7310 /* For SLP vectorization we directly vectorize a subchain
7311 without permutation. */
7312 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7313 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7314 /* For BB vectorization always use the first stmt to base
7315 the data ref pointer on. */
7316 if (bb_vinfo)
7317 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7318
7319 /* Check if the chain of loads is already vectorized. */
7320 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7321 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7322 ??? But we can only do so if there is exactly one
7323 as we have no way to get at the rest. Leave the CSE
7324 opportunity alone.
7325 ??? With the group load eventually participating
7326 in multiple different permutations (having multiple
7327 slp nodes which refer to the same group) the CSE
7328 is even wrong code. See PR56270. */
7329 && !slp)
7330 {
7331 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7332 return true;
7333 }
7334 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7335 group_gap_adj = 0;
7336
7337 /* VEC_NUM is the number of vect stmts to be created for this group. */
7338 if (slp)
7339 {
7340 grouped_load = false;
7341 /* For SLP permutation support we need to load the whole group,
7342 not only the number of vector stmts the permutation result
7343 fits in. */
7344 if (slp_perm)
7345 {
7346 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7347 variable VF. */
7348 unsigned int const_vf = vf.to_constant ();
7349 unsigned int const_nunits = nunits.to_constant ();
7350 vec_num = CEIL (group_size * const_vf, const_nunits);
7351 group_gap_adj = vf * group_size - nunits * vec_num;
7352 }
7353 else
7354 {
7355 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7356 group_gap_adj
7357 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7358 }
7359 }
7360 else
7361 vec_num = group_size;
7362
7363 ref_type = get_group_alias_ptr_type (first_stmt);
7364 }
7365 else
7366 {
7367 first_stmt = stmt;
7368 first_dr = dr;
7369 group_size = vec_num = 1;
7370 group_gap_adj = 0;
7371 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7372 }
7373
7374 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7375 gcc_assert (alignment_support_scheme);
7376 /* Targets with load-lane instructions must not require explicit
7377 realignment. */
7378 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7379 || alignment_support_scheme == dr_aligned
7380 || alignment_support_scheme == dr_unaligned_supported);
7381
7382 /* In case the vectorization factor (VF) is bigger than the number
7383 of elements that we can fit in a vectype (nunits), we have to generate
7384 more than one vector stmt - i.e - we need to "unroll" the
7385 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7386 from one copy of the vector stmt to the next, in the field
7387 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7388 stages to find the correct vector defs to be used when vectorizing
7389 stmts that use the defs of the current stmt. The example below
7390 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7391 need to create 4 vectorized stmts):
7392
7393 before vectorization:
7394 RELATED_STMT VEC_STMT
7395 S1: x = memref - -
7396 S2: z = x + 1 - -
7397
7398 step 1: vectorize stmt S1:
7399 We first create the vector stmt VS1_0, and, as usual, record a
7400 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7401 Next, we create the vector stmt VS1_1, and record a pointer to
7402 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7403 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7404 stmts and pointers:
7405 RELATED_STMT VEC_STMT
7406 VS1_0: vx0 = memref0 VS1_1 -
7407 VS1_1: vx1 = memref1 VS1_2 -
7408 VS1_2: vx2 = memref2 VS1_3 -
7409 VS1_3: vx3 = memref3 - -
7410 S1: x = load - VS1_0
7411 S2: z = x + 1 - -
7412
7413 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7414 information recorded in the RELATED_STMT field is used to vectorize
7415 stmt S2.  */
7416
7417 /* In case of interleaving (non-unit grouped access):
7418
7419 S1: x2 = &base + 2
7420 S2: x0 = &base
7421 S3: x1 = &base + 1
7422 S4: x3 = &base + 3
7423
7424 Vectorized loads are created in the order of memory accesses
7425 starting from the access of the first stmt of the chain:
7426
7427 VS1: vx0 = &base
7428 VS2: vx1 = &base + vec_size*1
7429 VS3: vx3 = &base + vec_size*2
7430 VS4: vx4 = &base + vec_size*3
7431
7432 Then permutation statements are generated:
7433
7434 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7435 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7436 ...
7437
7438 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7439 (the order of the data-refs in the output of vect_permute_load_chain
7440 corresponds to the order of scalar stmts in the interleaving chain - see
7441 the documentation of vect_permute_load_chain()).
7442 The generation of permutation stmts and recording them in
7443 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7444
7445 In case of both multiple types and interleaving, the vector loads and
7446 permutation stmts above are created for every copy. The result vector
7447 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7448 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
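   /* For instance, with 4-element vectors and a group of two interleaved
      loads the masks above become { 0, 2, 4, 6 } and { 1, 3, 5, 7 },
      extracting the even and the odd elements respectively (illustration
      only; the actual masks are built by vect_permute_load_chain).  */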
7449
7450 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7451 on a target that supports unaligned accesses (dr_unaligned_supported)
7452 we generate the following code:
7453 p = initial_addr;
7454 indx = 0;
7455 loop {
7456 p = p + indx * vectype_size;
7457 vec_dest = *(p);
7458 indx = indx + 1;
7459 }
7460
7461 Otherwise, the data reference is potentially unaligned on a target that
7462 does not support unaligned accesses (dr_explicit_realign_optimized) -
7463 then generate the following code, in which the data in each iteration is
7464 obtained by two vector loads, one from the previous iteration, and one
7465 from the current iteration:
7466 p1 = initial_addr;
7467 msq_init = *(floor(p1))
7468 p2 = initial_addr + VS - 1;
7469 realignment_token = call target_builtin;
7470 indx = 0;
7471 loop {
7472 p2 = p2 + indx * vectype_size
7473 lsq = *(floor(p2))
7474 vec_dest = realign_load (msq, lsq, realignment_token)
7475 indx = indx + 1;
7476 msq = lsq;
7477 } */
7478
7479 /* If the misalignment remains the same throughout the execution of the
7480 loop, we can create the init_addr and permutation mask at the loop
7481 preheader. Otherwise, it needs to be created inside the loop.
7482 This can only occur when vectorizing memory accesses in the inner-loop
7483 nested within an outer-loop that is being vectorized. */
7484
7485 if (nested_in_vect_loop
7486 && !multiple_p (DR_STEP_ALIGNMENT (dr),
7487 GET_MODE_SIZE (TYPE_MODE (vectype))))
7488 {
7489 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7490 compute_in_loop = true;
7491 }
7492
7493 if ((alignment_support_scheme == dr_explicit_realign_optimized
7494 || alignment_support_scheme == dr_explicit_realign)
7495 && !compute_in_loop)
7496 {
7497 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7498 alignment_support_scheme, NULL_TREE,
7499 &at_loop);
7500 if (alignment_support_scheme == dr_explicit_realign_optimized)
7501 {
7502 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7503 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7504 size_one_node);
7505 }
7506 }
7507 else
7508 at_loop = loop;
7509
7510 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7511 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7512
7513 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7514 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7515 else
7516 aggr_type = vectype;
7517
7518 prev_stmt_info = NULL;
7519 poly_uint64 group_elt = 0;
7520 for (j = 0; j < ncopies; j++)
7521 {
7522 /* 1. Create the vector or array pointer update chain. */
7523 if (j == 0)
7524 {
7525 bool simd_lane_access_p
7526 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7527 if (simd_lane_access_p
7528 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7529 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7530 && integer_zerop (DR_OFFSET (first_dr))
7531 && integer_zerop (DR_INIT (first_dr))
7532 && alias_sets_conflict_p (get_alias_set (aggr_type),
7533 get_alias_set (TREE_TYPE (ref_type)))
7534 && (alignment_support_scheme == dr_aligned
7535 || alignment_support_scheme == dr_unaligned_supported))
7536 {
7537 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7538 dataref_offset = build_int_cst (ref_type, 0);
7539 inv_p = false;
7540 }
7541 else if (first_stmt_for_drptr
7542 && first_stmt != first_stmt_for_drptr)
7543 {
7544 dataref_ptr
7545 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7546 at_loop, offset, &dummy, gsi,
7547 &ptr_incr, simd_lane_access_p,
7548 &inv_p, byte_offset);
7549 /* Adjust the pointer by the difference to first_stmt. */
7550 data_reference_p ptrdr
7551 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7552 tree diff = fold_convert (sizetype,
7553 size_binop (MINUS_EXPR,
7554 DR_INIT (first_dr),
7555 DR_INIT (ptrdr)));
7556 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7557 stmt, diff);
7558 }
7559 else
7560 dataref_ptr
7561 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7562 offset, &dummy, gsi, &ptr_incr,
7563 simd_lane_access_p, &inv_p,
7564 byte_offset);
7565 }
7566 else if (dataref_offset)
7567 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7568 TYPE_SIZE_UNIT (aggr_type));
7569 else
7570 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7571 TYPE_SIZE_UNIT (aggr_type));
7572
7573 if (grouped_load || slp_perm)
7574 dr_chain.create (vec_num);
7575
7576 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7577 {
7578 tree vec_array;
7579
7580 vec_array = create_vector_array (vectype, vec_num);
7581
7582 /* Emit:
7583 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7584 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7585 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7586 data_ref);
7587 gimple_call_set_lhs (call, vec_array);
7588 gimple_call_set_nothrow (call, true);
7589 new_stmt = call;
7590 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7591
7592 /* Extract each vector into an SSA_NAME. */
7593 for (i = 0; i < vec_num; i++)
7594 {
7595 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7596 vec_array, i);
7597 dr_chain.quick_push (new_temp);
7598 }
7599
7600 /* Record the mapping between SSA_NAMEs and statements. */
7601 vect_record_grouped_load_vectors (stmt, dr_chain);
7602 }
7603 else
7604 {
7605 for (i = 0; i < vec_num; i++)
7606 {
7607 if (i > 0)
7608 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7609 stmt, NULL_TREE);
7610
7611 /* 2. Create the vector-load in the loop. */
7612 switch (alignment_support_scheme)
7613 {
7614 case dr_aligned:
7615 case dr_unaligned_supported:
7616 {
7617 unsigned int align, misalign;
7618
7619 data_ref
7620 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7621 dataref_offset
7622 ? dataref_offset
7623 : build_int_cst (ref_type, 0));
7624 align = DR_TARGET_ALIGNMENT (dr);
7625 if (alignment_support_scheme == dr_aligned)
7626 {
7627 gcc_assert (aligned_access_p (first_dr));
7628 misalign = 0;
7629 }
7630 else if (DR_MISALIGNMENT (first_dr) == -1)
7631 {
7632 align = dr_alignment (vect_dr_behavior (first_dr));
7633 misalign = 0;
7634 TREE_TYPE (data_ref)
7635 = build_aligned_type (TREE_TYPE (data_ref),
7636 align * BITS_PER_UNIT);
7637 }
7638 else
7639 {
7640 TREE_TYPE (data_ref)
7641 = build_aligned_type (TREE_TYPE (data_ref),
7642 TYPE_ALIGN (elem_type));
7643 misalign = DR_MISALIGNMENT (first_dr);
7644 }
7645 if (dataref_offset == NULL_TREE
7646 && TREE_CODE (dataref_ptr) == SSA_NAME)
7647 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7648 align, misalign);
7649 break;
7650 }
7651 case dr_explicit_realign:
7652 {
7653 tree ptr, bump;
7654
7655 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7656
7657 if (compute_in_loop)
7658 msq = vect_setup_realignment (first_stmt, gsi,
7659 &realignment_token,
7660 dr_explicit_realign,
7661 dataref_ptr, NULL);
7662
7663 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7664 ptr = copy_ssa_name (dataref_ptr);
7665 else
7666 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7667 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7668 new_stmt = gimple_build_assign
7669 (ptr, BIT_AND_EXPR, dataref_ptr,
7670 build_int_cst
7671 (TREE_TYPE (dataref_ptr),
7672 -(HOST_WIDE_INT) align));
7673 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7674 data_ref
7675 = build2 (MEM_REF, vectype, ptr,
7676 build_int_cst (ref_type, 0));
7677 vec_dest = vect_create_destination_var (scalar_dest,
7678 vectype);
7679 new_stmt = gimple_build_assign (vec_dest, data_ref);
7680 new_temp = make_ssa_name (vec_dest, new_stmt);
7681 gimple_assign_set_lhs (new_stmt, new_temp);
7682 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7683 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7684 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7685 msq = new_temp;
7686
7687 bump = size_binop (MULT_EXPR, vs,
7688 TYPE_SIZE_UNIT (elem_type));
7689 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7690 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7691 new_stmt = gimple_build_assign
7692 (NULL_TREE, BIT_AND_EXPR, ptr,
7693 build_int_cst
7694 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7695 ptr = copy_ssa_name (ptr, new_stmt);
7696 gimple_assign_set_lhs (new_stmt, ptr);
7697 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7698 data_ref
7699 = build2 (MEM_REF, vectype, ptr,
7700 build_int_cst (ref_type, 0));
7701 break;
7702 }
7703 case dr_explicit_realign_optimized:
7704 {
7705 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7706 new_temp = copy_ssa_name (dataref_ptr);
7707 else
7708 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7709 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7710 new_stmt = gimple_build_assign
7711 (new_temp, BIT_AND_EXPR, dataref_ptr,
7712 build_int_cst (TREE_TYPE (dataref_ptr),
7713 -(HOST_WIDE_INT) align));
7714 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7715 data_ref
7716 = build2 (MEM_REF, vectype, new_temp,
7717 build_int_cst (ref_type, 0));
7718 break;
7719 }
7720 default:
7721 gcc_unreachable ();
7722 }
7723 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7724 new_stmt = gimple_build_assign (vec_dest, data_ref);
7725 new_temp = make_ssa_name (vec_dest, new_stmt);
7726 gimple_assign_set_lhs (new_stmt, new_temp);
7727 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7728
7729 /* 3. Handle explicit realignment if necessary/supported.
7730 Create in loop:
7731 vec_dest = realign_load (msq, lsq, realignment_token) */
7732 if (alignment_support_scheme == dr_explicit_realign_optimized
7733 || alignment_support_scheme == dr_explicit_realign)
7734 {
7735 lsq = gimple_assign_lhs (new_stmt);
7736 if (!realignment_token)
7737 realignment_token = dataref_ptr;
7738 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7739 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7740 msq, lsq, realignment_token);
7741 new_temp = make_ssa_name (vec_dest, new_stmt);
7742 gimple_assign_set_lhs (new_stmt, new_temp);
7743 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7744
7745 if (alignment_support_scheme == dr_explicit_realign_optimized)
7746 {
7747 gcc_assert (phi);
7748 if (i == vec_num - 1 && j == ncopies - 1)
7749 add_phi_arg (phi, lsq,
7750 loop_latch_edge (containing_loop),
7751 UNKNOWN_LOCATION);
7752 msq = lsq;
7753 }
7754 }
7755
7756 /* 4. Handle invariant-load. */
7757 if (inv_p && !bb_vinfo)
7758 {
7759 gcc_assert (!grouped_load);
7760 /* If we have versioned for aliasing or the loop doesn't
7761 have any data dependencies that would preclude this,
7762 then we are sure this is a loop invariant load and
7763 thus we can insert it on the preheader edge. */
7764 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7765 && !nested_in_vect_loop
7766 && hoist_defs_of_uses (stmt, loop))
7767 {
7768 if (dump_enabled_p ())
7769 {
7770 dump_printf_loc (MSG_NOTE, vect_location,
7771 "hoisting out of the vectorized "
7772 "loop: ");
7773 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7774 }
7775 tree tem = copy_ssa_name (scalar_dest);
7776 gsi_insert_on_edge_immediate
7777 (loop_preheader_edge (loop),
7778 gimple_build_assign (tem,
7779 unshare_expr
7780 (gimple_assign_rhs1 (stmt))));
7781 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7782 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7783 set_vinfo_for_stmt (new_stmt,
7784 new_stmt_vec_info (new_stmt, vinfo));
7785 }
7786 else
7787 {
7788 gimple_stmt_iterator gsi2 = *gsi;
7789 gsi_next (&gsi2);
7790 new_temp = vect_init_vector (stmt, scalar_dest,
7791 vectype, &gsi2);
7792 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7793 }
7794 }
7795
7796 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7797 {
7798 tree perm_mask = perm_mask_for_reverse (vectype);
7799 new_temp = permute_vec_elements (new_temp, new_temp,
7800 perm_mask, stmt, gsi);
7801 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7802 }
7803
7804 /* Collect vector loads and later create their permutation in
7805 vect_transform_grouped_load (). */
7806 if (grouped_load || slp_perm)
7807 dr_chain.quick_push (new_temp);
7808
7809 /* Store vector loads in the corresponding SLP_NODE. */
7810 if (slp && !slp_perm)
7811 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7812
7813 /* With SLP permutation we load the gaps as well; without it
7814 we need to skip the gaps once we have fully loaded
7815 all elements.  group_gap_adj is GROUP_SIZE here.  */
7816 group_elt += nunits;
7817 if (maybe_ne (group_gap_adj, 0U)
7818 && !slp_perm
7819 && known_eq (group_elt, group_size - group_gap_adj))
7820 {
7821 poly_wide_int bump_val
7822 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7823 * group_gap_adj);
7824 tree bump = wide_int_to_tree (sizetype, bump_val);
7825 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7826 stmt, bump);
7827 group_elt = 0;
7828 }
7829 }
7830 /* Bump the vector pointer to account for a gap or for excess
7831 elements loaded for a permuted SLP load. */
7832 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
7833 {
7834 poly_wide_int bump_val
7835 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7836 * group_gap_adj);
7837 tree bump = wide_int_to_tree (sizetype, bump_val);
7838 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7839 stmt, bump);
7840 }
7841 }
7842
7843 if (slp && !slp_perm)
7844 continue;
7845
7846 if (slp_perm)
7847 {
7848 unsigned n_perms;
7849 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7850 slp_node_instance, false,
7851 &n_perms))
7852 {
7853 dr_chain.release ();
7854 return false;
7855 }
7856 }
7857 else
7858 {
7859 if (grouped_load)
7860 {
7861 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7862 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7863 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7864 }
7865 else
7866 {
7867 if (j == 0)
7868 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7869 else
7870 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7871 prev_stmt_info = vinfo_for_stmt (new_stmt);
7872 }
7873 }
7874 dr_chain.release ();
7875 }
7876
7877 return true;
7878 }
7879
7880 /* Function vect_is_simple_cond.
7881
7882 Input:
7883 VINFO - the vectorization info of the loop or basic block being vectorized.
7884 COND - Condition that is checked for simple use.
7885
7886 Output:
7887 *COMP_VECTYPE - the vector type for the comparison.
7888 *DTS - The def types for the arguments of the comparison
7889
7890 Returns whether a COND can be vectorized.  Checks whether
7891 condition operands are supportable using vect_is_simple_use.  */
7892
7893 static bool
7894 vect_is_simple_cond (tree cond, vec_info *vinfo,
7895 tree *comp_vectype, enum vect_def_type *dts,
7896 tree vectype)
7897 {
7898 tree lhs, rhs;
7899 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7900
7901 /* Mask case. */
7902 if (TREE_CODE (cond) == SSA_NAME
7903 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7904 {
7905 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7906 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7907 &dts[0], comp_vectype)
7908 || !*comp_vectype
7909 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7910 return false;
7911 return true;
7912 }
7913
7914 if (!COMPARISON_CLASS_P (cond))
7915 return false;
7916
7917 lhs = TREE_OPERAND (cond, 0);
7918 rhs = TREE_OPERAND (cond, 1);
7919
7920 if (TREE_CODE (lhs) == SSA_NAME)
7921 {
7922 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7923 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7924 return false;
7925 }
7926 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7927 || TREE_CODE (lhs) == FIXED_CST)
7928 dts[0] = vect_constant_def;
7929 else
7930 return false;
7931
7932 if (TREE_CODE (rhs) == SSA_NAME)
7933 {
7934 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7935 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7936 return false;
7937 }
7938 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7939 || TREE_CODE (rhs) == FIXED_CST)
7940 dts[1] = vect_constant_def;
7941 else
7942 return false;
7943
7944 if (vectype1 && vectype2
7945 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
7946 TYPE_VECTOR_SUBPARTS (vectype2)))
7947 return false;
7948
7949 *comp_vectype = vectype1 ? vectype1 : vectype2;
7950 /* Invariant comparison. */
7951 if (! *comp_vectype)
7952 {
7953 tree scalar_type = TREE_TYPE (lhs);
7954 /* If we can widen the comparison to match vectype do so. */
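      /* E.g. (illustration only): for an invariant comparison of two chars
	 while VECTYPE is a vector of 32-bit ints, the scalar comparison type
	 is widened to 32 bits so that the resulting comparison vector type
	 has the same number of elements as VECTYPE.  */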
7955 if (INTEGRAL_TYPE_P (scalar_type)
7956 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7957 TYPE_SIZE (TREE_TYPE (vectype))))
7958 scalar_type = build_nonstandard_integer_type
7959 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7960 TYPE_UNSIGNED (scalar_type));
7961 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7962 }
7963
7964 return true;
7965 }
7966
7967 /* vectorizable_condition.
7968
7969 Check if STMT is a conditional modify expression that can be vectorized.
7970 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7971 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7972 at GSI.
7973
7974 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7975 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7976 the else clause if it is 2).
7977
7978 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7979
7980 bool
7981 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7982 gimple **vec_stmt, tree reduc_def, int reduc_index,
7983 slp_tree slp_node)
7984 {
7985 tree scalar_dest = NULL_TREE;
7986 tree vec_dest = NULL_TREE;
7987 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7988 tree then_clause, else_clause;
7989 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7990 tree comp_vectype = NULL_TREE;
7991 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7992 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7993 tree vec_compare;
7994 tree new_temp;
7995 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7996 enum vect_def_type dts[4]
7997 = {vect_unknown_def_type, vect_unknown_def_type,
7998 vect_unknown_def_type, vect_unknown_def_type};
7999 int ndts = 4;
8000 int ncopies;
8001 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8002 stmt_vec_info prev_stmt_info = NULL;
8003 int i, j;
8004 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8005 vec<tree> vec_oprnds0 = vNULL;
8006 vec<tree> vec_oprnds1 = vNULL;
8007 vec<tree> vec_oprnds2 = vNULL;
8008 vec<tree> vec_oprnds3 = vNULL;
8009 tree vec_cmp_type;
8010 bool masked = false;
8011
8012 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8013 return false;
8014
8015 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
8016 {
8017 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8018 return false;
8019
8020 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8021 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8022 && reduc_def))
8023 return false;
8024
8025 /* FORNOW: not yet supported. */
8026 if (STMT_VINFO_LIVE_P (stmt_info))
8027 {
8028 if (dump_enabled_p ())
8029 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8030 "value used after loop.\n");
8031 return false;
8032 }
8033 }
8034
8035 /* Is vectorizable conditional operation? */
8036 if (!is_gimple_assign (stmt))
8037 return false;
8038
8039 code = gimple_assign_rhs_code (stmt);
8040
8041 if (code != COND_EXPR)
8042 return false;
8043
8044 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8045 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8046
8047 if (slp_node)
8048 ncopies = 1;
8049 else
8050 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8051
8052 gcc_assert (ncopies >= 1);
8053 if (reduc_index && ncopies > 1)
8054 return false; /* FORNOW */
8055
8056 cond_expr = gimple_assign_rhs1 (stmt);
8057 then_clause = gimple_assign_rhs2 (stmt);
8058 else_clause = gimple_assign_rhs3 (stmt);
8059
8060 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8061 &comp_vectype, &dts[0], vectype)
8062 || !comp_vectype)
8063 return false;
8064
8065 gimple *def_stmt;
8066 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8067 &vectype1))
8068 return false;
8069 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8070 &vectype2))
8071 return false;
8072
8073 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8074 return false;
8075
8076 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8077 return false;
8078
8079 masked = !COMPARISON_CLASS_P (cond_expr);
8080 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8081
8082 if (vec_cmp_type == NULL_TREE)
8083 return false;
8084
8085 cond_code = TREE_CODE (cond_expr);
8086 if (!masked)
8087 {
8088 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8089 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8090 }
8091
8092 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8093 {
8094 /* Boolean values may have another representation in vectors
8095 and therefore we prefer bit operations over comparison for
8096 them (which also works for scalar masks). We store opcodes
8097 to use in bitop1 and bitop2. Statement is vectorized as
8098 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8099 depending on bitop1 and bitop2 arity. */
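      /* With boolean operands the mapping below amounts to (illustrative):
	   a >  b  ->  a & ~b
	   a >= b  ->  a | ~b
	   a <  b  ->  b & ~a   (operands swapped, then as for >)
	   a <= b  ->  b | ~a   (operands swapped, then as for >=)
	   a != b  ->  a ^ b
	   a == b  ->  ~(a ^ b), realized by computing a ^ b and swapping
	   the then and else clauses.  */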
8100 switch (cond_code)
8101 {
8102 case GT_EXPR:
8103 bitop1 = BIT_NOT_EXPR;
8104 bitop2 = BIT_AND_EXPR;
8105 break;
8106 case GE_EXPR:
8107 bitop1 = BIT_NOT_EXPR;
8108 bitop2 = BIT_IOR_EXPR;
8109 break;
8110 case LT_EXPR:
8111 bitop1 = BIT_NOT_EXPR;
8112 bitop2 = BIT_AND_EXPR;
8113 std::swap (cond_expr0, cond_expr1);
8114 break;
8115 case LE_EXPR:
8116 bitop1 = BIT_NOT_EXPR;
8117 bitop2 = BIT_IOR_EXPR;
8118 std::swap (cond_expr0, cond_expr1);
8119 break;
8120 case NE_EXPR:
8121 bitop1 = BIT_XOR_EXPR;
8122 break;
8123 case EQ_EXPR:
8124 bitop1 = BIT_XOR_EXPR;
8125 bitop2 = BIT_NOT_EXPR;
8126 break;
8127 default:
8128 return false;
8129 }
8130 cond_code = SSA_NAME;
8131 }
8132
8133 if (!vec_stmt)
8134 {
8135 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8136 if (bitop1 != NOP_EXPR)
8137 {
8138 machine_mode mode = TYPE_MODE (comp_vectype);
8139 optab optab;
8140
8141 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8142 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8143 return false;
8144
8145 if (bitop2 != NOP_EXPR)
8146 {
8147 optab = optab_for_tree_code (bitop2, comp_vectype,
8148 optab_default);
8149 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8150 return false;
8151 }
8152 }
8153 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8154 cond_code))
8155 {
8156 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8157 return true;
8158 }
8159 return false;
8160 }
8161
8162 /* Transform. */
8163
8164 if (!slp_node)
8165 {
8166 vec_oprnds0.create (1);
8167 vec_oprnds1.create (1);
8168 vec_oprnds2.create (1);
8169 vec_oprnds3.create (1);
8170 }
8171
8172 /* Handle def. */
8173 scalar_dest = gimple_assign_lhs (stmt);
8174 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8175
8176 /* Handle cond expr. */
8177 for (j = 0; j < ncopies; j++)
8178 {
8179 gassign *new_stmt = NULL;
8180 if (j == 0)
8181 {
8182 if (slp_node)
8183 {
8184 auto_vec<tree, 4> ops;
8185 auto_vec<vec<tree>, 4> vec_defs;
8186
8187 if (masked)
8188 ops.safe_push (cond_expr);
8189 else
8190 {
8191 ops.safe_push (cond_expr0);
8192 ops.safe_push (cond_expr1);
8193 }
8194 ops.safe_push (then_clause);
8195 ops.safe_push (else_clause);
8196 vect_get_slp_defs (ops, slp_node, &vec_defs);
8197 vec_oprnds3 = vec_defs.pop ();
8198 vec_oprnds2 = vec_defs.pop ();
8199 if (!masked)
8200 vec_oprnds1 = vec_defs.pop ();
8201 vec_oprnds0 = vec_defs.pop ();
8202 }
8203 else
8204 {
8205 gimple *gtemp;
8206 if (masked)
8207 {
8208 vec_cond_lhs
8209 = vect_get_vec_def_for_operand (cond_expr, stmt,
8210 comp_vectype);
8211 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8212 &gtemp, &dts[0]);
8213 }
8214 else
8215 {
8216 vec_cond_lhs
8217 = vect_get_vec_def_for_operand (cond_expr0,
8218 stmt, comp_vectype);
8219 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8220
8221 vec_cond_rhs
8222 = vect_get_vec_def_for_operand (cond_expr1,
8223 stmt, comp_vectype);
8224 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8225 }
8226 if (reduc_index == 1)
8227 vec_then_clause = reduc_def;
8228 else
8229 {
8230 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8231 stmt);
8232 vect_is_simple_use (then_clause, loop_vinfo,
8233 &gtemp, &dts[2]);
8234 }
8235 if (reduc_index == 2)
8236 vec_else_clause = reduc_def;
8237 else
8238 {
8239 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8240 stmt);
8241 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8242 }
8243 }
8244 }
8245 else
8246 {
8247 vec_cond_lhs
8248 = vect_get_vec_def_for_stmt_copy (dts[0],
8249 vec_oprnds0.pop ());
8250 if (!masked)
8251 vec_cond_rhs
8252 = vect_get_vec_def_for_stmt_copy (dts[1],
8253 vec_oprnds1.pop ());
8254
8255 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8256 vec_oprnds2.pop ());
8257 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8258 vec_oprnds3.pop ());
8259 }
8260
8261 if (!slp_node)
8262 {
8263 vec_oprnds0.quick_push (vec_cond_lhs);
8264 if (!masked)
8265 vec_oprnds1.quick_push (vec_cond_rhs);
8266 vec_oprnds2.quick_push (vec_then_clause);
8267 vec_oprnds3.quick_push (vec_else_clause);
8268 }
8269
8270 /* Arguments are ready. Create the new vector stmt. */
8271 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8272 {
8273 vec_then_clause = vec_oprnds2[i];
8274 vec_else_clause = vec_oprnds3[i];
8275
8276 if (masked)
8277 vec_compare = vec_cond_lhs;
8278 else
8279 {
8280 vec_cond_rhs = vec_oprnds1[i];
8281 if (bitop1 == NOP_EXPR)
8282 vec_compare = build2 (cond_code, vec_cmp_type,
8283 vec_cond_lhs, vec_cond_rhs);
8284 else
8285 {
8286 new_temp = make_ssa_name (vec_cmp_type);
8287 if (bitop1 == BIT_NOT_EXPR)
8288 new_stmt = gimple_build_assign (new_temp, bitop1,
8289 vec_cond_rhs);
8290 else
8291 new_stmt
8292 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8293 vec_cond_rhs);
8294 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8295 if (bitop2 == NOP_EXPR)
8296 vec_compare = new_temp;
8297 else if (bitop2 == BIT_NOT_EXPR)
8298 {
8299 /* Instead of doing ~x ? y : z do x ? z : y. */
8300 vec_compare = new_temp;
8301 std::swap (vec_then_clause, vec_else_clause);
8302 }
8303 else
8304 {
8305 vec_compare = make_ssa_name (vec_cmp_type);
8306 new_stmt
8307 = gimple_build_assign (vec_compare, bitop2,
8308 vec_cond_lhs, new_temp);
8309 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8310 }
8311 }
8312 }
8313 new_temp = make_ssa_name (vec_dest);
8314 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8315 vec_compare, vec_then_clause,
8316 vec_else_clause);
8317 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8318 if (slp_node)
8319 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8320 }
8321
8322 if (slp_node)
8323 continue;
8324
8325 if (j == 0)
8326 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8327 else
8328 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8329
8330 prev_stmt_info = vinfo_for_stmt (new_stmt);
8331 }
8332
8333 vec_oprnds0.release ();
8334 vec_oprnds1.release ();
8335 vec_oprnds2.release ();
8336 vec_oprnds3.release ();
8337
8338 return true;
8339 }
8340
8341 /* vectorizable_comparison.
8342
8343 Check if STMT is a comparison expression that can be vectorized.
8344 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8345 comparison, put it in VEC_STMT, and insert it at GSI.
8346
8347 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8348
8349 static bool
8350 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8351 gimple **vec_stmt, tree reduc_def,
8352 slp_tree slp_node)
8353 {
8354 tree lhs, rhs1, rhs2;
8355 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8356 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8357 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8358 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8359 tree new_temp;
8360 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8361 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8362 int ndts = 2;
8363 poly_uint64 nunits;
8364 int ncopies;
8365 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8366 stmt_vec_info prev_stmt_info = NULL;
8367 int i, j;
8368 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8369 vec<tree> vec_oprnds0 = vNULL;
8370 vec<tree> vec_oprnds1 = vNULL;
8371 gimple *def_stmt;
8372 tree mask_type;
8373 tree mask;
8374
8375 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8376 return false;
8377
8378 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8379 return false;
8380
8381 mask_type = vectype;
8382 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8383
8384 if (slp_node)
8385 ncopies = 1;
8386 else
8387 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8388
8389 gcc_assert (ncopies >= 1);
8390 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8391 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8392 && reduc_def))
8393 return false;
8394
8395 if (STMT_VINFO_LIVE_P (stmt_info))
8396 {
8397 if (dump_enabled_p ())
8398 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8399 "value used after loop.\n");
8400 return false;
8401 }
8402
8403 if (!is_gimple_assign (stmt))
8404 return false;
8405
8406 code = gimple_assign_rhs_code (stmt);
8407
8408 if (TREE_CODE_CLASS (code) != tcc_comparison)
8409 return false;
8410
8411 rhs1 = gimple_assign_rhs1 (stmt);
8412 rhs2 = gimple_assign_rhs2 (stmt);
8413
8414 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8415 &dts[0], &vectype1))
8416 return false;
8417
8418 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8419 &dts[1], &vectype2))
8420 return false;
8421
8422 if (vectype1 && vectype2
8423 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8424 TYPE_VECTOR_SUBPARTS (vectype2)))
8425 return false;
8426
8427 vectype = vectype1 ? vectype1 : vectype2;
8428
8429 /* Invariant comparison. */
8430 if (!vectype)
8431 {
8432 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8433 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
8434 return false;
8435 }
8436 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
8437 return false;
8438
8439 /* Can't compare mask and non-mask types. */
8440 if (vectype1 && vectype2
8441 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8442 return false;
8443
8444 /* Boolean values may have another representation in vectors
8445 and therefore we prefer bit operations over comparison for
8446 them (which also works for scalar masks). We store opcodes
8447 to use in bitop1 and bitop2. Statement is vectorized as
8448 BITOP2 (rhs1 BITOP1 rhs2) or
8449 rhs1 BITOP2 (BITOP1 rhs2)
8450 depending on bitop1 and bitop2 arity. */
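     /* The mapping matches the one in vectorizable_condition above; here the
	LT_EXPR/LE_EXPR cases swap the recorded def types together with the
	operands, and for EQ_EXPR the trailing BIT_NOT_EXPR is emitted
	explicitly.  */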
8451 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8452 {
8453 if (code == GT_EXPR)
8454 {
8455 bitop1 = BIT_NOT_EXPR;
8456 bitop2 = BIT_AND_EXPR;
8457 }
8458 else if (code == GE_EXPR)
8459 {
8460 bitop1 = BIT_NOT_EXPR;
8461 bitop2 = BIT_IOR_EXPR;
8462 }
8463 else if (code == LT_EXPR)
8464 {
8465 bitop1 = BIT_NOT_EXPR;
8466 bitop2 = BIT_AND_EXPR;
8467 std::swap (rhs1, rhs2);
8468 std::swap (dts[0], dts[1]);
8469 }
8470 else if (code == LE_EXPR)
8471 {
8472 bitop1 = BIT_NOT_EXPR;
8473 bitop2 = BIT_IOR_EXPR;
8474 std::swap (rhs1, rhs2);
8475 std::swap (dts[0], dts[1]);
8476 }
8477 else
8478 {
8479 bitop1 = BIT_XOR_EXPR;
8480 if (code == EQ_EXPR)
8481 bitop2 = BIT_NOT_EXPR;
8482 }
8483 }
8484
8485 if (!vec_stmt)
8486 {
8487 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8488 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8489 dts, ndts, NULL, NULL);
8490 if (bitop1 == NOP_EXPR)
8491 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8492 else
8493 {
8494 machine_mode mode = TYPE_MODE (vectype);
8495 optab optab;
8496
8497 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8498 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8499 return false;
8500
8501 if (bitop2 != NOP_EXPR)
8502 {
8503 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8504 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8505 return false;
8506 }
8507 return true;
8508 }
8509 }
8510
8511 /* Transform. */
8512 if (!slp_node)
8513 {
8514 vec_oprnds0.create (1);
8515 vec_oprnds1.create (1);
8516 }
8517
8518 /* Handle def. */
8519 lhs = gimple_assign_lhs (stmt);
8520 mask = vect_create_destination_var (lhs, mask_type);
8521
8522 /* Handle cmp expr. */
8523 for (j = 0; j < ncopies; j++)
8524 {
8525 gassign *new_stmt = NULL;
8526 if (j == 0)
8527 {
8528 if (slp_node)
8529 {
8530 auto_vec<tree, 2> ops;
8531 auto_vec<vec<tree>, 2> vec_defs;
8532
8533 ops.safe_push (rhs1);
8534 ops.safe_push (rhs2);
8535 vect_get_slp_defs (ops, slp_node, &vec_defs);
8536 vec_oprnds1 = vec_defs.pop ();
8537 vec_oprnds0 = vec_defs.pop ();
8538 }
8539 else
8540 {
8541 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8542 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8543 }
8544 }
8545 else
8546 {
8547 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8548 vec_oprnds0.pop ());
8549 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8550 vec_oprnds1.pop ());
8551 }
8552
8553 if (!slp_node)
8554 {
8555 vec_oprnds0.quick_push (vec_rhs1);
8556 vec_oprnds1.quick_push (vec_rhs2);
8557 }
8558
8559 /* Arguments are ready. Create the new vector stmt. */
8560 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8561 {
8562 vec_rhs2 = vec_oprnds1[i];
8563
8564 new_temp = make_ssa_name (mask);
8565 if (bitop1 == NOP_EXPR)
8566 {
8567 new_stmt = gimple_build_assign (new_temp, code,
8568 vec_rhs1, vec_rhs2);
8569 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8570 }
8571 else
8572 {
8573 if (bitop1 == BIT_NOT_EXPR)
8574 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8575 else
8576 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8577 vec_rhs2);
8578 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8579 if (bitop2 != NOP_EXPR)
8580 {
8581 tree res = make_ssa_name (mask);
8582 if (bitop2 == BIT_NOT_EXPR)
8583 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8584 else
8585 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8586 new_temp);
8587 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8588 }
8589 }
8590 if (slp_node)
8591 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8592 }
8593
8594 if (slp_node)
8595 continue;
8596
8597 if (j == 0)
8598 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8599 else
8600 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8601
8602 prev_stmt_info = vinfo_for_stmt (new_stmt);
8603 }
8604
8605 vec_oprnds0.release ();
8606 vec_oprnds1.release ();
8607
8608 return true;
8609 }
8610
8611 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
8612 can handle all live statements in the node. Otherwise return true
8613 if STMT is not live or if vectorizable_live_operation can handle it.
8614 GSI and VEC_STMT are as for vectorizable_live_operation. */
8615
8616 static bool
8617 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
8618 slp_tree slp_node, gimple **vec_stmt)
8619 {
8620 if (slp_node)
8621 {
8622 gimple *slp_stmt;
8623 unsigned int i;
8624 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8625 {
8626 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8627 if (STMT_VINFO_LIVE_P (slp_stmt_info)
8628 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8629 vec_stmt))
8630 return false;
8631 }
8632 }
8633 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
8634 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
8635 return false;
8636
8637 return true;
8638 }
8639
8640 /* Make sure the statement is vectorizable. */
8641
8642 bool
8643 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8644 slp_instance node_instance)
8645 {
8646 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8647 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8648 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8649 bool ok;
8650 gimple *pattern_stmt;
8651 gimple_seq pattern_def_seq;
8652
8653 if (dump_enabled_p ())
8654 {
8655 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8656 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8657 }
8658
8659 if (gimple_has_volatile_ops (stmt))
8660 {
8661 if (dump_enabled_p ())
8662 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8663 "not vectorized: stmt has volatile operands\n");
8664
8665 return false;
8666 }
8667
8668 /* Skip stmts that do not need to be vectorized. In loops this is expected
8669 to include:
8670 - the COND_EXPR which is the loop exit condition
8671 - any LABEL_EXPRs in the loop
8672 - computations that are used only for array indexing or loop control.
8673 In basic blocks we only analyze statements that are a part of some SLP
8674 instance, therefore, all the statements are relevant.
8675
8676 A pattern statement needs to be analyzed instead of the original statement
8677 if the original statement is not relevant.  Otherwise, we analyze both
8678 statements.  In basic blocks we are called from some SLP instance
8679 traversal; there we do not analyze pattern stmts instead, since the
8680 pattern stmts will already be part of an SLP instance.  */
8681
8682 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8683 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8684 && !STMT_VINFO_LIVE_P (stmt_info))
8685 {
8686 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8687 && pattern_stmt
8688 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8689 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8690 {
8691 /* Analyze PATTERN_STMT instead of the original stmt. */
8692 stmt = pattern_stmt;
8693 stmt_info = vinfo_for_stmt (pattern_stmt);
8694 if (dump_enabled_p ())
8695 {
8696 dump_printf_loc (MSG_NOTE, vect_location,
8697 "==> examining pattern statement: ");
8698 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8699 }
8700 }
8701 else
8702 {
8703 if (dump_enabled_p ())
8704 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8705
8706 return true;
8707 }
8708 }
8709 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8710 && node == NULL
8711 && pattern_stmt
8712 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8713 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8714 {
8715 /* Analyze PATTERN_STMT too. */
8716 if (dump_enabled_p ())
8717 {
8718 dump_printf_loc (MSG_NOTE, vect_location,
8719 "==> examining pattern statement: ");
8720 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8721 }
8722
8723 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8724 node_instance))
8725 return false;
8726 }
8727
8728 if (is_pattern_stmt_p (stmt_info)
8729 && node == NULL
8730 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8731 {
8732 gimple_stmt_iterator si;
8733
8734 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8735 {
8736 gimple *pattern_def_stmt = gsi_stmt (si);
8737 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8738 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8739 {
8740 /* Analyze def stmt of STMT if it's a pattern stmt. */
8741 if (dump_enabled_p ())
8742 {
8743 dump_printf_loc (MSG_NOTE, vect_location,
8744 "==> examining pattern def statement: ");
8745 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8746 }
8747
8748 if (!vect_analyze_stmt (pattern_def_stmt,
8749 need_to_vectorize, node, node_instance))
8750 return false;
8751 }
8752 }
8753 }
8754
8755 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8756 {
8757 case vect_internal_def:
8758 break;
8759
8760 case vect_reduction_def:
8761 case vect_nested_cycle:
8762 gcc_assert (!bb_vinfo
8763 && (relevance == vect_used_in_outer
8764 || relevance == vect_used_in_outer_by_reduction
8765 || relevance == vect_used_by_reduction
8766 || relevance == vect_unused_in_scope
8767 || relevance == vect_used_only_live));
8768 break;
8769
8770 case vect_induction_def:
8771 gcc_assert (!bb_vinfo);
8772 break;
8773
8774 case vect_constant_def:
8775 case vect_external_def:
8776 case vect_unknown_def_type:
8777 default:
8778 gcc_unreachable ();
8779 }
8780
8781 if (STMT_VINFO_RELEVANT_P (stmt_info))
8782 {
8783 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8784 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8785 || (is_gimple_call (stmt)
8786 && gimple_call_lhs (stmt) == NULL_TREE));
8787 *need_to_vectorize = true;
8788 }
8789
8790 if (PURE_SLP_STMT (stmt_info) && !node)
8791 {
8792 dump_printf_loc (MSG_NOTE, vect_location,
8793 "handled only by SLP analysis\n");
8794 return true;
8795 }
8796
8797 ok = true;
8798 if (!bb_vinfo
8799 && (STMT_VINFO_RELEVANT_P (stmt_info)
8800 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8801 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8802 || vectorizable_conversion (stmt, NULL, NULL, node)
8803 || vectorizable_shift (stmt, NULL, NULL, node)
8804 || vectorizable_operation (stmt, NULL, NULL, node)
8805 || vectorizable_assignment (stmt, NULL, NULL, node)
8806 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8807 || vectorizable_call (stmt, NULL, NULL, node)
8808 || vectorizable_store (stmt, NULL, NULL, node)
8809 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8810 || vectorizable_induction (stmt, NULL, NULL, node)
8811 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8812 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8813 else
8814 {
8815 if (bb_vinfo)
8816 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8817 || vectorizable_conversion (stmt, NULL, NULL, node)
8818 || vectorizable_shift (stmt, NULL, NULL, node)
8819 || vectorizable_operation (stmt, NULL, NULL, node)
8820 || vectorizable_assignment (stmt, NULL, NULL, node)
8821 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8822 || vectorizable_call (stmt, NULL, NULL, node)
8823 || vectorizable_store (stmt, NULL, NULL, node)
8824 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8825 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8826 }
8827
8828 if (!ok)
8829 {
8830 if (dump_enabled_p ())
8831 {
8832 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8833 "not vectorized: relevant stmt not ");
8834 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8835 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8836 }
8837
8838 return false;
8839 }
8840
8841 if (bb_vinfo)
8842 return true;
8843
8844 /* Stmts that are (also) "live" (i.e., used outside of the loop)
8845 need extra handling, except for vectorizable reductions. */
8846 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8847 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
8848 {
8849 if (dump_enabled_p ())
8850 {
8851 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8852 "not vectorized: live stmt not supported: ");
8853 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8854 }
8855
8856 return false;
8857 }
8858
8859 return true;
8860 }
8861
8862
8863 /* Function vect_transform_stmt.
8864
8865 Create a vectorized stmt to replace STMT, and insert it at GSI. */
8866
8867 bool
8868 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8869 bool *grouped_store, slp_tree slp_node,
8870 slp_instance slp_node_instance)
8871 {
8872 bool is_store = false;
8873 gimple *vec_stmt = NULL;
8874 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8875 bool done;
8876
8877 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8878 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8879
8880 switch (STMT_VINFO_TYPE (stmt_info))
8881 {
8882 case type_demotion_vec_info_type:
8883 case type_promotion_vec_info_type:
8884 case type_conversion_vec_info_type:
8885 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8886 gcc_assert (done);
8887 break;
8888
8889 case induc_vec_info_type:
8890 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8891 gcc_assert (done);
8892 break;
8893
8894 case shift_vec_info_type:
8895 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8896 gcc_assert (done);
8897 break;
8898
8899 case op_vec_info_type:
8900 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8901 gcc_assert (done);
8902 break;
8903
8904 case assignment_vec_info_type:
8905 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8906 gcc_assert (done);
8907 break;
8908
8909 case load_vec_info_type:
8910 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8911 slp_node_instance);
8912 gcc_assert (done);
8913 break;
8914
8915 case store_vec_info_type:
8916 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8917 gcc_assert (done);
8918 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8919 {
8920 /* In case of interleaving, the whole chain is vectorized when the
8921 last store in the chain is reached. Store stmts before the last
8922 one are skipped, and their stmt_vec_info shouldn't be freed
8923 in the meantime. (See the illustrative sketch after this function.) */
8924 *grouped_store = true;
8925 if (STMT_VINFO_VEC_STMT (stmt_info))
8926 is_store = true;
8927 }
8928 else
8929 is_store = true;
8930 break;
8931
8932 case condition_vec_info_type:
8933 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8934 gcc_assert (done);
8935 break;
8936
8937 case comparison_vec_info_type:
8938 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8939 gcc_assert (done);
8940 break;
8941
8942 case call_vec_info_type:
8943 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8944 stmt = gsi_stmt (*gsi);
8945 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8946 is_store = true;
8947 break;
8948
8949 case call_simd_clone_vec_info_type:
8950 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8951 stmt = gsi_stmt (*gsi);
8952 break;
8953
8954 case reduc_vec_info_type:
8955 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8956 slp_node_instance);
8957 gcc_assert (done);
8958 break;
8959
8960 default:
8961 if (!STMT_VINFO_LIVE_P (stmt_info))
8962 {
8963 if (dump_enabled_p ())
8964 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8965 "stmt not supported.\n");
8966 gcc_unreachable ();
8967 }
8968 }
8969
8970 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8971 This would break hybrid SLP vectorization. */
8972 if (slp_node)
8973 gcc_assert (!vec_stmt
8974 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8975
8976 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8977 is being vectorized, but outside the immediately enclosing loop. */
8978 if (vec_stmt
8979 && STMT_VINFO_LOOP_VINFO (stmt_info)
8980 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8981 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8982 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8983 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8984 || STMT_VINFO_RELEVANT (stmt_info) ==
8985 vect_used_in_outer_by_reduction))
8986 {
8987 struct loop *innerloop = LOOP_VINFO_LOOP (
8988 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8989 imm_use_iterator imm_iter;
8990 use_operand_p use_p;
8991 tree scalar_dest;
8992 gimple *exit_phi;
8993
8994 if (dump_enabled_p ())
8995 dump_printf_loc (MSG_NOTE, vect_location,
8996 "Record the vdef for outer-loop vectorization.\n");
8997
8998 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8999 (to be used when vectorizing outer-loop stmts that use the DEF of
9000 STMT). */
9001 if (gimple_code (stmt) == GIMPLE_PHI)
9002 scalar_dest = PHI_RESULT (stmt);
9003 else
9004 scalar_dest = gimple_assign_lhs (stmt);
9005
9006 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9007 {
9008 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9009 {
9010 exit_phi = USE_STMT (use_p);
9011 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
9012 }
9013 }
9014 }
9015
9016 /* Handle stmts whose DEF is used outside the loop-nest that is
9017 being vectorized. */
9018 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9019 {
9020 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
9021 gcc_assert (done);
9022 }
9023
9024 if (vec_stmt)
9025 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9026
9027 return is_store;
9028 }
9029
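/* Editor's illustration (not part of GCC, guarded out by "#if 0"): a sketch
   of the kind of scalar grouped (interleaved) store that the interleaving
   logic in vect_transform_stmt above refers to.  The function name and the
   stride-2 layout are made up for the example; the point is that the two
   stores form one group and the vector stores are emitted only when the
   last scalar store of the group is transformed.  */
#if 0
void
example_grouped_store (int *a, const int *b, const int *c, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i] = b[i];     /* Earlier store of the group: skipped, its
                              stmt_vec_info is kept alive.  */
      a[2 * i + 1] = c[i]; /* Last store of the group: vectorizing it emits
                              the vector stores for the whole chain.  */
    }
}
#endif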
9030
9031 /* Remove a group of stores (for SLP or interleaving) and free their
9032 stmt_vec_info. */
9033
9034 void
9035 vect_remove_stores (gimple *first_stmt)
9036 {
9037 gimple *next = first_stmt;
9038 gimple *tmp;
9039 gimple_stmt_iterator next_si;
9040
9041 while (next)
9042 {
9043 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9044
9045 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9046 if (is_pattern_stmt_p (stmt_info))
9047 next = STMT_VINFO_RELATED_STMT (stmt_info);
9048 /* Free the attached stmt_vec_info and remove the stmt. */
9049 next_si = gsi_for_stmt (next);
9050 unlink_stmt_vdef (next);
9051 gsi_remove (&next_si, true);
9052 release_defs (next);
9053 free_stmt_vec_info (next);
9054 next = tmp;
9055 }
9056 }
9057
9058
9059 /* Function new_stmt_vec_info.
9060
9061 Create and initialize a new stmt_vec_info struct for STMT. */
9062
9063 stmt_vec_info
9064 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9065 {
9066 stmt_vec_info res;
9067 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9068
9069 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9070 STMT_VINFO_STMT (res) = stmt;
9071 res->vinfo = vinfo;
9072 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9073 STMT_VINFO_LIVE_P (res) = false;
9074 STMT_VINFO_VECTYPE (res) = NULL;
9075 STMT_VINFO_VEC_STMT (res) = NULL;
9076 STMT_VINFO_VECTORIZABLE (res) = true;
9077 STMT_VINFO_IN_PATTERN_P (res) = false;
9078 STMT_VINFO_RELATED_STMT (res) = NULL;
9079 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9080 STMT_VINFO_DATA_REF (res) = NULL;
9081 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9082 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9083
9084 if (gimple_code (stmt) == GIMPLE_PHI
9085 && is_loop_header_bb_p (gimple_bb (stmt)))
9086 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9087 else
9088 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9089
9090 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9091 STMT_SLP_TYPE (res) = loop_vect;
9092 STMT_VINFO_NUM_SLP_USES (res) = 0;
9093
9094 GROUP_FIRST_ELEMENT (res) = NULL;
9095 GROUP_NEXT_ELEMENT (res) = NULL;
9096 GROUP_SIZE (res) = 0;
9097 GROUP_STORE_COUNT (res) = 0;
9098 GROUP_GAP (res) = 0;
9099 GROUP_SAME_DR_STMT (res) = NULL;
9100
9101 return res;
9102 }
9103
9104
9105 /* Create the global vector of stmt_vec_info entries. */
9106
9107 void
9108 init_stmt_vec_info_vec (void)
9109 {
9110 gcc_assert (!stmt_vec_info_vec.exists ());
9111 stmt_vec_info_vec.create (50);
9112 }
9113
9114
9115 /* Free the global vector of stmt_vec_info entries. */
9116
9117 void
9118 free_stmt_vec_info_vec (void)
9119 {
9120 unsigned int i;
9121 stmt_vec_info info;
9122 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9123 if (info != NULL)
9124 free_stmt_vec_info (STMT_VINFO_STMT (info));
9125 gcc_assert (stmt_vec_info_vec.exists ());
9126 stmt_vec_info_vec.release ();
9127 }
9128
9129
9130 /* Free stmt vectorization related info. */
9131
9132 void
9133 free_stmt_vec_info (gimple *stmt)
9134 {
9135 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9136
9137 if (!stmt_info)
9138 return;
9139
9140 /* Check if this statement has a related "pattern stmt"
9141 (introduced by the vectorizer during the pattern recognition
9142 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9143 too. */
9144 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9145 {
9146 stmt_vec_info patt_info
9147 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9148 if (patt_info)
9149 {
9150 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9151 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9152 gimple_set_bb (patt_stmt, NULL);
9153 tree lhs = gimple_get_lhs (patt_stmt);
9154 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9155 release_ssa_name (lhs);
9156 if (seq)
9157 {
9158 gimple_stmt_iterator si;
9159 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9160 {
9161 gimple *seq_stmt = gsi_stmt (si);
9162 gimple_set_bb (seq_stmt, NULL);
9163 lhs = gimple_get_lhs (seq_stmt);
9164 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9165 release_ssa_name (lhs);
9166 free_stmt_vec_info (seq_stmt);
9167 }
9168 }
9169 free_stmt_vec_info (patt_stmt);
9170 }
9171 }
9172
9173 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9174 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9175 set_vinfo_for_stmt (stmt, NULL);
9176 free (stmt_info);
9177 }
9178
9179
9180 /* Function get_vectype_for_scalar_type_and_size.
9181
9182 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9183 by the target. */
9184
9185 static tree
9186 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9187 {
9188 tree orig_scalar_type = scalar_type;
9189 scalar_mode inner_mode;
9190 machine_mode simd_mode;
9191 poly_uint64 nunits;
9192 tree vectype;
9193
9194 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9195 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9196 return NULL_TREE;
9197
9198 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9199
9200 /* For vector types of elements whose mode precision doesn't
9201 match their type's precision, we use an element type of mode
9202 precision. The vectorization routines will have to make sure
9203 they support the proper result truncation/extension.
9204 We also make sure to build vector types with INTEGER_TYPE
9205 component type only. */
9206 if (INTEGRAL_TYPE_P (scalar_type)
9207 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9208 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9209 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9210 TYPE_UNSIGNED (scalar_type));
9211
9212 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9213 When the component mode passes the above test simply use a type
9214 corresponding to that mode. The theory is that any use that
9215 would cause problems with this will disable vectorization anyway. */
9216 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9217 && !INTEGRAL_TYPE_P (scalar_type))
9218 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9219
9220 /* We can't build a vector type of elements with alignment bigger than
9221 their size. */
9222 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9223 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9224 TYPE_UNSIGNED (scalar_type));
9225
9226 /* If we fell back to using the mode, fail if there was
9227 no scalar type for it. */
9228 if (scalar_type == NULL_TREE)
9229 return NULL_TREE;
9230
9231 /* If no size was supplied, use the mode the target prefers. Otherwise
9232 look up a vector mode of the specified size (worked example below). */
9233 if (known_eq (size, 0U))
9234 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9235 else if (!multiple_p (size, nbytes, &nunits)
9236 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9237 return NULL_TREE;
9238 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9239 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9240 return NULL_TREE;
9241
9242 vectype = build_vector_type (scalar_type, nunits);
9243
9244 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9245 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9246 return NULL_TREE;
9247
9248 /* Re-attach the address-space qualifier if we canonicalized the scalar
9249 type. */
9250 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9251 return build_qualified_type
9252 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9253
9254 return vectype;
9255 }
9256
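/* Editor's worked example (not part of GCC, guarded out by "#if 0") of the
   size/nunits arithmetic in get_vectype_for_scalar_type_and_size above.
   It assumes an element mode of SImode with GET_MODE_SIZE == 4 and a
   requested vector size of 16 bytes, which on such a target would yield a
   4-element vector (e.g. V4SImode).  */
#if 0
#include <assert.h>

static void
example_nunits_arithmetic (void)
{
  unsigned int size = 16;   /* Requested vector size in bytes.  */
  unsigned int nbytes = 4;  /* GET_MODE_SIZE of the element mode.  */

  /* Mirrors the multiple_p (size, nbytes, &nunits) check above.  */
  assert (size % nbytes == 0);
  unsigned int nunits = size / nbytes;
  assert (nunits == 4);     /* build_vector_type (int, 4) -> V4SI.  */
}
#endif
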
9257 poly_uint64 current_vector_size;
9258
9259 /* Function get_vectype_for_scalar_type.
9260
9261 Returns the vector type corresponding to SCALAR_TYPE as supported
9262 by the target. */
9263
9264 tree
9265 get_vectype_for_scalar_type (tree scalar_type)
9266 {
9267 tree vectype;
9268 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9269 current_vector_size);
9270 if (vectype
9271 && known_eq (current_vector_size, 0U))
9272 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9273 return vectype;
9274 }
9275
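/* Editor's sketch (not part of GCC, guarded out by "#if 0") of how
   current_vector_size latches onto the first answer in
   get_vectype_for_scalar_type above.  The 16-byte width and the resulting
   element counts mentioned in the comments are assumptions about the
   target, not guarantees.  */
#if 0
static void
example_vector_size_latching (void)
{
  /* With current_vector_size still 0, the first query records the target's
     preferred vector size, e.g. 16 bytes, giving a 4 x int vector.  */
  tree vec_int = get_vectype_for_scalar_type (integer_type_node);

  /* Later queries reuse the latched size, so "char" now yields a
     16 x char vector rather than whatever a fresh query would prefer.  */
  tree vec_char = get_vectype_for_scalar_type (char_type_node);

  (void) vec_int;
  (void) vec_char;
}
#endif
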
9276 /* Function get_mask_type_for_scalar_type.
9277
9278 Returns the mask type corresponding to a result of comparison
9279 of vectors of specified SCALAR_TYPE as supported by target. */
9280
9281 tree
9282 get_mask_type_for_scalar_type (tree scalar_type)
9283 {
9284 tree vectype = get_vectype_for_scalar_type (scalar_type);
9285
9286 if (!vectype)
9287 return NULL;
9288
9289 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9290 current_vector_size);
9291 }
9292
9293 /* Function get_same_sized_vectype
9294
9295 Returns a vector type corresponding to SCALAR_TYPE with the same
9296 size as VECTOR_TYPE, if supported by the target. */
9297
9298 tree
9299 get_same_sized_vectype (tree scalar_type, tree vector_type)
9300 {
9301 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9302 return build_same_sized_truth_vector_type (vector_type);
9303
9304 return get_vectype_for_scalar_type_and_size
9305 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9306 }
9307
9308 /* Function vect_is_simple_use.
9309
9310 Input:
9311 VINFO - the vect info of the loop or basic block that is being vectorized.
9312 OPERAND - operand in the loop or bb.
9313 Output:
9314 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9315 DT - the type of definition
9316
9317 Returns whether a stmt with OPERAND can be vectorized.
9318 For loops, supportable operands are constants, loop invariants, and operands
9319 that are defined by the current iteration of the loop. Unsupportable
9320 operands are those that are defined by a previous iteration of the loop (as
9321 is the case in reduction/induction computations).
9322 For basic blocks, supportable operands are constants and bb invariants.
9323 For now, operands defined outside the basic block are not supported (see the illustrative loop after this function). */
9324
9325 bool
9326 vect_is_simple_use (tree operand, vec_info *vinfo,
9327 gimple **def_stmt, enum vect_def_type *dt)
9328 {
9329 *def_stmt = NULL;
9330 *dt = vect_unknown_def_type;
9331
9332 if (dump_enabled_p ())
9333 {
9334 dump_printf_loc (MSG_NOTE, vect_location,
9335 "vect_is_simple_use: operand ");
9336 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9337 dump_printf (MSG_NOTE, "\n");
9338 }
9339
9340 if (CONSTANT_CLASS_P (operand))
9341 {
9342 *dt = vect_constant_def;
9343 return true;
9344 }
9345
9346 if (is_gimple_min_invariant (operand))
9347 {
9348 *dt = vect_external_def;
9349 return true;
9350 }
9351
9352 if (TREE_CODE (operand) != SSA_NAME)
9353 {
9354 if (dump_enabled_p ())
9355 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9356 "not ssa-name.\n");
9357 return false;
9358 }
9359
9360 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9361 {
9362 *dt = vect_external_def;
9363 return true;
9364 }
9365
9366 *def_stmt = SSA_NAME_DEF_STMT (operand);
9367 if (dump_enabled_p ())
9368 {
9369 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9370 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9371 }
9372
9373 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9374 *dt = vect_external_def;
9375 else
9376 {
9377 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9378 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9379 }
9380
9381 if (dump_enabled_p ())
9382 {
9383 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9384 switch (*dt)
9385 {
9386 case vect_uninitialized_def:
9387 dump_printf (MSG_NOTE, "uninitialized\n");
9388 break;
9389 case vect_constant_def:
9390 dump_printf (MSG_NOTE, "constant\n");
9391 break;
9392 case vect_external_def:
9393 dump_printf (MSG_NOTE, "external\n");
9394 break;
9395 case vect_internal_def:
9396 dump_printf (MSG_NOTE, "internal\n");
9397 break;
9398 case vect_induction_def:
9399 dump_printf (MSG_NOTE, "induction\n");
9400 break;
9401 case vect_reduction_def:
9402 dump_printf (MSG_NOTE, "reduction\n");
9403 break;
9404 case vect_double_reduction_def:
9405 dump_printf (MSG_NOTE, "double reduction\n");
9406 break;
9407 case vect_nested_cycle:
9408 dump_printf (MSG_NOTE, "nested cycle\n");
9409 break;
9410 case vect_unknown_def_type:
9411 dump_printf (MSG_NOTE, "unknown\n");
9412 break;
9413 }
9414 }
9415
9416 if (*dt == vect_unknown_def_type)
9417 {
9418 if (dump_enabled_p ())
9419 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9420 "Unsupported pattern.\n");
9421 return false;
9422 }
9423
9424 switch (gimple_code (*def_stmt))
9425 {
9426 case GIMPLE_PHI:
9427 case GIMPLE_ASSIGN:
9428 case GIMPLE_CALL:
9429 break;
9430 default:
9431 if (dump_enabled_p ())
9432 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9433 "unsupported defining stmt:\n");
9434 return false;
9435 }
9436
9437 return true;
9438 }
9439
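/* Editor's illustration (not part of GCC, guarded out by "#if 0"): a scalar
   loop showing how the operand kinds classified by vect_is_simple_use above
   typically arise when such a function is loop-vectorized.  The names are
   made up for the example.  */
#if 0
void
example_operand_kinds (int *a, const int *b, int n, int k)
{
  for (int i = 0; i < n; i++)
    {
      int t = b[i] + 2;  /* "2" is a constant -> vect_constant_def;
                            T is defined by the current iteration
                            -> vect_internal_def.  */
      a[i] = t * k;      /* "k" is defined outside the loop (a default
                            definition) -> vect_external_def.  */
    }
}
#endif
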
9440 /* Function vect_is_simple_use.
9441
9442 Same as vect_is_simple_use but also determines the vector operand
9443 type of OPERAND and stores it to *VECTYPE. If the definition of
9444 OPERAND is vect_uninitialized_def, vect_constant_def or
9445 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
9446 is responsible for computing the best-suited vector type for the
9447 scalar operand. */
9448
9449 bool
9450 vect_is_simple_use (tree operand, vec_info *vinfo,
9451 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9452 {
9453 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9454 return false;
9455
9456 /* Now get a vector type if the def is internal, otherwise supply
9457 NULL_TREE and leave it up to the caller to figure out a proper
9458 type for the use stmt. */
9459 if (*dt == vect_internal_def
9460 || *dt == vect_induction_def
9461 || *dt == vect_reduction_def
9462 || *dt == vect_double_reduction_def
9463 || *dt == vect_nested_cycle)
9464 {
9465 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9466
9467 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9468 && !STMT_VINFO_RELEVANT (stmt_info)
9469 && !STMT_VINFO_LIVE_P (stmt_info))
9470 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9471
9472 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9473 gcc_assert (*vectype != NULL_TREE);
9474 }
9475 else if (*dt == vect_uninitialized_def
9476 || *dt == vect_constant_def
9477 || *dt == vect_external_def)
9478 *vectype = NULL_TREE;
9479 else
9480 gcc_unreachable ();
9481
9482 return true;
9483 }
9484
9485
9486 /* Function supportable_widening_operation
9487
9488 Check whether an operation represented by the code CODE is a
9489 widening operation that is supported by the target platform in
9490 vector form (i.e., when operating on arguments of type VECTYPE_IN
9491 producing a result of type VECTYPE_OUT).
9492
9493 Widening operations we currently support are NOP (CONVERT), FLOAT,
9494 WIDEN_MULT, WIDEN_LSHIFT, DOT_PROD and SAD. This function checks if
9495 these operations are supported by the target platform either directly
9496 (via vector tree-codes), or via target builtins.
9497
9498 Output:
9499 - CODE1 and CODE2 are codes of vector operations to be used when
9500 vectorizing the operation, if available.
9501 - MULTI_STEP_CVT determines the number of required intermediate steps in
9502 case of multi-step conversion (like char->short->int - in that case
9503 MULTI_STEP_CVT will be 1).
9504 - INTERM_TYPES contains the intermediate type required to perform the
9505 widening operation (short in the above example). */
9506
9507 bool
9508 supportable_widening_operation (enum tree_code code, gimple *stmt,
9509 tree vectype_out, tree vectype_in,
9510 enum tree_code *code1, enum tree_code *code2,
9511 int *multi_step_cvt,
9512 vec<tree> *interm_types)
9513 {
9514 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9515 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9516 struct loop *vect_loop = NULL;
9517 machine_mode vec_mode;
9518 enum insn_code icode1, icode2;
9519 optab optab1, optab2;
9520 tree vectype = vectype_in;
9521 tree wide_vectype = vectype_out;
9522 enum tree_code c1, c2;
9523 int i;
9524 tree prev_type, intermediate_type;
9525 machine_mode intermediate_mode, prev_mode;
9526 optab optab3, optab4;
9527
9528 *multi_step_cvt = 0;
9529 if (loop_info)
9530 vect_loop = LOOP_VINFO_LOOP (loop_info);
9531
9532 switch (code)
9533 {
9534 case WIDEN_MULT_EXPR:
9535 /* The result of a vectorized widening operation usually requires
9536 two vectors (because the widened results do not fit into one vector).
9537 The vector results would normally be expected to be generated
9538 in the same order as in the original scalar computation,
9539 i.e. if 8 results are generated in each vector iteration, they are
9540 to be organized as follows:
9541 vect1: [res1,res2,res3,res4],
9542 vect2: [res5,res6,res7,res8].
9543
9544 However, in the special case that the result of the widening
9545 operation is used in a reduction computation only, the order doesn't
9546 matter (because when vectorizing a reduction we change the order of
9547 the computation). Some targets can take advantage of this and
9548 generate more efficient code. For example, targets like Altivec,
9549 that support widen_mult using a sequence of {mult_even,mult_odd}
9550 generate the following vectors:
9551 vect1: [res1,res3,res5,res7],
9552 vect2: [res2,res4,res6,res8].
9553
9554 When vectorizing outer-loops, we execute the inner-loop sequentially
9555 (each vectorized inner-loop iteration contributes to VF outer-loop
9556 iterations in parallel). We therefore don't allow changing the
9557 order of the computation in the inner-loop during outer-loop
9558 vectorization. */
9559 /* TODO: Another case in which order doesn't *really* matter is when we
9560 widen and then contract again, e.g. (short)((int)x * y >> 8).
9561 Normally, pack_trunc performs an even/odd permute, whereas the
9562 repack from an even/odd expansion would be an interleave, which
9563 would be significantly simpler for e.g. AVX2. */
9564 /* In any case, in order to avoid duplicating the code below, recurse
9565 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9566 are properly set up for the caller. If we fail, we'll continue with
9567 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9568 if (vect_loop
9569 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9570 && !nested_in_vect_loop_p (vect_loop, stmt)
9571 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9572 stmt, vectype_out, vectype_in,
9573 code1, code2, multi_step_cvt,
9574 interm_types))
9575 {
9576 /* Elements in a vector with vect_used_by_reduction property cannot
9577 be reordered if the use chain with this property does not have the
9578 same operation. One such example is s += a * b, where elements
9579 in a and b cannot be reordered. Here we check if the vector defined
9580 by STMT is only directly used in the reduction statement. */
9581 tree lhs = gimple_assign_lhs (stmt);
9582 use_operand_p dummy;
9583 gimple *use_stmt;
9584 stmt_vec_info use_stmt_info = NULL;
9585 if (single_imm_use (lhs, &dummy, &use_stmt)
9586 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9587 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9588 return true;
9589 }
9590 c1 = VEC_WIDEN_MULT_LO_EXPR;
9591 c2 = VEC_WIDEN_MULT_HI_EXPR;
9592 break;
9593
9594 case DOT_PROD_EXPR:
9595 c1 = DOT_PROD_EXPR;
9596 c2 = DOT_PROD_EXPR;
9597 break;
9598
9599 case SAD_EXPR:
9600 c1 = SAD_EXPR;
9601 c2 = SAD_EXPR;
9602 break;
9603
9604 case VEC_WIDEN_MULT_EVEN_EXPR:
9605 /* Support the recursion induced just above. */
9606 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9607 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9608 break;
9609
9610 case WIDEN_LSHIFT_EXPR:
9611 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9612 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9613 break;
9614
9615 CASE_CONVERT:
9616 c1 = VEC_UNPACK_LO_EXPR;
9617 c2 = VEC_UNPACK_HI_EXPR;
9618 break;
9619
9620 case FLOAT_EXPR:
9621 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9622 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9623 break;
9624
9625 case FIX_TRUNC_EXPR:
9626 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9627 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9628 computing the operation. */
9629 return false;
9630
9631 default:
9632 gcc_unreachable ();
9633 }
9634
9635 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9636 std::swap (c1, c2);
9637
9638 if (code == FIX_TRUNC_EXPR)
9639 {
9640 /* The signedness is determined from the output operand. */
9641 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9642 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9643 }
9644 else
9645 {
9646 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9647 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9648 }
9649
9650 if (!optab1 || !optab2)
9651 return false;
9652
9653 vec_mode = TYPE_MODE (vectype);
9654 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9655 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9656 return false;
9657
9658 *code1 = c1;
9659 *code2 = c2;
9660
9661 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9662 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9663 /* For scalar masks we may have different boolean
9664 vector types having the same QImode. Thus we
9665 add an additional check on the number of elements. */
9666 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9667 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
9668 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9669
9670 /* Check if it's a multi-step conversion that can be done using intermediate
9671 types. */
9672
9673 prev_type = vectype;
9674 prev_mode = vec_mode;
9675
9676 if (!CONVERT_EXPR_CODE_P (code))
9677 return false;
9678
9679 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9680 intermediate steps in the promotion sequence. We try
9681 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
9682 not. */
9683 interm_types->create (MAX_INTERM_CVT_STEPS);
9684 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9685 {
9686 intermediate_mode = insn_data[icode1].operand[0].mode;
9687 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9688 {
9689 poly_uint64 intermediate_nelts
9690 = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
9691 intermediate_type
9692 = build_truth_vector_type (intermediate_nelts,
9693 current_vector_size);
9694 if (intermediate_mode != TYPE_MODE (intermediate_type))
9695 return false;
9696 }
9697 else
9698 intermediate_type
9699 = lang_hooks.types.type_for_mode (intermediate_mode,
9700 TYPE_UNSIGNED (prev_type));
9701
9702 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9703 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9704
9705 if (!optab3 || !optab4
9706 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9707 || insn_data[icode1].operand[0].mode != intermediate_mode
9708 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9709 || insn_data[icode2].operand[0].mode != intermediate_mode
9710 || ((icode1 = optab_handler (optab3, intermediate_mode))
9711 == CODE_FOR_nothing)
9712 || ((icode2 = optab_handler (optab4, intermediate_mode))
9713 == CODE_FOR_nothing))
9714 break;
9715
9716 interm_types->quick_push (intermediate_type);
9717 (*multi_step_cvt)++;
9718
9719 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9720 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9721 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9722 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
9723 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9724
9725 prev_type = intermediate_type;
9726 prev_mode = intermediate_mode;
9727 }
9728
9729 interm_types->release ();
9730 return false;
9731 }
9732
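/* Editor's illustration (not part of GCC, guarded out by "#if 0"): the
   reduction-only use of a widening multiply discussed in the
   WIDEN_MULT_EXPR case of supportable_widening_operation above.  Because
   the widened products only feed the sum, a target may compute them in
   even/odd order (VEC_WIDEN_MULT_EVEN/ODD_EXPR) instead of lo/hi order
   without changing the final result.  */
#if 0
int
example_widen_mult_reduction (const short *a, const short *b, int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)
    s += (int) a[i] * b[i];  /* short * short -> int, consumed only by the
                                reduction, so element order is irrelevant.  */
  return s;
}
#endif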
9733
9734 /* Function supportable_narrowing_operation
9735
9736 Check whether an operation represented by the code CODE is a
9737 narrowing operation that is supported by the target platform in
9738 vector form (i.e., when operating on arguments of type VECTYPE_IN
9739 and producing a result of type VECTYPE_OUT).
9740
9741 Narrowing operations we currently support are NOP (CONVERT) and
9742 FIX_TRUNC. This function checks if these operations are supported by
9743 the target platform directly via vector tree-codes.
9744
9745 Output:
9746 - CODE1 is the code of a vector operation to be used when
9747 vectorizing the operation, if available.
9748 - MULTI_STEP_CVT determines the number of required intermediate steps in
9749 case of multi-step conversion (like int->short->char - in that case
9750 MULTI_STEP_CVT will be 1).
9751 - INTERM_TYPES contains the intermediate type required to perform the
9752 narrowing operation (short in the above example). */
9753
9754 bool
9755 supportable_narrowing_operation (enum tree_code code,
9756 tree vectype_out, tree vectype_in,
9757 enum tree_code *code1, int *multi_step_cvt,
9758 vec<tree> *interm_types)
9759 {
9760 machine_mode vec_mode;
9761 enum insn_code icode1;
9762 optab optab1, interm_optab;
9763 tree vectype = vectype_in;
9764 tree narrow_vectype = vectype_out;
9765 enum tree_code c1;
9766 tree intermediate_type, prev_type;
9767 machine_mode intermediate_mode, prev_mode;
9768 int i;
9769 bool uns;
9770
9771 *multi_step_cvt = 0;
9772 switch (code)
9773 {
9774 CASE_CONVERT:
9775 c1 = VEC_PACK_TRUNC_EXPR;
9776 break;
9777
9778 case FIX_TRUNC_EXPR:
9779 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9780 break;
9781
9782 case FLOAT_EXPR:
9783 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9784 tree code and optabs used for computing the operation. */
9785 return false;
9786
9787 default:
9788 gcc_unreachable ();
9789 }
9790
9791 if (code == FIX_TRUNC_EXPR)
9792 /* The signedness is determined from the output operand. */
9793 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9794 else
9795 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9796
9797 if (!optab1)
9798 return false;
9799
9800 vec_mode = TYPE_MODE (vectype);
9801 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9802 return false;
9803
9804 *code1 = c1;
9805
9806 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9807 /* For scalar masks we may have different boolean
9808 vector types having the same QImode. Thus we
9809 add an additional check on the number of elements. */
9810 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9811 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
9812 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9813
9814 /* Check if it's a multi-step conversion that can be done using intermediate
9815 types. */
9816 prev_mode = vec_mode;
9817 prev_type = vectype;
9818 if (code == FIX_TRUNC_EXPR)
9819 uns = TYPE_UNSIGNED (vectype_out);
9820 else
9821 uns = TYPE_UNSIGNED (vectype);
9822
9823 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
9824 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9825 costly than signed. */
9826 if (code == FIX_TRUNC_EXPR && uns)
9827 {
9828 enum insn_code icode2;
9829
9830 intermediate_type
9831 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9832 interm_optab
9833 = optab_for_tree_code (c1, intermediate_type, optab_default);
9834 if (interm_optab != unknown_optab
9835 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9836 && insn_data[icode1].operand[0].mode
9837 == insn_data[icode2].operand[0].mode)
9838 {
9839 uns = false;
9840 optab1 = interm_optab;
9841 icode1 = icode2;
9842 }
9843 }
9844
9845 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9846 intermediate steps in the narrowing sequence. We try
9847 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
9848 interm_types->create (MAX_INTERM_CVT_STEPS);
9849 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9850 {
9851 intermediate_mode = insn_data[icode1].operand[0].mode;
9852 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9853 {
9854 intermediate_type
9855 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9856 current_vector_size);
9857 if (intermediate_mode != TYPE_MODE (intermediate_type))
9858 return false;
9859 }
9860 else
9861 intermediate_type
9862 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9863 interm_optab
9864 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9865 optab_default);
9866 if (!interm_optab
9867 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9868 || insn_data[icode1].operand[0].mode != intermediate_mode
9869 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9870 == CODE_FOR_nothing))
9871 break;
9872
9873 interm_types->quick_push (intermediate_type);
9874 (*multi_step_cvt)++;
9875
9876 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9877 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9878 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
9879 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9880
9881 prev_mode = intermediate_mode;
9882 prev_type = intermediate_type;
9883 optab1 = interm_optab;
9884 }
9885
9886 interm_types->release ();
9887 return false;
9888 }
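
/* Editor's illustration (not part of GCC, guarded out by "#if 0"): the kind
   of multi-step narrowing described above.  Converting int elements to char
   goes through an intermediate short type, so on a typical target
   supportable_narrowing_operation would report MULTI_STEP_CVT == 1 with a
   short vector type in INTERM_TYPES.  */
#if 0
void
example_multi_step_narrowing (signed char *dst, const int *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (signed char) src[i];  /* int -> (short) -> char, via
                                       VEC_PACK_TRUNC_EXPR steps.  */
}
#endif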