/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
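
/* An illustrative classification (a sketch added for exposition, not part
   of the original sources):

     x_1 = a[i]          <-- VLS_LOAD
     a[i] = x_1 + y_2    <-- VLS_STORE (stores a vectorized result)
     a[i] = 7            <-- VLS_STORE_INVARIANT (stores a loop invariant)  */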

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
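
/* An illustrative example (a sketch added for exposition, not from the
   original sources):

     s = 0;
     for (i = 0; i < n; i++)
       {
	 b[i] = a[i] + 1;   <-- relevant: alters memory (has a vdef)
	 s += a[i];         <-- live: the value of s is used after the loop
       }
     use (s);

   The store to b[i] becomes vect_used_in_scope, while the stmt computing
   s is marked live because its result escapes through a loop exit phi.  */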

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- STMT = (S1) array-ref
     -2- array-ref = (S1) STMT
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
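
/* For example (illustrative only): in "a[i_2] = x_1" the stored value x_1
   is a real (non-indexing) operand, so the function returns true for x_1,
   whereas the use of i_2 serves only to index the array and yields false.  */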

/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
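
/* A concrete sketch of case 3a (added for illustration, not from the
   original sources):

     for (i = 0; i < n; i++)       <-- outer loop
       {
	 d = a[i];                 <-- DEF_STMT in the outer loop
	 for (j = 0; j < m; j++)   <-- inner loop being vectorized
	   b[j] += d;              <-- STMT using d
       }

   DEF_STMT lives in the outer loop while STMT is in the inner loop, so
   "relevant" is adjusted before DEF_STMT is added to the worklist.  */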

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
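
/* Worked example (added for illustration): for a two-step promotion
   (PWR = 1) the loop above accumulates vect_pow2 (1) + vect_pow2 (2)
   = 2 + 4 = 6 vec_promote_demote operations, since each promotion step
   doubles the number of vectors produced.  A two-step demotion instead
   costs vect_pow2 (0) + vect_pow2 (1) = 1 + 2 = 3 operations.  */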

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
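
/* Worked example (added for illustration): for an interleaved store group
   with GROUP_SIZE = 4 and NCOPIES = 1 using VMAT_CONTIGUOUS_PERMUTE, the
   permute count above is nstmts = 1 * ceil_log2 (4) * 4 = 8 vec_perm
   operations on top of the cost of the stores themselves.  */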

/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
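
/* Usage sketch (an assumed example, not from the original sources):
   broadcasting the invariant 3 for a V4SI operand, with v4si_type standing
   for that vector type, might look like

     tree def = vect_init_vector (stmt, build_int_cst (integer_type_node, 3),
				  v4si_type, NULL);

   which emits "cst_1 = { 3, 3, 3, 3 };" in the loop preheader and returns
   the SSA name cst_1.  */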

/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
			VS1.1:  vx.1 = memref1      VS1.2
			VS1.2:  vx.2 = memref2      VS1.3
			VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
			VSnew.1:  vz1 = vx.1 + ...  VSnew.2
			VSnew.2:  vz2 = vx.2 + ...  VSnew.3
			VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
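
/* Usage sketch (added for illustration): a caller vectorizing a unary
   operation passes op1 == NULL_TREE, e.g.

     vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);

   so only VEC_OPRNDS0 is filled in, either from the SLP tree or via
   vect_get_vec_def_for_operand.  */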

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);

/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
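
/* For example (illustrative only): a load from "a[i]" in a loop with unit
   increment has a positive step and yields 1; "a[n - i]" has step
   -sizeof (*a) and yields -1; an invariant "a[0]" yields 0.  */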

/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
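
/* Worked example (added for illustration): for V8SI the series pushed
   above is { 7, 6, 5 }, which vec_perm_indices extrapolates to the full
   reversal { 7, 6, 5, 4, 3, 2, 1, 0 }.  Encoding only the single stepped
   pattern lets the same mask describe variable-length vectors.  */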

/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (multiple_p (nunits, group_size))
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First try using LOAD/STORE_LANES.  */
	  if (vls_type == VLS_LOAD
	      ? vect_load_lanes_supported (vectype, group_size)
	      : vect_store_lanes_supported (vectype, group_size))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  gcc_assert (gimple_assign_single_p (next_stmt));
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
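
/* Illustrative example (assumed, not from the original sources): a load
   group with GROUP_SIZE = 2 and GROUP_GAP = 1, as for "... = a[2*i]",
   needs only the even elements, so a full-width vector load of the last
   group can touch gap elements past the final scalar access.  Such an
   overrun is tolerated only for loads in a loop where a scalar epilogue
   can be peeled (LOOP_VINFO_PEELING_FOR_GAPS).  */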
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
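/* Illustrative sketch, not part of GCC: scalar semantics of the
   VMAT_CONTIGUOUS_REVERSE case chosen above.  The helper name is
   hypothetical.  A load with step -1 reads p[0], p[-1], ..., p[-(vf-1)];
   the vectorized form instead loads the block p[-(vf-1)] .. p[0]
   contiguously and then reverses the lanes with the permute that
   perm_mask_for_reverse checks for.  */

static inline void
example_reverse_load (const int *p, int *out, int vf)
{
  for (int lane = 0; lane < vf; lane++)
    out[lane] = p[-lane];	/* lane-reversed contiguous block.  */
}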
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
				      memory_access_type))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      *memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }

  return true;
}
/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = vect_get_num_copies (loop_vinfo, vectype);
  gcc_assert (ncopies >= 1);

  mask = gimple_call_arg (stmt, 2);

  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
      || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		   TYPE_VECTOR_SUBPARTS (vectype)))
    return false;

  if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
	return false;
      if (dt == vect_constant_def || dt == vect_external_def)
	vls_type = VLS_STORE_INVARIANT;
      else
	vls_type = VLS_STORE;
    }
  else
    vls_type = VLS_LOAD;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree masktype
	= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "masked gather with integer mask not supported.");
	  return false;
	}
    }
  else if (memory_access_type != VMAT_CONTIGUOUS)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported access type for masked %s.\n",
			 vls_type == VLS_LOAD ? "load" : "store");
      return false;
    }
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
	   || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
					  TYPE_MODE (mask_vectype),
					  vls_type == VLS_LOAD)
	   || (rhs_vectype
	       && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vls_type == VLS_LOAD)
	vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			      NULL, NULL);
      else
	vect_model_store_cost (stmt_info, ncopies, memory_access_type,
			       dt, NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 gather_off_nunits
	= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
			   && types_compatible_p (srctype, masktype));

      if (known_eq (nunits, gather_off_nunits))
	modifier = NONE;
      else if (known_eq (nunits * 2, gather_off_nunits))
	{
	  modifier = WIDEN;

	  /* Currently widening gathers and scatters are only supported for
	     fixed-length vectors.  */
	  int count = gather_off_nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 1, count);
	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
						  indices);
	}
      else if (known_eq (nunits, gather_off_nunits * 2))
	{
	  modifier = NARROW;

	  /* Currently narrowing gathers and scatters are only supported for
	     fixed-length vectors.  */
	  int count = nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  sel.quick_grow (count);
	  for (i = 0; i < count; ++i)
	    sel[i] = i < count / 2 ? i : i + count / 2;
	  vec_perm_indices indices (sel, 2, count);
	  perm_mask = vect_gen_perm_mask_checked (vectype, indices);

	  ncopies *= 2;
	  for (i = 0; i < count; ++i)
	    sel[i] = i | (count / 2);
	  indices.new_vector (sel, 2, count);
	  mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
	}
      else
	gcc_unreachable ();

      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gs_info.offset, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
				    TYPE_VECTOR_SUBPARTS (idxtype)));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  if (mask_perm_mask && (j & 1))
	    mask_op = permute_vec_elements (mask_op, mask_op,
					    mask_perm_mask, stmt, gsi);
	  else
	    {
	      if (j == 0)
		vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      else
		{
		  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
		  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
		}

	      mask_op = vec_mask;
	      if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
		{
		  gcc_assert
		    (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
			       TYPE_VECTOR_SUBPARTS (masktype)));
		  var = vect_get_new_ssa_name (masktype, vect_simple_var);
		  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
		  new_stmt
		    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  mask_op = var;
		}
	    }

	  new_stmt
	    = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
				 scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
				    TYPE_VECTOR_SUBPARTS (rettype)));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (vls_type != VLS_LOAD)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt,
						       mask_vectype);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = DR_TARGET_ALIGNMENT (dr);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign)
					     : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  ptr, vec_mask, vec_rhs);
	  gimple_call_set_nothrow (call, true);
	  new_stmt = call;
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt,
						       mask_vectype);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = DR_TARGET_ALIGNMENT (dr);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign)
					     : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (call, make_ssa_name (vec_dest));
	  gimple_call_set_nothrow (call, true);
	  vect_finish_stmt_generation (stmt, call, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
	  prev_stmt_info = vinfo_for_stmt (call);
	}
    }

  if (vls_type == VLS_LOAD)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
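/* Illustrative sketch, not part of GCC: scalar semantics of the
   IFN_MASK_STORE / IFN_MASK_LOAD calls built above.  The helper names are
   hypothetical, and zeroing the inactive load lanes is one concrete
   choice for illustration.  Only lanes with a set mask bit touch memory;
   inactive lanes are neither read nor written, which is what makes the
   transform safe for conditional accesses that could otherwise fault.  */

static inline void
example_mask_store (int *dst, const int *rhs, const unsigned char *mask,
		    int vf)
{
  for (int lane = 0; lane < vf; lane++)
    if (mask[lane])
      dst[lane] = rhs[lane];	/* inactive lanes left untouched.  */
}

static inline void
example_mask_load (int *dst, const int *src, const unsigned char *mask,
		   int vf)
{
  for (int lane = 0; lane < vf; lane++)
    dst[lane] = mask[lane] ? src[lane] : 0;	/* inactive lanes zeroed.  */
}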
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */

static bool
vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node,
		    tree vectype_in, enum vect_def_type *dt)
{
  tree op, vectype;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  unsigned ncopies;
  unsigned HOST_WIDE_INT nunits, num_bytes;

  op = gimple_call_arg (stmt, 0);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
  if (! char_vectype)
    return false;

  if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
    return false;

  unsigned word_bytes = num_bytes / nunits;

  /* The encoding uses one stepped pattern for each byte in the word.  */
  vec_perm_builder elts (num_bytes, word_bytes, 3);
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < word_bytes; ++j)
      elts.quick_push ((i + 1) * word_bytes - j - 1);

  vec_perm_indices indices (elts, 1, num_bytes);
  if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
    return false;

  if (! vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
			 "\n");
      if (! PURE_SLP_STMT (stmt_info))
	{
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 1, vector_stmt, stmt_info, 0, vect_prologue);
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 ncopies, vec_perm, stmt_info, 0, vect_body);
	}
      return true;
    }

  tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);

  /* Transform.  */
  vec<tree> vec_oprnds = vNULL;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  for (unsigned j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      unsigned i;
      tree vop;
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  tree tem = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       char_vectype, vop));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tree tem2 = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
					  tem, tem, bswap_vconst);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tem = make_ssa_name (vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       vectype, tem2));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
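/* Illustrative sketch, not part of GCC: the byte-permutation selector the
   bswap expansion above builds.  The helper name is hypothetical.  Byte j
   of word i is taken from byte (i + 1) * word_bytes - j - 1, i.e. each
   word is reversed in place; for a 16-byte vector of 4-byte words this
   yields { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }.  */

static inline void
example_bswap_selector (unsigned char *sel, unsigned int num_bytes,
			unsigned int word_bytes)
{
  for (unsigned int i = 0; i < num_bytes / word_bytes; i++)
    for (unsigned int j = 0; j < word_bytes; j++)
      sel[i * word_bytes + j] = (i + 1) * word_bytes - j - 1;
}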
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
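/* Illustrative sketch, not part of GCC: the effect of the single-step
   narrowing the function above tests for.  The helper name is
   hypothetical, and the operand order is a simplification (the real
   layout depends on the pack code and target endianness).  Two input
   vectors of VF wide elements are truncated and packed into one output
   vector of 2*VF narrow elements.  */

static inline void
example_pack_trunc (const int *a, const int *b, short *out, int vf)
{
  for (int i = 0; i < vf; i++)
    {
      out[i] = (short) a[i];		/* truncated first operand */
      out[vf + i] = (short) b[i];	/* truncated second operand */
    }
}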
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_in * 2, nunits_out))
    modifier = NARROW;
  else if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (known_eq (nunits_out * 2, nunits_in))
    modifier = WIDEN;
  else
    return false;

  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else if (callee)
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else if (modifier == NONE
	       && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
	return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
				   vectype_in, dt);
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
	add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
		       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (modifier == NARROW)
		    {
		      tree half_res = make_ssa_name (vectype_in);
		      gcall *call
			= gimple_build_call_internal_vec (ifn, vargs);
		      gimple_call_set_lhs (call, half_res);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      if ((i & 1) == 0)
			{
			  prev_res = half_res;
			  continue;
			}
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, convert_code,
						      prev_res, half_res);
		    }
		  else
		    {
		      gcall *call;
		      if (ifn != IFN_LAST)
			call = gimple_build_call_internal_vec (ifn, vargs);
		      else
			call = gimple_build_call_vec (fndecl, vargs);
		      new_temp = make_ssa_name (vec_dest, call);
		      gimple_call_set_lhs (call, new_temp);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		    }
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else if (modifier == NARROW)
	    {
	      tree half_res = make_ssa_name (vectype_in);
	      gcall *call = gimple_build_call_internal_vec (ifn, vargs);
	      gimple_call_set_lhs (call, half_res);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if ((j & 1) == 0)
		{
		  prev_res = half_res;
		  continue;
		}
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, convert_code,
					      prev_res, half_res);
	    }
	  else
	    {
	      gcall *call;
	      if (ifn != IFN_LAST)
		call = gimple_build_call_internal_vec (ifn, vargs);
	      else
		call = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (call, new_temp);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == (modifier == NARROW ? 1 : 0))
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  gcall *call;
		  if (ifn != IFN_LAST)
		    call = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    call = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, call);
		  gimple_call_set_lhs (call, new_temp);
		  gimple_call_set_nothrow (call, true);
		  new_stmt = call;
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }
	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
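/* Illustrative sketch, not part of GCC: how the NARROW/NONE/WIDEN modifier
   used throughout vectorizable_call falls out of the subpart counts.  The
   enum and helper names are hypothetical.  When the output vector has
   twice as many (narrower) elements as the input, two input vectors feed
   one call result (NARROW); when it has half as many (wider) elements,
   one input vector yields two results (WIDEN).  */

enum example_modifier { EXAMPLE_NARROW, EXAMPLE_NONE, EXAMPLE_WIDEN,
			EXAMPLE_UNSUPPORTED };

static inline enum example_modifier
example_classify_call (unsigned int nunits_in, unsigned int nunits_out)
{
  if (nunits_in * 2 == nunits_out)
    return EXAMPLE_NARROW;
  if (nunits_out == nunits_in)
    return EXAMPLE_NONE;
  if (nunits_out * 2 == nunits_in)
    return EXAMPLE_WIDEN;
  return EXAMPLE_UNSUPPORTED;	/* no current target implements this.  */
}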
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
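/* Illustrative sketch, not part of GCC: what "linear within simd lane"
   means for the pointer recognized above.  The helper name is
   hypothetical.  Within one vector iteration the value advances by STEP
   per lane starting from BASE, but it does not keep growing across
   iterations the way a loop-wide induction would; each iteration restarts
   from that iteration's BASE.  */

static inline void
example_simd_lane_linear (char *base, long step, char **lane_ptrs, int vf)
{
  for (int lane = 0; lane < vf; lane++)
    lane_ptrs[lane] = base + lane * step;	/* per-lane offsets only.  */
}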
/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these vectors always have a constant
   length.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  auto_vec<simd_call_arg_info> arginfo;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node)
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.reserve (nargs, true);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned HOST_WIDE_INT vf;
  if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not considering SIMD clones; not yet supported"
			 " for variable-width vectors.\n");
      return false;
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen > vf
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen < vf)
	  this_badness += (exact_log2 (vf)
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    return false;

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (simd_clone_subparts (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  return false;
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
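/* Illustrative sketch, not part of GCC: the clone-selection scoring used
   in the loop above, smaller being better.  The helper names are
   hypothetical and example_log2 stands in for exact_log2.  A clone whose
   simdlen is below VF is penalized per halving, an inbranch (masked)
   clone costs a flat 2048, and the target's own dislike is scaled by
   512.  */

static inline unsigned int
example_log2 (unsigned int x)
{
  unsigned int r = 0;
  while (x >>= 1)
    r++;
  return r;
}

static inline unsigned int
example_clone_badness (unsigned int vf, unsigned int simdlen,
		       int inbranch, unsigned int target_badness)
{
  unsigned int badness = 0;
  if (simdlen < vf)
    badness += (example_log2 (vf) - example_log2 (simdlen)) * 1024;
  if (inbranch)
    badness += 2048;
  badness += target_badness * 512;
  return badness;
}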
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if ((bestn->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	    || (bestn->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / simd_clone_subparts (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (simd_clone_subparts (atype)
		      < simd_clone_subparts (arginfo[i].vectype))
		    {
		      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (simd_clone_subparts (arginfo[i].vectype)
			   / simd_clone_subparts (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  bitsize_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (simd_clone_subparts (atype)
			   / simd_clone_subparts (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (simd_clone_subparts (vectype)
		   == simd_clone_subparts (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (simd_clone_subparts (vectype) < nunits)
	    {
	      unsigned int k, l;
	      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
	      k = nunits / simd_clone_subparts (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t), l * bytes));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				bitsize_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (simd_clone_subparts (vectype) > nunits)
	    {
	      unsigned int k = (simd_clone_subparts (vectype)
				/ simd_clone_subparts (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / simd_clone_subparts (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
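/* Illustrative sketch, not part of GCC: the two halves produced by
   consecutive calls to vect_gen_widened_results_half for a widening
   unary conversion.  The helper name is hypothetical.  Widening doubles
   the element width, so one input vector of VF narrow elements fills two
   output vectors of VF/2 wide elements each.  */

static inline void
example_widen_halves (const short *in, int *lo, int *hi, int vf)
{
  for (int i = 0; i < vf / 2; i++)
    {
      lo[i] = (int) in[i];		/* first half of the lanes */
      hi[i] = (int) in[vf / 2 + i];	/* second half of the lanes */
    }
}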
3941 /* Get vectorized definitions for loop-based vectorization. For the first
3942 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3943 scalar operand), and for the rest we get a copy with
3944 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3945 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3946 The vectors are collected into VEC_OPRNDS. */
static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
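/* For illustration only (a hedged sketch in plain C, hypothetical
   arrays): a two-step demotion from int to char first packs pairs of
   V4SI operands into V8HI vectors and then pairs of those into V16QI,
   which is what the recursion below does with VEC_PACK_TRUNC_EXPR:

     int in[16];
     char out[16];
     for (int i = 0; i < 16; i++)
       out[i] = (char) in[i];	// int -> short -> char, two pack levels  */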
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i / 2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i + 1) / 2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
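/* For illustration only: promotion is the mirror image of demotion;
   each input vector yields two output vectors (a low and a high half
   from vect_gen_widened_results_half), so VEC_TMP below is created
   with twice the length of VEC_OPRNDS0.  E.g. one V16QI operand
   widened to short becomes a pair of V8HI vectors.  */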
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
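/* For illustration only: the NONE/WIDEN/NARROW modifier computed in
   the analysis below depends on the element counts of the two
   vectypes.  With 128-bit vectors: V4SI -> V4SF keeps nunits equal
   (NONE); V8HI -> V4SI has more input than output elements per
   vector, so each input produces two vector results (WIDEN);
   V4SI -> V8HI packs two inputs into one result (NARROW).  */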
static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && !type_has_mode_precision_p (lhs_type))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (multiple_p (nunits_out, nunits_in))
    modifier = NARROW;
  else
    {
      gcc_checking_assert (multiple_p (nunits_in, nunits_out));
      modifier = WIDEN;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
	{
	  rhs_mode = rhs_mode_iter.require ();
	  if (GET_MODE_SIZE (rhs_mode) > fltsz)
	    break;

	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    {
	      found_mode = true;
	      break;
	    }
	}

      if (!found_mode)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }
  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
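  /* For illustration only: for a two-step NARROW sequence such as
     int -> short -> char, VEC_DSTS is filled so that popping yields
     the intermediate destination first; the first pack uses the V8HI
     destination and the final pack the V16QI one, matching the pop
     order in vect_create_vectorized_demotion_stmts ().  */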
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  int ndts = 1;
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
	  || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
		       GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
	  || !type_has_mode_precision_p (TREE_TYPE (op)))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
	 a simple assignment in case their vectypes are same
	 boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
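/* For illustration only (a hedged sketch, hypothetical arrays): a
   loop-invariant shift amount can stay scalar if the target has a
   vector/scalar shift pattern, while a per-element amount needs the
   vector/vector optab; the analysis below distinguishes the two:

     for (i = 0; i < n; i++)
       out[i] = in[i] << amount;	// invariant: vector/scalar ok
     for (i = 0; i < n; i++)
       out[i] = in[i] << amt[i];	// varying: vector/vector needed  */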
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}

      /* If the shift amount is computed by a pattern stmt we cannot
	 use the scalar amount directly thus give up and use a vector
	 shift.  */
      if (dt[1] == vect_internal_def)
	{
	  gimple *def = SSA_NAME_DEF_STMT (op1);
	  if (is_pattern_stmt_p (vinfo_for_stmt (def)))
	    scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");
	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt
	      && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!vec_stmt
      && !VECTOR_MODE_P (TYPE_MODE (vectype))
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, orig_code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  orig_code = code = gimple_assign_rhs_code (stmt);

  /* For pointer addition and subtraction, we should use the normal
     plus and minus for the vector operation.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;
  if (code == POINTER_DIFF_EXPR)
    code = MINUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
	{
	  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
     vectors with unsigned elements, but the result is signed.  So, we
     need to compute the MINUS_EXPR into vectype temporary and
     VIEW_CONVERT_EXPR it into the final vectype_out result.  */
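  /* For illustration only: for a scalar statement like

       ptrdiff_t d = p - q;	// POINTER_DIFF_EXPR on two char *

     the subtraction below is emitted on the unsigned element vectors
     and the bits are then reinterpreted as the signed result type,
     which is why a VIEW_CONVERT_EXPR rather than a conversion is
     enough.  */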
  tree vec_cvt_dest = NULL_TREE;
  if (orig_code == POINTER_DIFF_EXPR)
    vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  if (op_type == ternary_op)
	    vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
			       slp_node);
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (vec_cvt_dest)
	    {
	      new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
	      new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
					      new_temp);
	      new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment.  */

static void
ensure_base_align (struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (align_base_to);
      else
	{
	  SET_DECL_ALIGN (base_decl, align_base_to);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */
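/* For illustration only: if the interleaved references disagree,
   e.g. one group member stores through an int * and another through
   a float * into overlapping storage, the function falls back to
   ptr_type_node (alias set zero); otherwise the alias pointer type
   of the first reference is valid for the whole group.  */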
static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
	  != get_alias_set (DR_REF (next_dr)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "conflicting alias set types.\n");
	  return ptr_type_node;
	}
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
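/* For illustration only (a hedged sketch, hypothetical arrays): a
   grouped store with group size 2 such as

     for (i = 0; i < n; i++)
       {
	 a[2*i] = x[i];		// first group element
	 a[2*i + 1] = y[i];	// second group element
       }

   is vectorized once, when the last statement of the group is
   reached; the rhs vectors are interleaved and written back with
   wide vector stores.  */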
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree scalar_dest;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt;
  bool grouped_store;
  unsigned int group_size, i;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt;
  poly_uint64 vf;
  tree ref_type;
  gather_scatter_info gs_info;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  vec_load_store_type vls_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  /* Cannot have hybrid store SLP -- that would mean storing to the
     same location twice.  */
  gcc_assert (slp == PURE_SLP_STMT (stmt_info));

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);

  /* In the case this is a store from a constant make sure
     native_encode_expr can handle it.  */
  if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
    return false;

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    vls_type = VLS_STORE_INVARIANT;
  else
    vls_type = VLS_STORE;

  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
			       NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  /* Transform.  */

  ensure_base_align (dr);
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 scatter_off_nunits
	= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (known_eq (nunits, scatter_off_nunits))
	modifier = NONE;
      else if (known_eq (nunits * 2, scatter_off_nunits))
	{
	  modifier = WIDEN;

	  /* Currently gathers and scatters are only supported for
	     fixed-length vectors.  */
	  unsigned int count = scatter_off_nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < (unsigned int) count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 1, count);
	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
						  indices);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (known_eq (nunits, scatter_off_nunits * 2))
	{
	  modifier = NARROW;

	  /* Currently gathers and scatters are only supported for
	     fixed-length vectors.  */
	  unsigned int count = nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < (unsigned int) count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 2, count);
	  perm_mask = vect_gen_perm_mask_checked (vectype, indices);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt),
						stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (gs_info.offset, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt,
						      vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1,
					      perm_mask, stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
						      vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
						  vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
				    TYPE_VECTOR_SUBPARTS (srctype)));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
				    TYPE_VECTOR_SUBPARTS (idxtype)));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
  if (grouped_store)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     SLP node.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt))
		      == first_stmt);
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;
      /* Checked by get_load_store_type.  */
      unsigned int const_nunits = nunits.to_constant ();

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr
						  (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
       */
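      /* A minimal scalar sketch of that transform, assuming a hypothetical
	 vector of four doubles; the names below are purely illustrative
	 and not part of GCC:

	   void
	   strided_store_step (double *array, const double vec[4],
			       unsigned long j, unsigned long stride)
	   {
	     for (unsigned i = 0; i < 4; i++)
	       array[j + i * stride] = vec[i];	// one store per lane
	   }

	 Each vectorized iteration performs NSTORES such lane stores,
	 advancing the running pointer by STRIDE_STEP between groups.  */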
      unsigned nstores = const_nunits;
      unsigned lnel = 1;
      tree ltype = elem_type;
      tree lvectype = vectype;
      if (slp)
	{
	  if (group_size < const_nunits
	      && const_nunits % group_size == 0)
	    {
	      nstores = const_nunits / group_size;
	      lnel = group_size;
	      ltype = build_vector_type (elem_type, group_size);
	      lvectype = vectype;

	      /* First check if vec_extract optab doesn't support extraction
		 of vector elts directly.  */
	      scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
	      machine_mode vmode;
	      if (!mode_for_vector (elmode, group_size).exists (&vmode)
		  || !VECTOR_MODE_P (vmode)
		  || (convert_optab_handler (vec_extract_optab,
					     TYPE_MODE (vectype), vmode)
		      == CODE_FOR_nothing))
		{
		  /* Try to avoid emitting an extract of vector elements
		     by performing the extracts using an integer type of the
		     same size, extracting from a vector of those and then
		     re-interpreting it as the original vector type if
		     supported.  */
		  unsigned lsize
		    = group_size * GET_MODE_BITSIZE (elmode);
		  elmode = int_mode_for_size (lsize, 0).require ();
		  unsigned int lnunits = const_nunits / group_size;
		  /* If we can't construct such a vector fall back to
		     element extracts from the original vector type and
		     element size stores.  */
		  if (mode_for_vector (elmode, lnunits).exists (&vmode)
		      && VECTOR_MODE_P (vmode)
		      && (convert_optab_handler (vec_extract_optab,
						 vmode, elmode)
			  != CODE_FOR_nothing))
		    {
		      nstores = lnunits;
		      lnel = group_size;
		      ltype = build_nonstandard_integer_type (lsize, 1);
		      lvectype = build_vector_type (ltype, nstores);
		    }
		  /* Else fall back to vector extraction anyway.
		     Fewer stores are more important than avoiding spilling
		     of the vector we extract from.  Compared to the
		     construction case in vectorizable_load no store-forwarding
		     issue exists here for reasonable archs.  */
		}
	    }
	  else if (group_size >= const_nunits
		   && group_size % const_nunits == 0)
	    {
	      nstores = 1;
	      lnel = const_nunits;
	      ltype = vectype;
	      lvectype = vectype;
	    }
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	}
      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (ref_type, 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  unsigned int group_el = 0;
	  unsigned HOST_WIDE_INT
	    elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt,
								  vec_oprnd);
		    }
		}
	      /* Pun the vector to extract from if necessary.  */
	      if (lvectype != vectype)
		{
		  tree tem = make_ssa_name (lvectype);
		  gimple *pun
		    = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
							lvectype, vec_oprnd));
		  vect_finish_stmt_generation (stmt, pun, gsi);
		  vec_oprnd = tem;
		}
	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  tree this_off = build_int_cst (TREE_TYPE (alias_off),
						 group_el * elsz);
		  newref = build2 (MEM_REF, ltype,
				   running_off, this_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  group_el += lnel;
		  if (! slp
		      || group_el == group_size)
		    {
		      newoff = copy_ssa_name (running_off, NULL);
		      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						  running_off, stride_step);
		      vect_finish_stmt_generation (stmt, incr, gsi);

		      running_off = newoff;
		      group_el = 0;
		    }
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	  if (slp)
	    break;
	}

      vec_oprnds.release ();
      return true;
    }
  auto_vec<tree> dr_chain (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (memory_access_type == VMAT_CONTIGUOUS_DOWN
      || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

	S1:  &base + 2 = x2
	S2:  &base = x0
	S3:  &base + 1 = x1
	S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

	VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
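  /* A small illustrative sketch (not part of GCC) of the index vectors
     used by the two VEC_PERM_EXPRs above.  Interleaving the lanes of two
     8-element vectors A (lanes 0..7) and B (lanes 8..15) selects

       void
       interleave_perm_indices (unsigned nelts, unsigned char *lo,
				unsigned char *hi)
       {
	 for (unsigned i = 0; i < nelts / 2; i++)
	   {
	     lo[2 * i] = i;			// {0, 8, 1, 9, ...}
	     lo[2 * i + 1] = nelts + i;
	     hi[2 * i] = nelts / 2 + i;		// {4, 12, 5, 13, ...}
	     hi[2 * i + 1] = nelts + nelts / 2 + i;
	   }
       }

     which for nelts == 8 reproduces the two masks shown in the example.  */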
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
				 NULL, slp_node);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (TREE_TYPE (ref_type))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (ref_type, 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (memory_access_type == VMAT_LOAD_STORE_LANES)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
	  gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
						    vec_array);
	  gimple_call_set_lhs (call, data_ref);
	  gimple_call_set_nothrow (call, true);
	  new_stmt = call;
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, vectype,
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (ref_type, 0));
	      align = DR_TARGET_ALIGNMENT (first_dr);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  align = dr_alignment (vect_dr_behavior (first_dr));
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_const_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
{
  tree mask_type;

  poly_uint64 nunits = sel.length ();
  gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));

  mask_type = build_vector_type (ssizetype, nunits);
  return vec_perm_indices_to_tree (mask_type, sel);
}

/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_const_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
{
  gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
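
/* Illustrative sketch only (this helper is an assumption for exposition,
   not part of GCC): a typical caller of the two functions above fills a
   vec_perm_builder, wraps it in vec_perm_indices, checks target support
   and only then asks for the mask tree.  Shown here for an "even elements
   of two input vectors" selection on a fixed-width VECTYPE.  */

static tree
even_elements_perm_mask_sketch (tree vectype)
{
  unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
  vec_perm_builder sel (nelt, nelt, 1);
  for (unsigned int i = 0; i < nelt; i++)
    /* Select lanes 0, 2, 4, ... from the concatenation of the two
       input vectors.  */
    sel.quick_push (i * 2);

  vec_perm_indices indices (sel, 2, nelt);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}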
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
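
/* A self-contained illustration (an assumed example, not part of GCC) of
   what hoisting the definitions used by an invariant load enables: the
   first function re-reads *P on every iteration, the second performs the
   single preheader load the vectorizer creates when hoist_defs_of_uses
   succeeds.  */

static long
invariant_load_not_hoisted (const long *a, const long *p, unsigned n)
{
  long s = 0;
  for (unsigned i = 0; i < n; i++)
    s += a[i] * *p;		/* invariant load inside the loop */
  return s;
}

static long
invariant_load_hoisted (const long *a, const long *p, unsigned n)
{
  long inv = *p;		/* single load on the preheader edge */
  long s = 0;
  for (unsigned i = 0; i < n; i++)
    s += a[i] * inv;
  return s;
}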
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j;
  unsigned int group_size;
  poly_uint64 group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  poly_uint64 vf;
  tree aggr_type;
  gather_scatter_info gs_info;
  vec_info *vinfo = stmt_info->vinfo;
  tree ref_type;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
		   STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot perform implicit CSE when unrolling "
			 "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
    (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Aligned load, but unsupported type.\n");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop);
      gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	slp_perm = true;

      /* Invalidate assumptions made by dependence analysis when vectorization
	 on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
	  && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
	  && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
		       STMT_VINFO_MIN_NEG_DIST (stmt_info)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "cannot perform implicit CSE when performing "
			     "group loads with negative dependence distance\n");
	  return false;
	}

      /* Similarly when the stmt is a load that is both part of a SLP
	 instance and a loop vectorized stmt via the same-dr mechanism
	 we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
	  && (STMT_SLP_TYPE (stmt_info)
	      != STMT_SLP_TYPE (vinfo_for_stmt
				 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "conflicting SLP types for CSEd load\n");
	  return false;
	}
    }

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      if (!slp)
	STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			      NULL, NULL);
      return true;
    }

  if (!slp)
    gcc_assert (memory_access_type
		== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform load. ncopies = %d\n", ncopies);

  ensure_base_align (dr);
6908 if (memory_access_type
== VMAT_GATHER_SCATTER
)
6910 tree vec_oprnd0
= NULL_TREE
, op
;
6911 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
6912 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6913 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
6914 edge pe
= loop_preheader_edge (loop
);
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 gather_off_nunits
	= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (known_eq (nunits, gather_off_nunits))
	modifier = NONE;
      else if (known_eq (nunits * 2, gather_off_nunits))
	{
	  modifier = WIDEN;

	  /* Currently widening gathers are only supported for
	     fixed-length vectors.  */
	  int count = gather_off_nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 1, count);
	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
						  indices);
	}
      else if (known_eq (nunits, gather_off_nunits * 2))
	{
	  modifier = NARROW;

	  /* Currently narrowing gathers are only supported for
	     fixed-length vectors.  */
	  int count = nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < count; ++i)
	    sel.quick_push (i < count / 2 ? i : i + count / 2);

	  vec_perm_indices indices (sel, 2, count);
	  perm_mask = vect_gen_perm_mask_checked (vectype, indices);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
6956 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
6957 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6958 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6959 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6960 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6961 scaletype
= TREE_VALUE (arglist
);
6962 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
6964 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6966 ptr
= fold_convert (ptrtype
, gs_info
.base
);
6967 if (!is_gimple_min_invariant (ptr
))
6969 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6970 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6971 gcc_assert (!new_bb
);
6974 /* Currently we support only unconditional gather loads,
6975 so mask should be all ones. */
6976 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
6977 mask
= build_int_cst (masktype
, -1);
6978 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
6980 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
6981 mask
= build_vector_from_val (masktype
, mask
);
6982 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6984 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
6988 for (j
= 0; j
< 6; ++j
)
6990 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
6991 mask
= build_real (TREE_TYPE (masktype
), r
);
6992 mask
= build_vector_from_val (masktype
, mask
);
6993 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6998 scale
= build_int_cst (scaletype
, gs_info
.scale
);
7000 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
7001 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
7002 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
7006 for (j
= 0; j
< 6; ++j
)
7008 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
7009 merge
= build_real (TREE_TYPE (rettype
), r
);
7013 merge
= build_vector_from_val (rettype
, merge
);
7014 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
7016 prev_stmt_info
= NULL
;
7017 for (j
= 0; j
< ncopies
; ++j
)
7019 if (modifier
== WIDEN
&& (j
& 1))
7020 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
7021 perm_mask
, stmt
, gsi
);
7024 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
7027 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
, vec_oprnd0
);
7029 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
7031 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
7032 TYPE_VECTOR_SUBPARTS (idxtype
)));
7033 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
7034 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
7036 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
7037 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7042 = gimple_build_call (gs_info
.decl
, 5, merge
, ptr
, op
, mask
, scale
);
7044 if (!useless_type_conversion_p (vectype
, rettype
))
7046 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype
),
7047 TYPE_VECTOR_SUBPARTS (rettype
)));
7048 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
7049 gimple_call_set_lhs (new_stmt
, op
);
7050 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7051 var
= make_ssa_name (vec_dest
);
7052 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
7054 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
7058 var
= make_ssa_name (vec_dest
, new_stmt
);
7059 gimple_call_set_lhs (new_stmt
, var
);
7062 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7064 if (modifier
== NARROW
)
7071 var
= permute_vec_elements (prev_res
, var
,
7072 perm_mask
, stmt
, gsi
);
7073 new_stmt
= SSA_NAME_DEF_STMT (var
);
7076 if (prev_stmt_info
== NULL
)
7077 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7079 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7080 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7085 if (memory_access_type
== VMAT_ELEMENTWISE
7086 || memory_access_type
== VMAT_STRIDED_SLP
)
7088 gimple_stmt_iterator incr_gsi
;
7094 vec
<constructor_elt
, va_gc
> *v
= NULL
;
7095 gimple_seq stmts
= NULL
;
7096 tree stride_base
, stride_step
, alias_off
;
7097 /* Checked by get_load_store_type. */
7098 unsigned int const_nunits
= nunits
.to_constant ();
7100 gcc_assert (!nested_in_vect_loop
);
7102 if (slp
&& grouped_load
)
7104 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7105 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7106 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7107 ref_type
= get_group_alias_ptr_type (first_stmt
);
7114 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7118 = fold_build_pointer_plus
7119 (DR_BASE_ADDRESS (first_dr
),
7120 size_binop (PLUS_EXPR
,
7121 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
7122 convert_to_ptrofftype (DR_INIT (first_dr
))));
7123 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
7125 /* For a load with loop-invariant (but other than power-of-2)
7126 stride (i.e. not a grouped access) like so:
7128 for (i = 0; i < n; i += stride)
7131 we generate a new induction variable and new accesses to
7132 form a new vector (or vectors, depending on ncopies):
7134 for (j = 0; ; j += VF*stride)
7136 tmp2 = array[j + stride];
7138 vectemp = {tmp1, tmp2, ...}
7141 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
7142 build_int_cst (TREE_TYPE (stride_step
), vf
));
7144 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
7146 create_iv (unshare_expr (stride_base
), unshare_expr (ivstep
), NULL
,
7147 loop
, &incr_gsi
, insert_after
,
7149 incr
= gsi_stmt (incr_gsi
);
7150 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
7152 stride_step
= force_gimple_operand (unshare_expr (stride_step
),
7153 &stmts
, true, NULL_TREE
);
7155 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
7157 prev_stmt_info
= NULL
;
7158 running_off
= offvar
;
7159 alias_off
= build_int_cst (ref_type
, 0);
7160 int nloads
= const_nunits
;
7162 tree ltype
= TREE_TYPE (vectype
);
7163 tree lvectype
= vectype
;
7164 auto_vec
<tree
> dr_chain
;
7165 if (memory_access_type
== VMAT_STRIDED_SLP
)
7167 if (group_size
< const_nunits
)
7169 /* First check if vec_init optab supports construction from
7170 vector elts directly. */
7171 scalar_mode elmode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype
));
7173 if (mode_for_vector (elmode
, group_size
).exists (&vmode
)
7174 && VECTOR_MODE_P (vmode
)
7175 && (convert_optab_handler (vec_init_optab
,
7176 TYPE_MODE (vectype
), vmode
)
7177 != CODE_FOR_nothing
))
7179 nloads
= const_nunits
/ group_size
;
7181 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
7185 /* Otherwise avoid emitting a constructor of vector elements
7186 by performing the loads using an integer type of the same
7187 size, constructing a vector of those and then
7188 re-interpreting it as the original vector type.
7189 This avoids a huge runtime penalty due to the general
7190 inability to perform store forwarding from smaller stores
7191 to a larger load. */
7193 = group_size
* TYPE_PRECISION (TREE_TYPE (vectype
));
7194 elmode
= int_mode_for_size (lsize
, 0).require ();
7195 unsigned int lnunits
= const_nunits
/ group_size
;
7196 /* If we can't construct such a vector fall back to
7197 element loads of the original vector type. */
7198 if (mode_for_vector (elmode
, lnunits
).exists (&vmode
)
7199 && VECTOR_MODE_P (vmode
)
7200 && (convert_optab_handler (vec_init_optab
, vmode
, elmode
)
7201 != CODE_FOR_nothing
))
7205 ltype
= build_nonstandard_integer_type (lsize
, 1);
7206 lvectype
= build_vector_type (ltype
, nloads
);
7213 lnel
= const_nunits
;
7216 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
7220 /* For SLP permutation support we need to load the whole group,
7221 not only the number of vector stmts the permutation result
7225 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7227 unsigned int const_vf
= vf
.to_constant ();
7228 ncopies
= CEIL (group_size
* const_vf
, const_nunits
);
7229 dr_chain
.create (ncopies
);
7232 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7234 unsigned int group_el
= 0;
7235 unsigned HOST_WIDE_INT
7236 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
7237 for (j
= 0; j
< ncopies
; j
++)
7240 vec_alloc (v
, nloads
);
7241 for (i
= 0; i
< nloads
; i
++)
7243 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
7245 new_stmt
= gimple_build_assign (make_ssa_name (ltype
),
7246 build2 (MEM_REF
, ltype
,
7247 running_off
, this_off
));
7248 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7250 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
,
7251 gimple_assign_lhs (new_stmt
));
7255 || group_el
== group_size
)
7257 tree newoff
= copy_ssa_name (running_off
);
7258 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
7259 running_off
, stride_step
);
7260 vect_finish_stmt_generation (stmt
, incr
, gsi
);
7262 running_off
= newoff
;
7268 tree vec_inv
= build_constructor (lvectype
, v
);
7269 new_temp
= vect_init_vector (stmt
, vec_inv
, lvectype
, gsi
);
7270 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7271 if (lvectype
!= vectype
)
7273 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7275 build1 (VIEW_CONVERT_EXPR
,
7276 vectype
, new_temp
));
7277 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7284 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
7286 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7291 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7293 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7294 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7300 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7301 slp_node_instance
, false, &n_perms
);
7308 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7309 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7310 /* For SLP vectorization we directly vectorize a subchain
7311 without permutation. */
7312 if (slp
&& ! SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7313 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7314 /* For BB vectorization always use the first stmt to base
7315 the data ref pointer on. */
7317 first_stmt_for_drptr
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
7334 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7337 /* VEC_NUM is the number of vect stmts to be created for this group. */
7340 grouped_load
= false;
7341 /* For SLP permutation support we need to load the whole group,
7342 not only the number of vector stmts the permutation result
7346 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7348 unsigned int const_vf
= vf
.to_constant ();
7349 unsigned int const_nunits
= nunits
.to_constant ();
7350 vec_num
= CEIL (group_size
* const_vf
, const_nunits
);
7351 group_gap_adj
= vf
* group_size
- nunits
* vec_num
;
7355 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7357 = group_size
- SLP_INSTANCE_GROUP_SIZE (slp_node_instance
);
7361 vec_num
= group_size
;
7363 ref_type
= get_group_alias_ptr_type (first_stmt
);
7369 group_size
= vec_num
= 1;
7371 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
				RELATED_STMT	VEC_STMT
	S1:	x = memref	-		-
	S2:	z = x + 1	-		-

     step 1: vectorize stmt S1:
	We first create the vector stmt VS1_0, and, as usual, record a
	pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
	Next, we create the vector stmt VS1_1, and record a pointer to
	it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
	Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
	stmts and pointers:
				RELATED_STMT	VEC_STMT
	VS1_0:	vx0 = memref0	VS1_1		-
	VS1_1:	vx1 = memref1	VS1_2		-
	VS1_2:	vx2 = memref2	VS1_3		-
	VS1_3:	vx3 = memref3	-		-
	S1:	x = load	-		VS1_0
	S2:	z = x + 1	-		-

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:	x2 = &base + 2
     S2:	x0 = &base
     S3:	x1 = &base + 1
     S4:	x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
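  /* Illustrative sketch (not part of GCC) of the even/odd extraction
     masks in the example above:

       void
       extract_even_odd_indices (unsigned nelts, unsigned char *even,
				 unsigned char *odd)
       {
	 for (unsigned i = 0; i < nelts; i++)
	   {
	     even[i] = 2 * i;		// { 0, 2, ..., i*2 }
	     odd[i] = 2 * i + 1;	// { 1, 3, ..., i*2+1 }
	   }
       }  */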
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
	 p = p + indx * vectype_size;
	 vec_dest = *(p);
	 indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
	 p2 = p2 + indx * vectype_size
	 lsq = *(floor(p2))
	 vec_dest = realign_load (msq, lsq, realignment_token)
	 indx = indx + 1;
	 msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
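  /* A byte-level sketch of the realignment scheme above (an assumed
     illustration, not part of GCC), for 16-byte vectors:

       void
       realign_load_sketch (const unsigned char *p, unsigned char out[16])
       {
	 const unsigned char *p1
	   = (const unsigned char *) ((uintptr_t) p & -(uintptr_t) 16);
	 unsigned off = p - p1;
	 unsigned char msq[16], lsq[16];
	 // Two aligned loads straddling the unaligned address; like the
	 // real scheme this over-reads up to VS - 1 bytes.
	 __builtin_memcpy (msq, p1, 16);
	 __builtin_memcpy (lsq, p1 + 16, 16);
	 // Concatenate and extract the 16 bytes starting at OFF.
	 for (unsigned i = 0; i < 16; i++)
	   out[i] = i + off < 16 ? msq[i + off] : lsq[i + off - 16];
       }  */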
7485 if (nested_in_vect_loop
7486 && !multiple_p (DR_STEP_ALIGNMENT (dr
),
7487 GET_MODE_SIZE (TYPE_MODE (vectype
))))
7489 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
7490 compute_in_loop
= true;
7493 if ((alignment_support_scheme
== dr_explicit_realign_optimized
7494 || alignment_support_scheme
== dr_explicit_realign
)
7495 && !compute_in_loop
)
7497 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
7498 alignment_support_scheme
, NULL_TREE
,
7500 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7502 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
7503 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
7510 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7511 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
7513 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7514 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
7516 aggr_type
= vectype
;
7518 prev_stmt_info
= NULL
;
7519 poly_uint64 group_elt
= 0;
7520 for (j
= 0; j
< ncopies
; j
++)
7522 /* 1. Create the vector or array pointer update chain. */
7525 bool simd_lane_access_p
7526 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
7527 if (simd_lane_access_p
7528 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
7529 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
7530 && integer_zerop (DR_OFFSET (first_dr
))
7531 && integer_zerop (DR_INIT (first_dr
))
7532 && alias_sets_conflict_p (get_alias_set (aggr_type
),
7533 get_alias_set (TREE_TYPE (ref_type
)))
7534 && (alignment_support_scheme
== dr_aligned
7535 || alignment_support_scheme
== dr_unaligned_supported
))
7537 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
7538 dataref_offset
= build_int_cst (ref_type
, 0);
7541 else if (first_stmt_for_drptr
7542 && first_stmt
!= first_stmt_for_drptr
)
7545 = vect_create_data_ref_ptr (first_stmt_for_drptr
, aggr_type
,
7546 at_loop
, offset
, &dummy
, gsi
,
7547 &ptr_incr
, simd_lane_access_p
,
7548 &inv_p
, byte_offset
);
7549 /* Adjust the pointer by the difference to first_stmt. */
7550 data_reference_p ptrdr
7551 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr
));
7552 tree diff
= fold_convert (sizetype
,
7553 size_binop (MINUS_EXPR
,
7556 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7561 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
7562 offset
, &dummy
, gsi
, &ptr_incr
,
7563 simd_lane_access_p
, &inv_p
,
7566 else if (dataref_offset
)
7567 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
7568 TYPE_SIZE_UNIT (aggr_type
));
7570 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
7571 TYPE_SIZE_UNIT (aggr_type
));
7573 if (grouped_load
|| slp_perm
)
7574 dr_chain
.create (vec_num
);
7576 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7580 vec_array
= create_vector_array (vectype
, vec_num
);
7583 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7584 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
7585 gcall
*call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1,
7587 gimple_call_set_lhs (call
, vec_array
);
7588 gimple_call_set_nothrow (call
, true);
7590 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7592 /* Extract each vector into an SSA_NAME. */
7593 for (i
= 0; i
< vec_num
; i
++)
7595 new_temp
= read_vector_array (stmt
, gsi
, scalar_dest
,
7597 dr_chain
.quick_push (new_temp
);
7600 /* Record the mapping between SSA_NAMEs and statements. */
7601 vect_record_grouped_load_vectors (stmt
, dr_chain
);
7605 for (i
= 0; i
< vec_num
; i
++)
7608 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7611 /* 2. Create the vector-load in the loop. */
7612 switch (alignment_support_scheme
)
7615 case dr_unaligned_supported
:
7617 unsigned int align
, misalign
;
7620 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
7623 : build_int_cst (ref_type
, 0));
7624 align
= DR_TARGET_ALIGNMENT (dr
);
7625 if (alignment_support_scheme
== dr_aligned
)
7627 gcc_assert (aligned_access_p (first_dr
));
7630 else if (DR_MISALIGNMENT (first_dr
) == -1)
7632 align
= dr_alignment (vect_dr_behavior (first_dr
));
7634 TREE_TYPE (data_ref
)
7635 = build_aligned_type (TREE_TYPE (data_ref
),
7636 align
* BITS_PER_UNIT
);
7640 TREE_TYPE (data_ref
)
7641 = build_aligned_type (TREE_TYPE (data_ref
),
7642 TYPE_ALIGN (elem_type
));
7643 misalign
= DR_MISALIGNMENT (first_dr
);
7645 if (dataref_offset
== NULL_TREE
7646 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
7647 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
7651 case dr_explicit_realign
:
7655 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
7657 if (compute_in_loop
)
7658 msq
= vect_setup_realignment (first_stmt
, gsi
,
7660 dr_explicit_realign
,
7663 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7664 ptr
= copy_ssa_name (dataref_ptr
);
7666 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7667 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
7668 new_stmt
= gimple_build_assign
7669 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
7671 (TREE_TYPE (dataref_ptr
),
7672 -(HOST_WIDE_INT
) align
));
7673 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7675 = build2 (MEM_REF
, vectype
, ptr
,
7676 build_int_cst (ref_type
, 0));
7677 vec_dest
= vect_create_destination_var (scalar_dest
,
7679 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7680 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7681 gimple_assign_set_lhs (new_stmt
, new_temp
);
7682 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
7683 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
7684 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7687 bump
= size_binop (MULT_EXPR
, vs
,
7688 TYPE_SIZE_UNIT (elem_type
));
7689 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
7690 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
7691 new_stmt
= gimple_build_assign
7692 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
7694 (TREE_TYPE (ptr
), -(HOST_WIDE_INT
) align
));
7695 ptr
= copy_ssa_name (ptr
, new_stmt
);
7696 gimple_assign_set_lhs (new_stmt
, ptr
);
7697 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7699 = build2 (MEM_REF
, vectype
, ptr
,
7700 build_int_cst (ref_type
, 0));
7703 case dr_explicit_realign_optimized
:
7705 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7706 new_temp
= copy_ssa_name (dataref_ptr
);
7708 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7709 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
7710 new_stmt
= gimple_build_assign
7711 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
7712 build_int_cst (TREE_TYPE (dataref_ptr
),
7713 -(HOST_WIDE_INT
) align
));
7714 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7716 = build2 (MEM_REF
, vectype
, new_temp
,
7717 build_int_cst (ref_type
, 0));
7723 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7724 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7725 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7726 gimple_assign_set_lhs (new_stmt
, new_temp
);
7727 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7729 /* 3. Handle explicit realignment if necessary/supported.
7731 vec_dest = realign_load (msq, lsq, realignment_token) */
7732 if (alignment_support_scheme
== dr_explicit_realign_optimized
7733 || alignment_support_scheme
== dr_explicit_realign
)
7735 lsq
= gimple_assign_lhs (new_stmt
);
7736 if (!realignment_token
)
7737 realignment_token
= dataref_ptr
;
7738 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7739 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
7740 msq
, lsq
, realignment_token
);
7741 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7742 gimple_assign_set_lhs (new_stmt
, new_temp
);
7743 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7745 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7748 if (i
== vec_num
- 1 && j
== ncopies
- 1)
7749 add_phi_arg (phi
, lsq
,
7750 loop_latch_edge (containing_loop
),
7756 /* 4. Handle invariant-load. */
7757 if (inv_p
&& !bb_vinfo
)
7759 gcc_assert (!grouped_load
);
7760 /* If we have versioned for aliasing or the loop doesn't
7761 have any data dependencies that would preclude this,
7762 then we are sure this is a loop invariant load and
7763 thus we can insert it on the preheader edge. */
7764 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
7765 && !nested_in_vect_loop
7766 && hoist_defs_of_uses (stmt
, loop
))
7768 if (dump_enabled_p ())
7770 dump_printf_loc (MSG_NOTE
, vect_location
,
7771 "hoisting out of the vectorized "
7773 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7775 tree tem
= copy_ssa_name (scalar_dest
);
7776 gsi_insert_on_edge_immediate
7777 (loop_preheader_edge (loop
),
7778 gimple_build_assign (tem
,
7780 (gimple_assign_rhs1 (stmt
))));
7781 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
7782 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7783 set_vinfo_for_stmt (new_stmt
,
7784 new_stmt_vec_info (new_stmt
, vinfo
));
7788 gimple_stmt_iterator gsi2
= *gsi
;
7790 new_temp
= vect_init_vector (stmt
, scalar_dest
,
7792 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7796 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7798 tree perm_mask
= perm_mask_for_reverse (vectype
);
7799 new_temp
= permute_vec_elements (new_temp
, new_temp
,
7800 perm_mask
, stmt
, gsi
);
7801 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7804 /* Collect vector loads and later create their permutation in
7805 vect_transform_grouped_load (). */
7806 if (grouped_load
|| slp_perm
)
7807 dr_chain
.quick_push (new_temp
);
7809 /* Store vector loads in the corresponding SLP_NODE. */
7810 if (slp
&& !slp_perm
)
7811 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7813 /* With SLP permutation we load the gaps as well, without
7814 we need to skip the gaps after we manage to fully load
7815 all elements. group_gap_adj is GROUP_SIZE here. */
7816 group_elt
+= nunits
;
7817 if (maybe_ne (group_gap_adj
, 0U)
7819 && known_eq (group_elt
, group_size
- group_gap_adj
))
7821 poly_wide_int bump_val
7822 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
7824 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
7825 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7830 /* Bump the vector pointer to account for a gap or for excess
7831 elements loaded for a permuted SLP load. */
7832 if (maybe_ne (group_gap_adj
, 0U) && slp_perm
)
7834 poly_wide_int bump_val
7835 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
7837 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
7838 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7843 if (slp
&& !slp_perm
)
7849 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7850 slp_node_instance
, false,
7853 dr_chain
.release ();
7861 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
7862 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
7863 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7868 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7870 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7871 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7874 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo,
		     tree *comp_vectype, enum vect_def_type *dts,
		     tree vectype)
{
  tree lhs, rhs;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
			       &dts[0], comp_vectype)
	  || !*comp_vectype
	  || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
	return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
	   || TREE_CODE (lhs) == FIXED_CST)
    dts[0] = vect_constant_def;
  else
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
	   || TREE_CODE (rhs) == FIXED_CST)
    dts[1] = vect_constant_def;
  else
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
		   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  /* Invariant comparison.  */
  if (! *comp_vectype)
    {
      tree scalar_type = TREE_TYPE (lhs);
      /* If we can widen the comparison to match vectype do so.  */
      if (INTEGRAL_TYPE_P (scalar_type)
	  && tree_int_cst_lt (TYPE_SIZE (scalar_type),
			      TYPE_SIZE (TREE_TYPE (vectype))))
	scalar_type = build_nonstandard_integer_type
	  (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
	   TYPE_UNSIGNED (scalar_type));
      *comp_vectype = get_vectype_for_scalar_type (scalar_type);
    }

  return true;
}
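
/* An assumed, self-contained example (not from GCC itself) of the kind of
   condition the function above validates: the comparison a[i] < b[i] has
   two SSA operands with matching vector shapes, so the COND_EXPR below
   can be vectorized as a VEC_COND_EXPR.  */

static void
cond_expr_example (int *x, const int *a, const int *b,
		   const int *c, const int *d, int n)
{
  for (int i = 0; i < n; i++)
    x[i] = a[i] < b[i] ? c[i] : d[i];
}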
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
  tree then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[4]
    = {vect_unknown_def_type, vect_unknown_def_type,
       vect_unknown_def_type, vect_unknown_def_type};
8001 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8002 stmt_vec_info prev_stmt_info
= NULL
;
8004 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8005 vec
<tree
> vec_oprnds0
= vNULL
;
8006 vec
<tree
> vec_oprnds1
= vNULL
;
8007 vec
<tree
> vec_oprnds2
= vNULL
;
8008 vec
<tree
> vec_oprnds3
= vNULL
;
8010 bool masked
= false;
8012 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
8015 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == TREE_CODE_REDUCTION
)
8017 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8020 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8021 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8025 /* FORNOW: not yet supported. */
8026 if (STMT_VINFO_LIVE_P (stmt_info
))
8028 if (dump_enabled_p ())
8029 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8030 "value used after loop.\n");
8035 /* Is vectorizable conditional operation? */
8036 if (!is_gimple_assign (stmt
))
8039 code
= gimple_assign_rhs_code (stmt
);
8041 if (code
!= COND_EXPR
)
8044 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8045 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8050 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8052 gcc_assert (ncopies
>= 1);
8053 if (reduc_index
&& ncopies
> 1)
8054 return false; /* FORNOW */
8056 cond_expr
= gimple_assign_rhs1 (stmt
);
8057 then_clause
= gimple_assign_rhs2 (stmt
);
8058 else_clause
= gimple_assign_rhs3 (stmt
);
8060 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
8061 &comp_vectype
, &dts
[0], vectype
)
8066 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[2],
8069 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[3],
8073 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
8076 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
8079 masked
= !COMPARISON_CLASS_P (cond_expr
);
8080 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
8082 if (vec_cmp_type
== NULL_TREE
)
8085 cond_code
= TREE_CODE (cond_expr
);
8088 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
8089 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
  if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      /* Boolean values may have another representation in vectors
	 and therefore we prefer bit operations over comparison for
	 them (which also works for scalar masks).  We store opcodes
	 to use in bitop1 and bitop2.  Statement is vectorized as
	 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	 depending on bitop1 and bitop2 arity.  */
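      /* For instance (illustrative), "a > b" on masks is computed below
	 as "a & ~b" (bitop1 = BIT_NOT_EXPR applied to rhs2,
	 bitop2 = BIT_AND_EXPR), and "a == b" as "~(a ^ b)".  */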
      switch (cond_code)
	{
	case GT_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  break;
	case GE_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  break;
	case LT_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (cond_expr0, cond_expr1);
	  break;
	case LE_EXPR:
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (cond_expr0, cond_expr1);
	  break;
	case NE_EXPR:
	  bitop1 = BIT_XOR_EXPR;
	  break;
	case EQ_EXPR:
	  bitop1 = BIT_XOR_EXPR;
	  bitop2 = BIT_NOT_EXPR;
	  break;
	default:
	  return false;
	}
      cond_code = SSA_NAME;
    }
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      if (bitop1 != NOP_EXPR)
	{
	  machine_mode mode = TYPE_MODE (comp_vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, comp_vectype,
					   optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	}
      if (expand_vec_cond_expr_p (vectype, comp_vectype,
				  cond_code))
	{
	  vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
	  return true;
	}
      return false;
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (cond_expr0);
		  ops.safe_push (cond_expr1);
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      gimple *gtemp;
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr0,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs
		    = vect_get_vec_def_for_operand (cond_expr1,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
								  stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
								  stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      if (bitop1 == NOP_EXPR)
		vec_compare = build2 (cond_code, vec_cmp_type,
				      vec_cond_lhs, vec_cond_rhs);
	      else
		{
		  new_temp = make_ssa_name (vec_cmp_type);
		  if (bitop1 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (new_temp, bitop1,
						    vec_cond_rhs);
		  else
		    new_stmt
		      = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
					     vec_cond_rhs);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (bitop2 == NOP_EXPR)
		    vec_compare = new_temp;
		  else if (bitop2 == BIT_NOT_EXPR)
		    {
		      /* Instead of doing ~x ? y : z do x ? z : y.  */
		      vec_compare = new_temp;
		      std::swap (vec_then_clause, vec_else_clause);
		    }
		  else
		    {
		      vec_compare = make_ssa_name (vec_cmp_type);
		      new_stmt
			= gimple_build_assign (vec_compare, bitop2,
					       vec_cond_lhs, new_temp);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    }
		}
	    }
	  new_temp = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
					  vec_compare, vec_then_clause,
					  vec_else_clause);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
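/* For example (illustrative only), a scalar mask definition

       S:  m_1 = a_2 > b_3;

   is vectorized into a statement producing a boolean vector (mask):

       VS: vm_1 = va_2 > vb_3;  */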
static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  poly_uint64 nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
		   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
	return false;
    }
  else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
    return false;

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;
  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
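  /* For instance (illustrative), "a >= b" on masks is computed below as
     "a | ~b", "a < b" as "b & ~a" after swapping the operands, and
     "a != b" as "a ^ b".  */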
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    {
      if (code == GT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	}
      else if (code == GE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	}
      else if (code == LT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else if (code == LE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else
	{
	  bitop1 = BIT_XOR_EXPR;
	  if (code == EQ_EXPR)
	    bitop2 = BIT_NOT_EXPR;
	}
    }
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
	return expand_vec_cmp_expr_p (vectype, mask_type, code);
      else
	{
	  machine_mode mode = TYPE_MODE (vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	  return true;
	}
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);
  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	    {
	      new_stmt = gimple_build_assign (new_temp, code,
					      vec_rhs1, vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  else
	    {
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
	      else
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
						vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		{
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		  else
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
						    new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, gimple **vec_stmt)
{
  if (slp_node)
    {
      gimple *slp_stmt;
      int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	{
	  stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
					       vec_stmt))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
	   && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
    return false;

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
		   slp_instance node_instance)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     The pattern statement needs to be analyzed instead of the original
     statement if the original statement is not relevant.  Otherwise, we
     analyze both statements.  In basic blocks we are called from some SLP
     instance traversal: there we do not analyze pattern stmts separately,
     because the pattern stmts are already part of the SLP instance.  */
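  /* For example (illustrative), for the widening-multiplication pattern

	 S1:  x_1 = (int) a_2;
	 S2:  y_3 = (int) b_4;
	 S3:  p_5 = x_1 * y_3;

     the pattern recognizer may have attached a statement of the form
     p' = WIDEN_MULT_EXPR <a_2, b_4> to S3, and it is that pattern
     statement which gets analyzed here when the original statement is
     not itself relevant.  */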
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
			      node_instance))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node, node_instance))
		return false;
	    }
	}
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
	  || vectorizable_induction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	      || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;
  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;
    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
				     slp_node_instance);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }
  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info)
	     == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
	STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	  {
	    exit_phi = USE_STMT (use_p);
	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	  }
    }
  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create the vector that holds stmt_vec_info structs.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free the vector that holds stmt_vec_info structs.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  poly_uint64 nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
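  /* E.g. (illustrative) a Boolean scalar type with TYPE_PRECISION 1 but
     QImode as its TYPE_MODE gets an 8-bit nonstandard integer type here,
     so the vector elements have mode precision.  */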
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));
  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;
  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (known_eq (size, 0U))
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!multiple_p (size, nbytes, &nunits)
	   || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
    return NULL_TREE;

  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}

poly_uint64 current_vector_size;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && known_eq (current_vector_size, 0U))
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO	- the vect info of the loop or basic block that is being
		  vectorized.
   OPERAND	- operand in the loop or bb.

   Output:
   DEF_STMT	- the defining stmt in case OPERAND is an SSA_NAME.
   DT		- the type of definition.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
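/* For example (illustrative): in a loop body containing

       x_1 = a[i];
       s_2 = x_1 + k_3;

   a constant operand is classified as vect_constant_def, the loop
   invariant k_3 (defined before the loop) as vect_external_def, and
   x_1 (defined by the current iteration) as vect_internal_def.  */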
bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   use stmt.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
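/* E.g. (illustrative) with fixed-size 64-bit vectors, widening one V8QI
   operand produces a V4HI pair via VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR: each input vector yields two vectors of the
   wider type.  */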
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);
  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;
    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);
  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
			 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  poly_uint64 intermediate_nelts
	    = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
	  intermediate_type
	    = build_truth_vector_type (intermediate_nelts,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
			     TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
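/* E.g. (illustrative) two V4SI operands pack into a single V8HI via
   VEC_PACK_TRUNC_EXPR; an int->char conversion therefore goes
   int->short->char, giving MULTI_STEP_CVT == 1 with the short vector
   type recorded in INTERM_TYPES.  */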
bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
			 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);
  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
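  /* E.g. (illustrative) for a float->unsigned char conversion the signed
     variant of the output vector type is tried first below and used when
     the target provides it with the same result mode.  */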
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }
  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
			     TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}