/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;

  stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
  body_cost_vec->safe_push (si);

  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  return (unsigned)
      (builtin_vectorization_cost (kind, vectype, misalign) * count);
}

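/* For example (illustrative values only), costing two copies of an
   aligned vector store in the loop body looks like:

     unsigned cost = record_stmt_cost (cost_vec, 2, vector_store,
				       stmt_info, 0, vect_body);

   which pushes a single stmt_info_for_cost entry onto COST_VEC and
   returns 2 * builtin_vectorization_cost (vector_store, vectype, 0).
   Note how gather/scatter statements are silently re-classified
   before the cost is recorded.  */
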
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT_INFO and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		   tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT_INFO.  */

static void
write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		    tree vect, tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

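/* These array-of-vectors helpers back the load/store-lanes code paths.
   A typical (illustrative) sequence for a group of two vectors is:

     tree array = create_vector_array (vectype, 2);
     ...emit the lanes load into ARRAY...
     tree vec0 = read_vector_array (stmt_info, gsi, scalar_dest, array, 0);
     tree vec1 = read_vector_array (stmt_info, gsi, scalar_dest, array, 1);

   i.e. one temporary array holds all the vectors of the group and each
   element is then extracted into its own SSA name.  */
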
/* Add a clobber of variable VAR to the vectorization of STMT_INFO.
   Emit the clobber before *GSI.  */

static void
vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		       tree var)
{
  tree clobber = build_clobber (TREE_TYPE (var));
  gimple *new_stmt = gimple_build_assign (var, clobber);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
		    enum vect_relevant relevant, bool live_p)
{
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_vec_info old_stmt_info = stmt_info;
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt_info);
}

/* Function is_simple_and_all_uses_invariant

   Return true if STMT_INFO is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
				  loop_vec_info loop_vinfo)
{
  tree op;
  ssa_op_iter iter;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
   is "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt_info->stmt)
      && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt_info->stmt)
	&& !gimple_clobber_p (stmt_info->stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form).  */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}

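/* As an illustrative example, in

     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + 1;	// relevant: has a vdef (alters memory)
	 t = b[i] * 2;		// live if T is used after the loop
       }

   the store is marked vect_used_in_scope because of its vdef, while T
   is only *live*: its value escapes through a loop exit phi, so it
   gets vect_used_only_live unless all its uses are invariant.  */
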
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT_INFO.  Check if USE is
   used in STMT_INFO for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
{
  tree operand;

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign || !gimple_assign_copy_p (assign))
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (call && gimple_call_internal_p (call))
	{
	  internal_fn ifn = gimple_call_internal_fn (call);
	  int mask_index = internal_fn_mask_index (ifn);
	  if (mask_index >= 0
	      && use == gimple_call_arg (call, mask_index))
	    return true;
	  int stored_value_index = internal_fn_stored_value_index (ifn);
	  if (stored_value_index >= 0
	      && use == gimple_call_arg (call, stored_value_index))
	    return true;
	  if (internal_gather_scatter_fn_p (ifn)
	      && use == gimple_call_arg (call, 1))
	    return true;
	}
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (assign);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

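/* For instance (illustrative), given the scalar load

     x_1 = a[i_2];

   the use of I_2 only computes the address of the array reference, so
   exist_non_indexing_operands_for_use_p (i_2, stmt_info) returns
   false and the definition of I_2 need not be vectorized on that
   account; only in a store "a[i_2] = x_1" style stmt can the
   stored value itself be a non-indexing use.  */
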
/*
   Function process_use.

   Inputs:
   - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling vect_mark_relevant and passing
     it the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if the exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
       STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
   we skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
   "relevant" will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
	     bool force)
{
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!dstmt_vinfo)
    return true;

  def_bb = gimple_bb (dstmt_vinfo->stmt);

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
     DSTMT_VINFO must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DSTMT_VINFO in the loop.  So we just
     check that everything is as expected, and we are done.  */
  bb = gimple_bb (stmt_vinfo->stmt);
  if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = dstmt_vinfo
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = dstmt_vinfo
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
				      loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i;
  basic_block bb;
  bool live_p;
  enum vect_relevant relevant;

  DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");

  auto_vec<stmt_vec_info, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
	    }

	  if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi_info, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt_vec_info stmt_vinfo = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (assign);
	      tree op = gimple_assign_rhs1 (assign);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
				    loop_vinfo, relevant, &worklist, false)
		      || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
				       loop_vinfo, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (assign); i++)
		{
		  op = gimple_op (assign, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (call); i++)
		{
		  tree arg = gimple_call_arg (call, i);
		  if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

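/* Illustrative walk of the two phases above: for

     for (i...)
       sum = sum + a[i];	// SUM used after the loop

   the init phase marks the summation stmt (it feeds a loop-exit phi);
   processing it from the worklist then calls process_use on the load
   of A[I] and on the reduction phi, marking their defining stmts in
   turn, until the worklist drains and every stmt the vector code
   will need has been marked relevant and/or live.  */
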
/* Compute the prologue cost for invariant or constant operands.  */

static unsigned
vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
			       unsigned opno, enum vect_def_type dt,
			       stmt_vector_for_cost *cost_vec)
{
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
  tree op = gimple_op (stmt, opno);
  unsigned prologue_cost = 0;

  /* Without looking at the actual initializer a vector of
     constants can be implemented as load from the constant pool.
     When all elements are the same we can use a splat.  */
  tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
  unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
  unsigned num_vects_to_check;
  unsigned HOST_WIDE_INT const_nunits;
  unsigned nelt_limit;
  if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
      && ! multiple_p (const_nunits, group_size))
    {
      num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
      nelt_limit = const_nunits;
    }
  else
    {
      /* If either the vector has variable length or the vectors
	 are composed of repeated whole groups we only need to
	 cost construction once.  All vectors will be the same.  */
      num_vects_to_check = 1;
      nelt_limit = group_size;
    }
  tree elt = NULL_TREE;
  unsigned nelt = 0;
  for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
    {
      unsigned si = j % group_size;
      if (nelt == 0)
	elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
      /* ??? We're just tracking whether all operands of a single
	 vector initializer are the same, ideally we'd check if
	 we emitted the same one already.  */
      else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
				 opno))
	elt = NULL_TREE;
      nelt++;
      if (nelt == nelt_limit)
	{
	  /* ??? We need to pass down stmt_info for a vector type
	     even if it points to the wrong stmt.  */
	  prologue_cost += record_stmt_cost
	      (cost_vec, 1,
	       dt == vect_external_def
	       ? (elt ? scalar_to_vec : vec_construct)
	       : vector_load,
	       stmt_info, 0, vect_prologue);
	  nelt = 0;
	}
    }

  return prologue_cost;
}

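/* E.g. (illustrative): for a group of four scalar stmts whose operand
   OPNO is the same loop-invariant SSA name and a 4-element vector
   type, construction is costed once (4 divides the group size) and
   all elements compare equal, so a single scalar_to_vec (splat) is
   recorded rather than a vec_construct; constant operands are
   instead costed as a vector_load from the constant pool.  */
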
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			slp_tree node,
			stmt_vector_for_cost *cost_vec)
{
  int inside_cost = 0, prologue_cost = 0;

  gcc_assert (cost_vec != NULL);

  /* ??? Somehow we need to fix this at the callers.  */
  if (node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);

  if (node)
    {
      /* Scan operands and account for prologue cost of constants/externals.
	 ??? This over-estimates cost for multiple uses and should be
	 re-engineered.  */
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
      tree lhs = gimple_get_lhs (stmt);
      for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
	{
	  tree op = gimple_op (stmt, i);
	  enum vect_def_type dt;
	  if (!op || op == lhs)
	    continue;
	  if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
	      && (dt == vect_constant_def || dt == vect_external_def))
	    prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
							    i, dt, cost_vec);
	}
    }
  else
    /* Cost the "broadcast" of a scalar operand into a vector operand.
       Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
       cost model.  */
    for (int i = 0; i < ndts; i++)
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
	prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
					   stmt_info, 0, vect_prologue);

  /* Adjust for two-operator SLP nodes.  */
  if (node && SLP_TREE_TWO_OPERATORS (node))
    {
      ncopies *= 2;
      inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
				       stmt_info, 0, vect_body);
    }

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
				   stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr,
				    stmt_vector_for_cost *cost_vec)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
				       vec_promote_demote, stmt_info, 0,
				       vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

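/* Worked example (illustrative): a two-step promotion has PWR == 1.
   For a promotion stmt TMP is I + 1, so the loop above adds
   vect_pow2 (1) + vect_pow2 (2) == 2 + 4 == 6 vec_promote_demote
   stmts to the body cost; the corresponding two-step demotion
   (TMP == I) would add vect_pow2 (0) + vect_pow2 (1) == 3.  */
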
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

static void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       enum vect_def_type dt,
		       vect_memory_access_type memory_access_type,
		       vec_load_store_type vls_type, slp_tree slp_node,
		       stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  stmt_vec_info first_stmt_info = stmt_info;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (slp_node)
	prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
							1, dt, cost_vec);
      else
	prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
					   stmt_info, 0, vect_prologue);
    }

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

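/* E.g. (illustrative): a contiguous-permute store group with
   DR_GROUP_SIZE == 4 and NCOPIES == 1 records
   ncopies * ceil_log2 (4) * 4 == 8 vec_perm stmts in the loop body,
   on top of the cost of the vector stores themselves.  */
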
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr_info),
					  vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

static void
vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_instance instance,
		      slp_tree slp_node,
		      stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  gcc_assert (cost_vec);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
    {
      /* If the load is permuted then the alignment is determined by
	 the first group element not by the first scalar stmt DR.  */
      stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      /* Record the cost for the permutation.  */
      unsigned n_perms;
      unsigned assumed_nunits
	= vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
      unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
      vect_transform_slp_perm_load (slp_node, vNULL, NULL,
				    slp_vf, instance, true,
				    &n_perms);
      inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
				       first_stmt_info, 0, vect_body);
      /* And adjust the number of loads performed.  This handles
	 redundancies as well as loads that are later dead.  */
      auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
      bitmap_clear (perm);
      for (unsigned i = 0;
	   i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
	bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
      ncopies = 0;
      bool load_seen = false;
      for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
	{
	  if (i % assumed_nunits == 0)
	    {
	      if (load_seen)
		ncopies++;
	      load_seen = false;
	    }
	  if (bitmap_bit_p (perm, i))
	    load_seen = true;
	}
      if (load_seen)
	ncopies++;
      gcc_assert (ncopies
		  <= (DR_GROUP_SIZE (first_stmt_info)
		      - DR_GROUP_GAP (first_stmt_info)
		      + assumed_nunits - 1) / assumed_nunits);
    }

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  stmt_vec_info first_stmt_info = stmt_info;
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
				       stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			cost_vec, cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

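/* Illustrative run of the load-permutation pruning above: with
   DR_GROUP_SIZE == 4, ASSUMED_NUNITS == 2 and load permutation
   {0, 0, 2, 2}, only bits 0 and 2 are set in PERM, one in each
   vector-sized chunk of the group, so NCOPIES becomes 2: two vector
   loads are enough to feed the whole permuted group.  */
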
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr_info),
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT_VINFO.  */

static void
vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
		    gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt_vinfo))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at GSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT_INFO.  */

tree
vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
		  gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with initial
     'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt_info, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt_info, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt_info, init_stmt, gsi);
  return new_temp;
}

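/* Typical use (illustrative): to materialize a loop-invariant scalar
   OP as a vector one would call

     tree vec_cst = vect_init_vector (stmt_info, op, vectype, NULL);

   with a NULL GSI, so vect_init_vector_1 routes the init_stmt to the
   loop preheader edge; passing a GSI instead emits it at that point,
   which is what the in-body callers above do.  */
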
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
   with type DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
				enum vect_def_type dt)
{
  tree vec_oprnd;
  stmt_vec_info vec_stmt_info;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt_info
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt_info = (STMT_VINFO_VEC_STMT
			   (STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt_info);
	if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
	  vec_oprnd = PHI_RESULT (phi);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
	  vec_oprnd = PHI_RESULT (phi);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

c83a894c
AH
1521/* Function vect_get_vec_def_for_operand.
1522
32e8e429
RS
1523 OP is an operand in STMT_VINFO. This function returns a (vector) def
1524 that will be used in the vectorized stmt for STMT_VINFO.
c83a894c
AH
1525
1526 In the case that OP is an SSA_NAME which is defined in the loop, then
1527 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1528
1529 In case OP is an invariant or constant, a new stmt that creates a vector def
1530 needs to be introduced. VECTYPE may be used to specify a required type for
1531 the vector invariant. */
1532
1533tree
32e8e429 1534vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
c83a894c
AH
1535{
1536 gimple *def_stmt;
1537 enum vect_def_type dt;
1538 bool is_simple_use;
c83a894c
AH
1539 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1540
1541 if (dump_enabled_p ())
1542 {
1543 dump_printf_loc (MSG_NOTE, vect_location,
1544 "vect_get_vec_def_for_operand: ");
1545 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1546 dump_printf (MSG_NOTE, "\n");
1547 }
1548
fef96d8e
RS
1549 stmt_vec_info def_stmt_info;
1550 is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
1551 &def_stmt_info, &def_stmt);
c83a894c
AH
1552 gcc_assert (is_simple_use);
1553 if (def_stmt && dump_enabled_p ())
1554 {
1555 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1556 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1557 }
1558
1559 if (dt == vect_constant_def || dt == vect_external_def)
1560 {
1561 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1562 tree vector_type;
1563
1564 if (vectype)
1565 vector_type = vectype;
2568d8a1 1566 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
c83a894c
AH
1567 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1568 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1569 else
1570 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1571
1572 gcc_assert (vector_type);
86a91c0a 1573 return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
c83a894c
AH
1574 }
1575 else
fef96d8e 1576 return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
c83a894c
AH
1577}
1578
1579
ebfd146a
IR
1580/* Function vect_get_vec_def_for_stmt_copy
1581
ff802fa1 1582 Return a vector-def for an operand. This function is used when the
b8698a0f
L
1583 vectorized stmt to be created (by the caller to this function) is a "copy"
1584 created in case the vectorized result cannot fit in one vector, and several
ff802fa1 1585 copies of the vector-stmt are required. In this case the vector-def is
ebfd146a 1586 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
e4057a39 1587 of the stmt that defines VEC_OPRND. VINFO describes the vectorization.
ebfd146a
IR
1588
1589 Context:
1590 In case the vectorization factor (VF) is bigger than the number
1591 of elements that can fit in a vectype (nunits), we have to generate
ff802fa1 1592 more than one vector stmt to vectorize the scalar stmt. This situation
b8698a0f 1593 arises when there are multiple data-types operated upon in the loop; the
ebfd146a
IR
1594 smallest data-type determines the VF, and as a result, when vectorizing
1595 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1596 vector stmt (each computing a vector of 'nunits' results, and together
b8698a0f 1597 computing 'VF' results in each iteration). This function is called when
ebfd146a
IR
1598 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1599 which VF=16 and nunits=4, so the number of copies required is 4):
1600
1601 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
b8698a0f 1602
ebfd146a
IR
1603 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1604 VS1.1: vx.1 = memref1 VS1.2
1605 VS1.2: vx.2 = memref2 VS1.3
b8698a0f 1606 VS1.3: vx.3 = memref3
ebfd146a
IR
1607
1608 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1609 VSnew.1: vz1 = vx.1 + ... VSnew.2
1610 VSnew.2: vz2 = vx.2 + ... VSnew.3
1611 VSnew.3: vz3 = vx.3 + ...
1612
1613 The vectorization of S1 is explained in vectorizable_load.
1614 The vectorization of S2:
b8698a0f
L
1615 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1616 the function 'vect_get_vec_def_for_operand' is called to
ff802fa1 1617 get the relevant vector-def for each operand of S2. For operand x it
ebfd146a
IR
1618 returns the vector-def 'vx.0'.
1619
b8698a0f
L
1620 To create the remaining copies of the vector-stmt (VSnew.j), this
1621 function is called to get the relevant vector-def for each operand. It is
1622 obtained from the respective VS1.j stmt, which is recorded in the
ebfd146a
IR
1623 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1624
b8698a0f
L
1625 For example, to obtain the vector-def 'vx.1' in order to create the
1626 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1627 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
ebfd146a
IR
1628 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1629 and return its def ('vx.1').
1630 Overall, to create the above sequence this function will be called 3 times:
e4057a39
RS
1631 vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
1632 vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
1633 vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2); */
ebfd146a
IR
1634
1635tree
e4057a39 1636vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
ebfd146a 1637{
e4057a39
RS
1638 stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
1639 if (!def_stmt_info)
1640 /* Do nothing; can reuse same def. */
ebfd146a
IR
1641 return vec_oprnd;
1642
e4057a39 1643 def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
ebfd146a 1644 gcc_assert (def_stmt_info);
e4057a39
RS
1645 if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
1646 vec_oprnd = PHI_RESULT (phi);
ebfd146a 1647 else
e4057a39 1648 vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
ebfd146a
IR
1649 return vec_oprnd;
1650}
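
/* Illustrative sketch (not part of GCC): the copy arithmetic described
   in the comment above.  With VF = 16 and nunits = 4, each scalar stmt
   needs VF / nunits = 4 vector stmts; copy J covers scalar iterations
   J * nunits .. (J + 1) * nunits - 1.  All names here are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned vf = 16, nunits = 4;
  unsigned ncopies = vf / nunits;	/* 4 copies: VSnew.0 .. VSnew.3.  */
  for (unsigned j = 0; j < ncopies; ++j)
    printf ("copy %u covers scalar iterations %u..%u\n",
	    j, j * nunits, (j + 1) * nunits - 1);
  return 0;
}
#endif
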
1651
1652
1653/* Get vectorized definitions for the operands to create a copy of an original
ff802fa1 1654 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
ebfd146a 1655
c78e3652 1656void
e4057a39 1657vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
9771b263
DN
1658 vec<tree> *vec_oprnds0,
1659 vec<tree> *vec_oprnds1)
ebfd146a 1660{
9771b263 1661 tree vec_oprnd = vec_oprnds0->pop ();
ebfd146a 1662
e4057a39 1663 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
9771b263 1664 vec_oprnds0->quick_push (vec_oprnd);
ebfd146a 1665
9771b263 1666 if (vec_oprnds1 && vec_oprnds1->length ())
ebfd146a 1667 {
9771b263 1668 vec_oprnd = vec_oprnds1->pop ();
e4057a39 1669 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
9771b263 1670 vec_oprnds1->quick_push (vec_oprnd);
ebfd146a
IR
1671 }
1672}
1673
1674
c78e3652 1675/* Get vectorized definitions for OP0 and OP1. */
ebfd146a 1676
c78e3652 1677void
32e8e429 1678vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
9771b263
DN
1679 vec<tree> *vec_oprnds0,
1680 vec<tree> *vec_oprnds1,
306b0c92 1681 slp_tree slp_node)
ebfd146a
IR
1682{
1683 if (slp_node)
d092494c
IR
1684 {
1685 int nops = (op1 == NULL_TREE) ? 1 : 2;
ef062b13
TS
1686 auto_vec<tree> ops (nops);
1687 auto_vec<vec<tree> > vec_defs (nops);
d092494c 1688
9771b263 1689 ops.quick_push (op0);
d092494c 1690 if (op1)
9771b263 1691 ops.quick_push (op1);
d092494c 1692
306b0c92 1693 vect_get_slp_defs (ops, slp_node, &vec_defs);
d092494c 1694
37b5ec8f 1695 *vec_oprnds0 = vec_defs[0];
d092494c 1696 if (op1)
37b5ec8f 1697 *vec_oprnds1 = vec_defs[1];
d092494c 1698 }
ebfd146a
IR
1699 else
1700 {
1701 tree vec_oprnd;
1702
9771b263 1703 vec_oprnds0->create (1);
a1824cfd 1704 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
9771b263 1705 vec_oprnds0->quick_push (vec_oprnd);
ebfd146a
IR
1706
1707 if (op1)
1708 {
9771b263 1709 vec_oprnds1->create (1);
a1824cfd 1710 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
9771b263 1711 vec_oprnds1->quick_push (vec_oprnd);
ebfd146a
IR
1712 }
1713 }
1714}
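
/* Illustrative sketch (not part of GCC proper): the usual caller
   pattern for the two routines above, matching the signatures in this
   file.  Copy 0 fetches fresh defs; later copies chain through
   vect_get_vec_defs_for_stmt_copy.  The surrounding variables (ncopies,
   op0, op1, stmt_info, slp_node, vinfo) are assumed from the caller.  */
#if 0
vec<tree> vec_oprnds0 = vNULL, vec_oprnds1 = vNULL;
for (unsigned j = 0; j < ncopies; j++)
  {
    if (j == 0)
      vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
			 slp_node);
    else
      vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
    /* ... build the j'th copy of the vector stmt from the defs ...  */
  }
#endif
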
1715
bb6c2b68
RS
1716/* Helper function called by vect_finish_replace_stmt and
1717 vect_finish_stmt_generation. Set the location of the new
e1bd7296 1718 statement and create and return a stmt_vec_info for it. */
bb6c2b68 1719
e1bd7296 1720static stmt_vec_info
32e8e429 1721vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
bb6c2b68 1722{
bb6c2b68
RS
1723 vec_info *vinfo = stmt_info->vinfo;
1724
e1bd7296 1725 stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
bb6c2b68
RS
1726
1727 if (dump_enabled_p ())
1728 {
1729 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1730 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1731 }
1732
86a91c0a 1733 gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
bb6c2b68
RS
1734
1735 /* While EH edges will generally prevent vectorization, stmt might
1736 e.g. be in a must-not-throw region. Ensure newly created stmts
1737 that could throw are part of the same region. */
86a91c0a 1738 int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
bb6c2b68
RS
1739 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1740 add_stmt_to_eh_lp (vec_stmt, lp_nr);
e1bd7296
RS
1741
1742 return vec_stmt_info;
bb6c2b68
RS
1743}
1744
32e8e429
RS
1745/* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
1746 which sets the same scalar result as STMT_INFO did. Create and return a
e1bd7296 1747 stmt_vec_info for VEC_STMT. */
bb6c2b68 1748
e1bd7296 1749stmt_vec_info
32e8e429 1750vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
bb6c2b68 1751{
a1824cfd 1752 gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));
bb6c2b68 1753
a1824cfd 1754 gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
bb6c2b68
RS
1755 gsi_replace (&gsi, vec_stmt, false);
1756
a1824cfd 1757 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
bb6c2b68 1758}
ebfd146a 1759
32e8e429 1760/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
e1bd7296 1761 before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
ebfd146a 1762
e1bd7296 1763stmt_vec_info
32e8e429 1764vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
ebfd146a
IR
1765 gimple_stmt_iterator *gsi)
1766{
a1824cfd 1767 gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
ebfd146a 1768
54e8e2c3
RG
1769 if (!gsi_end_p (*gsi)
1770 && gimple_has_mem_ops (vec_stmt))
1771 {
355fe088 1772 gimple *at_stmt = gsi_stmt (*gsi);
54e8e2c3
RG
1773 tree vuse = gimple_vuse (at_stmt);
1774 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1775 {
1776 tree vdef = gimple_vdef (at_stmt);
1777 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1778 /* If we have an SSA vuse and insert a store, update virtual
1779 SSA form to avoid triggering the renamer. Do so only
1780 if we can easily see all uses - which is what almost always
1781 happens with the way vectorized stmts are inserted. */
1782 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1783 && ((is_gimple_assign (vec_stmt)
1784 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1785 || (is_gimple_call (vec_stmt)
1786 && !(gimple_call_flags (vec_stmt)
1787 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1788 {
1789 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1790 gimple_set_vdef (vec_stmt, new_vdef);
1791 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1792 }
1793 }
1794 }
ebfd146a 1795 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
a1824cfd 1796 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
ebfd146a
IR
1797}
1798
70439f0d
RS
1799/* We want to vectorize a call to combined function CFN with function
1800 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1801 as the types of all inputs. Check whether this is possible using
1802 an internal function, returning its code if so or IFN_LAST if not. */
ebfd146a 1803
70439f0d
RS
1804static internal_fn
1805vectorizable_internal_function (combined_fn cfn, tree fndecl,
1806 tree vectype_out, tree vectype_in)
ebfd146a 1807{
70439f0d
RS
1808 internal_fn ifn;
1809 if (internal_fn_p (cfn))
1810 ifn = as_internal_fn (cfn);
1811 else
1812 ifn = associated_internal_fn (fndecl);
1813 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1814 {
1815 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1816 if (info.vectorizable)
1817 {
1818 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1819 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
d95ab70a
RS
1820 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1821 OPTIMIZE_FOR_SPEED))
70439f0d
RS
1822 return ifn;
1823 }
1824 }
1825 return IFN_LAST;
ebfd146a
IR
1826}
1827
5ce9450f 1828
82570274 1829static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
5ce9450f
JJ
1830 gimple_stmt_iterator *);
1831
7cfb4d93
RS
1832/* Check whether a load or store statement in the loop described by
1833 LOOP_VINFO is possible in a fully-masked loop. This is testing
1834 whether the vectorizer pass has the appropriate support, as well as
1835 whether the target does.
1836
1837 VLS_TYPE says whether the statement is a load or store and VECTYPE
1838 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1839 says how the load or store is going to be implemented and GROUP_SIZE
1840 is the number of load or store statements in the containing group.
bfaa08b7
RS
1841 If the access is a gather load or scatter store, GS_INFO describes
1842 its arguments.
7cfb4d93
RS
1843
1844 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1845 supported, otherwise record the required mask types. */
1846
1847static void
1848check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1849 vec_load_store_type vls_type, int group_size,
bfaa08b7
RS
1850 vect_memory_access_type memory_access_type,
1851 gather_scatter_info *gs_info)
7cfb4d93
RS
1852{
1853 /* Invariant loads need no special support. */
1854 if (memory_access_type == VMAT_INVARIANT)
1855 return;
1856
1857 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1858 machine_mode vecmode = TYPE_MODE (vectype);
1859 bool is_load = (vls_type == VLS_LOAD);
1860 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1861 {
1862 if (is_load
1863 ? !vect_load_lanes_supported (vectype, group_size, true)
1864 : !vect_store_lanes_supported (vectype, group_size, true))
1865 {
1866 if (dump_enabled_p ())
1867 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1868 "can't use a fully-masked loop because the"
1869 " target doesn't have an appropriate masked"
1870 " load/store-lanes instruction.\n");
1871 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1872 return;
1873 }
1874 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1875 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1876 return;
1877 }
1878
bfaa08b7
RS
1879 if (memory_access_type == VMAT_GATHER_SCATTER)
1880 {
f307441a
RS
1881 internal_fn ifn = (is_load
1882 ? IFN_MASK_GATHER_LOAD
1883 : IFN_MASK_SCATTER_STORE);
bfaa08b7 1884 tree offset_type = TREE_TYPE (gs_info->offset);
f307441a 1885 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
bfaa08b7
RS
1886 gs_info->memory_type,
1887 TYPE_SIGN (offset_type),
1888 gs_info->scale))
1889 {
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "can't use a fully-masked loop because the"
1893 " target doesn't have an appropriate masked"
f307441a 1894 " gather load or scatter store instruction.\n");
bfaa08b7
RS
1895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1896 return;
1897 }
1898 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1899 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1900 return;
1901 }
1902
7cfb4d93
RS
1903 if (memory_access_type != VMAT_CONTIGUOUS
1904 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1905 {
1906 /* Element X of the data must come from iteration i * VF + X of the
1907 scalar loop. We need more work to support other mappings. */
1908 if (dump_enabled_p ())
1909 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1910 "can't use a fully-masked loop because an access"
1911 " isn't contiguous.\n");
1912 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1913 return;
1914 }
1915
1916 machine_mode mask_mode;
1917 if (!(targetm.vectorize.get_mask_mode
1918 (GET_MODE_NUNITS (vecmode),
1919 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1920 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1921 {
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "can't use a fully-masked loop because the target"
1925 " doesn't have the appropriate masked load or"
1926 " store.\n");
1927 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1928 return;
1929 }
1930 /* We might load more scalars than we need for permuting SLP loads.
1931 We checked in get_group_load_store_type that the extra elements
1932 don't leak into a new vector. */
1933 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1934 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1935 unsigned int nvectors;
1936 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1937 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1938 else
1939 gcc_unreachable ();
1940}
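
/* Illustrative sketch (not part of GCC): the final step above is a
   conservative ceiling division.  For constant values, e.g.
   group_size = 3, vf = 8 and nunits = 16, it records
   ceil (3 * 8 / 16) = 2 masks.  Hypothetical helper.  */
#if 0
static unsigned
mask_vector_count (unsigned group_size, unsigned vf, unsigned nunits)
{
  /* Round the quotient away from zero, as can_div_away_from_zero_p
     does for poly_ints.  */
  return (group_size * vf + nunits - 1) / nunits;
}
#endif
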
1941
1942/* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1943 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1944 that needs to be applied to all loads and stores in a vectorized loop.
1945 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1946
1947 MASK_TYPE is the type of both masks. If new statements are needed,
1948 insert them before GSI. */
1949
1950static tree
1951prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1952 gimple_stmt_iterator *gsi)
1953{
1954 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1955 if (!loop_mask)
1956 return vec_mask;
1957
1958 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1959 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1960 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1961 vec_mask, loop_mask);
1962 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1963 return and_res;
1964}
1965
429ef523 1966/* Determine whether we can use a gather load or scatter store to vectorize
32e8e429
RS
1967 strided load or store STMT_INFO by truncating the current offset to a
1968 smaller width. We need to be able to construct an offset vector:
429ef523
RS
1969
1970 { 0, X, X*2, X*3, ... }
1971
32e8e429 1972 without loss of precision, where X is STMT_INFO's DR_STEP.
429ef523
RS
1973
1974 Return true if this is possible, describing the gather load or scatter
1975 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
1976
1977static bool
32e8e429
RS
1978vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
1979 loop_vec_info loop_vinfo, bool masked_p,
429ef523
RS
1980 gather_scatter_info *gs_info)
1981{
89fa689a
RS
1982 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1983 data_reference *dr = dr_info->dr;
429ef523
RS
1984 tree step = DR_STEP (dr);
1985 if (TREE_CODE (step) != INTEGER_CST)
1986 {
1987 /* ??? Perhaps we could use range information here? */
1988 if (dump_enabled_p ())
1989 dump_printf_loc (MSG_NOTE, vect_location,
1990 "cannot truncate variable step.\n");
1991 return false;
1992 }
1993
1994 /* Get the number of bits in an element. */
1995 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1996 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1997 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1998
1999 /* Set COUNT to the upper limit on the number of elements - 1.
2000 Start with the maximum vectorization factor. */
2001 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
2002
2003 /* Try lowering COUNT to the number of scalar latch iterations. */
2004 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2005 widest_int max_iters;
2006 if (max_loop_iterations (loop, &max_iters)
2007 && max_iters < count)
2008 count = max_iters.to_shwi ();
2009
2010 /* Try scales of 1 and the element size. */
89fa689a 2011 int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
4a669ac3 2012 wi::overflow_type overflow = wi::OVF_NONE;
429ef523
RS
2013 for (int i = 0; i < 2; ++i)
2014 {
2015 int scale = scales[i];
2016 widest_int factor;
2017 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
2018 continue;
2019
2020 /* See whether we can calculate (COUNT - 1) * STEP / SCALE
2021 in ELEMENT_BITS bits. */
4a669ac3
AH
2022 widest_int range = wi::mul (count, factor, SIGNED, &overflow);
2023 if (overflow)
429ef523
RS
2024 continue;
2025 signop sign = range >= 0 ? UNSIGNED : SIGNED;
2026 if (wi::min_precision (range, sign) > element_bits)
2027 {
4a669ac3 2028 overflow = wi::OVF_UNKNOWN;
429ef523
RS
2029 continue;
2030 }
2031
2032 /* See whether the target supports the operation. */
2033 tree memory_type = TREE_TYPE (DR_REF (dr));
2034 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
2035 memory_type, element_bits, sign, scale,
2036 &gs_info->ifn, &gs_info->element_type))
2037 continue;
2038
2039 tree offset_type = build_nonstandard_integer_type (element_bits,
2040 sign == UNSIGNED);
2041
2042 gs_info->decl = NULL_TREE;
2043 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
2044 but we don't need to store that here. */
2045 gs_info->base = NULL_TREE;
2046 gs_info->offset = fold_convert (offset_type, step);
929b4411 2047 gs_info->offset_dt = vect_constant_def;
429ef523
RS
2048 gs_info->offset_vectype = NULL_TREE;
2049 gs_info->scale = scale;
2050 gs_info->memory_type = memory_type;
2051 return true;
2052 }
2053
4a669ac3 2054 if (overflow && dump_enabled_p ())
429ef523
RS
2055 dump_printf_loc (MSG_NOTE, vect_location,
2056 "truncating gather/scatter offset to %d bits"
2057 " might change its value.\n", element_bits);
2058
2059 return false;
2060}
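
/* Illustrative sketch (not part of GCC): the precision test above for
   constant values.  With step = 12 bytes, scale = 4 and count = 255,
   the largest offset element is 255 * (12 / 4) = 765, which needs 10
   bits and so fits in a 16-bit offset element.  Hypothetical helper;
   unsigned-only and assuming the multiplication cannot overflow.  */
#if 0
#include <stdbool.h>

static bool
offset_fits_p (unsigned long long count, unsigned long long step,
	       unsigned long long scale, unsigned element_bits)
{
  if (step % scale != 0)
    return false;
  unsigned long long range = count * (step / scale);
  unsigned bits = 0;
  while (range)			/* minimum precision of an unsigned value */
    {
      ++bits;
      range >>= 1;
    }
  return bits <= element_bits;
}
#endif
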
2061
ab2fc782 2062/* Return true if we can use gather/scatter internal functions to
82570274 2063 vectorize STMT_INFO, which is a grouped or strided load or store.
429ef523
RS
2064 MASKED_P is true if the load or store is conditional. When returning
2065 true, fill in GS_INFO with the information required to perform the
2066 operation. */
ab2fc782
RS
2067
2068static bool
82570274
RS
2069vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
2070 loop_vec_info loop_vinfo, bool masked_p,
ab2fc782
RS
2071 gather_scatter_info *gs_info)
2072{
82570274 2073 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
ab2fc782 2074 || gs_info->decl)
82570274 2075 return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
429ef523 2076 masked_p, gs_info);
ab2fc782
RS
2077
2078 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
2079 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2080 tree offset_type = TREE_TYPE (gs_info->offset);
2081 unsigned int offset_bits = TYPE_PRECISION (offset_type);
2082
2083 /* Enforced by vect_check_gather_scatter. */
2084 gcc_assert (element_bits >= offset_bits);
2085
2086 /* If the elements are wider than the offset, convert the offset to the
2087 same width, without changing its sign. */
2088 if (element_bits > offset_bits)
2089 {
2090 bool unsigned_p = TYPE_UNSIGNED (offset_type);
2091 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
2092 gs_info->offset = fold_convert (offset_type, gs_info->offset);
2093 }
2094
2095 if (dump_enabled_p ())
2096 dump_printf_loc (MSG_NOTE, vect_location,
2097 "using gather/scatter for strided/grouped access,"
2098 " scale = %d\n", gs_info->scale);
2099
2100 return true;
2101}
2102
32e8e429 2103/* STMT_INFO is a non-strided load or store, meaning that it accesses
62da9e14
RS
2104 elements with a known constant step. Return -1 if that step
2105 is negative, 0 if it is zero, and 1 if it is greater than zero. */
2106
2107static int
32e8e429 2108compare_step_with_zero (stmt_vec_info stmt_info)
62da9e14 2109{
89fa689a
RS
2110 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2111 return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
3f5e8a76 2112 size_zero_node);
62da9e14
RS
2113}
2114
2115/* If the target supports a permute mask that reverses the elements in
2116 a vector of type VECTYPE, return that mask, otherwise return null. */
2117
2118static tree
2119perm_mask_for_reverse (tree vectype)
2120{
928686b1 2121 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
62da9e14 2122
d980067b
RS
2123 /* The encoding has a single stepped pattern. */
2124 vec_perm_builder sel (nunits, 1, 3);
928686b1 2125 for (int i = 0; i < 3; ++i)
908a1a16 2126 sel.quick_push (nunits - 1 - i);
62da9e14 2127
e3342de4
RS
2128 vec_perm_indices indices (sel, 1, nunits);
2129 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
62da9e14 2130 return NULL_TREE;
e3342de4 2131 return vect_gen_perm_mask_checked (vectype, indices);
62da9e14 2132}
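
/* Illustrative sketch (not part of GCC): the selector encoded above.
   For nunits = 8 it is { 7, 6, 5, 4, 3, 2, 1, 0 }; only the first
   three elements need to be pushed because the single stepped pattern
   implies the rest.  Hypothetical example.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned nunits = 8;
  for (unsigned i = 0; i < nunits; ++i)
    printf ("%u ", nunits - 1 - i);	/* prints: 7 6 5 4 3 2 1 0 */
  printf ("\n");
  return 0;
}
#endif
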
5ce9450f 2133
32e8e429 2134/* STMT_INFO is either a masked or unconditional store. Return the value
c3a8f964
RS
2135 being stored. */
2136
f307441a 2137tree
32e8e429 2138vect_get_store_rhs (stmt_vec_info stmt_info)
c3a8f964 2139{
32e8e429 2140 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
c3a8f964
RS
2141 {
2142 gcc_assert (gimple_assign_single_p (assign));
2143 return gimple_assign_rhs1 (assign);
2144 }
32e8e429 2145 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
c3a8f964
RS
2146 {
2147 internal_fn ifn = gimple_call_internal_fn (call);
f307441a
RS
2148 int index = internal_fn_stored_value_index (ifn);
2149 gcc_assert (index >= 0);
32e8e429 2150 return gimple_call_arg (call, index);
c3a8f964
RS
2151 }
2152 gcc_unreachable ();
2153}
2154
2de001ee 2155/* A subroutine of get_load_store_type, with a subset of the same
32e8e429 2156 arguments. Handle the case where STMT_INFO is part of a grouped load
2de001ee
RS
2157 or store.
2158
2159 For stores, the statements in the group are all consecutive
2160 and there is no gap at the end. For loads, the statements in the
2161 group might not be consecutive; there can be gaps between statements
2162 as well as at the end. */
2163
2164static bool
32e8e429 2165get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
7e11fc7f 2166 bool masked_p, vec_load_store_type vls_type,
429ef523
RS
2167 vect_memory_access_type *memory_access_type,
2168 gather_scatter_info *gs_info)
2de001ee 2169{
2de001ee
RS
2170 vec_info *vinfo = stmt_info->vinfo;
2171 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2172 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
bffb8014 2173 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
89fa689a 2174 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
bffb8014
RS
2175 unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
2176 bool single_element_p = (stmt_info == first_stmt_info
2c53b149 2177 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
bffb8014 2178 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
928686b1 2179 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2de001ee
RS
2180
2181 /* True if the vectorized statements would access beyond the last
2182 statement in the group. */
2183 bool overrun_p = false;
2184
2185 /* True if we can cope with such overrun by peeling for gaps, so that
2186 there is at least one final scalar iteration after the vector loop. */
7e11fc7f
RS
2187 bool can_overrun_p = (!masked_p
2188 && vls_type == VLS_LOAD
2189 && loop_vinfo
2190 && !loop->inner);
2de001ee
RS
2191
2192 /* There can only be a gap at the end of the group if the stride is
2193 known at compile time. */
2194 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
2195
2196 /* Stores can't yet have gaps. */
2197 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2198
2199 if (slp)
2200 {
2201 if (STMT_VINFO_STRIDED_P (stmt_info))
2202 {
2c53b149 2203 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2de001ee
RS
2204 separated by the stride, until we have a complete vector.
2205 Fall back to scalar accesses if that isn't possible. */
928686b1 2206 if (multiple_p (nunits, group_size))
2de001ee
RS
2207 *memory_access_type = VMAT_STRIDED_SLP;
2208 else
2209 *memory_access_type = VMAT_ELEMENTWISE;
2210 }
2211 else
2212 {
2213 overrun_p = loop_vinfo && gap != 0;
2214 if (overrun_p && vls_type != VLS_LOAD)
2215 {
2216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2217 "Grouped store with gaps requires"
2218 " non-consecutive accesses\n");
2219 return false;
2220 }
f702e7d4
RS
2221 /* An overrun is fine if the trailing elements are smaller
2222 than the alignment boundary B. Every vector access will
2223 be a multiple of B and so we are guaranteed to access a
2224 non-gap element in the same B-sized block. */
f9ef2c76 2225 if (overrun_p
89fa689a
RS
2226 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2227 / vect_get_scalar_dr_size (first_dr_info)))
f9ef2c76 2228 overrun_p = false;
2de001ee
RS
2229 if (overrun_p && !can_overrun_p)
2230 {
2231 if (dump_enabled_p ())
2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2233 "Peeling for outer loop is not supported\n");
2234 return false;
2235 }
2236 *memory_access_type = VMAT_CONTIGUOUS;
2237 }
2238 }
2239 else
2240 {
2241 /* We can always handle this case using elementwise accesses,
2242 but see if something more efficient is available. */
2243 *memory_access_type = VMAT_ELEMENTWISE;
2244
2245 /* If there is a gap at the end of the group then these optimizations
2246 would access excess elements in the last iteration. */
2247 bool would_overrun_p = (gap != 0);
f702e7d4
RS
2248 /* An overrun is fine if the trailing elements are smaller than the
2249 alignment boundary B. Every vector access will be a multiple of B
2250 and so we are guaranteed to access a non-gap element in the
2251 same B-sized block. */
f9ef2c76 2252 if (would_overrun_p
7e11fc7f 2253 && !masked_p
89fa689a
RS
2254 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2255 / vect_get_scalar_dr_size (first_dr_info)))
f9ef2c76 2256 would_overrun_p = false;
f702e7d4 2257
2de001ee 2258 if (!STMT_VINFO_STRIDED_P (stmt_info)
62da9e14 2259 && (can_overrun_p || !would_overrun_p)
86a91c0a 2260 && compare_step_with_zero (stmt_info) > 0)
2de001ee 2261 {
6737facb
RS
2262 /* First cope with the degenerate case of a single-element
2263 vector. */
2264 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2265 *memory_access_type = VMAT_CONTIGUOUS;
2266
2267 /* Otherwise try using LOAD/STORE_LANES. */
2268 if (*memory_access_type == VMAT_ELEMENTWISE
2269 && (vls_type == VLS_LOAD
7e11fc7f
RS
2270 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2271 : vect_store_lanes_supported (vectype, group_size,
2272 masked_p)))
2de001ee
RS
2273 {
2274 *memory_access_type = VMAT_LOAD_STORE_LANES;
2275 overrun_p = would_overrun_p;
2276 }
2277
2278 /* If that fails, try using permuting loads. */
2279 if (*memory_access_type == VMAT_ELEMENTWISE
2280 && (vls_type == VLS_LOAD
2281 ? vect_grouped_load_supported (vectype, single_element_p,
2282 group_size)
2283 : vect_grouped_store_supported (vectype, group_size)))
2284 {
2285 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2286 overrun_p = would_overrun_p;
2287 }
2288 }
429ef523
RS
2289
2290 /* As a last resort, try using a gather load or scatter store.
2291
2292 ??? Although the code can handle all group sizes correctly,
2293 it probably isn't a win to use separate strided accesses based
2294 on nearby locations. Or, even if it's a win over scalar code,
2295 it might not be a win over vectorizing at a lower VF, if that
2296 allows us to use contiguous accesses. */
2297 if (*memory_access_type == VMAT_ELEMENTWISE
2298 && single_element_p
2299 && loop_vinfo
86a91c0a 2300 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
429ef523
RS
2301 masked_p, gs_info))
2302 *memory_access_type = VMAT_GATHER_SCATTER;
2de001ee
RS
2303 }
2304
bffb8014 2305 if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
2de001ee
RS
2306 {
2307 /* STMT_INFO is the leader of the group. Check the operands of all the
2308 stmts of the group. */
bffb8014
RS
2309 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
2310 while (next_stmt_info)
2de001ee 2311 {
bffb8014 2312 tree op = vect_get_store_rhs (next_stmt_info);
2de001ee 2313 enum vect_def_type dt;
894dd753 2314 if (!vect_is_simple_use (op, vinfo, &dt))
2de001ee
RS
2315 {
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "use not simple.\n");
2319 return false;
2320 }
bffb8014 2321 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
2de001ee
RS
2322 }
2323 }
2324
2325 if (overrun_p)
2326 {
2327 gcc_assert (can_overrun_p);
2328 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2330 "Data access with gaps requires scalar "
2331 "epilogue loop\n");
2332 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2333 }
2334
2335 return true;
2336}
2337
62da9e14 2338/* A subroutine of get_load_store_type, with a subset of the same
32e8e429 2339 arguments. Handle the case where STMT_INFO is a load or store that
62da9e14
RS
2340 accesses consecutive elements with a negative step. */
2341
2342static vect_memory_access_type
32e8e429 2343get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
62da9e14
RS
2344 vec_load_store_type vls_type,
2345 unsigned int ncopies)
2346{
89fa689a 2347 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
62da9e14
RS
2348 dr_alignment_support alignment_support_scheme;
2349
2350 if (ncopies > 1)
2351 {
2352 if (dump_enabled_p ())
2353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2354 "multiple types with negative step.\n");
2355 return VMAT_ELEMENTWISE;
2356 }
2357
89fa689a 2358 alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
62da9e14
RS
2359 if (alignment_support_scheme != dr_aligned
2360 && alignment_support_scheme != dr_unaligned_supported)
2361 {
2362 if (dump_enabled_p ())
2363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2364 "negative step but alignment required.\n");
2365 return VMAT_ELEMENTWISE;
2366 }
2367
2368 if (vls_type == VLS_STORE_INVARIANT)
2369 {
2370 if (dump_enabled_p ())
2371 dump_printf_loc (MSG_NOTE, vect_location,
2372 "negative step with invariant source;"
2373 " no permute needed.\n");
2374 return VMAT_CONTIGUOUS_DOWN;
2375 }
2376
2377 if (!perm_mask_for_reverse (vectype))
2378 {
2379 if (dump_enabled_p ())
2380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2381 "negative step and reversing not supported.\n");
2382 return VMAT_ELEMENTWISE;
2383 }
2384
2385 return VMAT_CONTIGUOUS_REVERSE;
2386}
2387
32e8e429 2388/* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
2de001ee
RS
2389 if there is a memory access type that the vectorized form can use,
2390 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2391 or scatters, fill in GS_INFO accordingly.
2392
2393 SLP says whether we're performing SLP rather than loop vectorization.
7e11fc7f 2394 MASKED_P is true if the statement is conditional on a vectorized mask.
62da9e14
RS
2395 VECTYPE is the vector type that the vectorized statements will use.
2396 NCOPIES is the number of vector statements that will be needed. */
2de001ee
RS
2397
2398static bool
32e8e429
RS
2399get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2400 bool masked_p, vec_load_store_type vls_type,
2401 unsigned int ncopies,
2de001ee
RS
2402 vect_memory_access_type *memory_access_type,
2403 gather_scatter_info *gs_info)
2404{
2de001ee
RS
2405 vec_info *vinfo = stmt_info->vinfo;
2406 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4d694b27 2407 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2de001ee
RS
2408 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2409 {
2410 *memory_access_type = VMAT_GATHER_SCATTER;
86a91c0a 2411 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
2de001ee 2412 gcc_unreachable ();
894dd753 2413 else if (!vect_is_simple_use (gs_info->offset, vinfo,
2de001ee
RS
2414 &gs_info->offset_dt,
2415 &gs_info->offset_vectype))
2416 {
2417 if (dump_enabled_p ())
2418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2419 "%s index use not simple.\n",
2420 vls_type == VLS_LOAD ? "gather" : "scatter");
2421 return false;
2422 }
2423 }
2424 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2425 {
86a91c0a
RS
2426 if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
2427 vls_type, memory_access_type, gs_info))
2de001ee
RS
2428 return false;
2429 }
2430 else if (STMT_VINFO_STRIDED_P (stmt_info))
2431 {
2432 gcc_assert (!slp);
ab2fc782 2433 if (loop_vinfo
86a91c0a 2434 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
429ef523 2435 masked_p, gs_info))
ab2fc782
RS
2436 *memory_access_type = VMAT_GATHER_SCATTER;
2437 else
2438 *memory_access_type = VMAT_ELEMENTWISE;
2de001ee
RS
2439 }
2440 else
62da9e14 2441 {
86a91c0a 2442 int cmp = compare_step_with_zero (stmt_info);
62da9e14
RS
2443 if (cmp < 0)
2444 *memory_access_type = get_negative_load_store_type
86a91c0a 2445 (stmt_info, vectype, vls_type, ncopies);
62da9e14
RS
2446 else if (cmp == 0)
2447 {
2448 gcc_assert (vls_type == VLS_LOAD);
2449 *memory_access_type = VMAT_INVARIANT;
2450 }
2451 else
2452 *memory_access_type = VMAT_CONTIGUOUS;
2453 }
2de001ee 2454
4d694b27
RS
2455 if ((*memory_access_type == VMAT_ELEMENTWISE
2456 || *memory_access_type == VMAT_STRIDED_SLP)
2457 && !nunits.is_constant ())
2458 {
2459 if (dump_enabled_p ())
2460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2461 "Not using elementwise accesses due to variable "
2462 "vectorization factor.\n");
2463 return false;
2464 }
2465
2de001ee
RS
2466 /* FIXME: At the moment the cost model seems to underestimate the
2467 cost of using elementwise accesses. This check preserves the
2468 traditional behavior until that can be fixed. */
2469 if (*memory_access_type == VMAT_ELEMENTWISE
4aa157e8 2470 && !STMT_VINFO_STRIDED_P (stmt_info)
bffb8014 2471 && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
2c53b149
RB
2472 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2473 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2de001ee
RS
2474 {
2475 if (dump_enabled_p ())
2476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2477 "not falling back to elementwise accesses\n");
2478 return false;
2479 }
2480 return true;
2481}
2482
aaeefd88 2483/* Return true if boolean argument MASK is suitable for vectorizing
32e8e429 2484 conditional load or store STMT_INFO. When returning true, store the type
929b4411
RS
2485 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2486 in *MASK_VECTYPE_OUT. */
aaeefd88
RS
2487
2488static bool
32e8e429 2489vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
929b4411
RS
2490 vect_def_type *mask_dt_out,
2491 tree *mask_vectype_out)
aaeefd88
RS
2492{
2493 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2494 {
2495 if (dump_enabled_p ())
2496 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2497 "mask argument is not a boolean.\n");
2498 return false;
2499 }
2500
2501 if (TREE_CODE (mask) != SSA_NAME)
2502 {
2503 if (dump_enabled_p ())
2504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2505 "mask argument is not an SSA name.\n");
2506 return false;
2507 }
2508
929b4411 2509 enum vect_def_type mask_dt;
aaeefd88 2510 tree mask_vectype;
894dd753 2511 if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
aaeefd88
RS
2512 {
2513 if (dump_enabled_p ())
2514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2515 "mask use not simple.\n");
2516 return false;
2517 }
2518
2519 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2520 if (!mask_vectype)
2521 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2522
2523 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2524 {
2525 if (dump_enabled_p ())
2526 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2527 "could not find an appropriate vector mask type.\n");
2528 return false;
2529 }
2530
2531 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2532 TYPE_VECTOR_SUBPARTS (vectype)))
2533 {
2534 if (dump_enabled_p ())
2535 {
2536 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2537 "vector mask type ");
2538 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2539 dump_printf (MSG_MISSED_OPTIMIZATION,
2540 " does not match vector data type ");
2541 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2542 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2543 }
2544 return false;
2545 }
2546
929b4411 2547 *mask_dt_out = mask_dt;
aaeefd88
RS
2548 *mask_vectype_out = mask_vectype;
2549 return true;
2550}
2551
3133c3b6 2552/* Return true if stored value RHS is suitable for vectorizing store
32e8e429 2553 statement STMT_INFO. When returning true, store the type of the
929b4411
RS
2554 definition in *RHS_DT_OUT, the type of the vectorized store value in
2555 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
3133c3b6
RS
2556
2557static bool
32e8e429
RS
2558vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
2559 vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
2560 vec_load_store_type *vls_type_out)
3133c3b6
RS
2561{
2562 /* In the case this is a store from a constant, make sure
2563 native_encode_expr can handle it. */
2564 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2565 {
2566 if (dump_enabled_p ())
2567 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2568 "cannot encode constant as a byte sequence.\n");
2569 return false;
2570 }
2571
929b4411 2572 enum vect_def_type rhs_dt;
3133c3b6 2573 tree rhs_vectype;
894dd753 2574 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
3133c3b6
RS
2575 {
2576 if (dump_enabled_p ())
2577 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2578 "use not simple.\n");
2579 return false;
2580 }
2581
2582 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2583 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2584 {
2585 if (dump_enabled_p ())
2586 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2587 "incompatible vector types.\n");
2588 return false;
2589 }
2590
929b4411 2591 *rhs_dt_out = rhs_dt;
3133c3b6 2592 *rhs_vectype_out = rhs_vectype;
929b4411 2593 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
3133c3b6
RS
2594 *vls_type_out = VLS_STORE_INVARIANT;
2595 else
2596 *vls_type_out = VLS_STORE;
2597 return true;
2598}
2599
82570274 2600/* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
bc9587eb
RS
2601 Note that we support masks with floating-point type, in which case the
2602 floats are interpreted as a bitmask. */
2603
2604static tree
82570274 2605vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
bc9587eb
RS
2606{
2607 if (TREE_CODE (masktype) == INTEGER_TYPE)
2608 return build_int_cst (masktype, -1);
2609 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2610 {
2611 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2612 mask = build_vector_from_val (masktype, mask);
82570274 2613 return vect_init_vector (stmt_info, mask, masktype, NULL);
bc9587eb
RS
2614 }
2615 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2616 {
2617 REAL_VALUE_TYPE r;
2618 long tmp[6];
2619 for (int j = 0; j < 6; ++j)
2620 tmp[j] = -1;
2621 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2622 tree mask = build_real (TREE_TYPE (masktype), r);
2623 mask = build_vector_from_val (masktype, mask);
82570274 2624 return vect_init_vector (stmt_info, mask, masktype, NULL);
bc9587eb
RS
2625 }
2626 gcc_unreachable ();
2627}
2628
2629/* Build an all-zero merge value of type VECTYPE while vectorizing
82570274 2630 STMT_INFO as a gather load. */
bc9587eb
RS
2631
2632static tree
82570274 2633vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
bc9587eb
RS
2634{
2635 tree merge;
2636 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2637 merge = build_int_cst (TREE_TYPE (vectype), 0);
2638 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2639 {
2640 REAL_VALUE_TYPE r;
2641 long tmp[6];
2642 for (int j = 0; j < 6; ++j)
2643 tmp[j] = 0;
2644 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2645 merge = build_real (TREE_TYPE (vectype), r);
2646 }
2647 else
2648 gcc_unreachable ();
2649 merge = build_vector_from_val (vectype, merge);
82570274 2650 return vect_init_vector (stmt_info, merge, vectype, NULL);
bc9587eb
RS
2651}
2652
32e8e429
RS
2653/* Build a gather load call while vectorizing STMT_INFO. Insert new
2654 instructions before GSI and add them to VEC_STMT. GS_INFO describes
2655 the gather load operation. If the load is conditional, MASK is the
2656 unvectorized condition, otherwise
2657 MASK is null. */
c48d2d35
RS
2658
2659static void
32e8e429
RS
2660vect_build_gather_load_calls (stmt_vec_info stmt_info,
2661 gimple_stmt_iterator *gsi,
1eede195 2662 stmt_vec_info *vec_stmt,
32e8e429 2663 gather_scatter_info *gs_info,
e4057a39 2664 tree mask)
c48d2d35 2665{
c48d2d35
RS
2666 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2667 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2668 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2669 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2670 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2671 edge pe = loop_preheader_edge (loop);
2672 enum { NARROW, NONE, WIDEN } modifier;
2673 poly_uint64 gather_off_nunits
2674 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2675
2676 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2677 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2678 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2679 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2680 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2681 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2682 tree scaletype = TREE_VALUE (arglist);
2683 gcc_checking_assert (types_compatible_p (srctype, rettype)
2684 && (!mask || types_compatible_p (srctype, masktype)));
2685
2686 tree perm_mask = NULL_TREE;
2687 tree mask_perm_mask = NULL_TREE;
2688 if (known_eq (nunits, gather_off_nunits))
2689 modifier = NONE;
2690 else if (known_eq (nunits * 2, gather_off_nunits))
2691 {
2692 modifier = WIDEN;
2693
2694 /* Currently widening gathers and scatters are only supported for
2695 fixed-length vectors. */
2696 int count = gather_off_nunits.to_constant ();
2697 vec_perm_builder sel (count, count, 1);
2698 for (int i = 0; i < count; ++i)
2699 sel.quick_push (i | (count / 2));
2700
2701 vec_perm_indices indices (sel, 1, count);
2702 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2703 indices);
2704 }
2705 else if (known_eq (nunits, gather_off_nunits * 2))
2706 {
2707 modifier = NARROW;
2708
2709 /* Currently narrowing gathers and scatters are only supported for
2710 fixed-length vectors. */
2711 int count = nunits.to_constant ();
2712 vec_perm_builder sel (count, count, 1);
2713 sel.quick_grow (count);
2714 for (int i = 0; i < count; ++i)
2715 sel[i] = i < count / 2 ? i : i + count / 2;
2716 vec_perm_indices indices (sel, 2, count);
2717 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2718
2719 ncopies *= 2;
2720
2721 if (mask)
2722 {
2723 for (int i = 0; i < count; ++i)
2724 sel[i] = i | (count / 2);
2725 indices.new_vector (sel, 2, count);
2726 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2727 }
2728 }
2729 else
2730 gcc_unreachable ();
2731
86a91c0a
RS
2732 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
2733 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
c48d2d35
RS
2734
2735 tree ptr = fold_convert (ptrtype, gs_info->base);
2736 if (!is_gimple_min_invariant (ptr))
2737 {
2738 gimple_seq seq;
2739 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2740 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2741 gcc_assert (!new_bb);
2742 }
2743
2744 tree scale = build_int_cst (scaletype, gs_info->scale);
2745
2746 tree vec_oprnd0 = NULL_TREE;
2747 tree vec_mask = NULL_TREE;
2748 tree src_op = NULL_TREE;
2749 tree mask_op = NULL_TREE;
2750 tree prev_res = NULL_TREE;
2751 stmt_vec_info prev_stmt_info = NULL;
2752
2753 if (!mask)
2754 {
86a91c0a
RS
2755 src_op = vect_build_zero_merge_argument (stmt_info, rettype);
2756 mask_op = vect_build_all_ones_mask (stmt_info, masktype);
c48d2d35
RS
2757 }
2758
2759 for (int j = 0; j < ncopies; ++j)
2760 {
2761 tree op, var;
c48d2d35
RS
2762 if (modifier == WIDEN && (j & 1))
2763 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
86a91c0a 2764 perm_mask, stmt_info, gsi);
c48d2d35
RS
2765 else if (j == 0)
2766 op = vec_oprnd0
86a91c0a 2767 = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
c48d2d35 2768 else
e4057a39
RS
2769 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2770 vec_oprnd0);
c48d2d35
RS
2771
2772 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2773 {
2774 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2775 TYPE_VECTOR_SUBPARTS (idxtype)));
2776 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2777 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
e1bd7296 2778 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
86a91c0a 2779 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
c48d2d35
RS
2780 op = var;
2781 }
2782
2783 if (mask)
2784 {
2785 if (mask_perm_mask && (j & 1))
2786 mask_op = permute_vec_elements (mask_op, mask_op,
86a91c0a 2787 mask_perm_mask, stmt_info, gsi);
c48d2d35
RS
2788 else
2789 {
2790 if (j == 0)
86a91c0a 2791 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
c48d2d35 2792 else
e4057a39
RS
2793 vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2794 vec_mask);
c48d2d35
RS
2795
2796 mask_op = vec_mask;
2797 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2798 {
2799 gcc_assert
2800 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2801 TYPE_VECTOR_SUBPARTS (masktype)));
2802 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2803 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
e1bd7296
RS
2804 gassign *new_stmt
2805 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
86a91c0a 2806 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
c48d2d35
RS
2807 mask_op = var;
2808 }
2809 }
2810 src_op = mask_op;
2811 }
2812
e1bd7296
RS
2813 gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2814 mask_op, scale);
c48d2d35 2815
e1bd7296 2816 stmt_vec_info new_stmt_info;
c48d2d35
RS
2817 if (!useless_type_conversion_p (vectype, rettype))
2818 {
2819 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2820 TYPE_VECTOR_SUBPARTS (rettype)));
2821 op = vect_get_new_ssa_name (rettype, vect_simple_var);
e1bd7296 2822 gimple_call_set_lhs (new_call, op);
86a91c0a 2823 vect_finish_stmt_generation (stmt_info, new_call, gsi);
c48d2d35
RS
2824 var = make_ssa_name (vec_dest);
2825 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
e1bd7296 2826 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
86a91c0a
RS
2827 new_stmt_info
2828 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
c48d2d35
RS
2829 }
2830 else
2831 {
e1bd7296
RS
2832 var = make_ssa_name (vec_dest, new_call);
2833 gimple_call_set_lhs (new_call, var);
86a91c0a
RS
2834 new_stmt_info
2835 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
c48d2d35
RS
2836 }
2837
c48d2d35
RS
2838 if (modifier == NARROW)
2839 {
2840 if ((j & 1) == 0)
2841 {
2842 prev_res = var;
2843 continue;
2844 }
86a91c0a
RS
2845 var = permute_vec_elements (prev_res, var, perm_mask,
2846 stmt_info, gsi);
e1bd7296 2847 new_stmt_info = loop_vinfo->lookup_def (var);
c48d2d35
RS
2848 }
2849
ddf98a96 2850 if (prev_stmt_info == NULL)
e1bd7296 2851 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
c48d2d35 2852 else
e1bd7296
RS
2853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
2854 prev_stmt_info = new_stmt_info;
c48d2d35
RS
2855 }
2856}
2857
bfaa08b7
RS
2858/* Prepare the base and offset in GS_INFO for vectorization.
2859 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
82570274
RS
2860 to the vectorized offset argument for the first copy of STMT_INFO.
2861 STMT_INFO is the statement described by GS_INFO and LOOP is the
2862 containing loop. */
bfaa08b7
RS
2863
2864static void
82570274 2865vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
bfaa08b7
RS
2866 gather_scatter_info *gs_info,
2867 tree *dataref_ptr, tree *vec_offset)
2868{
2869 gimple_seq stmts = NULL;
2870 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2871 if (stmts != NULL)
2872 {
2873 basic_block new_bb;
2874 edge pe = loop_preheader_edge (loop);
2875 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2876 gcc_assert (!new_bb);
2877 }
2878 tree offset_type = TREE_TYPE (gs_info->offset);
2879 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
82570274 2880 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
bfaa08b7
RS
2881 offset_vectype);
2882}
2883
ab2fc782
RS
2884/* Prepare to implement a grouped or strided load or store using
2885 the gather load or scatter store operation described by GS_INFO.
32e8e429 2886 STMT_INFO is the load or store statement.
ab2fc782
RS
2887
2888 Set *DATAREF_BUMP to the amount that should be added to the base
2889 address after each copy of the vectorized statement. Set *VEC_OFFSET
2890 to an invariant offset vector in which element I has the value
2891 I * DR_STEP / SCALE. */
2892
2893static void
32e8e429
RS
2894vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
2895 loop_vec_info loop_vinfo,
ab2fc782
RS
2896 gather_scatter_info *gs_info,
2897 tree *dataref_bump, tree *vec_offset)
2898{
ab2fc782
RS
2899 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2900 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2901 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2902 gimple_seq stmts;
2903
2904 tree bump = size_binop (MULT_EXPR,
2905 fold_convert (sizetype, DR_STEP (dr)),
2906 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2907 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2908 if (stmts)
2909 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2910
2911 /* The offset given in GS_INFO can have pointer type, so use the element
2912 type of the vector instead. */
2913 tree offset_type = TREE_TYPE (gs_info->offset);
2914 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2915 offset_type = TREE_TYPE (offset_vectype);
2916
2917 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2918 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2919 ssize_int (gs_info->scale));
2920 step = fold_convert (offset_type, step);
2921 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2922
2923 /* Create {0, X, X*2, X*3, ...}. */
2924 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2925 build_zero_cst (offset_type), step);
2926 if (stmts)
2927 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2928}
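
/* Illustrative sketch (not part of GCC): the two values computed above
   for DR_STEP = 12 bytes, scale = 4 and a 4-element vector.  The bump
   is 12 * 4 = 48 bytes per copy and the invariant offset vector is
   { 0, 3, 6, 9 }, since X = DR_STEP / SCALE = 3.  Hypothetical names.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const long step = 12, scale = 4, nunits = 4;
  long bump = step * nunits;	/* added to the base after each copy */
  long x = step / scale;	/* element step of the offset vector */
  printf ("bump = %ld, offsets = {", bump);
  for (long i = 0; i < nunits; ++i)
    printf (" %ld", i * x);
  printf (" }\n");
  return 0;
}
#endif
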
2929
2930/* Return the amount that should be added to a vector pointer to move
89fa689a 2931 to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference
ab2fc782
RS
2932 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2933 vectorization. */
2934
2935static tree
89fa689a 2936vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
ab2fc782
RS
2937 vect_memory_access_type memory_access_type)
2938{
2939 if (memory_access_type == VMAT_INVARIANT)
2940 return size_zero_node;
2941
2942 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
89fa689a 2943 tree step = vect_dr_behavior (dr_info)->step;
ab2fc782
RS
2944 if (tree_int_cst_sgn (step) == -1)
2945 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2946 return iv_step;
2947}
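
/* Illustrative sketch (not part of GCC): the increment logic above for
   constant sizes.  A 16-byte aggregate type with a negative DR_STEP
   yields -16, so the pointer walks down through memory; an invariant
   access yields 0.  Hypothetical helper.  */
#if 0
static long
data_ptr_increment (long aggr_size, int step_sign, _Bool invariant_p)
{
  if (invariant_p)
    return 0;			/* VMAT_INVARIANT: pointer never moves.  */
  return step_sign < 0 ? -aggr_size : aggr_size;
}
#endif
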
2948
37b14185
RB
2949/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2950
2951static bool
32e8e429 2952vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
1eede195 2953 stmt_vec_info *vec_stmt, slp_tree slp_node,
e4057a39 2954 tree vectype_in, stmt_vector_for_cost *cost_vec)
37b14185
RB
2955{
2956 tree op, vectype;
32e8e429 2957 gcall *stmt = as_a <gcall *> (stmt_info->stmt);
e4057a39 2958 vec_info *vinfo = stmt_info->vinfo;
37b14185 2959 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
928686b1
RS
2960 unsigned ncopies;
2961 unsigned HOST_WIDE_INT nunits, num_bytes;
37b14185
RB
2962
2963 op = gimple_call_arg (stmt, 0);
2964 vectype = STMT_VINFO_VECTYPE (stmt_info);
928686b1
RS
2965
2966 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2967 return false;
37b14185
RB
2968
2969 /* Multiple types in SLP are handled by creating the appropriate number of
2970 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2971 case of SLP. */
2972 if (slp_node)
2973 ncopies = 1;
2974 else
e8f142e2 2975 ncopies = vect_get_num_copies (loop_vinfo, vectype);
37b14185
RB
2976
2977 gcc_assert (ncopies >= 1);
2978
2979 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2980 if (! char_vectype)
2981 return false;
2982
928686b1
RS
2983 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2984 return false;
2985
794e3180 2986 unsigned word_bytes = num_bytes / nunits;
908a1a16 2987
d980067b
RS
2988 /* The encoding uses one stepped pattern for each byte in the word. */
2989 vec_perm_builder elts (num_bytes, word_bytes, 3);
2990 for (unsigned i = 0; i < 3; ++i)
37b14185 2991 for (unsigned j = 0; j < word_bytes; ++j)
908a1a16 2992 elts.quick_push ((i + 1) * word_bytes - j - 1);
37b14185 2993
e3342de4
RS
2994 vec_perm_indices indices (elts, 1, num_bytes);
2995 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
37b14185
RB
2996 return false;
2997
2998 if (! vec_stmt)
2999 {
3000 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
adac3a68 3001 DUMP_VECT_SCOPE ("vectorizable_bswap");
78604de0 3002 if (! slp_node)
37b14185 3003 {
68435eb2
RB
3004 record_stmt_cost (cost_vec,
3005 1, vector_stmt, stmt_info, 0, vect_prologue);
3006 record_stmt_cost (cost_vec,
3007 ncopies, vec_perm, stmt_info, 0, vect_body);
37b14185
RB
3008 }
3009 return true;
3010 }
3011
736d0f28 3012 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
37b14185
RB
3013
3014 /* Transform. */
3015 vec<tree> vec_oprnds = vNULL;
e1bd7296 3016 stmt_vec_info new_stmt_info = NULL;
37b14185
RB
3017 stmt_vec_info prev_stmt_info = NULL;
3018 for (unsigned j = 0; j < ncopies; j++)
3019 {
3020 /* Handle uses. */
3021 if (j == 0)
86a91c0a 3022 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
37b14185 3023 else
e4057a39 3024 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
37b14185
RB
3025
3026 /* Arguments are ready. Create the new vector stmt. */
3027 unsigned i;
3028 tree vop;
3029 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3030 {
e1bd7296 3031 gimple *new_stmt;
37b14185
RB
3032 tree tem = make_ssa_name (char_vectype);
3033 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3034 char_vectype, vop));
86a91c0a 3035 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
37b14185
RB
3036 tree tem2 = make_ssa_name (char_vectype);
3037 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3038 tem, tem, bswap_vconst);
86a91c0a 3039 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
37b14185
RB
3040 tem = make_ssa_name (vectype);
3041 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3042 vectype, tem2));
86a91c0a
RS
3043 new_stmt_info
3044 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
37b14185 3045 if (slp_node)
e1bd7296 3046 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
37b14185
RB
3047 }
3048
3049 if (slp_node)
3050 continue;
3051
3052 if (j == 0)
e1bd7296 3053 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
37b14185 3054 else
e1bd7296 3055 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
37b14185 3056
e1bd7296 3057 prev_stmt_info = new_stmt_info;
37b14185
RB
3058 }
3059
3060 vec_oprnds.release ();
3061 return true;
3062}
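For reference, a standalone C sketch of the permutation selector that vectorizable_bswap feeds to VEC_PERM_EXPR. The real code encodes only the first three stepped groups in the vec_perm_builder and lets the encoding extend them; this sketch expands the full selector for one concrete shape (16 bytes, 4-byte words).

#include <stdio.h>

/* Expand the bswap byte-permutation selector: within every
   WORD_BYTES-sized element the bytes are reversed, giving
   { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 } for a 16-byte
   vector of 32-bit words.  Element (i, j) is
   (i + 1) * word_bytes - j - 1, as in the builder loop above.  */
int
main (void)
{
  unsigned num_bytes = 16, word_bytes = 4;
  unsigned sel[16];

  for (unsigned i = 0; i < num_bytes / word_bytes; i++)
    for (unsigned j = 0; j < word_bytes; j++)
      sel[i * word_bytes + j] = (i + 1) * word_bytes - j - 1;

  for (unsigned k = 0; k < num_bytes; k++)
    printf ("%u ", sel[k]);
  printf ("\n");
  return 0;
}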
3063
b1b6836e
RS
3064/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3065 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3066 in a single step. On success, store the binary pack code in
3067 *CONVERT_CODE. */
3068
3069static bool
3070simple_integer_narrowing (tree vectype_out, tree vectype_in,
3071 tree_code *convert_code)
3072{
3073 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3074 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3075 return false;
3076
3077 tree_code code;
3078 int multi_step_cvt = 0;
3079 auto_vec <tree, 8> interm_types;
3080 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3081 &code, &multi_step_cvt,
3082 &interm_types)
3083 || multi_step_cvt)
3084 return false;
3085
3086 *convert_code = code;
3087 return true;
3088}
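A standalone sketch of the single-step narrowing this helper tests for, using int32 to int16 as an example; the check deliberately fails whenever more than one pack step would be needed (multi_step_cvt != 0).

#include <stdio.h>
#include <stdint.h>

/* Scalar model of one VEC_PACK_TRUNC_EXPR step: two 4-lane int32
   vectors are truncated and concatenated into one 8-lane int16
   vector.  simple_integer_narrowing succeeds only when a single
   such step converts VECTYPE_IN to VECTYPE_OUT.  */
int
main (void)
{
  int32_t a[4] = { 1, 2, 3, 4 }, b[4] = { 5, 6, 7, 8 };
  int16_t out[8];

  for (int i = 0; i < 4; i++)
    {
      out[i] = (int16_t) a[i];
      out[4 + i] = (int16_t) b[i];
    }
  for (int i = 0; i < 8; i++)
    printf ("%d ", out[i]);
  printf ("\n");
  return 0;
}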
5ce9450f 3089
ebfd146a
IR
3090/* Function vectorizable_call.
3091
32e8e429
RS
3092 Check if STMT_INFO performs a function call that can be vectorized.
3093 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3094 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3095 Return true if STMT_INFO is vectorizable in this way. */
ebfd146a
IR
3096
3097static bool
32e8e429 3098vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
1eede195
RS
3099 stmt_vec_info *vec_stmt, slp_tree slp_node,
3100 stmt_vector_for_cost *cost_vec)
ebfd146a 3101{
538dd0b7 3102 gcall *stmt;
ebfd146a
IR
3103 tree vec_dest;
3104 tree scalar_dest;
0267732b 3105 tree op;
ebfd146a 3106 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
32e8e429 3107 stmt_vec_info prev_stmt_info;
ebfd146a 3108 tree vectype_out, vectype_in;
c7bda0f4
RS
3109 poly_uint64 nunits_in;
3110 poly_uint64 nunits_out;
ebfd146a 3111 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
190c2236 3112 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 3113 vec_info *vinfo = stmt_info->vinfo;
81c40241 3114 tree fndecl, new_temp, rhs_type;
2c58d42c
RS
3115 enum vect_def_type dt[4]
3116 = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
3117 vect_unknown_def_type };
3118 int ndts = ARRAY_SIZE (dt);
ebfd146a 3119 int ncopies, j;
2c58d42c
RS
3120 auto_vec<tree, 8> vargs;
3121 auto_vec<tree, 8> orig_vargs;
ebfd146a
IR
3122 enum { NARROW, NONE, WIDEN } modifier;
3123 size_t i, nargs;
9d5e7640 3124 tree lhs;
ebfd146a 3125
190c2236 3126 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
ebfd146a
IR
3127 return false;
3128
66c16fd9
RB
3129 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3130 && ! vec_stmt)
ebfd146a
IR
3131 return false;
3132
86a91c0a
RS
3133 /* Is STMT_INFO a vectorizable call? */
3134 stmt = dyn_cast <gcall *> (stmt_info->stmt);
538dd0b7 3135 if (!stmt)
ebfd146a
IR
3136 return false;
3137
5ce9450f 3138 if (gimple_call_internal_p (stmt)
bfaa08b7 3139 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
f307441a 3140 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
c3a8f964
RS
3141 /* Handled by vectorizable_load and vectorizable_store. */
3142 return false;
5ce9450f 3143
0136f8f0
AH
3144 if (gimple_call_lhs (stmt) == NULL_TREE
3145 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
ebfd146a
IR
3146 return false;
3147
0136f8f0 3148 gcc_checking_assert (!stmt_can_throw_internal (stmt));
5a2c1986 3149
b690cc0f
RG
3150 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3151
ebfd146a
IR
3152 /* Process function arguments. */
3153 rhs_type = NULL_TREE;
b690cc0f 3154 vectype_in = NULL_TREE;
ebfd146a
IR
3155 nargs = gimple_call_num_args (stmt);
3156
1b1562a5
MM
3157 /* Bail out if the function has more than four arguments; we do not have
3158 interesting builtin functions to vectorize with more than two arguments
3159 except for fma and a possible mask argument. No arguments is also not good. */
2c58d42c 3160 if (nargs == 0 || nargs > 4)
ebfd146a
IR
3161 return false;
3162
74bf76ed 3163 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2c58d42c
RS
3164 combined_fn cfn = gimple_call_combined_fn (stmt);
3165 if (cfn == CFN_GOMP_SIMD_LANE)
74bf76ed
JJ
3166 {
3167 nargs = 0;
3168 rhs_type = unsigned_type_node;
3169 }
3170
2c58d42c
RS
3171 int mask_opno = -1;
3172 if (internal_fn_p (cfn))
3173 mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
3174
ebfd146a
IR
3175 for (i = 0; i < nargs; i++)
3176 {
b690cc0f
RG
3177 tree opvectype;
3178
ebfd146a 3179 op = gimple_call_arg (stmt, i);
2c58d42c
RS
3180 if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
3181 {
3182 if (dump_enabled_p ())
3183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3184 "use not simple.\n");
3185 return false;
3186 }
3187
3188 /* Skip the mask argument to an internal function. This operand
3189 has been converted via a pattern if necessary. */
3190 if ((int) i == mask_opno)
3191 continue;
ebfd146a
IR
3192
3193 /* We can only handle calls with arguments of the same type. */
3194 if (rhs_type
8533c9d8 3195 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
ebfd146a 3196 {
73fbfcad 3197 if (dump_enabled_p ())
78c60e3d 3198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 3199 "argument types differ.\n");
ebfd146a
IR
3200 return false;
3201 }
b690cc0f
RG
3202 if (!rhs_type)
3203 rhs_type = TREE_TYPE (op);
ebfd146a 3204
b690cc0f
RG
3205 if (!vectype_in)
3206 vectype_in = opvectype;
3207 else if (opvectype
3208 && opvectype != vectype_in)
3209 {
73fbfcad 3210 if (dump_enabled_p ())
78c60e3d 3211 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 3212 "argument vector types differ.\n");
b690cc0f
RG
3213 return false;
3214 }
3215 }
3216 /* If all arguments are external or constant defs, use a vector type with
3217 the same size as the output vector type. */
ebfd146a 3218 if (!vectype_in)
b690cc0f 3219 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
7d8930a0
IR
3220 if (vec_stmt)
3221 gcc_assert (vectype_in);
3222 if (!vectype_in)
3223 {
73fbfcad 3224 if (dump_enabled_p ())
7d8930a0 3225 {
78c60e3d
SS
3226 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3227 "no vectype for scalar type ");
3228 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
e645e942 3229 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7d8930a0
IR
3230 }
3231
3232 return false;
3233 }
ebfd146a
IR
3234
3235 /* FORNOW */
b690cc0f
RG
3236 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3237 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
c7bda0f4 3238 if (known_eq (nunits_in * 2, nunits_out))
ebfd146a 3239 modifier = NARROW;
c7bda0f4 3240 else if (known_eq (nunits_out, nunits_in))
ebfd146a 3241 modifier = NONE;
c7bda0f4 3242 else if (known_eq (nunits_out * 2, nunits_in))
ebfd146a
IR
3243 modifier = WIDEN;
3244 else
3245 return false;
3246
70439f0d
RS
3247 /* We only handle functions that do not read or clobber memory. */
3248 if (gimple_vuse (stmt))
3249 {
3250 if (dump_enabled_p ())
3251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3252 "function reads from or writes to memory.\n");
3253 return false;
3254 }
3255
ebfd146a
IR
3256 /* For now, we only vectorize functions if a target specific builtin
3257 is available. TODO -- in some cases, it might be profitable to
3258 insert the calls for pieces of the vector, in order to be able
3259 to vectorize other operations in the loop. */
70439f0d
RS
3260 fndecl = NULL_TREE;
3261 internal_fn ifn = IFN_LAST;
70439f0d
RS
3262 tree callee = gimple_call_fndecl (stmt);
3263
3264 /* First try using an internal function. */
b1b6836e
RS
3265 tree_code convert_code = ERROR_MARK;
3266 if (cfn != CFN_LAST
3267 && (modifier == NONE
3268 || (modifier == NARROW
3269 && simple_integer_narrowing (vectype_out, vectype_in,
3270 &convert_code))))
70439f0d
RS
3271 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3272 vectype_in);
3273
3274 /* If that fails, try asking for a target-specific built-in function. */
3275 if (ifn == IFN_LAST)
3276 {
3277 if (cfn != CFN_LAST)
3278 fndecl = targetm.vectorize.builtin_vectorized_function
3279 (cfn, vectype_out, vectype_in);
7672aa9b 3280 else if (callee)
70439f0d
RS
3281 fndecl = targetm.vectorize.builtin_md_vectorized_function
3282 (callee, vectype_out, vectype_in);
3283 }
3284
3285 if (ifn == IFN_LAST && !fndecl)
ebfd146a 3286 {
70439f0d 3287 if (cfn == CFN_GOMP_SIMD_LANE
74bf76ed
JJ
3288 && !slp_node
3289 && loop_vinfo
3290 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3291 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3292 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3293 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3294 {
3295 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3296 { 0, 1, 2, ... vf - 1 } vector. */
3297 gcc_assert (nargs == 0);
3298 }
37b14185
RB
3299 else if (modifier == NONE
3300 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3301 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3302 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
86a91c0a 3303 return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
e4057a39 3304 vectype_in, cost_vec);
74bf76ed
JJ
3305 else
3306 {
3307 if (dump_enabled_p ())
3308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 3309 "function is not vectorizable.\n");
74bf76ed
JJ
3310 return false;
3311 }
ebfd146a
IR
3312 }
3313
fce57248 3314 if (slp_node)
190c2236 3315 ncopies = 1;
b1b6836e 3316 else if (modifier == NARROW && ifn == IFN_LAST)
e8f142e2 3317 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
ebfd146a 3318 else
e8f142e2 3319 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
ebfd146a
IR
3320
3321 /* Sanity check: make sure that at least one copy of the vectorized stmt
3322 needs to be generated. */
3323 gcc_assert (ncopies >= 1);
3324
ed623edb 3325 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
ebfd146a
IR
3326 if (!vec_stmt) /* transformation not required. */
3327 {
3328 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
adac3a68 3329 DUMP_VECT_SCOPE ("vectorizable_call");
68435eb2
RB
3330 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
3331 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3332 record_stmt_cost (cost_vec, ncopies / 2,
3333 vec_promote_demote, stmt_info, 0, vect_body);
b1b6836e 3334
2c58d42c
RS
3335 if (loop_vinfo && mask_opno >= 0)
3336 {
3337 unsigned int nvectors = (slp_node
3338 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
3339 : ncopies);
3340 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out);
3341 }
ebfd146a
IR
3342 return true;
3343 }
3344
67b8dbac 3345 /* Transform. */
ebfd146a 3346
73fbfcad 3347 if (dump_enabled_p ())
e645e942 3348 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
ebfd146a
IR
3349
3350 /* Handle def. */
3351 scalar_dest = gimple_call_lhs (stmt);
3352 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3353
2c58d42c
RS
3354 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
3355
e1bd7296 3356 stmt_vec_info new_stmt_info = NULL;
ebfd146a 3357 prev_stmt_info = NULL;
b1b6836e 3358 if (modifier == NONE || ifn != IFN_LAST)
ebfd146a 3359 {
b1b6836e 3360 tree prev_res = NULL_TREE;
2c58d42c
RS
3361 vargs.safe_grow (nargs);
3362 orig_vargs.safe_grow (nargs);
ebfd146a
IR
3363 for (j = 0; j < ncopies; ++j)
3364 {
3365 /* Build argument list for the vectorized call. */
190c2236
JJ
3366 if (slp_node)
3367 {
ef062b13 3368 auto_vec<vec<tree> > vec_defs (nargs);
9771b263 3369 vec<tree> vec_oprnds0;
190c2236
JJ
3370
3371 for (i = 0; i < nargs; i++)
2c58d42c 3372 vargs[i] = gimple_call_arg (stmt, i);
306b0c92 3373 vect_get_slp_defs (vargs, slp_node, &vec_defs);
37b5ec8f 3374 vec_oprnds0 = vec_defs[0];
190c2236
JJ
3375
3376 /* Arguments are ready. Create the new vector stmt. */
9771b263 3377 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
190c2236
JJ
3378 {
3379 size_t k;
3380 for (k = 0; k < nargs; k++)
3381 {
37b5ec8f 3382 vec<tree> vec_oprndsk = vec_defs[k];
9771b263 3383 vargs[k] = vec_oprndsk[i];
190c2236 3384 }
b1b6836e
RS
3385 if (modifier == NARROW)
3386 {
2c58d42c
RS
3387 /* We don't define any narrowing conditional functions
3388 at present. */
3389 gcc_assert (mask_opno < 0);
b1b6836e 3390 tree half_res = make_ssa_name (vectype_in);
a844293d
RS
3391 gcall *call
3392 = gimple_build_call_internal_vec (ifn, vargs);
3393 gimple_call_set_lhs (call, half_res);
3394 gimple_call_set_nothrow (call, true);
e1bd7296 3395 new_stmt_info
86a91c0a 3396 = vect_finish_stmt_generation (stmt_info, call, gsi);
b1b6836e
RS
3397 if ((i & 1) == 0)
3398 {
3399 prev_res = half_res;
3400 continue;
3401 }
3402 new_temp = make_ssa_name (vec_dest);
e1bd7296
RS
3403 gimple *new_stmt
3404 = gimple_build_assign (new_temp, convert_code,
3405 prev_res, half_res);
3406 new_stmt_info
86a91c0a
RS
3407 = vect_finish_stmt_generation (stmt_info, new_stmt,
3408 gsi);
b1b6836e 3409 }
70439f0d 3410 else
b1b6836e 3411 {
2c58d42c
RS
3412 if (mask_opno >= 0 && masked_loop_p)
3413 {
3414 unsigned int vec_num = vec_oprnds0.length ();
3415 /* Always true for SLP. */
3416 gcc_assert (ncopies == 1);
3417 tree mask = vect_get_loop_mask (gsi, masks, vec_num,
3418 vectype_out, i);
3419 vargs[mask_opno] = prepare_load_store_mask
3420 (TREE_TYPE (mask), mask, vargs[mask_opno], gsi);
3421 }
3422
a844293d 3423 gcall *call;
b1b6836e 3424 if (ifn != IFN_LAST)
a844293d 3425 call = gimple_build_call_internal_vec (ifn, vargs);
b1b6836e 3426 else
a844293d
RS
3427 call = gimple_build_call_vec (fndecl, vargs);
3428 new_temp = make_ssa_name (vec_dest, call);
3429 gimple_call_set_lhs (call, new_temp);
3430 gimple_call_set_nothrow (call, true);
e1bd7296 3431 new_stmt_info
86a91c0a 3432 = vect_finish_stmt_generation (stmt_info, call, gsi);
b1b6836e 3433 }
e1bd7296 3434 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
190c2236
JJ
3435 }
3436
3437 for (i = 0; i < nargs; i++)
3438 {
37b5ec8f 3439 vec<tree> vec_oprndsi = vec_defs[i];
9771b263 3440 vec_oprndsi.release ();
190c2236 3441 }
190c2236
JJ
3442 continue;
3443 }
3444
ebfd146a
IR
3445 for (i = 0; i < nargs; i++)
3446 {
3447 op = gimple_call_arg (stmt, i);
3448 if (j == 0)
3449 vec_oprnd0
86a91c0a 3450 = vect_get_vec_def_for_operand (op, stmt_info);
ebfd146a 3451 else
2c58d42c 3452 vec_oprnd0
e4057a39 3453 = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]);
2c58d42c
RS
3454
3455 orig_vargs[i] = vargs[i] = vec_oprnd0;
3456 }
ebfd146a 3457
2c58d42c
RS
3458 if (mask_opno >= 0 && masked_loop_p)
3459 {
3460 tree mask = vect_get_loop_mask (gsi, masks, ncopies,
3461 vectype_out, j);
3462 vargs[mask_opno]
3463 = prepare_load_store_mask (TREE_TYPE (mask), mask,
3464 vargs[mask_opno], gsi);
ebfd146a
IR
3465 }
3466
2c58d42c 3467 if (cfn == CFN_GOMP_SIMD_LANE)
74bf76ed 3468 {
c7bda0f4 3469 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
74bf76ed 3470 tree new_var
0e22bb5a 3471 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
355fe088 3472 gimple *init_stmt = gimple_build_assign (new_var, cst);
86a91c0a 3473 vect_init_vector_1 (stmt_info, init_stmt, NULL);
b731b390 3474 new_temp = make_ssa_name (vec_dest);
e1bd7296
RS
3475 gimple *new_stmt = gimple_build_assign (new_temp, new_var);
3476 new_stmt_info
86a91c0a 3477 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
74bf76ed 3478 }
b1b6836e
RS
3479 else if (modifier == NARROW)
3480 {
2c58d42c
RS
3481 /* We don't define any narrowing conditional functions at
3482 present. */
3483 gcc_assert (mask_opno < 0);
b1b6836e 3484 tree half_res = make_ssa_name (vectype_in);
a844293d
RS
3485 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3486 gimple_call_set_lhs (call, half_res);
3487 gimple_call_set_nothrow (call, true);
86a91c0a
RS
3488 new_stmt_info
3489 = vect_finish_stmt_generation (stmt_info, call, gsi);
b1b6836e
RS
3490 if ((j & 1) == 0)
3491 {
3492 prev_res = half_res;
3493 continue;
3494 }
3495 new_temp = make_ssa_name (vec_dest);
e1bd7296
RS
3496 gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
3497 prev_res, half_res);
3498 new_stmt_info
86a91c0a 3499 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
b1b6836e 3500 }
74bf76ed
JJ
3501 else
3502 {
a844293d 3503 gcall *call;
70439f0d 3504 if (ifn != IFN_LAST)
a844293d 3505 call = gimple_build_call_internal_vec (ifn, vargs);
70439f0d 3506 else
a844293d 3507 call = gimple_build_call_vec (fndecl, vargs);
e1bd7296 3508 new_temp = make_ssa_name (vec_dest, call);
a844293d
RS
3509 gimple_call_set_lhs (call, new_temp);
3510 gimple_call_set_nothrow (call, true);
86a91c0a
RS
3511 new_stmt_info
3512 = vect_finish_stmt_generation (stmt_info, call, gsi);
74bf76ed 3513 }
ebfd146a 3514
b1b6836e 3515 if (j == (modifier == NARROW ? 1 : 0))
e1bd7296 3516 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
ebfd146a 3517 else
e1bd7296 3518 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
ebfd146a 3519
e1bd7296 3520 prev_stmt_info = new_stmt_info;
ebfd146a 3521 }
b1b6836e
RS
3522 }
3523 else if (modifier == NARROW)
3524 {
2c58d42c
RS
3525 /* We don't define any narrowing conditional functions at present. */
3526 gcc_assert (mask_opno < 0);
ebfd146a
IR
3527 for (j = 0; j < ncopies; ++j)
3528 {
3529 /* Build argument list for the vectorized call. */
3530 if (j == 0)
9771b263 3531 vargs.create (nargs * 2);
ebfd146a 3532 else
9771b263 3533 vargs.truncate (0);
ebfd146a 3534
190c2236
JJ
3535 if (slp_node)
3536 {
ef062b13 3537 auto_vec<vec<tree> > vec_defs (nargs);
9771b263 3538 vec<tree> vec_oprnds0;
190c2236
JJ
3539
3540 for (i = 0; i < nargs; i++)
9771b263 3541 vargs.quick_push (gimple_call_arg (stmt, i));
306b0c92 3542 vect_get_slp_defs (vargs, slp_node, &vec_defs);
37b5ec8f 3543 vec_oprnds0 = vec_defs[0];
190c2236
JJ
3544
3545 /* Arguments are ready. Create the new vector stmt. */
9771b263 3546 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
190c2236
JJ
3547 {
3548 size_t k;
9771b263 3549 vargs.truncate (0);
190c2236
JJ
3550 for (k = 0; k < nargs; k++)
3551 {
37b5ec8f 3552 vec<tree> vec_oprndsk = vec_defs[k];
9771b263
DN
3553 vargs.quick_push (vec_oprndsk[i]);
3554 vargs.quick_push (vec_oprndsk[i + 1]);
190c2236 3555 }
a844293d 3556 gcall *call;
70439f0d 3557 if (ifn != IFN_LAST)
a844293d 3558 call = gimple_build_call_internal_vec (ifn, vargs);
70439f0d 3559 else
a844293d
RS
3560 call = gimple_build_call_vec (fndecl, vargs);
3561 new_temp = make_ssa_name (vec_dest, call);
3562 gimple_call_set_lhs (call, new_temp);
3563 gimple_call_set_nothrow (call, true);
e1bd7296 3564 new_stmt_info
86a91c0a 3565 = vect_finish_stmt_generation (stmt_info, call, gsi);
e1bd7296 3566 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
190c2236
JJ
3567 }
3568
3569 for (i = 0; i < nargs; i++)
3570 {
37b5ec8f 3571 vec<tree> vec_oprndsi = vec_defs[i];
9771b263 3572 vec_oprndsi.release ();
190c2236 3573 }
190c2236
JJ
3574 continue;
3575 }
3576
ebfd146a
IR
3577 for (i = 0; i < nargs; i++)
3578 {
3579 op = gimple_call_arg (stmt, i);
3580 if (j == 0)
3581 {
3582 vec_oprnd0
86a91c0a 3583 = vect_get_vec_def_for_operand (op, stmt_info);
ebfd146a 3584 vec_oprnd1
e4057a39 3585 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
ebfd146a
IR
3586 }
3587 else
3588 {
e1bd7296
RS
3589 vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt,
3590 2 * i + 1);
ebfd146a 3591 vec_oprnd0
e4057a39 3592 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
ebfd146a 3593 vec_oprnd1
e4057a39 3594 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
ebfd146a
IR
3595 }
3596
9771b263
DN
3597 vargs.quick_push (vec_oprnd0);
3598 vargs.quick_push (vec_oprnd1);
ebfd146a
IR
3599 }
3600
e1bd7296 3601 gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
ebfd146a
IR
3602 new_temp = make_ssa_name (vec_dest, new_stmt);
3603 gimple_call_set_lhs (new_stmt, new_temp);
86a91c0a
RS
3604 new_stmt_info
3605 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
ebfd146a
IR
3606
3607 if (j == 0)
e1bd7296 3608 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
ebfd146a 3609 else
e1bd7296 3610 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
ebfd146a 3611
e1bd7296 3612 prev_stmt_info = new_stmt_info;
ebfd146a
IR
3613 }
3614
3615 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
ebfd146a 3616 }
b1b6836e
RS
3617 else
3618 /* No current target implements this case. */
3619 return false;
ebfd146a 3620
9771b263 3621 vargs.release ();
ebfd146a 3622
ebfd146a
IR
3623 /* The call in STMT_INFO might prevent it from being removed in dce.
3624 However, we cannot remove it here, due to the way the ssa name
3625 it defines is mapped to the new definition. So just replace the
3626 rhs of the statement with something harmless. */
3627
dd34c087
JJ
3628 if (slp_node)
3629 return true;
3630
211cd1e2 3631 stmt_info = vect_orig_stmt (stmt_info);
ed7b8123 3632 lhs = gimple_get_lhs (stmt_info->stmt);
3cc2fa2a 3633
e1bd7296
RS
3634 gassign *new_stmt
3635 = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
9d97912b 3636 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
ebfd146a
IR
3637
3638 return true;
3639}
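The NONE/NARROW/WIDEN classification above depends only on the lane-count ratio between the input and output vector types. A minimal sketch follows; it assumes constant lane counts, whereas the real code compares poly_uint64 values with known_eq.

#include <stdio.h>

/* Sketch of the modifier classification in vectorizable_call:
   only 1:1, 1:2 and 2:1 lane-count ratios between the input and
   output vector types are supported.  */
enum modifier { NARROW, NONE, WIDEN, UNSUPPORTED };

static enum modifier
classify (unsigned nunits_in, unsigned nunits_out)
{
  if (nunits_in * 2 == nunits_out)
    return NARROW;   /* e.g. 4 x int64 in, 8 x int32 out */
  if (nunits_out == nunits_in)
    return NONE;
  if (nunits_out * 2 == nunits_in)
    return WIDEN;    /* e.g. 8 x int16 in, 4 x int32 out */
  return UNSUPPORTED;
}

int
main (void)
{
  printf ("%d %d %d %d\n", classify (4, 8), classify (4, 4),
          classify (8, 4), classify (4, 16));   /* 0 1 2 3 */
  return 0;
}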
3640
3641
0136f8f0
AH
3642struct simd_call_arg_info
3643{
3644 tree vectype;
3645 tree op;
0136f8f0 3646 HOST_WIDE_INT linear_step;
34e82342 3647 enum vect_def_type dt;
0136f8f0 3648 unsigned int align;
17b658af 3649 bool simd_lane_linear;
0136f8f0
AH
3650};
3651
17b658af
JJ
3652/* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3653 is linear within simd lane (but not within whole loop), note it in
3654 *ARGINFO. */
3655
3656static void
3657vect_simd_lane_linear (tree op, struct loop *loop,
3658 struct simd_call_arg_info *arginfo)
3659{
355fe088 3660 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
17b658af
JJ
3661
3662 if (!is_gimple_assign (def_stmt)
3663 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3664 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3665 return;
3666
3667 tree base = gimple_assign_rhs1 (def_stmt);
3668 HOST_WIDE_INT linear_step = 0;
3669 tree v = gimple_assign_rhs2 (def_stmt);
3670 while (TREE_CODE (v) == SSA_NAME)
3671 {
3672 tree t;
3673 def_stmt = SSA_NAME_DEF_STMT (v);
3674 if (is_gimple_assign (def_stmt))
3675 switch (gimple_assign_rhs_code (def_stmt))
3676 {
3677 case PLUS_EXPR:
3678 t = gimple_assign_rhs2 (def_stmt);
3679 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3680 return;
3681 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3682 v = gimple_assign_rhs1 (def_stmt);
3683 continue;
3684 case MULT_EXPR:
3685 t = gimple_assign_rhs2 (def_stmt);
3686 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3687 return;
3688 linear_step = tree_to_shwi (t);
3689 v = gimple_assign_rhs1 (def_stmt);
3690 continue;
3691 CASE_CONVERT:
3692 t = gimple_assign_rhs1 (def_stmt);
3693 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3694 || (TYPE_PRECISION (TREE_TYPE (v))
3695 < TYPE_PRECISION (TREE_TYPE (t))))
3696 return;
3697 if (!linear_step)
3698 linear_step = 1;
3699 v = t;
3700 continue;
3701 default:
3702 return;
3703 }
8e4284d0 3704 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
17b658af
JJ
3705 && loop->simduid
3706 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3707 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3708 == loop->simduid))
3709 {
3710 if (!linear_step)
3711 linear_step = 1;
3712 arginfo->linear_step = linear_step;
3713 arginfo->op = base;
3714 arginfo->simd_lane_linear = true;
3715 return;
3716 }
3717 }
3718}
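The pattern this helper walks corresponds, at source level, to an address that advances by a fixed step per SIMD lane. A hedged standalone illustration (plain C, no OpenMP; names and values illustrative):

#include <stdio.h>

/* Scalar model of a simd-lane-linear operand: OP = BASE + LANE * STEP,
   possibly with extra constant additions and conversions in between,
   which the walker above folds into BASE and LINEAR_STEP.  Such an
   operand is linear within one SIMD lane even if it is not a simple
   induction across the whole loop.  */
int
main (void)
{
  char base[64];
  long linear_step = 8;

  for (long lane = 0; lane < 4; lane++)
    {
      char *op = base + lane * linear_step;
      printf ("lane %ld -> base + %ld\n", lane, (long) (op - base));
    }
  return 0;
}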
3719
cf1b2ba4
RS
3720/* Return the number of elements in vector type VECTYPE, which is associated
3721 with a SIMD clone. At present these vectors always have a constant
3722 length. */
3723
3724static unsigned HOST_WIDE_INT
3725simd_clone_subparts (tree vectype)
3726{
928686b1 3727 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
cf1b2ba4
RS
3728}
3729
0136f8f0
AH
3730/* Function vectorizable_simd_clone_call.
3731
32e8e429 3732 Check if STMT_INFO performs a function call that can be vectorized
0136f8f0 3733 by calling a simd clone of the function.
32e8e429
RS
3734 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3735 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3736 Return true if STMT_INFO is vectorizable in this way. */
0136f8f0
AH
3737
3738static bool
32e8e429
RS
3739vectorizable_simd_clone_call (stmt_vec_info stmt_info,
3740 gimple_stmt_iterator *gsi,
1eede195 3741 stmt_vec_info *vec_stmt, slp_tree slp_node,
68435eb2 3742 stmt_vector_for_cost *)
0136f8f0
AH
3743{
3744 tree vec_dest;
3745 tree scalar_dest;
3746 tree op, type;
3747 tree vec_oprnd0 = NULL_TREE;
32e8e429 3748 stmt_vec_info prev_stmt_info;
0136f8f0
AH
3749 tree vectype;
3750 unsigned int nunits;
3751 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3752 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 3753 vec_info *vinfo = stmt_info->vinfo;
0136f8f0 3754 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
81c40241 3755 tree fndecl, new_temp;
0136f8f0 3756 int ncopies, j;
00426f9a 3757 auto_vec<simd_call_arg_info> arginfo;
0136f8f0
AH
3758 vec<tree> vargs = vNULL;
3759 size_t i, nargs;
3760 tree lhs, rtype, ratype;
e7a74006 3761 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
0136f8f0
AH
3762
3763 /* Is STMT a vectorizable call? */
32e8e429
RS
3764 gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
3765 if (!stmt)
0136f8f0
AH
3766 return false;
3767
3768 fndecl = gimple_call_fndecl (stmt);
3769 if (fndecl == NULL_TREE)
3770 return false;
3771
d52f5295 3772 struct cgraph_node *node = cgraph_node::get (fndecl);
0136f8f0
AH
3773 if (node == NULL || node->simd_clones == NULL)
3774 return false;
3775
3776 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3777 return false;
3778
66c16fd9
RB
3779 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3780 && ! vec_stmt)
0136f8f0
AH
3781 return false;
3782
3783 if (gimple_call_lhs (stmt)
3784 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3785 return false;
3786
3787 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3788
3789 vectype = STMT_VINFO_VECTYPE (stmt_info);
3790
86a91c0a 3791 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
0136f8f0
AH
3792 return false;
3793
3794 /* FORNOW */
fce57248 3795 if (slp_node)
0136f8f0
AH
3796 return false;
3797
3798 /* Process function arguments. */
3799 nargs = gimple_call_num_args (stmt);
3800
3801 /* Bail out if the function has zero arguments. */
3802 if (nargs == 0)
3803 return false;
3804
00426f9a 3805 arginfo.reserve (nargs, true);
0136f8f0
AH
3806
3807 for (i = 0; i < nargs; i++)
3808 {
3809 simd_call_arg_info thisarginfo;
3810 affine_iv iv;
3811
3812 thisarginfo.linear_step = 0;
3813 thisarginfo.align = 0;
3814 thisarginfo.op = NULL_TREE;
17b658af 3815 thisarginfo.simd_lane_linear = false;
0136f8f0
AH
3816
3817 op = gimple_call_arg (stmt, i);
894dd753 3818 if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
81c40241 3819 &thisarginfo.vectype)
0136f8f0
AH
3820 || thisarginfo.dt == vect_uninitialized_def)
3821 {
3822 if (dump_enabled_p ())
3823 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3824 "use not simple.\n");
0136f8f0
AH
3825 return false;
3826 }
3827
3828 if (thisarginfo.dt == vect_constant_def
3829 || thisarginfo.dt == vect_external_def)
3830 gcc_assert (thisarginfo.vectype == NULL_TREE);
3831 else
3832 gcc_assert (thisarginfo.vectype != NULL_TREE);
3833
6c9e85fb
JJ
3834 /* For linear arguments, the analyze phase should have saved
3835 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
17b658af
JJ
3836 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3837 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
6c9e85fb
JJ
3838 {
3839 gcc_assert (vec_stmt);
3840 thisarginfo.linear_step
17b658af 3841 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
6c9e85fb 3842 thisarginfo.op
17b658af
JJ
3843 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3844 thisarginfo.simd_lane_linear
3845 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3846 == boolean_true_node);
6c9e85fb
JJ
3847 /* If loop has been peeled for alignment, we need to adjust it. */
3848 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3849 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
17b658af 3850 if (n1 != n2 && !thisarginfo.simd_lane_linear)
6c9e85fb
JJ
3851 {
3852 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
17b658af 3853 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
6c9e85fb
JJ
3854 tree opt = TREE_TYPE (thisarginfo.op);
3855 bias = fold_convert (TREE_TYPE (step), bias);
3856 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3857 thisarginfo.op
3858 = fold_build2 (POINTER_TYPE_P (opt)
3859 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3860 thisarginfo.op, bias);
3861 }
3862 }
3863 else if (!vec_stmt
3864 && thisarginfo.dt != vect_constant_def
3865 && thisarginfo.dt != vect_external_def
3866 && loop_vinfo
3867 && TREE_CODE (op) == SSA_NAME
3868 && simple_iv (loop, loop_containing_stmt (stmt), op,
3869 &iv, false)
3870 && tree_fits_shwi_p (iv.step))
0136f8f0
AH
3871 {
3872 thisarginfo.linear_step = tree_to_shwi (iv.step);
3873 thisarginfo.op = iv.base;
3874 }
3875 else if ((thisarginfo.dt == vect_constant_def
3876 || thisarginfo.dt == vect_external_def)
3877 && POINTER_TYPE_P (TREE_TYPE (op)))
3878 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
17b658af
JJ
3879 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3880 linear too. */
3881 if (POINTER_TYPE_P (TREE_TYPE (op))
3882 && !thisarginfo.linear_step
3883 && !vec_stmt
3884 && thisarginfo.dt != vect_constant_def
3885 && thisarginfo.dt != vect_external_def
3886 && loop_vinfo
3887 && !slp_node
3888 && TREE_CODE (op) == SSA_NAME)
3889 vect_simd_lane_linear (op, loop, &thisarginfo);
0136f8f0
AH
3890
3891 arginfo.quick_push (thisarginfo);
3892 }
3893
d9f21f6a
RS
3894 unsigned HOST_WIDE_INT vf;
3895 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3896 {
3897 if (dump_enabled_p ())
3898 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3899 "not considering SIMD clones; not yet supported"
3900 " for variable-width vectors.\n");
3901 return NULL;
3902 }
3903
0136f8f0
AH
3904 unsigned int badness = 0;
3905 struct cgraph_node *bestn = NULL;
6c9e85fb
JJ
3906 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3907 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
0136f8f0
AH
3908 else
3909 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3910 n = n->simdclone->next_clone)
3911 {
3912 unsigned int this_badness = 0;
d9f21f6a 3913 if (n->simdclone->simdlen > vf
0136f8f0
AH
3914 || n->simdclone->nargs != nargs)
3915 continue;
d9f21f6a
RS
3916 if (n->simdclone->simdlen < vf)
3917 this_badness += (exact_log2 (vf)
0136f8f0
AH
3918 - exact_log2 (n->simdclone->simdlen)) * 1024;
3919 if (n->simdclone->inbranch)
3920 this_badness += 2048;
3921 int target_badness = targetm.simd_clone.usable (n);
3922 if (target_badness < 0)
3923 continue;
3924 this_badness += target_badness * 512;
3925 /* FORNOW: Have to add code to add the mask argument. */
3926 if (n->simdclone->inbranch)
3927 continue;
3928 for (i = 0; i < nargs; i++)
3929 {
3930 switch (n->simdclone->args[i].arg_type)
3931 {
3932 case SIMD_CLONE_ARG_TYPE_VECTOR:
3933 if (!useless_type_conversion_p
3934 (n->simdclone->args[i].orig_type,
3935 TREE_TYPE (gimple_call_arg (stmt, i))))
3936 i = -1;
3937 else if (arginfo[i].dt == vect_constant_def
3938 || arginfo[i].dt == vect_external_def
3939 || arginfo[i].linear_step)
3940 this_badness += 64;
3941 break;
3942 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3943 if (arginfo[i].dt != vect_constant_def
3944 && arginfo[i].dt != vect_external_def)
3945 i = -1;
3946 break;
3947 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
d9a6bd32 3948 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
0136f8f0
AH
3949 if (arginfo[i].dt == vect_constant_def
3950 || arginfo[i].dt == vect_external_def
3951 || (arginfo[i].linear_step
3952 != n->simdclone->args[i].linear_step))
3953 i = -1;
3954 break;
3955 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
d9a6bd32
JJ
3956 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3957 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
e01d41e5
JJ
3958 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3959 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3960 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
0136f8f0
AH
3961 /* FORNOW */
3962 i = -1;
3963 break;
3964 case SIMD_CLONE_ARG_TYPE_MASK:
3965 gcc_unreachable ();
3966 }
3967 if (i == (size_t) -1)
3968 break;
3969 if (n->simdclone->args[i].alignment > arginfo[i].align)
3970 {
3971 i = -1;
3972 break;
3973 }
3974 if (arginfo[i].align)
3975 this_badness += (exact_log2 (arginfo[i].align)
3976 - exact_log2 (n->simdclone->args[i].alignment));
3977 }
3978 if (i == (size_t) -1)
3979 continue;
3980 if (bestn == NULL || this_badness < badness)
3981 {
3982 bestn = n;
3983 badness = this_badness;
3984 }
3985 }
3986
3987 if (bestn == NULL)
00426f9a 3988 return false;
0136f8f0
AH
3989
3990 for (i = 0; i < nargs; i++)
3991 if ((arginfo[i].dt == vect_constant_def
3992 || arginfo[i].dt == vect_external_def)
3993 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3994 {
3995 arginfo[i].vectype
3996 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3997 i)));
3998 if (arginfo[i].vectype == NULL
cf1b2ba4 3999 || (simd_clone_subparts (arginfo[i].vectype)
0136f8f0 4000 > bestn->simdclone->simdlen))
00426f9a 4001 return false;
0136f8f0
AH
4002 }
4003
4004 fndecl = bestn->decl;
4005 nunits = bestn->simdclone->simdlen;
d9f21f6a 4006 ncopies = vf / nunits;
0136f8f0
AH
4007
4008 /* If the function isn't const, only allow it in simd loops where the
4009 user has asserted that at least nunits consecutive iterations can be
4010 performed using SIMD instructions. */
4011 if ((loop == NULL || (unsigned) loop->safelen < nunits)
4012 && gimple_vuse (stmt))
00426f9a 4013 return false;
0136f8f0
AH
4014
4015 /* Sanity check: make sure that at least one copy of the vectorized stmt
4016 needs to be generated. */
4017 gcc_assert (ncopies >= 1);
4018
4019 if (!vec_stmt) /* transformation not required. */
4020 {
6c9e85fb
JJ
4021 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4022 for (i = 0; i < nargs; i++)
7adb26f2
JJ
4023 if ((bestn->simdclone->args[i].arg_type
4024 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4025 || (bestn->simdclone->args[i].arg_type
4026 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
6c9e85fb 4027 {
17b658af 4028 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
6c9e85fb
JJ
4029 + 1);
4030 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4031 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4032 ? size_type_node : TREE_TYPE (arginfo[i].op);
4033 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4034 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
17b658af
JJ
4035 tree sll = arginfo[i].simd_lane_linear
4036 ? boolean_true_node : boolean_false_node;
4037 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
6c9e85fb 4038 }
0136f8f0 4039 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
adac3a68 4040 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
68435eb2 4041/* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
0136f8f0
AH
4042 return true;
4043 }
4044
67b8dbac 4045 /* Transform. */
0136f8f0
AH
4046
4047 if (dump_enabled_p ())
4048 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4049
4050 /* Handle def. */
4051 scalar_dest = gimple_call_lhs (stmt);
4052 vec_dest = NULL_TREE;
4053 rtype = NULL_TREE;
4054 ratype = NULL_TREE;
4055 if (scalar_dest)
4056 {
4057 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4058 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4059 if (TREE_CODE (rtype) == ARRAY_TYPE)
4060 {
4061 ratype = rtype;
4062 rtype = TREE_TYPE (ratype);
4063 }
4064 }
4065
4066 prev_stmt_info = NULL;
4067 for (j = 0; j < ncopies; ++j)
4068 {
4069 /* Build argument list for the vectorized call. */
4070 if (j == 0)
4071 vargs.create (nargs);
4072 else
4073 vargs.truncate (0);
4074
4075 for (i = 0; i < nargs; i++)
4076 {
4077 unsigned int k, l, m, o;
4078 tree atype;
4079 op = gimple_call_arg (stmt, i);
4080 switch (bestn->simdclone->args[i].arg_type)
4081 {
4082 case SIMD_CLONE_ARG_TYPE_VECTOR:
4083 atype = bestn->simdclone->args[i].vector_type;
cf1b2ba4 4084 o = nunits / simd_clone_subparts (atype);
0136f8f0
AH
4085 for (m = j * o; m < (j + 1) * o; m++)
4086 {
cf1b2ba4
RS
4087 if (simd_clone_subparts (atype)
4088 < simd_clone_subparts (arginfo[i].vectype))
0136f8f0 4089 {
73a699ae 4090 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
cf1b2ba4
RS
4091 k = (simd_clone_subparts (arginfo[i].vectype)
4092 / simd_clone_subparts (atype));
0136f8f0
AH
4093 gcc_assert ((k & (k - 1)) == 0);
4094 if (m == 0)
4095 vec_oprnd0
86a91c0a 4096 = vect_get_vec_def_for_operand (op, stmt_info);
0136f8f0
AH
4097 else
4098 {
4099 vec_oprnd0 = arginfo[i].op;
4100 if ((m & (k - 1)) == 0)
4101 vec_oprnd0
e4057a39 4102 = vect_get_vec_def_for_stmt_copy (vinfo,
0136f8f0
AH
4103 vec_oprnd0);
4104 }
4105 arginfo[i].op = vec_oprnd0;
4106 vec_oprnd0
4107 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
92e29a5e 4108 bitsize_int (prec),
0136f8f0 4109 bitsize_int ((m & (k - 1)) * prec));
e1bd7296 4110 gassign *new_stmt
b731b390 4111 = gimple_build_assign (make_ssa_name (atype),
0136f8f0 4112 vec_oprnd0);
86a91c0a 4113 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
0136f8f0
AH
4114 vargs.safe_push (gimple_assign_lhs (new_stmt));
4115 }
4116 else
4117 {
cf1b2ba4
RS
4118 k = (simd_clone_subparts (atype)
4119 / simd_clone_subparts (arginfo[i].vectype));
0136f8f0
AH
4120 gcc_assert ((k & (k - 1)) == 0);
4121 vec<constructor_elt, va_gc> *ctor_elts;
4122 if (k != 1)
4123 vec_alloc (ctor_elts, k);
4124 else
4125 ctor_elts = NULL;
4126 for (l = 0; l < k; l++)
4127 {
4128 if (m == 0 && l == 0)
4129 vec_oprnd0
86a91c0a 4130 = vect_get_vec_def_for_operand (op, stmt_info);
0136f8f0
AH
4131 else
4132 vec_oprnd0
e4057a39 4133 = vect_get_vec_def_for_stmt_copy (vinfo,
0136f8f0
AH
4134 arginfo[i].op);
4135 arginfo[i].op = vec_oprnd0;
4136 if (k == 1)
4137 break;
4138 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4139 vec_oprnd0);
4140 }
4141 if (k == 1)
4142 vargs.safe_push (vec_oprnd0);
4143 else
4144 {
4145 vec_oprnd0 = build_constructor (atype, ctor_elts);
e1bd7296 4146 gassign *new_stmt
b731b390 4147 = gimple_build_assign (make_ssa_name (atype),
0136f8f0 4148 vec_oprnd0);
86a91c0a
RS
4149 vect_finish_stmt_generation (stmt_info, new_stmt,
4150 gsi);
0136f8f0
AH
4151 vargs.safe_push (gimple_assign_lhs (new_stmt));
4152 }
4153 }
4154 }
4155 break;
4156 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4157 vargs.safe_push (op);
4158 break;
4159 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
7adb26f2 4160 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
0136f8f0
AH
4161 if (j == 0)
4162 {
4163 gimple_seq stmts;
4164 arginfo[i].op
4165 = force_gimple_operand (arginfo[i].op, &stmts, true,
4166 NULL_TREE);
4167 if (stmts != NULL)
4168 {
4169 basic_block new_bb;
4170 edge pe = loop_preheader_edge (loop);
4171 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4172 gcc_assert (!new_bb);
4173 }
17b658af
JJ
4174 if (arginfo[i].simd_lane_linear)
4175 {
4176 vargs.safe_push (arginfo[i].op);
4177 break;
4178 }
b731b390 4179 tree phi_res = copy_ssa_name (op);
538dd0b7 4180 gphi *new_phi = create_phi_node (phi_res, loop->header);
4fbeb363 4181 loop_vinfo->add_stmt (new_phi);
0136f8f0
AH
4182 add_phi_arg (new_phi, arginfo[i].op,
4183 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4184 enum tree_code code
4185 = POINTER_TYPE_P (TREE_TYPE (op))
4186 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4187 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4188 ? sizetype : TREE_TYPE (op);
807e902e
KZ
4189 widest_int cst
4190 = wi::mul (bestn->simdclone->args[i].linear_step,
4191 ncopies * nunits);
4192 tree tcst = wide_int_to_tree (type, cst);
b731b390 4193 tree phi_arg = copy_ssa_name (op);
e1bd7296 4194 gassign *new_stmt
0d0e4a03 4195 = gimple_build_assign (phi_arg, code, phi_res, tcst);
0136f8f0
AH
4196 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4197 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4fbeb363 4198 loop_vinfo->add_stmt (new_stmt);
0136f8f0
AH
4199 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4200 UNKNOWN_LOCATION);
4201 arginfo[i].op = phi_res;
4202 vargs.safe_push (phi_res);
4203 }
4204 else
4205 {
4206 enum tree_code code
4207 = POINTER_TYPE_P (TREE_TYPE (op))
4208 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4209 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4210 ? sizetype : TREE_TYPE (op);
807e902e
KZ
4211 widest_int cst
4212 = wi::mul (bestn->simdclone->args[i].linear_step,
4213 j * nunits);
4214 tree tcst = wide_int_to_tree (type, cst);
b731b390 4215 new_temp = make_ssa_name (TREE_TYPE (op));
e1bd7296
RS
4216 gassign *new_stmt
4217 = gimple_build_assign (new_temp, code,
4218 arginfo[i].op, tcst);
86a91c0a 4219 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
0136f8f0
AH
4220 vargs.safe_push (new_temp);
4221 }
4222 break;
7adb26f2
JJ
4223 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4224 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
0136f8f0 4225 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
e01d41e5
JJ
4226 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4227 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4228 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
0136f8f0
AH
4229 default:
4230 gcc_unreachable ();
4231 }
4232 }
4233
e1bd7296 4234 gcall *new_call = gimple_build_call_vec (fndecl, vargs);
0136f8f0
AH
4235 if (vec_dest)
4236 {
cf1b2ba4 4237 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
0136f8f0 4238 if (ratype)
b731b390 4239 new_temp = create_tmp_var (ratype);
cf1b2ba4
RS
4240 else if (simd_clone_subparts (vectype)
4241 == simd_clone_subparts (rtype))
e1bd7296 4242 new_temp = make_ssa_name (vec_dest, new_call);
0136f8f0 4243 else
e1bd7296
RS
4244 new_temp = make_ssa_name (rtype, new_call);
4245 gimple_call_set_lhs (new_call, new_temp);
0136f8f0 4246 }
e1bd7296 4247 stmt_vec_info new_stmt_info
86a91c0a 4248 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
0136f8f0
AH
4249
4250 if (vec_dest)
4251 {
cf1b2ba4 4252 if (simd_clone_subparts (vectype) < nunits)
0136f8f0
AH
4253 {
4254 unsigned int k, l;
73a699ae
RS
4255 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4256 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
cf1b2ba4 4257 k = nunits / simd_clone_subparts (vectype);
0136f8f0
AH
4258 gcc_assert ((k & (k - 1)) == 0);
4259 for (l = 0; l < k; l++)
4260 {
4261 tree t;
4262 if (ratype)
4263 {
4264 t = build_fold_addr_expr (new_temp);
4265 t = build2 (MEM_REF, vectype, t,
73a699ae 4266 build_int_cst (TREE_TYPE (t), l * bytes));
0136f8f0
AH
4267 }
4268 else
4269 t = build3 (BIT_FIELD_REF, vectype, new_temp,
92e29a5e 4270 bitsize_int (prec), bitsize_int (l * prec));
e1bd7296 4271 gimple *new_stmt
b731b390 4272 = gimple_build_assign (make_ssa_name (vectype), t);
e1bd7296 4273 new_stmt_info
86a91c0a 4274 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
e1bd7296 4275
0136f8f0 4276 if (j == 0 && l == 0)
e1bd7296
RS
4277 STMT_VINFO_VEC_STMT (stmt_info)
4278 = *vec_stmt = new_stmt_info;
0136f8f0 4279 else
e1bd7296 4280 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
0136f8f0 4281
e1bd7296 4282 prev_stmt_info = new_stmt_info;
0136f8f0
AH
4283 }
4284
4285 if (ratype)
86a91c0a 4286 vect_clobber_variable (stmt_info, gsi, new_temp);
0136f8f0
AH
4287 continue;
4288 }
cf1b2ba4 4289 else if (simd_clone_subparts (vectype) > nunits)
0136f8f0 4290 {
cf1b2ba4
RS
4291 unsigned int k = (simd_clone_subparts (vectype)
4292 / simd_clone_subparts (rtype));
0136f8f0
AH
4293 gcc_assert ((k & (k - 1)) == 0);
4294 if ((j & (k - 1)) == 0)
4295 vec_alloc (ret_ctor_elts, k);
4296 if (ratype)
4297 {
cf1b2ba4 4298 unsigned int m, o = nunits / simd_clone_subparts (rtype);
0136f8f0
AH
4299 for (m = 0; m < o; m++)
4300 {
4301 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4302 size_int (m), NULL_TREE, NULL_TREE);
e1bd7296 4303 gimple *new_stmt
b731b390 4304 = gimple_build_assign (make_ssa_name (rtype), tem);
e1bd7296 4305 new_stmt_info
86a91c0a
RS
4306 = vect_finish_stmt_generation (stmt_info, new_stmt,
4307 gsi);
0136f8f0
AH
4308 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4309 gimple_assign_lhs (new_stmt));
4310 }
86a91c0a 4311 vect_clobber_variable (stmt_info, gsi, new_temp);
0136f8f0
AH
4312 }
4313 else
4314 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4315 if ((j & (k - 1)) != k - 1)
4316 continue;
4317 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
e1bd7296 4318 gimple *new_stmt
b731b390 4319 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
e1bd7296 4320 new_stmt_info
86a91c0a 4321 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
0136f8f0
AH
4322
4323 if ((unsigned) j == k - 1)
e1bd7296 4324 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
0136f8f0 4325 else
e1bd7296 4326 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
0136f8f0 4327
e1bd7296 4328 prev_stmt_info = new_stmt_info;
0136f8f0
AH
4329 continue;
4330 }
4331 else if (ratype)
4332 {
4333 tree t = build_fold_addr_expr (new_temp);
4334 t = build2 (MEM_REF, vectype, t,
4335 build_int_cst (TREE_TYPE (t), 0));
e1bd7296 4336 gimple *new_stmt
b731b390 4337 = gimple_build_assign (make_ssa_name (vec_dest), t);
e1bd7296 4338 new_stmt_info
86a91c0a
RS
4339 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4340 vect_clobber_variable (stmt_info, gsi, new_temp);
0136f8f0
AH
4341 }
4342 }
4343
4344 if (j == 0)
e1bd7296 4345 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
0136f8f0 4346 else
e1bd7296 4347 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
0136f8f0 4348
e1bd7296 4349 prev_stmt_info = new_stmt_info;
0136f8f0
AH
4350 }
4351
4352 vargs.release ();
4353
4354 /* The call in STMT_INFO might prevent it from being removed in dce.
4355 However, we cannot remove it here, due to the way the ssa name
4356 it defines is mapped to the new definition. So just replace the
4357 rhs of the statement with something harmless. */
4358
4359 if (slp_node)
4360 return true;
4361
e1bd7296 4362 gimple *new_stmt;
0136f8f0
AH
4363 if (scalar_dest)
4364 {
4365 type = TREE_TYPE (scalar_dest);
211cd1e2 4366 lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt);
0136f8f0
AH
4367 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4368 }
4369 else
4370 new_stmt = gimple_build_nop ();
41b6b80e 4371 vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt);
0136f8f0
AH
4372 unlink_stmt_vdef (stmt);
4373
4374 return true;
4375}
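To make the clone-selection heuristic concrete, here is a simplified standalone sketch of the badness scoring used above: among clones whose simdlen fits the vectorization factor, the widest unmasked clone wins. The clone names follow the vector-function ABI mangling style but are purely illustrative, and the per-argument checks and target hook of the real loop are omitted.

#include <stdio.h>

/* Simplified sketch of the SIMD-clone selection loop in
   vectorizable_simd_clone_call: shorter clones are penalised by
   1024 per halving relative to VF, in-branch (masked) clones by
   2048, and the lowest-badness candidate is kept.  */
static int
log2u (unsigned x)
{
  int r = -1;
  while (x) { r++; x >>= 1; }
  return r;
}

struct clone { const char *name; unsigned simdlen; int inbranch; };

int
main (void)
{
  unsigned vf = 8;
  struct clone clones[] = {
    { "_ZGVbN4_f", 4, 0 },   /* simdlen 4, unmasked */
    { "_ZGVcN8_f", 8, 0 },   /* simdlen 8, unmasked */
    { "_ZGVcM8_f", 8, 1 },   /* simdlen 8, masked */
  };
  const struct clone *best = 0;
  unsigned badness = 0;

  for (unsigned i = 0; i < sizeof clones / sizeof *clones; i++)
    {
      unsigned this_badness = 0;
      if (clones[i].simdlen > vf)
        continue;
      if (clones[i].simdlen < vf)
        this_badness += (log2u (vf) - log2u (clones[i].simdlen)) * 1024;
      if (clones[i].inbranch)
        this_badness += 2048;
      if (!best || this_badness < badness)
        {
          best = &clones[i];
          badness = this_badness;
        }
    }
  printf ("best clone: %s (badness %u)\n", best->name, badness);
  return 0;
}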
4376
4377
ebfd146a
IR
4378/* Function vect_gen_widened_results_half
4379
4380 Create a vector stmt whose code, type, number of arguments, and result
b8698a0f 4381 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
ff802fa1 4382 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
ebfd146a
IR
4383 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4384 needs to be created (DECL is a function-decl of a target-builtin).
82570274 4385 STMT_INFO is the original scalar stmt that we are vectorizing. */
ebfd146a 4386
355fe088 4387static gimple *
ebfd146a
IR
4388vect_gen_widened_results_half (enum tree_code code,
4389 tree decl,
4390 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4391 tree vec_dest, gimple_stmt_iterator *gsi,
82570274 4392 stmt_vec_info stmt_info)
b8698a0f 4393{
355fe088 4394 gimple *new_stmt;
b8698a0f
L
4395 tree new_temp;
4396
4397 /* Generate half of the widened result: */
4398 if (code == CALL_EXPR)
4399 {
4400 /* Target specific support */
ebfd146a
IR
4401 if (op_type == binary_op)
4402 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4403 else
4404 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4405 new_temp = make_ssa_name (vec_dest, new_stmt);
4406 gimple_call_set_lhs (new_stmt, new_temp);
b8698a0f
L
4407 }
4408 else
ebfd146a 4409 {
b8698a0f
L
4410 /* Generic support */
4411 gcc_assert (op_type == TREE_CODE_LENGTH (code));
ebfd146a
IR
4412 if (op_type != binary_op)
4413 vec_oprnd1 = NULL;
0d0e4a03 4414 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
ebfd146a
IR
4415 new_temp = make_ssa_name (vec_dest, new_stmt);
4416 gimple_assign_set_lhs (new_stmt, new_temp);
b8698a0f 4417 }
82570274 4418 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
ebfd146a 4419
ebfd146a
IR
4420 return new_stmt;
4421}
4422
4a00c761 4423
82570274
RS
4424/* Get vectorized definitions for loop-based vectorization of STMT_INFO.
4425 For the first operand we call vect_get_vec_def_for_operand (with OPRND
4426 containing scalar operand), and for the rest we get a copy with
4a00c761
JJ
4427 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4428 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4429 The vectors are collected into VEC_OPRNDS. */
4430
4431static void
82570274 4432vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
e4057a39 4433 vec<tree> *vec_oprnds, int multi_step_cvt)
4a00c761 4434{
e4057a39 4435 vec_info *vinfo = stmt_info->vinfo;
4a00c761
JJ
4436 tree vec_oprnd;
4437
4438 /* Get first vector operand. */
4440 /* All the vector operands except the very first one (which is the scalar
4441 operand) are stmt copies. */
4441 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
82570274 4442 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
4a00c761 4443 else
e4057a39 4444 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);
4a00c761 4445
9771b263 4446 vec_oprnds->quick_push (vec_oprnd);
4a00c761
JJ
4447
4448 /* Get second vector operand. */
e4057a39 4449 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
9771b263 4450 vec_oprnds->quick_push (vec_oprnd);
4a00c761
JJ
4451
4452 *oprnd = vec_oprnd;
4453
4454 /* For conversion in multiple steps, continue to get operands
4455 recursively. */
4456 if (multi_step_cvt)
e4057a39 4457 vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
82570274 4458 multi_step_cvt - 1);
4a00c761
JJ
4459}
4460
4461
4462/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4463 For multi-step conversions store the resulting vectors and call the function
4464 recursively. */
4465
4466static void
9771b263 4467vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
32e8e429
RS
4468 int multi_step_cvt,
4469 stmt_vec_info stmt_info,
9771b263 4470 vec<tree> vec_dsts,
4a00c761
JJ
4471 gimple_stmt_iterator *gsi,
4472 slp_tree slp_node, enum tree_code code,
4473 stmt_vec_info *prev_stmt_info)
4474{
4475 unsigned int i;
4476 tree vop0, vop1, new_tmp, vec_dest;
4a00c761 4477
9771b263 4478 vec_dest = vec_dsts.pop ();
4a00c761 4479
9771b263 4480 for (i = 0; i < vec_oprnds->length (); i += 2)
4a00c761
JJ
4481 {
4482 /* Create demotion operation. */
9771b263
DN
4483 vop0 = (*vec_oprnds)[i];
4484 vop1 = (*vec_oprnds)[i + 1];
e1bd7296 4485 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4a00c761
JJ
4486 new_tmp = make_ssa_name (vec_dest, new_stmt);
4487 gimple_assign_set_lhs (new_stmt, new_tmp);
e1bd7296 4488 stmt_vec_info new_stmt_info
86a91c0a 4489 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4a00c761
JJ
4490
4491 if (multi_step_cvt)
4492 /* Store the resulting vector for next recursive call. */
9771b263 4493 (*vec_oprnds)[i/2] = new_tmp;
4a00c761
JJ
4494 else
4495 {
4496 /* This is the last step of the conversion sequence. Store the
4497 vectors in SLP_NODE or in vector info of the scalar statement
4498 (or in STMT_VINFO_RELATED_STMT chain). */
4499 if (slp_node)
e1bd7296 4500 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
4a00c761 4501 else
c689ce1e
RB
4502 {
4503 if (!*prev_stmt_info)
e1bd7296 4504 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
c689ce1e 4505 else
e1bd7296 4506 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;
4a00c761 4507
e1bd7296 4508 *prev_stmt_info = new_stmt_info;
c689ce1e 4509 }
4a00c761
JJ
4510 }
4511 }
4512
4513 /* For multi-step demotion operations we first generate demotion operations
4514 from the source type to the intermediate types, and then combine the
4515 results (stored in VEC_OPRNDS) in demotion operation to the destination
4516 type. */
4517 if (multi_step_cvt)
4518 {
4519 /* At each level of recursion we have half of the operands we had at the
4520 previous level. */
9771b263 4521 vec_oprnds->truncate ((i+1)/2);
4a00c761 4522 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
86a91c0a
RS
4523 stmt_info, vec_dsts, gsi,
4524 slp_node, VEC_PACK_TRUNC_EXPR,
4a00c761
JJ
4525 prev_stmt_info);
4526 }
4527
9771b263 4528 vec_dsts.quick_push (vec_dest);
4a00c761
JJ
4529}
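A standalone sketch of the recursion above for a two-step int32 to int8 conversion (multi_step_cvt == 1); the pack helpers model VEC_PACK_TRUNC_EXPR, and all names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Scalar model of vect_create_vectorized_demotion_stmts with
   multi_step_cvt == 1: four 4-lane int32 vectors are packed
   pairwise into two 8-lane int16 vectors, which are packed again
   into one 16-lane int8 vector.  Each "pack" concatenates the
   truncated lanes of two neighbouring operands.  */
#define N 4

static void
pack16 (const int32_t *a, const int32_t *b, int16_t *out)
{
  for (int i = 0; i < N; i++)
    {
      out[i] = (int16_t) a[i];
      out[N + i] = (int16_t) b[i];
    }
}

static void
pack8 (const int16_t *a, const int16_t *b, int8_t *out)
{
  for (int i = 0; i < 2 * N; i++)
    {
      out[i] = (int8_t) a[i];
      out[2 * N + i] = (int8_t) b[i];
    }
}

int
main (void)
{
  int32_t v[4][N];
  int16_t m[2][2 * N];
  int8_t r[4 * N];

  for (int i = 0; i < 4; i++)
    for (int j = 0; j < N; j++)
      v[i][j] = i * N + j;

  pack16 (v[0], v[1], m[0]);   /* first level of the recursion */
  pack16 (v[2], v[3], m[1]);
  pack8 (m[0], m[1], r);       /* final VEC_PACK_TRUNC_EXPR step */

  for (int i = 0; i < 4 * N; i++)
    printf ("%d ", r[i]);
  printf ("\n");
  return 0;
}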


/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1, for a binary operation associated with scalar statement
   STMT_INFO.  For multi-step conversions store the resulting vectors and
   call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					stmt_vec_info stmt_info, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of the promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi,
						 stmt_info);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi,
						 stmt_info);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
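
/* Illustrative sketch (editor's addition, not from the original sources):
   for a widening operation each input vector yields two result vectors,
   one for the low and one for the high half of the elements, e.g. for a
   widening multiplication of short operands producing int results:

       vw0 = WIDEN_MULT_LO_EXPR <va, vb>;   /+ code1 +/
       vw1 = WIDEN_MULT_HI_EXPR <va, vb>;   /+ code2 +/

   which is why VEC_TMP is created with twice the length of
   VEC_OPRNDS0.  */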


/* Check if STMT_INFO performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			 stmt_vec_info *vec_stmt, slp_tree slp_node,
			 stmt_vector_for_cost *cost_vec)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  unsigned short fltsz;

  /* Is STMT_INFO a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && !type_has_mode_precision_p (lhs_type))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant def, use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (multiple_p (nunits_out, nunits_in))
    modifier = NARROW;
  else
    {
      gcc_checking_assert (multiple_p (nunits_in, nunits_out));
      modifier = WIDEN;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt_info, vectype_out,
					  vectype_in, &code1, &code2,
					  &multi_step_cvt, &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
	{
	  rhs_mode = rhs_mode_iter.require ();
	  if (GET_MODE_SIZE (rhs_mode) > fltsz)
	    break;

	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt_info,
						    vectype_out, cvt_type,
						    &codecvt1, &codecvt2,
						    &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    {
	      found_mode = true;
	      break;
	    }
	}

      if (!found_mode)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }

  if (!vec_stmt)		/* transformation not required.  */
    {
      DUMP_VECT_SCOPE ("vectorizable_conversion");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
				  cost_vec);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
					      cost_vec);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
					      cost_vec);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (2 * (multi_step_cvt
				 ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
			       NULL, slp_node);
	  else
	    vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      stmt_vec_info new_stmt_info;
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  gassign *new_stmt
		    = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		}

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info)
		      = *vec_stmt = new_stmt_info;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
		  prev_stmt_info = new_stmt_info;
		}
	    }
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt_info,
					 &vec_oprnds0, NULL, slp_node);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
				       &vec_oprnds1, slp_node);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1
			  = vect_get_vec_def_for_operand (op1, stmt_info);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1, stmt_info,
						      this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      stmt_vec_info new_stmt_info;
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		      new_stmt_info
			= vect_finish_stmt_generation (stmt_info, new_stmt,
						       gsi);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      gassign *new_stmt
			= gimple_build_assign (new_temp, codecvt1, vop0);
		      new_stmt_info
			= vect_finish_stmt_generation (stmt_info, new_stmt,
						       gsi);
		    }
		}
	      else
		new_stmt_info = vinfo->lookup_def (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
		  prev_stmt_info = new_stmt_info;
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		    vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    gassign *new_stmt
		      = gimple_build_assign (new_temp, codecvt1, vop0);
		    vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		  }

		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt_info, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}
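
/* Illustrative sketch (editor's addition, not from the original sources):
   for a float -> double conversion the WIDEN path above emits, per input
   vector vf, two result vectors covering the low and high elements:

       vd_lo = VEC_UNPACK_LO_EXPR <vf>;
       vd_hi = VEC_UNPACK_HI_EXPR <vf>;

   while for a same-width conversion such as int -> float the NONE path
   emits a single FLOAT_EXPR per copy.  The multi-step machinery (CVT_TYPE,
   INTERM_TYPES) only comes into play when no single supportable operation
   covers the whole conversion.  */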


/* Function vectorizable_assignment.

   Check if STMT_INFO performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			 stmt_vec_info *vec_stmt, slp_tree slp_node,
			 stmt_vector_for_cost *cost_vec)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  int ndts = 1;
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is this a vectorizable assignment?  */
  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
	  || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
		       GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
	  || !type_has_mode_precision_p (TREE_TYPE (op)))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
	 a simple assignment in case their vectypes are the same
	 boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_assignment");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      stmt_vec_info new_stmt_info = NULL;
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  gassign *new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  new_stmt_info
	    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

      prev_stmt_info = new_stmt_info;
    }

  vec_oprnds.release ();
  return true;
}
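
/* Illustrative sketch (editor's addition, not from the original sources):
   a precision-preserving conversion such as

       int a = ...;
       unsigned int b = (unsigned int) a;

   is vectorized here as a plain copy of the vectorized RHS through a
   VIEW_CONVERT_EXPR,

       vb_0 = VIEW_CONVERT_EXPR<vector(4) unsigned int>(va_0);

   since the bit-pattern of the elements does not change.  */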


/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
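
/* Illustrative sketch (editor's addition, not from the original sources):
   a caller such as the pattern recognizer can use this predicate to check
   whether a shift on SCALAR_TYPE elements is vectorizable at all before
   committing to a pattern that introduces one:

       if (vect_supportable_shift (RSHIFT_EXPR, itype))
	 ... build the pattern stmt ...

   Note that it accepts either form, vector-by-scalar or vector-by-vector,
   mirroring the probing order used in vectorizable_shift below.  */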


/* Function vectorizable_shift.

   Check if STMT_INFO performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		    stmt_vec_info *vec_stmt, slp_tree slp_node,
		    stmt_vector_for_cost *cost_vec)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is STMT_INFO a vectorizable binary/unary operation?  */
  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def, use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  stmt_vec_info op1_def_stmt_info;
  if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype,
			   &op1_def_stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector or a scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, we need to check whether the shift count is the same;
	 in loops, if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  stmt_vec_info slpstmt_info;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
	    {
	      gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
	      if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
		scalar_shift_arg = false;
	    }
	}

      /* If the shift amount is computed by a pattern stmt we cannot
	 use the scalar amount directly, thus give up and use a vector
	 shift.  */
      if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info))
	scalar_shift_arg = false;
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt_info, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt
	      && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!vec_stmt
      && !VECTOR_MODE_P (TYPE_MODE (vectype))
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_shift");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or
		 loop invariant op1 directly, without extending it to vector
		 mode first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kinds of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	}
      else
	vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      stmt_vec_info new_stmt_info = NULL;
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  new_stmt_info
	    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
      prev_stmt_info = new_stmt_info;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
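
/* Illustrative sketch (editor's addition, not from the original sources):
   for

       for (i = 0; i < n; i++)
	 a[i] = b[i] << 3;

   the shift amount is invariant, so SCALAR_SHIFT_ARG stays true and the
   constant 3 is used directly as operand 2 of the vector shift when the
   target's shl insn pattern takes a scalar count.  Shifting by c[i]
   instead makes dt[1] == vect_internal_def, which selects the
   vector/vector optab and a vectorized shift-count operand.  */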
5765
5766
ebfd146a
IR
5767/* Function vectorizable_operation.
5768
32e8e429 5769 Check if STMT_INFO performs a binary, unary or ternary operation that can
16949072 5770 be vectorized.
32e8e429
RS
5771 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5772 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5773 Return true if STMT_INFO is vectorizable in this way. */
ebfd146a
IR
5774
5775static bool
32e8e429 5776vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
1eede195 5777 stmt_vec_info *vec_stmt, slp_tree slp_node,
68435eb2 5778 stmt_vector_for_cost *cost_vec)
ebfd146a 5779{
00f07b86 5780 tree vec_dest;
ebfd146a 5781 tree scalar_dest;
16949072 5782 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
00f07b86 5783 tree vectype;
ebfd146a 5784 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
0eb952ea 5785 enum tree_code code, orig_code;
ef4bddc2 5786 machine_mode vec_mode;
ebfd146a
IR
5787 tree new_temp;
5788 int op_type;
00f07b86 5789 optab optab;
523ba738 5790 bool target_support_p;
16949072
RG
5791 enum vect_def_type dt[3]
5792 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4fc5ebf1 5793 int ndts = 3;
ebfd146a 5794 stmt_vec_info prev_stmt_info;
928686b1
RS
5795 poly_uint64 nunits_in;
5796 poly_uint64 nunits_out;
ebfd146a
IR
5797 tree vectype_out;
5798 int ncopies;
5799 int j, i;
6e1aa848
DN
5800 vec<tree> vec_oprnds0 = vNULL;
5801 vec<tree> vec_oprnds1 = vNULL;
5802 vec<tree> vec_oprnds2 = vNULL;
16949072 5803 tree vop0, vop1, vop2;
a70d6342 5804 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 5805 vec_info *vinfo = stmt_info->vinfo;
a70d6342 5806
a70d6342 5807 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
ebfd146a
IR
5808 return false;
5809
66c16fd9
RB
5810 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5811 && ! vec_stmt)
ebfd146a
IR
5812 return false;
5813
5814 /* Is STMT a vectorizable binary/unary operation? */
32e8e429
RS
5815 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5816 if (!stmt)
ebfd146a
IR
5817 return false;
5818
5819 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5820 return false;
5821
0eb952ea 5822 orig_code = code = gimple_assign_rhs_code (stmt);
ebfd146a 5823
1af4ebf5
MG
5824 /* For pointer addition and subtraction, we should use the normal
5825 plus and minus for the vector operation. */
ebfd146a
IR
5826 if (code == POINTER_PLUS_EXPR)
5827 code = PLUS_EXPR;
1af4ebf5
MG
5828 if (code == POINTER_DIFF_EXPR)
5829 code = MINUS_EXPR;
ebfd146a
IR
5830
5831 /* Support only unary or binary operations. */
5832 op_type = TREE_CODE_LENGTH (code);
16949072 5833 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
ebfd146a 5834 {
73fbfcad 5835 if (dump_enabled_p ())
78c60e3d 5836 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5837 "num. args = %d (not unary/binary/ternary op).\n",
78c60e3d 5838 op_type);
ebfd146a
IR
5839 return false;
5840 }
5841
b690cc0f
RG
5842 scalar_dest = gimple_assign_lhs (stmt);
5843 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5844
7b7b1813
RG
5845 /* Most operations cannot handle bit-precision types without extra
5846 truncations. */
045c1278 5847 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
2be65d9e 5848 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
7b7b1813
RG
5849 /* Exception are bitwise binary operations. */
5850 && code != BIT_IOR_EXPR
5851 && code != BIT_XOR_EXPR
5852 && code != BIT_AND_EXPR)
5853 {
73fbfcad 5854 if (dump_enabled_p ())
78c60e3d 5855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5856 "bit-precision arithmetic not supported.\n");
7b7b1813
RG
5857 return false;
5858 }
5859
ebfd146a 5860 op0 = gimple_assign_rhs1 (stmt);
894dd753 5861 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
ebfd146a 5862 {
73fbfcad 5863 if (dump_enabled_p ())
78c60e3d 5864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5865 "use not simple.\n");
ebfd146a
IR
5866 return false;
5867 }
b690cc0f
RG
5868 /* If op0 is an external or constant def use a vector type with
5869 the same size as the output vector type. */
5870 if (!vectype)
b036c6c5
IE
5871 {
5872 /* For boolean type we cannot determine vectype by
5873 invariant value (don't know whether it is a vector
5874 of booleans or vector of integers). We use output
5875 vectype because operations on boolean don't change
5876 type. */
2568d8a1 5877 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
b036c6c5 5878 {
2568d8a1 5879 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
b036c6c5
IE
5880 {
5881 if (dump_enabled_p ())
5882 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5883 "not supported operation on bool value.\n");
5884 return false;
5885 }
5886 vectype = vectype_out;
5887 }
5888 else
5889 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5890 }
7d8930a0
IR
5891 if (vec_stmt)
5892 gcc_assert (vectype);
5893 if (!vectype)
5894 {
73fbfcad 5895 if (dump_enabled_p ())
7d8930a0 5896 {
78c60e3d
SS
5897 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5898 "no vectype for scalar type ");
5899 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5900 TREE_TYPE (op0));
e645e942 5901 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7d8930a0
IR
5902 }
5903
5904 return false;
5905 }
b690cc0f
RG
5906
5907 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5908 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
928686b1 5909 if (maybe_ne (nunits_out, nunits_in))
b690cc0f 5910 return false;
ebfd146a 5911
16949072 5912 if (op_type == binary_op || op_type == ternary_op)
ebfd146a
IR
5913 {
5914 op1 = gimple_assign_rhs2 (stmt);
894dd753 5915 if (!vect_is_simple_use (op1, vinfo, &dt[1]))
ebfd146a 5916 {
73fbfcad 5917 if (dump_enabled_p ())
78c60e3d 5918 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5919 "use not simple.\n");
ebfd146a
IR
5920 return false;
5921 }
5922 }
16949072
RG
5923 if (op_type == ternary_op)
5924 {
5925 op2 = gimple_assign_rhs3 (stmt);
894dd753 5926 if (!vect_is_simple_use (op2, vinfo, &dt[2]))
16949072 5927 {
73fbfcad 5928 if (dump_enabled_p ())
78c60e3d 5929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5930 "use not simple.\n");
16949072
RG
5931 return false;
5932 }
5933 }
ebfd146a 5934
b690cc0f 5935 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 5936 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
b690cc0f 5937 case of SLP. */
fce57248 5938 if (slp_node)
b690cc0f
RG
5939 ncopies = 1;
5940 else
e8f142e2 5941 ncopies = vect_get_num_copies (loop_vinfo, vectype);
b690cc0f
RG
5942
5943 gcc_assert (ncopies >= 1);
5944
9dc3f7de 5945 /* Shifts are handled in vectorizable_shift (). */
ebfd146a
IR
5946 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5947 || code == RROTATE_EXPR)
9dc3f7de 5948 return false;
ebfd146a 5949
ebfd146a 5950 /* Supportable by target? */
00f07b86
RH
5951
5952 vec_mode = TYPE_MODE (vectype);
5953 if (code == MULT_HIGHPART_EXPR)
523ba738 5954 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
00f07b86
RH
5955 else
5956 {
5957 optab = optab_for_tree_code (code, vectype, optab_default);
5958 if (!optab)
5deb57cb 5959 {
73fbfcad 5960 if (dump_enabled_p ())
78c60e3d 5961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5962 "no optab.\n");
00f07b86 5963 return false;
5deb57cb 5964 }
523ba738
RS
5965 target_support_p = (optab_handler (optab, vec_mode)
5966 != CODE_FOR_nothing);
5deb57cb
JJ
5967 }
5968
523ba738 5969 if (!target_support_p)
ebfd146a 5970 {
73fbfcad 5971 if (dump_enabled_p ())
78c60e3d 5972 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5973 "op not supported by target.\n");
ebfd146a 5974 /* Check only during analysis. */
cf098191 5975 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
ca09abcb 5976 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
ebfd146a 5977 return false;
73fbfcad 5978 if (dump_enabled_p ())
e645e942
TJ
5979 dump_printf_loc (MSG_NOTE, vect_location,
5980 "proceeding using word mode.\n");
383d9c83
IR
5981 }
5982
4a00c761 5983 /* Worthwhile without SIMD support? Check only during analysis. */
5deb57cb
JJ
5984 if (!VECTOR_MODE_P (vec_mode)
5985 && !vec_stmt
ca09abcb 5986 && !vect_worthwhile_without_simd_p (vinfo, code))
7d8930a0 5987 {
73fbfcad 5988 if (dump_enabled_p ())
78c60e3d 5989 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5990 "not worthwhile without SIMD support.\n");
e34842c6 5991 return false;
7d8930a0 5992 }
ebfd146a 5993
ebfd146a
IR
5994 if (!vec_stmt) /* transformation not required. */
5995 {
4a00c761 5996 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
adac3a68 5997 DUMP_VECT_SCOPE ("vectorizable_operation");
68435eb2 5998 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
ebfd146a
IR
5999 return true;
6000 }
6001
67b8dbac 6002 /* Transform. */
ebfd146a 6003
73fbfcad 6004 if (dump_enabled_p ())
78c60e3d 6005 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 6006 "transform binary/unary operation.\n");
383d9c83 6007
0eb952ea
JJ
6008 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
6009 vectors with unsigned elements, but the result is signed. So, we
6010 need to compute the MINUS_EXPR into vectype temporary and
6011 VIEW_CONVERT_EXPR it into the final vectype_out result. */
6012 tree vec_cvt_dest = NULL_TREE;
6013 if (orig_code == POINTER_DIFF_EXPR)
7b76867b
RB
6014 {
6015 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6016 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
6017 }
6018 /* Handle def. */
6019 else
6020 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
0eb952ea 6021
ebfd146a
IR
6022 /* In case the vectorization factor (VF) is bigger than the number
6023 of elements that we can fit in a vectype (nunits), we have to generate
6024 more than one vector stmt - i.e - we need to "unroll" the
4a00c761
JJ
6025 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6026 from one copy of the vector stmt to the next, in the field
6027 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6028 stages to find the correct vector defs to be used when vectorizing
6029 stmts that use the defs of the current stmt. The example below
6030 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
6031 we need to create 4 vectorized stmts):
6032
6033 before vectorization:
6034 RELATED_STMT VEC_STMT
6035 S1: x = memref - -
6036 S2: z = x + 1 - -
6037
6038 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
6039 there):
6040 RELATED_STMT VEC_STMT
6041 VS1_0: vx0 = memref0 VS1_1 -
6042 VS1_1: vx1 = memref1 VS1_2 -
6043 VS1_2: vx2 = memref2 VS1_3 -
6044 VS1_3: vx3 = memref3 - -
6045 S1: x = load - VS1_0
6046 S2: z = x + 1 - -
6047
6048 step2: vectorize stmt S2 (done here):
6049 To vectorize stmt S2 we first need to find the relevant vector
6050 def for the first operand 'x'. This is, as usual, obtained from
6051 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6052 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6053 relevant vector def 'vx0'. Having found 'vx0' we can generate
6054 the vector stmt VS2_0, and as usual, record it in the
6055 STMT_VINFO_VEC_STMT of stmt S2.
6056 When creating the second copy (VS2_1), we obtain the relevant vector
6057 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6058 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6059 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6060 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6061 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6062 chain of stmts and pointers:
6063 RELATED_STMT VEC_STMT
6064 VS1_0: vx0 = memref0 VS1_1 -
6065 VS1_1: vx1 = memref1 VS1_2 -
6066 VS1_2: vx2 = memref2 VS1_3 -
6067 VS1_3: vx3 = memref3 - -
6068 S1: x = load - VS1_0
6069 VS2_0: vz0 = vx0 + v1 VS2_1 -
6070 VS2_1: vz1 = vx1 + v1 VS2_2 -
6071 VS2_2: vz2 = vx2 + v1 VS2_3 -
6072 VS2_3: vz3 = vx3 + v1 - -
6073 S2: z = x + 1 - VS2_0 */
ebfd146a
IR
6074
6075 prev_stmt_info = NULL;
6076 for (j = 0; j < ncopies; j++)
6077 {
6078 /* Handle uses. */
6079 if (j == 0)
4a00c761 6080 {
d6476f90 6081 if (op_type == binary_op)
86a91c0a 6082 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
306b0c92 6083 slp_node);
d6476f90
RB
6084 else if (op_type == ternary_op)
6085 {
6086 if (slp_node)
6087 {
6088 auto_vec<tree> ops(3);
6089 ops.quick_push (op0);
6090 ops.quick_push (op1);
6091 ops.quick_push (op2);
6092 auto_vec<vec<tree> > vec_defs(3);
6093 vect_get_slp_defs (ops, slp_node, &vec_defs);
6094 vec_oprnds0 = vec_defs[0];
6095 vec_oprnds1 = vec_defs[1];
6096 vec_oprnds2 = vec_defs[2];
6097 }
6098 else
6099 {
86a91c0a
RS
6100 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
6101 &vec_oprnds1, NULL);
6102 vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
6103 NULL, NULL);
d6476f90
RB
6104 }
6105 }
6106 else
6107 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
6108 slp_node);
6109 }
6110 else
6111 {
6112 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
6113 if (op_type == ternary_op)
6114 {
6115 tree vec_oprnd = vec_oprnds2.pop ();
6116 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo,
6117 vec_oprnd));
6118 }
6119 }
6120
6121 /* Arguments are ready. Create the new vector stmt. */
6122 stmt_vec_info new_stmt_info = NULL;
6123 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6124 {
6125 vop1 = ((op_type == binary_op || op_type == ternary_op)
6126 ? vec_oprnds1[i] : NULL_TREE);
6127 vop2 = ((op_type == ternary_op)
6128 ? vec_oprnds2[i] : NULL_TREE);
6129 gassign *new_stmt = gimple_build_assign (vec_dest, code,
6130 vop0, vop1, vop2);
6131 new_temp = make_ssa_name (vec_dest, new_stmt);
6132 gimple_assign_set_lhs (new_stmt, new_temp);
6133 new_stmt_info
6134 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6135 if (vec_cvt_dest)
6136 {
6137 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6138 gassign *new_stmt
6139 = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6140 new_temp);
6141 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6142 gimple_assign_set_lhs (new_stmt, new_temp);
6143 new_stmt_info
6144 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6145 }
6146 if (slp_node)
6147 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
6148 }
6149
6150 if (slp_node)
6151 continue;
6152
6153 if (j == 0)
6154 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6155 else
6156 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6157 prev_stmt_info = new_stmt_info;
6158 }
6159
6160 vec_oprnds0.release ();
6161 vec_oprnds1.release ();
6162 vec_oprnds2.release ();
6163
6164 return true;
6165}
6166
6167/* A helper function to ensure data reference DR_INFO's base alignment. */
6168
6169static void
6170 ensure_base_align (dr_vec_info *dr_info)
6171 {
6172 if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
6173 return;
6174
6175 if (dr_info->base_misaligned)
6176 {
6177 tree base_decl = dr_info->base_decl;
6178
6179 unsigned int align_base_to
6180 = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT;
6181
6182 if (decl_in_symtab_p (base_decl))
6183 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6184 else
6185 {
6186 SET_DECL_ALIGN (base_decl, align_base_to);
6187 DECL_USER_ALIGN (base_decl) = 1;
6188 }
6189 dr_info->base_misaligned = false;
6190 }
6191}
6192
6193
6194/* Function get_group_alias_ptr_type.
6195
6196 Return the alias type for the group starting at FIRST_STMT_INFO. */
6197
6198static tree
6199 get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
6200{
6201 struct data_reference *first_dr, *next_dr;
6202
6203 first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
6204 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
6205 while (next_stmt_info)
6206 {
6207 next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
6208 if (get_alias_set (DR_REF (first_dr))
6209 != get_alias_set (DR_REF (next_dr)))
6210 {
6211 if (dump_enabled_p ())
6212 dump_printf_loc (MSG_NOTE, vect_location,
6213 "conflicting alias set types.\n");
6214 return ptr_type_node;
6215 }
6216 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6217 }
6218 return reference_alias_ptr_type (DR_REF (first_dr));
6219}
6220
6221
6222/* Function vectorizable_store.
6223
6224 Check if STMT_INFO defines a non-scalar data-ref (array/pointer/structure)
6225 that can be vectorized.
6226 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6227 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6228 Return true if STMT_INFO is vectorizable in this way. */
6229
6230static bool
6231 vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6232 stmt_vec_info *vec_stmt, slp_tree slp_node,
6233 stmt_vector_for_cost *cost_vec)
6234 {
6235 tree data_ref;
6236 tree op;
6237 tree vec_oprnd = NULL_TREE;
6238 tree elem_type;
6239 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6240 struct loop *loop = NULL;
6241 machine_mode vec_mode;
6242 tree dummy;
6243 enum dr_alignment_support alignment_support_scheme;
6244 enum vect_def_type rhs_dt = vect_unknown_def_type;
6245 enum vect_def_type mask_dt = vect_unknown_def_type;
6246 stmt_vec_info prev_stmt_info = NULL;
6247 tree dataref_ptr = NULL_TREE;
6248 tree dataref_offset = NULL_TREE;
6249 gimple *ptr_incr = NULL;
6250 int ncopies;
6251 int j;
6252 stmt_vec_info first_stmt_info;
6253 bool grouped_store;
6254 unsigned int group_size, i;
6255 vec<tree> oprnds = vNULL;
6256 vec<tree> result_chain = vNULL;
6257 tree offset = NULL_TREE;
6258 vec<tree> vec_oprnds = vNULL;
6259 bool slp = (slp_node != NULL);
6260 unsigned int vec_num;
6261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6262 vec_info *vinfo = stmt_info->vinfo;
6263 tree aggr_type;
6264 gather_scatter_info gs_info;
6265 poly_uint64 vf;
6266 vec_load_store_type vls_type;
6267 tree ref_type;
6268
6269 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6270 return false;
6271
6272 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6273 && ! vec_stmt)
6274 return false;
6275
6276 /* Is vectorizable store? */
6277
6278 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6279 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
6280 {
6281 tree scalar_dest = gimple_assign_lhs (assign);
6282 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6283 && is_pattern_stmt_p (stmt_info))
6284 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6285 if (TREE_CODE (scalar_dest) != ARRAY_REF
6286 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6287 && TREE_CODE (scalar_dest) != INDIRECT_REF
6288 && TREE_CODE (scalar_dest) != COMPONENT_REF
6289 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6290 && TREE_CODE (scalar_dest) != REALPART_EXPR
6291 && TREE_CODE (scalar_dest) != MEM_REF)
6292 return false;
6293 }
6294 else
6295 {
6296 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
6297 if (!call || !gimple_call_internal_p (call))
6298 return false;
6299
6300 internal_fn ifn = gimple_call_internal_fn (call);
6301 if (!internal_store_fn_p (ifn))
c3a8f964 6302 return false;
6303
6304 if (slp_node != NULL)
6305 {
6306 if (dump_enabled_p ())
6307 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6308 "SLP of masked stores not supported.\n");
6309 return false;
6310 }
6311
6312 int mask_index = internal_fn_mask_index (ifn);
6313 if (mask_index >= 0)
6314 {
6315 mask = gimple_call_arg (call, mask_index);
6316 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
6317 &mask_vectype))
6318 return false;
6319 }
6320 }
6321
6322 op = vect_get_store_rhs (stmt_info);
6323
6324 /* Cannot have hybrid store SLP -- that would mean storing to the
6325 same location twice. */
6326 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6327
6328 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6329 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6330
6331 if (loop_vinfo)
6332 {
6333 loop = LOOP_VINFO_LOOP (loop_vinfo);
6334 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6335 }
6336 else
6337 vf = 1;
6338
6339 /* Multiple types in SLP are handled by creating the appropriate number of
6340 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6341 case of SLP. */
6342 if (slp)
6343 ncopies = 1;
6344 else
6345 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6346
6347 gcc_assert (ncopies >= 1);
6348
6349 /* FORNOW. This restriction should be relaxed. */
6350 if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
6351 {
6352 if (dump_enabled_p ())
6353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6354 "multiple types in nested loop.\n");
6355 return false;
6356 }
6357
6358 if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
6359 return false;
6360
6361 elem_type = TREE_TYPE (vectype);
6362 vec_mode = TYPE_MODE (vectype);
6363
6364 if (!STMT_VINFO_DATA_REF (stmt_info))
6365 return false;
6366
6367 vect_memory_access_type memory_access_type;
6368 if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
6369 &memory_access_type, &gs_info))
6370 return false;
6371
6372 if (mask)
6373 {
6374 if (memory_access_type == VMAT_CONTIGUOUS)
6375 {
6376 if (!VECTOR_MODE_P (vec_mode)
6377 || !can_vec_mask_load_store_p (vec_mode,
6378 TYPE_MODE (mask_vectype), false))
6379 return false;
6380 }
6381 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6382 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6383 {
6384 if (dump_enabled_p ())
6385 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6386 "unsupported access type for masked store.\n");
6387 return false;
6388 }
6389 }
6390 else
6391 {
6392 /* FORNOW. In some cases can vectorize even if data-type not supported
6393 (e.g. - array initialization with 0). */
6394 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6395 return false;
6396 }
6397
6398 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
6399 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6400 && memory_access_type != VMAT_GATHER_SCATTER
6401 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6402 if (grouped_store)
6403 {
6404 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
6405 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6406 group_size = DR_GROUP_SIZE (first_stmt_info);
6407 }
6408 else
6409 {
6410 first_stmt_info = stmt_info;
6411 first_dr_info = dr_info;
6412 group_size = vec_num = 1;
6413 }
6414
6415 if (!vec_stmt) /* transformation not required. */
6416 {
6417 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6418
6419 if (loop_vinfo
6420 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6421 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6422 memory_access_type, &gs_info);
6423
6424 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6425 vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
6426 vls_type, slp_node, cost_vec);
6427 return true;
6428 }
6429 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6430
6431 /* Transform. */
6432
6433 ensure_base_align (dr_info);
6434
6435 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6436 {
6437 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6438 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6439 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6440 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6441 edge pe = loop_preheader_edge (loop);
6442 gimple_seq seq;
6443 basic_block new_bb;
6444 enum { NARROW, NONE, WIDEN } modifier;
6445 poly_uint64 scatter_off_nunits
6446 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6447
6448 if (known_eq (nunits, scatter_off_nunits))
6449 modifier = NONE;
6450 else if (known_eq (nunits * 2, scatter_off_nunits))
6451 {
6452 modifier = WIDEN;
6453
6454 /* Currently gathers and scatters are only supported for
6455 fixed-length vectors. */
6456 unsigned int count = scatter_off_nunits.to_constant ();
6457 vec_perm_builder sel (count, count, 1);
6458 for (i = 0; i < (unsigned int) count; ++i)
6459 sel.quick_push (i | (count / 2));
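	  /* Annotation (not part of the original source): as an
	     illustration, for a hypothetical fixed count of 8 the loop
	     above pushes i | 4, giving sel = { 4, 5, 6, 7, 4, 5, 6, 7 },
	     i.e. a mask that duplicates the high half of the offset
	     vector, so the odd-numbered copies emitted below can scatter
	     with the second half of the offsets.  */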
6460
6461 vec_perm_indices indices (sel, 1, count);
6462 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6463 indices);
6464 gcc_assert (perm_mask != NULL_TREE);
6465 }
6466 else if (known_eq (nunits, scatter_off_nunits * 2))
6467 {
6468 modifier = NARROW;
6469
6470 /* Currently gathers and scatters are only supported for
6471 fixed-length vectors. */
6472 unsigned int count = nunits.to_constant ();
6473 vec_perm_builder sel (count, count, 1);
6474 for (i = 0; i < (unsigned int) count; ++i)
6475 sel.quick_push (i | (count / 2));
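	  /* Annotation (not part of the original source): the selector has
	     the same shape as in the WIDEN case, but count is now the
	     number of data elements; a hypothetical count of 8 again gives
	     sel = { 4, 5, 6, 7, 4, 5, 6, 7 }, used below with two (equal)
	     input vectors to move the high half of the source data into
	     place for each odd copy.  */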
6476
6477 vec_perm_indices indices (sel, 2, count);
6478 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6479 gcc_assert (perm_mask != NULL_TREE);
6480 ncopies *= 2;
6481 }
6482 else
6483 gcc_unreachable ();
6484
6485 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6486 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6487 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6488 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6489 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6490 scaletype = TREE_VALUE (arglist);
6491
6492 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6493 && TREE_CODE (rettype) == VOID_TYPE);
6494
6495 ptr = fold_convert (ptrtype, gs_info.base);
6496 if (!is_gimple_min_invariant (ptr))
6497 {
6498 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6499 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6500 gcc_assert (!new_bb);
6501 }
6502
6503 /* Currently we support only unconditional scatter stores,
6504 so mask should be all ones. */
6505 mask = build_int_cst (masktype, -1);
6506 mask = vect_init_vector (stmt_info, mask, masktype, NULL);
6507
6508 scale = build_int_cst (scaletype, gs_info.scale);
6509
6510 prev_stmt_info = NULL;
6511 for (j = 0; j < ncopies; ++j)
6512 {
6513 if (j == 0)
6514 {
6515 src = vec_oprnd1
6516 = vect_get_vec_def_for_operand (op, stmt_info);
6517 op = vec_oprnd0
6518 = vect_get_vec_def_for_operand (gs_info.offset, stmt_info);
6519 }
6520 else if (modifier != NONE && (j & 1))
6521 {
6522 if (modifier == WIDEN)
6523 {
6524 src = vec_oprnd1
6525 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6526 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6527 stmt_info, gsi);
6528 }
6529 else if (modifier == NARROW)
6530 {
6531 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6532 stmt_info, gsi);
6533 op = vec_oprnd0
6534 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6535 }
6536 else
6537 gcc_unreachable ();
6538 }
6539 else
6540 {
6541 src = vec_oprnd1
6542 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6543 op = vec_oprnd0
6544 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6545 }
6546
6547 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6548 {
6549 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6550 TYPE_VECTOR_SUBPARTS (srctype)));
6551 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6552 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6553 gassign *new_stmt
6554 = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6555 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6556 src = var;
6557 }
6558
6559 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6560 {
6561 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6562 TYPE_VECTOR_SUBPARTS (idxtype)));
6563 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6564 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6565 gassign *new_stmt
6566 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6567 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6568 op = var;
6569 }
6570
6571 gcall *new_stmt
6572 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6573 stmt_vec_info new_stmt_info
6574 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6575
6576 if (prev_stmt_info == NULL)
6577 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6578 else
6579 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6580 prev_stmt_info = new_stmt_info;
6581 }
6582 return true;
6583 }
6584
6585 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6586 DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
6587
6588 if (grouped_store)
6589 {
6590 /* FORNOW */
6591 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));
6592
6593 /* We vectorize all the stmts of the interleaving group when we
6594 reach the last stmt in the group. */
6595 if (DR_GROUP_STORE_COUNT (first_stmt_info)
6596 < DR_GROUP_SIZE (first_stmt_info)
6597 && !slp)
6598 {
6599 *vec_stmt = NULL;
6600 return true;
6601 }
6602
6603 if (slp)
6604 {
6605 grouped_store = false;
6606 /* VEC_NUM is the number of vect stmts to be created for this
6607 group. */
6608 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6609 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6610 gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
6611 == first_stmt_info);
6612 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6613 op = vect_get_store_rhs (first_stmt_info);
6614 }
6615 else
6616 /* VEC_NUM is the number of vect stmts to be created for this
6617 group. */
6618 vec_num = group_size;
6619
6620 ref_type = get_group_alias_ptr_type (first_stmt_info);
6621 }
6622 else
6623 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
6624
6625 if (dump_enabled_p ())
6626 dump_printf_loc (MSG_NOTE, vect_location,
6627 "transform store. ncopies = %d\n", ncopies);
6628
6629 if (memory_access_type == VMAT_ELEMENTWISE
6630 || memory_access_type == VMAT_STRIDED_SLP)
6631 {
6632 gimple_stmt_iterator incr_gsi;
6633 bool insert_after;
6634 gimple *incr;
6635 tree offvar;
6636 tree ivstep;
6637 tree running_off;
6638 tree stride_base, stride_step, alias_off;
6639 tree vec_oprnd;
6640 unsigned int g;
6641 /* Checked by get_load_store_type. */
6642 unsigned int const_nunits = nunits.to_constant ();
6643
6644 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6645 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
6646
6647 stride_base
6648 = fold_build_pointer_plus
6649 (DR_BASE_ADDRESS (first_dr_info->dr),
6650 size_binop (PLUS_EXPR,
6651 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
6652 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
6653 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
6654
6655 /* For a store with loop-invariant (but other than power-of-2)
6656 stride (i.e. not a grouped access) like so:
6657
6658 for (i = 0; i < n; i += stride)
6659 array[i] = ...;
6660
6661 we generate a new induction variable and new stores from
6662 the components of the (vectorized) rhs:
6663
6664 for (j = 0; ; j += VF*stride)
6665 vectemp = ...;
6666 tmp1 = vectemp[0];
6667 array[j] = tmp1;
6668 tmp2 = vectemp[1];
6669 array[j + stride] = tmp2;
6670 ...
6671 */
6672
6673 unsigned nstores = const_nunits;
6674 unsigned lnel = 1;
6675 tree ltype = elem_type;
6676 tree lvectype = vectype;
6677 if (slp)
6678 {
6679 if (group_size < const_nunits
6680 && const_nunits % group_size == 0)
6681 {
6682 nstores = const_nunits / group_size;
6683 lnel = group_size;
6684 ltype = build_vector_type (elem_type, group_size);
6685 lvectype = vectype;
6686
6687 /* First check if vec_extract optab doesn't support extraction
6688 of vector elts directly. */
6689 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6690 machine_mode vmode;
6691 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6692 || !VECTOR_MODE_P (vmode)
6693 || !targetm.vector_mode_supported_p (vmode)
6694 || (convert_optab_handler (vec_extract_optab,
6695 TYPE_MODE (vectype), vmode)
6696 == CODE_FOR_nothing))
6697 {
6698 /* Try to avoid emitting an extract of vector elements
6699 by performing the extracts using an integer type of the
6700 same size, extracting from a vector of those and then
6701 re-interpreting it as the original vector type if
6702 supported. */
6703 unsigned lsize
6704 = group_size * GET_MODE_BITSIZE (elmode);
6705 elmode = int_mode_for_size (lsize, 0).require ();
6706 unsigned int lnunits = const_nunits / group_size;
6707 /* If we can't construct such a vector fall back to
6708 element extracts from the original vector type and
6709 element size stores. */
6710 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6711 && VECTOR_MODE_P (vmode)
6712 && targetm.vector_mode_supported_p (vmode)
6713 && (convert_optab_handler (vec_extract_optab,
6714 vmode, elmode)
6715 != CODE_FOR_nothing))
6716 {
6717 nstores = lnunits;
6718 lnel = group_size;
6719 ltype = build_nonstandard_integer_type (lsize, 1);
6720 lvectype = build_vector_type (ltype, nstores);
6721 }
6722 /* Else fall back to vector extraction anyway.
6723 Fewer stores are more important than avoiding spilling
6724 of the vector we extract from. Compared to the
6725 construction case in vectorizable_load no store-forwarding
6726 issue exists here for reasonable archs. */
6727 }
6728 }
6729 else if (group_size >= const_nunits
6730 && group_size % const_nunits == 0)
6731 {
6732 nstores = 1;
6733 lnel = const_nunits;
6734 ltype = vectype;
6735 lvectype = vectype;
6736 }
6737 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6738 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6739 }
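	  /* Annotation (not part of the original source): a concrete
	     instance of the two branches above, assuming const_nunits == 4:
	     group_size == 2 gives nstores = 2, lnel = 2 and a 2-element
	     vector ltype (two pieces stored per vector), while
	     group_size == 8 gives nstores = 1, lnel = 4 and ltype == vectype
	     (one whole-vector store per copy).  */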
6740
6741 ivstep = stride_step;
6742 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6743 build_int_cst (TREE_TYPE (ivstep), vf));
6744
6745 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6746
6747 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6748 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6749 create_iv (stride_base, ivstep, NULL,
6750 loop, &incr_gsi, insert_after,
6751 &offvar, NULL);
6752 incr = gsi_stmt (incr_gsi);
6753 loop_vinfo->add_stmt (incr);
6754
6755 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6756
6757 prev_stmt_info = NULL;
6758 alias_off = build_int_cst (ref_type, 0);
6759 stmt_vec_info next_stmt_info = first_stmt_info;
6760 for (g = 0; g < group_size; g++)
6761 {
6762 running_off = offvar;
6763 if (g)
6764 {
6765 tree size = TYPE_SIZE_UNIT (ltype);
6766 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6767 size);
6768 tree newoff = copy_ssa_name (running_off, NULL);
6769 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6770 running_off, pos);
6771 vect_finish_stmt_generation (stmt_info, incr, gsi);
6772 running_off = newoff;
6773 }
6774 unsigned int group_el = 0;
6775 unsigned HOST_WIDE_INT
6776 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6777 for (j = 0; j < ncopies; j++)
6778 {
6779 /* We've set op and dt above, from vect_get_store_rhs,
6780 and first_stmt_info == stmt_info. */
6781 if (j == 0)
6782 {
6783 if (slp)
6784 {
6785 vect_get_vec_defs (op, NULL_TREE, stmt_info,
6786 &vec_oprnds, NULL, slp_node);
6787 vec_oprnd = vec_oprnds[0];
6788 }
6789 else
6790 {
6791 op = vect_get_store_rhs (next_stmt_info);
6792 vec_oprnd = vect_get_vec_def_for_operand
6793 (op, next_stmt_info);
6794 }
6795 }
6796 else
6797 {
6798 if (slp)
6799 vec_oprnd = vec_oprnds[j];
6800 else
6801 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo,
6802 vec_oprnd);
6803 }
6804 /* Pun the vector to extract from if necessary. */
6805 if (lvectype != vectype)
6806 {
6807 tree tem = make_ssa_name (lvectype);
6808 gimple *pun
6809 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6810 lvectype, vec_oprnd));
6811 vect_finish_stmt_generation (stmt_info, pun, gsi);
6812 vec_oprnd = tem;
6813 }
6814 for (i = 0; i < nstores; i++)
6815 {
6816 tree newref, newoff;
6817 gimple *incr, *assign;
6818 tree size = TYPE_SIZE (ltype);
6819 /* Extract the i'th component. */
6820 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6821 bitsize_int (i), size);
6822 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6823 size, pos);
6824
6825 elem = force_gimple_operand_gsi (gsi, elem, true,
6826 NULL_TREE, true,
6827 GSI_SAME_STMT);
6828
6829 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6830 group_el * elsz);
6831 newref = build2 (MEM_REF, ltype,
6832 running_off, this_off);
6833 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
6834
6835 /* And store it to *running_off. */
6836 assign = gimple_build_assign (newref, elem);
6837 stmt_vec_info assign_info
6838 = vect_finish_stmt_generation (stmt_info, assign, gsi);
6839
6840 group_el += lnel;
6841 if (! slp
6842 || group_el == group_size)
6843 {
6844 newoff = copy_ssa_name (running_off, NULL);
6845 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6846 running_off, stride_step);
6847 vect_finish_stmt_generation (stmt_info, incr, gsi);
6848
6849 running_off = newoff;
6850 group_el = 0;
6851 }
6852 if (g == group_size - 1
6853 && !slp)
f502d50e
MM
6854 {
6855 if (j == 0 && i == 0)
6856 STMT_VINFO_VEC_STMT (stmt_info)
6857 = *vec_stmt = assign_info;
6858 else
6859 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info;
6860 prev_stmt_info = assign_info;
6861 }
6862 }
f2e2a985 6863 }
6864 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6865 if (slp)
6866 break;
6867 }
6868
6869 vec_oprnds.release ();
6870 return true;
6871 }
6872
6873 auto_vec<tree> dr_chain (group_size);
6874 oprnds.create (group_size);
6875
6876 alignment_support_scheme
6877 = vect_supportable_dr_alignment (first_dr_info, false);
6878 gcc_assert (alignment_support_scheme);
6879 vec_loop_masks *loop_masks
6880 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6881 ? &LOOP_VINFO_MASKS (loop_vinfo)
6882 : NULL);
6883 /* Targets with store-lane instructions must not require explicit
6884 realignment. vect_supportable_dr_alignment always returns either
6885 dr_aligned or dr_unaligned_supported for masked operations. */
6886 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6887 && !mask
6888 && !loop_masks)
6889 || alignment_support_scheme == dr_aligned
6890 || alignment_support_scheme == dr_unaligned_supported);
6891
6892 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6893 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6894 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6895
6896 tree bump;
6897 tree vec_offset = NULL_TREE;
6898 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6899 {
6900 aggr_type = NULL_TREE;
6901 bump = NULL_TREE;
6902 }
6903 else if (memory_access_type == VMAT_GATHER_SCATTER)
6904 {
6905 aggr_type = elem_type;
6906 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
6907 &bump, &vec_offset);
6908 }
6909 else
6910 {
6911 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6912 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6913 else
6914 aggr_type = vectype;
6915 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
6916 memory_access_type);
6917 }
6918
6919 if (mask)
6920 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6921
6922 /* In case the vectorization factor (VF) is bigger than the number
6923 of elements that we can fit in a vectype (nunits), we have to generate
6924 more than one vector stmt - i.e. - we need to "unroll" the
6925 vector stmt by a factor VF/nunits. For more details see documentation in
6926 vect_get_vec_def_for_copy_stmt. */
6927
6928 /* In case of interleaving (non-unit grouped access):
6929
6930 S1: &base + 2 = x2
6931 S2: &base = x0
6932 S3: &base + 1 = x1
6933 S4: &base + 3 = x3
6934
6935 We create vectorized stores starting from base address (the access of the
6936 first stmt in the chain (S2 in the above example), when the last store stmt
6937 of the chain (S4) is reached:
6938
6939 VS1: &base = vx2
6940 VS2: &base + vec_size*1 = vx0
6941 VS3: &base + vec_size*2 = vx1
6942 VS4: &base + vec_size*3 = vx3
6943
6944 Then permutation statements are generated:
6945
6946 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6947 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6948 ...
6949
6950 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6951 (the order of the data-refs in the output of vect_permute_store_chain
6952 corresponds to the order of scalar stmts in the interleaving chain - see
6953 the documentation of vect_permute_store_chain()).
6954
6955 In case of both multiple types and interleaving, above vector stores and
6956 permutation stmts are created for every copy. The result vector stmts are
6957 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6958 STMT_VINFO_RELATED_STMT for the next copies.
6959 */
6960
6961 prev_stmt_info = NULL;
6962 tree vec_mask = NULL_TREE;
6963 for (j = 0; j < ncopies; j++)
6964 {
6965 stmt_vec_info new_stmt_info;
6966 if (j == 0)
6967 {
6968 if (slp)
6969 {
6970 /* Get vectorized arguments for SLP_NODE. */
6971 vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
6972 NULL, slp_node);
6973
6974 vec_oprnd = vec_oprnds[0];
6975 }
6976 else
6977 {
6978 /* For interleaved stores we collect vectorized defs for all the
6979 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6980 used as an input to vect_permute_store_chain(), and OPRNDS as
6981 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6982
2c53b149 6983 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
ebfd146a 6984 OPRNDS are of size 1. */
6985 stmt_vec_info next_stmt_info = first_stmt_info;
6986 for (i = 0; i < group_size; i++)
6987 {
6988 /* Since gaps are not supported for interleaved stores,
6989 DR_GROUP_SIZE is the exact number of stmts in the chain.
6990 Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case
6991 that there is no interleaving, DR_GROUP_SIZE is 1,
6992 and only one iteration of the loop will be executed. */
6993 op = vect_get_store_rhs (next_stmt_info);
6994 vec_oprnd = vect_get_vec_def_for_operand
6995 (op, next_stmt_info);
6996 dr_chain.quick_push (vec_oprnd);
6997 oprnds.quick_push (vec_oprnd);
6998 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6999 }
7000 if (mask)
7001 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
7002 mask_vectype);
7003 }
7004
7005 /* We should have caught mismatched types earlier. */
7006 gcc_assert (useless_type_conversion_p (vectype,
7007 TREE_TYPE (vec_oprnd)));
7008 bool simd_lane_access_p
7009 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7010 if (simd_lane_access_p
7011 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
7012 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
7013 && integer_zerop (DR_OFFSET (first_dr_info->dr))
7014 && integer_zerop (DR_INIT (first_dr_info->dr))
7015 && alias_sets_conflict_p (get_alias_set (aggr_type),
7016 get_alias_set (TREE_TYPE (ref_type))))
7017 {
7018 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
7019 dataref_offset = build_int_cst (ref_type, 0);
7020 }
7021 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7022 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
7023 &dataref_ptr, &vec_offset);
7024 else
7025 dataref_ptr
7026 = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
7027 simd_lane_access_p ? loop : NULL,
7028 offset, &dummy, gsi, &ptr_incr,
7029 simd_lane_access_p, NULL_TREE, bump);
7030 }
7031 else
7032 {
7033 /* For interleaved stores we created vectorized defs for all the
7034 defs stored in OPRNDS in the previous iteration (previous copy).
7035 DR_CHAIN is then used as an input to vect_permute_store_chain(),
7036 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
7037 next copy.
7038 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7039 OPRNDS are of size 1. */
7040 for (i = 0; i < group_size; i++)
7041 {
7042 op = oprnds[i];
7043 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op);
7044 dr_chain[i] = vec_oprnd;
7045 oprnds[i] = vec_oprnd;
ebfd146a 7046 }
7047 if (mask)
7048 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
7049 if (dataref_offset)
7050 dataref_offset
7051 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7052 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7053 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
7054 else
7055 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7056 stmt_info, bump);
7057 }
7058
7059 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7060 {
7061 tree vec_array;
7062
7063 /* Get an array into which we can store the individual vectors. */
7064 vec_array = create_vector_array (vectype, vec_num);
7065
7066 /* Invalidate the current contents of VEC_ARRAY. This should
7067 become an RTL clobber too, which prevents the vector registers
7068 from being upward-exposed. */
7069 vect_clobber_variable (stmt_info, gsi, vec_array);
7070
7071 /* Store the individual vectors into the array. */
7072 for (i = 0; i < vec_num; i++)
7073 {
7074 vec_oprnd = dr_chain[i];
7075 write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
7076 }
7077
7078 tree final_mask = NULL;
7079 if (loop_masks)
7080 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
7081 vectype, j);
7082 if (vec_mask)
7083 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7084 vec_mask, gsi);
7085
7086 gcall *call;
7087 if (final_mask)
7088 {
7089 /* Emit:
7090 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
7091 VEC_ARRAY). */
7092 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
7093 tree alias_ptr = build_int_cst (ref_type, align);
7094 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
7095 dataref_ptr, alias_ptr,
7096 final_mask, vec_array);
7097 }
7098 else
7099 {
7100 /* Emit:
7101 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
7102 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7103 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
7104 vec_array);
7105 gimple_call_set_lhs (call, data_ref);
7106 }
7107 gimple_call_set_nothrow (call, true);
7108 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
7109
7110 /* Record that VEC_ARRAY is now dead. */
7111 vect_clobber_variable (stmt_info, gsi, vec_array);
7112 }
7113 else
7114 {
7115 new_stmt_info = NULL;
7116 if (grouped_store)
7117 {
7118 if (j == 0)
7119 result_chain.create (group_size);
7120 /* Permute. */
7121 vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
7122 &result_chain);
7123 }
7124
7125 stmt_vec_info next_stmt_info = first_stmt_info;
7126 for (i = 0; i < vec_num; i++)
7127 {
7128 unsigned align, misalign;
7129
7130 tree final_mask = NULL_TREE;
7131 if (loop_masks)
7132 final_mask = vect_get_loop_mask (gsi, loop_masks,
7133 vec_num * ncopies,
7134 vectype, vec_num * j + i);
7135 if (vec_mask)
7136 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7137 vec_mask, gsi);
7138
7139 if (memory_access_type == VMAT_GATHER_SCATTER)
7140 {
7141 tree scale = size_int (gs_info.scale);
7142 gcall *call;
7143 if (loop_masks)
7144 call = gimple_build_call_internal
7145 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
7146 scale, vec_oprnd, final_mask);
7147 else
7148 call = gimple_build_call_internal
7149 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
7150 scale, vec_oprnd);
7151 gimple_call_set_nothrow (call, true);
7152 new_stmt_info
7153 = vect_finish_stmt_generation (stmt_info, call, gsi);
7154 break;
7155 }
7156
7157 if (i > 0)
7158 /* Bump the vector pointer. */
7159 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7160 stmt_info, bump);
7161
7162 if (slp)
7163 vec_oprnd = vec_oprnds[i];
7164 else if (grouped_store)
7165 /* For grouped stores vectorized defs are interleaved in
7166 vect_permute_store_chain(). */
7167 vec_oprnd = result_chain[i];
7168
7169 align = DR_TARGET_ALIGNMENT (first_dr_info);
7170 if (aligned_access_p (first_dr_info))
7171 misalign = 0;
7172 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7173 {
7174 align = dr_alignment (vect_dr_behavior (first_dr_info));
7175 misalign = 0;
7176 }
7177 else
7178 misalign = DR_MISALIGNMENT (first_dr_info);
7179 if (dataref_offset == NULL_TREE
7180 && TREE_CODE (dataref_ptr) == SSA_NAME)
7181 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7182 misalign);
7183
7184 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7185 {
7186 tree perm_mask = perm_mask_for_reverse (vectype);
7187 tree perm_dest = vect_create_destination_var
7188 (vect_get_store_rhs (stmt_info), vectype);
7189 tree new_temp = make_ssa_name (perm_dest);
7190
7191 /* Generate the permute statement. */
7192 gimple *perm_stmt
7193 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7194 vec_oprnd, perm_mask);
7195 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7196
7197 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7198 vec_oprnd = new_temp;
7199 }
7200
7201 /* Arguments are ready. Create the new vector stmt. */
7202 if (final_mask)
7203 {
7204 align = least_bit_hwi (misalign | align);
7205 tree ptr = build_int_cst (ref_type, align);
7206 gcall *call
7207 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7208 dataref_ptr, ptr,
7209 final_mask, vec_oprnd);
7210 gimple_call_set_nothrow (call, true);
7211 new_stmt_info
7212 = vect_finish_stmt_generation (stmt_info, call, gsi);
7213 }
7214 else
7215 {
7216 data_ref = fold_build2 (MEM_REF, vectype,
7217 dataref_ptr,
7218 dataref_offset
7219 ? dataref_offset
7220 : build_int_cst (ref_type, 0));
7221 if (aligned_access_p (first_dr_info))
7222 ;
7223 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7224 TREE_TYPE (data_ref)
7225 = build_aligned_type (TREE_TYPE (data_ref),
7226 align * BITS_PER_UNIT);
7227 else
7228 TREE_TYPE (data_ref)
7229 = build_aligned_type (TREE_TYPE (data_ref),
7230 TYPE_ALIGN (elem_type));
7231 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7232 gassign *new_stmt
7233 = gimple_build_assign (data_ref, vec_oprnd);
7234 new_stmt_info
7235 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7236 }
7237
7238 if (slp)
7239 continue;
7240
7241 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7242 if (!next_stmt_info)
7243 break;
7244 }
7245 }
7246 if (!slp)
7247 {
7248 if (j == 0)
7249 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7250 else
7251 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7252 prev_stmt_info = new_stmt_info;
7253 }
7254 }
7255
7256 oprnds.release ();
7257 result_chain.release ();
7258 vec_oprnds.release ();
7259
7260 return true;
7261}
7262
7263/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7264 VECTOR_CST mask. No checks are made that the target platform supports the
7265 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7266 vect_gen_perm_mask_checked. */
7267
7268tree
7269vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7270{
7271 tree mask_type;
7272
7273 poly_uint64 nunits = sel.length ();
7274 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7275
7276 mask_type = build_vector_type (ssizetype, nunits);
7277 return vec_perm_indices_to_tree (mask_type, sel);
7278}
7279
7280/* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7281 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7282
7283tree
7284vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7285{
7286 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7287 return vect_gen_perm_mask_any (vectype, sel);
7288}
7289
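/* Annotation (not part of the original source): a typical caller builds an
   encoded selector and checks it first, roughly as perm_mask_for_reverse
   does elsewhere in this file:

     poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
     vec_perm_builder sel (nunits, 1, 3);
     for (int i = 0; i < 3; ++i)
       sel.quick_push (nunits - 1 - i);       // { n-1, n-2, n-3, ... }
     vec_perm_indices indices (sel, 1, nunits);
     if (can_vec_perm_const_p (TYPE_MODE (vectype), indices))
       mask = vect_gen_perm_mask_checked (vectype, indices);

   This is a sketch for illustration, not part of the file.  */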
7290/* Given vector variables X and Y that were generated for the scalar
7291 STMT_INFO, generate instructions to permute the vector elements of X and Y
7292 using permutation mask MASK_VEC, insert them at *GSI and return the
7293 permuted vector variable. */
7294
7295static tree
7296 permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
7297 gimple_stmt_iterator *gsi)
7298{
7299 tree vectype = TREE_TYPE (x);
7300 tree perm_dest, data_ref;
7301 gimple *perm_stmt;
7302
7303 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
7304 if (TREE_CODE (scalar_dest) == SSA_NAME)
7305 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7306 else
7307 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7308 data_ref = make_ssa_name (perm_dest);
7309
7310 /* Generate the permute statement. */
7311 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7312 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7313
7314 return data_ref;
7315}
7316
7317/* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
7318 inserting them on the loop's preheader edge. Returns true if we
7319 were successful in doing so (and thus STMT_INFO can then be moved),
7320 otherwise returns false. */
7321
7322static bool
7323 hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
7324{
7325 ssa_op_iter i;
7326 tree op;
7327 bool any = false;
7328
7329 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7330 {
7331 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7332 if (!gimple_nop_p (def_stmt)
7333 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7334 {
7335 /* Make sure we don't need to recurse. While we could do
7336 so in simple cases when there are more complex use webs
7337 we don't have an easy way to preserve stmt order to fulfil
7338 dependencies within them. */
7339 tree op2;
7340 ssa_op_iter i2;
7341 if (gimple_code (def_stmt) == GIMPLE_PHI)
7342 return false;
7343 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7344 {
7345 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7346 if (!gimple_nop_p (def_stmt2)
7347 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7348 return false;
7349 }
7350 any = true;
7351 }
7352 }
7353
7354 if (!any)
7355 return true;
7356
7357 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7358 {
7359 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7360 if (!gimple_nop_p (def_stmt)
7361 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7362 {
7363 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7364 gsi_remove (&gsi, false);
7365 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7366 }
7367 }
7368
7369 return true;
7370}
7371
7372/* vectorizable_load.
7373
7374 Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
7375 that can be vectorized.
7376 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
7377 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7378 Return true if STMT_INFO is vectorizable in this way. */
7379
7380static bool
7381 vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7382 stmt_vec_info *vec_stmt, slp_tree slp_node,
7383 slp_instance slp_node_instance,
7384 stmt_vector_for_cost *cost_vec)
7385{
7386 tree scalar_dest;
7387 tree vec_dest = NULL;
7388 tree data_ref = NULL;
7389 stmt_vec_info prev_stmt_info;
7390 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7391 struct loop *loop = NULL;
7392 struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
7393 bool nested_in_vect_loop = false;
7394 tree elem_type;
7395 tree new_temp;
7396 machine_mode mode;
7397 tree dummy;
7398 enum dr_alignment_support alignment_support_scheme;
7399 tree dataref_ptr = NULL_TREE;
7400 tree dataref_offset = NULL_TREE;
7401 gimple *ptr_incr = NULL;
7402 int ncopies;
7403 int i, j;
7404 unsigned int group_size;
7405 poly_uint64 group_gap_adj;
7406 tree msq = NULL_TREE, lsq;
7407 tree offset = NULL_TREE;
7408 tree byte_offset = NULL_TREE;
7409 tree realignment_token = NULL_TREE;
7410 gphi *phi = NULL;
7411 vec<tree> dr_chain = vNULL;
7412 bool grouped_load = false;
7413 stmt_vec_info first_stmt_info;
7414 stmt_vec_info first_stmt_info_for_drptr = NULL;
7415 bool compute_in_loop = false;
7416 struct loop *at_loop;
7417 int vec_num;
7418 bool slp = (slp_node != NULL);
7419 bool slp_perm = false;
7420 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7421 poly_uint64 vf;
7422 tree aggr_type;
7423 gather_scatter_info gs_info;
7424 vec_info *vinfo = stmt_info->vinfo;
7425 tree ref_type;
7426 enum vect_def_type mask_dt = vect_unknown_def_type;
7427
7428 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7429 return false;
7430
7431 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7432 && ! vec_stmt)
7433 return false;
7434
7435 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7436 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
7437 {
7438 scalar_dest = gimple_assign_lhs (assign);
7439 if (TREE_CODE (scalar_dest) != SSA_NAME)
7440 return false;
7441
7442 tree_code code = gimple_assign_rhs_code (assign);
7443 if (code != ARRAY_REF
7444 && code != BIT_FIELD_REF
7445 && code != INDIRECT_REF
7446 && code != COMPONENT_REF
7447 && code != IMAGPART_EXPR
7448 && code != REALPART_EXPR
7449 && code != MEM_REF
7450 && TREE_CODE_CLASS (code) != tcc_declaration)
7451 return false;
7452 }
7453 else
7454 {
7455 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
7456 if (!call || !gimple_call_internal_p (call))
7457 return false;
7458
7459 internal_fn ifn = gimple_call_internal_fn (call);
7460 if (!internal_load_fn_p (ifn))
7461 return false;
7462
7463 scalar_dest = gimple_call_lhs (call);
7464 if (!scalar_dest)
7465 return false;
7466
7467 if (slp_node != NULL)
7468 {
7469 if (dump_enabled_p ())
7470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7471 "SLP of masked loads not supported.\n");
7472 return false;
7473 }
7474
7475 int mask_index = internal_fn_mask_index (ifn);
7476 if (mask_index >= 0)
7477 {
7478 mask = gimple_call_arg (call, mask_index);
7479 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
7480 &mask_vectype))
7481 return false;
7482 }
7483 }
7484
7485 if (!STMT_VINFO_DATA_REF (stmt_info))
7486 return false;
7487
7488 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7489 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7490
7491 if (loop_vinfo)
7492 {
7493 loop = LOOP_VINFO_LOOP (loop_vinfo);
7494 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
7495 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7496 }
7497 else
7498 vf = 1;
7499
7500 /* Multiple types in SLP are handled by creating the appropriate number of
7501 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7502 case of SLP. */
7503 if (slp)
7504 ncopies = 1;
7505 else
7506 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7507
7508 gcc_assert (ncopies >= 1);
7509
7510 /* FORNOW. This restriction should be relaxed. */
7511 if (nested_in_vect_loop && ncopies > 1)
7512 {
7513 if (dump_enabled_p ())
7514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7515 "multiple types in nested loop.\n");
7516 return false;
7517 }
7518
7519 /* Invalidate assumptions made by dependence analysis when vectorization
7520 on the unrolled body effectively re-orders stmts. */
7521 if (ncopies > 1
7522 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7523 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7524 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7525 {
7526 if (dump_enabled_p ())
7527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7528 "cannot perform implicit CSE when unrolling "
7529 "with negative dependence distance\n");
7530 return false;
7531 }
7532
7533 elem_type = TREE_TYPE (vectype);
7534 mode = TYPE_MODE (vectype);
7535
7536 /* FORNOW. In some cases can vectorize even if data-type not supported
7537 (e.g. - data copies). */
7538 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7539 {
7540 if (dump_enabled_p ())
7541 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7542 "Aligned load, but unsupported type.\n");
7543 return false;
7544 }
7545
7546 /* Check if the load is a part of an interleaving chain. */
7547 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7548 {
7549 grouped_load = true;
7550 /* FORNOW */
7551 gcc_assert (!nested_in_vect_loop);
7552 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7553
7554 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7555 group_size = DR_GROUP_SIZE (first_stmt_info);
7556
7557 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7558 slp_perm = true;
7559
7560 /* Invalidate assumptions made by dependence analysis when vectorization
7561 on the unrolled body effectively re-orders stmts. */
7562 if (!PURE_SLP_STMT (stmt_info)
7563 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7564 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7565 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7566 {
7567 if (dump_enabled_p ())
7568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7569 "cannot perform implicit CSE when performing "
7570 "group loads with negative dependence distance\n");
7571 return false;
7572 }
7573
7574 /* Similarly when the stmt is a load that is both part of a SLP
7575 instance and a loop vectorized stmt via the same-dr mechanism
7576 we have to give up. */
7577 if (DR_GROUP_SAME_DR_STMT (stmt_info)
7578 && (STMT_SLP_TYPE (stmt_info)
7579 != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info))))
7580 {
7581 if (dump_enabled_p ())
7582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7583 "conflicting SLP types for CSEd load\n");
7584 return false;
7585 }
7586 }
7587 else
7588 group_size = 1;
7589
7590 vect_memory_access_type memory_access_type;
7591 if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
7592 &memory_access_type, &gs_info))
7593 return false;
7594
7595 if (mask)
7596 {
7597 if (memory_access_type == VMAT_CONTIGUOUS)
7598 {
7599 machine_mode vec_mode = TYPE_MODE (vectype);
7600 if (!VECTOR_MODE_P (vec_mode)
7601 || !can_vec_mask_load_store_p (vec_mode,
7602 TYPE_MODE (mask_vectype), true))
7603 return false;
7604 }
7605 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7606 {
7607 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7608 tree masktype
7609 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7610 if (TREE_CODE (masktype) == INTEGER_TYPE)
7611 {
7612 if (dump_enabled_p ())
7613 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7614 "masked gather with integer mask not"
7615 " supported.");
7616 return false;
7617 }
7618 }
7619 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7620 && memory_access_type != VMAT_GATHER_SCATTER)
7621 {
7622 if (dump_enabled_p ())
7623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7624 "unsupported access type for masked load.\n");
7625 return false;
7626 }
7627 }
7628
ebfd146a
IR
7629 if (!vec_stmt) /* transformation not required. */
7630 {
2de001ee
RS
7631 if (!slp)
7632 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7cfb4d93
RS
7633
7634 if (loop_vinfo
7635 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7636 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
bfaa08b7 7637 memory_access_type, &gs_info);
7cfb4d93 7638
ebfd146a 7639 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
68435eb2
RB
7640 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7641 slp_node_instance, slp_node, cost_vec);
ebfd146a
IR
7642 return true;
7643 }
7644
2de001ee
RS
7645 if (!slp)
7646 gcc_assert (memory_access_type
7647 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7648
73fbfcad 7649 if (dump_enabled_p ())
78c60e3d 7650 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 7651 "transform load. ncopies = %d\n", ncopies);
ebfd146a 7652
67b8dbac 7653 /* Transform. */
ebfd146a 7654
89fa689a
RS
7655 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
7656 ensure_base_align (dr_info);
c716e67f 7657
bfaa08b7 7658 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
aec7ae7d 7659 {
e4057a39 7660 vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
aec7ae7d
JJ
7661 return true;
7662 }
2de001ee 7663
2d4bca81
RS
7664 if (memory_access_type == VMAT_INVARIANT)
7665 {
7666 gcc_assert (!grouped_load && !mask && !bb_vinfo);
7667 /* If we have versioned for aliasing or the loop doesn't
7668 have any data dependencies that would preclude this,
7669 then we are sure this is a loop invariant load and
7670 thus we can insert it on the preheader edge. */
7671 bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7672 && !nested_in_vect_loop
7673 && hoist_defs_of_uses (stmt_info, loop));
7674 if (hoist_p)
7675 {
7676 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
7677 if (dump_enabled_p ())
7678 {
7679 dump_printf_loc (MSG_NOTE, vect_location,
7680 "hoisting out of the vectorized loop: ");
7681 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7682 }
7683 scalar_dest = copy_ssa_name (scalar_dest);
7684 tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
7685 gsi_insert_on_edge_immediate
7686 (loop_preheader_edge (loop),
7687 gimple_build_assign (scalar_dest, rhs));
7688 }
7689 /* These copies are all equivalent, but currently the representation
7690 requires a separate STMT_VINFO_VEC_STMT for each one. */
7691 prev_stmt_info = NULL;
7692 gimple_stmt_iterator gsi2 = *gsi;
7693 gsi_next (&gsi2);
7694 for (j = 0; j < ncopies; j++)
7695 {
7696 stmt_vec_info new_stmt_info;
7697 if (hoist_p)
7698 {
7699 new_temp = vect_init_vector (stmt_info, scalar_dest,
7700 vectype, NULL);
7701 gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
7702 new_stmt_info = vinfo->add_stmt (new_stmt);
7703 }
7704 else
7705 {
7706 new_temp = vect_init_vector (stmt_info, scalar_dest,
7707 vectype, &gsi2);
7708 new_stmt_info = vinfo->lookup_def (new_temp);
7709 }
7710 if (slp)
7711 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7712 else if (j == 0)
7713 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7714 else
7715 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7716 prev_stmt_info = new_stmt_info;
7717 }
7718 return true;
7719 }
7720
2de001ee
RS
7721 if (memory_access_type == VMAT_ELEMENTWISE
7722 || memory_access_type == VMAT_STRIDED_SLP)
7d75abc8
MM
7723 {
7724 gimple_stmt_iterator incr_gsi;
7725 bool insert_after;
355fe088 7726 gimple *incr;
7d75abc8 7727 tree offvar;
7d75abc8
MM
7728 tree ivstep;
7729 tree running_off;
9771b263 7730 vec<constructor_elt, va_gc> *v = NULL;
14ac6aa2 7731 tree stride_base, stride_step, alias_off;
4d694b27
RS
7732 /* Checked by get_load_store_type. */
7733 unsigned int const_nunits = nunits.to_constant ();
b210f45f 7734 unsigned HOST_WIDE_INT cst_offset = 0;
14ac6aa2 7735
7cfb4d93 7736 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
14ac6aa2 7737 gcc_assert (!nested_in_vect_loop);
7d75abc8 7738
b210f45f 7739 if (grouped_load)
44fc7854 7740 {
bffb8014 7741 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
89fa689a 7742 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
44fc7854 7743 }
ab313a8c 7744 else
44fc7854 7745 {
bffb8014 7746 first_stmt_info = stmt_info;
89fa689a 7747 first_dr_info = dr_info;
b210f45f
RB
7748 }
7749 if (slp && grouped_load)
7750 {
bffb8014
RS
7751 group_size = DR_GROUP_SIZE (first_stmt_info);
7752 ref_type = get_group_alias_ptr_type (first_stmt_info);
b210f45f
RB
7753 }
7754 else
7755 {
7756 if (grouped_load)
7757 cst_offset
7758 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
86a91c0a 7759 * vect_get_place_in_interleaving_chain (stmt_info,
bffb8014 7760 first_stmt_info));
44fc7854 7761 group_size = 1;
89fa689a 7762 ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
44fc7854 7763 }
ab313a8c 7764
14ac6aa2
RB
7765 stride_base
7766 = fold_build_pointer_plus
89fa689a 7767 (DR_BASE_ADDRESS (first_dr_info->dr),
14ac6aa2 7768 size_binop (PLUS_EXPR,
89fa689a
RS
7769 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
7770 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
7771 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
7d75abc8
MM
7772
7773 /* For a load with loop-invariant (but other than power-of-2)
7774 stride (i.e. not a grouped access) like so:
7775
7776 for (i = 0; i < n; i += stride)
7777 ... = array[i];
7778
7779 we generate a new induction variable and new accesses to
7780 form a new vector (or vectors, depending on ncopies):
7781
7782 for (j = 0; ; j += VF*stride)
7783 tmp1 = array[j];
7784 tmp2 = array[j + stride];
7785 ...
7786 vectemp = {tmp1, tmp2, ...}
7787 */
7788
ab313a8c
RB
7789 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7790 build_int_cst (TREE_TYPE (stride_step), vf));
7d75abc8
MM
7791
7792 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7793
b210f45f
RB
7794 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7795 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7796 create_iv (stride_base, ivstep, NULL,
7d75abc8
MM
7797 loop, &incr_gsi, insert_after,
7798 &offvar, NULL);
7799 incr = gsi_stmt (incr_gsi);
4fbeb363 7800 loop_vinfo->add_stmt (incr);
7d75abc8 7801
b210f45f 7802 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7d75abc8
MM
7803
7804 prev_stmt_info = NULL;
7805 running_off = offvar;
44fc7854 7806 alias_off = build_int_cst (ref_type, 0);
4d694b27 7807 int nloads = const_nunits;
e09b4c37 7808 int lnel = 1;
7b5fc413 7809 tree ltype = TREE_TYPE (vectype);
ea60dd34 7810 tree lvectype = vectype;
b266b968 7811 auto_vec<tree> dr_chain;
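      /* NLOADS is the number of scalar loads emitted per vector result,
         LNEL the number of group elements each such load covers, LTYPE
         the type of each load and LVECTYPE the vector type the loads
         are assembled into (which can differ from VECTYPE when the
         pieces are loaded as integers and view-converted back).  */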
      if (memory_access_type == VMAT_STRIDED_SLP)
        {
          if (group_size < const_nunits)
            {
              /* First check if vec_init optab supports construction from
                 vector elts directly.  */
              scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
              machine_mode vmode;
              if (mode_for_vector (elmode, group_size).exists (&vmode)
                  && VECTOR_MODE_P (vmode)
                  && targetm.vector_mode_supported_p (vmode)
                  && (convert_optab_handler (vec_init_optab,
                                             TYPE_MODE (vectype), vmode)
                      != CODE_FOR_nothing))
                {
                  nloads = const_nunits / group_size;
                  lnel = group_size;
                  ltype = build_vector_type (TREE_TYPE (vectype), group_size);
                }
              else
                {
                  /* Otherwise avoid emitting a constructor of vector elements
                     by performing the loads using an integer type of the same
                     size, constructing a vector of those and then
                     re-interpreting it as the original vector type.
                     This avoids a huge runtime penalty due to the general
                     inability to perform store forwarding from smaller stores
                     to a larger load.  */
                  unsigned lsize
                    = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
                  elmode = int_mode_for_size (lsize, 0).require ();
                  unsigned int lnunits = const_nunits / group_size;
                  /* If we can't construct such a vector fall back to
                     element loads of the original vector type.  */
                  if (mode_for_vector (elmode, lnunits).exists (&vmode)
                      && VECTOR_MODE_P (vmode)
                      && targetm.vector_mode_supported_p (vmode)
                      && (convert_optab_handler (vec_init_optab, vmode, elmode)
                          != CODE_FOR_nothing))
                    {
                      nloads = lnunits;
                      lnel = group_size;
                      ltype = build_nonstandard_integer_type (lsize, 1);
                      lvectype = build_vector_type (ltype, nloads);
                    }
                }
            }
          else
            {
              nloads = 1;
              lnel = const_nunits;
              ltype = vectype;
            }
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
        }
      /* Load vector(1) scalar_type if it's 1 element-wise vectype.  */
      else if (nloads == 1)
        ltype = vectype;

      if (slp)
        {
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
                 variable VF.  */
              unsigned int const_vf = vf.to_constant ();
              ncopies = CEIL (group_size * const_vf, const_nunits);
              dr_chain.create (ncopies);
            }
          else
            ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
      unsigned int group_el = 0;
      unsigned HOST_WIDE_INT
        elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
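      /* Emit NLOADS loads of LTYPE per copy, advancing RUNNING_OFF by
         STRIDE_STEP after each access (for SLP, after each full group
         of GROUP_SIZE elements), and assemble the pieces into an
         LVECTYPE vector.  */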
      for (j = 0; j < ncopies; j++)
        {
          if (nloads > 1)
            vec_alloc (v, nloads);
          stmt_vec_info new_stmt_info = NULL;
          for (i = 0; i < nloads; i++)
            {
              tree this_off = build_int_cst (TREE_TYPE (alias_off),
                                             group_el * elsz + cst_offset);
              tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
              vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
              gassign *new_stmt
                = gimple_build_assign (make_ssa_name (ltype), data_ref);
              new_stmt_info
                = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
              if (nloads > 1)
                CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
                                        gimple_assign_lhs (new_stmt));

              group_el += lnel;
              if (! slp
                  || group_el == group_size)
                {
                  tree newoff = copy_ssa_name (running_off);
                  gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                      running_off, stride_step);
                  vect_finish_stmt_generation (stmt_info, incr, gsi);

                  running_off = newoff;
                  group_el = 0;
                }
            }
          if (nloads > 1)
            {
              tree vec_inv = build_constructor (lvectype, v);
              new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
              new_stmt_info = vinfo->lookup_def (new_temp);
              if (lvectype != vectype)
                {
                  gassign *new_stmt
                    = gimple_build_assign (make_ssa_name (vectype),
                                           VIEW_CONVERT_EXPR,
                                           build1 (VIEW_CONVERT_EXPR,
                                                   vectype, new_temp));
                  new_stmt_info
                    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }
            }

          if (slp)
            {
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
              else
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
              prev_stmt_info = new_stmt_info;
            }
        }
      if (slp_perm)
        {
          unsigned n_perms;
          vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                        slp_node_instance, false, &n_perms);
        }
      return true;
    }

  if (memory_access_type == VMAT_GATHER_SCATTER
      || (!slp && memory_access_type == VMAT_CONTIGUOUS))
    grouped_load = false;

  if (grouped_load)
    {
      first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      group_size = DR_GROUP_SIZE (first_stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
        first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (first_stmt_info)
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
                 variable VF.  */
              unsigned int const_vf = vf.to_constant ();
              unsigned int const_nunits = nunits.to_constant ();
              vec_num = CEIL (group_size * const_vf, const_nunits);
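              /* GROUP_GAP_ADJ is the adjustment (in elements) between
                 what the vector stmts load and VF * GROUP_SIZE; it is
                 applied as an extra pointer bump once the permuted
                 group has been fully loaded (see below).  */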
              group_gap_adj = vf * group_size - nunits * vec_num;
            }
          else
            {
              vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
              group_gap_adj
                = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
            }
        }
      else
        vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt_info);
    }
  else
    {
      first_stmt_info = stmt_info;
      first_dr_info = dr_info;
      group_size = vec_num = 1;
      group_gap_adj = 0;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
    }

  alignment_support_scheme
    = vect_supportable_dr_alignment (first_dr_info, false);
  gcc_assert (alignment_support_scheme);
  vec_loop_masks *loop_masks
    = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
       ? &LOOP_VINFO_MASKS (loop_vinfo)
       : NULL);
  /* Targets with store-lane instructions must not require explicit
     realignment.  vect_supportable_dr_alignment always returns either
     dr_aligned or dr_unaligned_supported for masked operations.  */
  gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
               && !mask
               && !loop_masks)
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  x2 = &base + 2
        S2:  x0 = &base
        S3:  x1 = &base + 1
        S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
          ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
         }
         msq = lsq;
       */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
                      GET_MODE_SIZE (TYPE_MODE (vectype))))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

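  /* AGGR_TYPE is the type the data-ref pointer advances over per copy:
     an array of VEC_NUM * NUNITS elements for load/store-lanes, the
     scalar element type when a strided access is emulated with gathers,
     and otherwise the vector type itself; BUMP is the matching pointer
     increment.  Native gathers compute a vector of offsets instead.  */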
  tree bump;
  tree vec_offset = NULL_TREE;
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      aggr_type = NULL_TREE;
      bump = NULL_TREE;
    }
  else if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      aggr_type = elem_type;
      vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
                                       &bump, &vec_offset);
    }
  else
    {
      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
      else
        aggr_type = vectype;
      bump = vect_get_data_ptr_increment (dr_info, aggr_type,
                                          memory_access_type);
    }

  tree vec_mask = NULL_TREE;
  prev_stmt_info = NULL;
  poly_uint64 group_elt = 0;
  for (j = 0; j < ncopies; j++)
    {
      stmt_vec_info new_stmt_info = NULL;
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
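          /* For a simd-lane access to a plain decl we can use the base
             address directly with a constant offset instead of creating
             a separate IV, provided alignment and aliasing permit.  */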
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
              && integer_zerop (DR_OFFSET (first_dr_info->dr))
              && integer_zerop (DR_INIT (first_dr_info->dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (TREE_TYPE (ref_type)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
              dataref_offset = build_int_cst (ref_type, 0);
            }
          else if (first_stmt_info_for_drptr
                   && first_stmt_info != first_stmt_info_for_drptr)
            {
              dataref_ptr
                = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
                                            aggr_type, at_loop, offset, &dummy,
                                            gsi, &ptr_incr, simd_lane_access_p,
                                            byte_offset, bump);
              /* Adjust the pointer by the difference to first_stmt.  */
              data_reference_p ptrdr
                = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
              tree diff
                = fold_convert (sizetype,
                                size_binop (MINUS_EXPR,
                                            DR_INIT (first_dr_info->dr),
                                            DR_INIT (ptrdr)));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt_info, diff);
            }
          else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
            vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
                                         &dataref_ptr, &vec_offset);
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p,
                                          byte_offset, bump);
          if (mask)
            vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
                                                     mask_vectype);
        }
      else
        {
          if (dataref_offset)
            dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                              bump);
          else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
            vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                           stmt_info, bump);
          if (mask)
            vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
        }

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          tree final_mask = NULL_TREE;
          if (loop_masks)
            final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
                                             vectype, j);
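          /* When both a loop mask and a statement-level mask are
             present, prepare_load_store_mask combines them into the
             final mask to use.  */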
          if (vec_mask)
            final_mask = prepare_load_store_mask (mask_vectype, final_mask,
                                                  vec_mask, gsi);

          gcall *call;
          if (final_mask)
            {
              /* Emit:
                   VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
                                                VEC_MASK).  */
              unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
              tree alias_ptr = build_int_cst (ref_type, align);
              call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
                                                 dataref_ptr, alias_ptr,
                                                 final_mask);
            }
          else
            {
              /* Emit:
                   VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
              data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
              call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
            }
          gimple_call_set_lhs (call, vec_array);
          gimple_call_set_nothrow (call, true);
          new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt_info, dr_chain);

          /* Record that VEC_ARRAY is now dead.  */
          vect_clobber_variable (stmt_info, gsi, vec_array);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              tree final_mask = NULL_TREE;
              if (loop_masks
                  && memory_access_type != VMAT_INVARIANT)
                final_mask = vect_get_loop_mask (gsi, loop_masks,
                                                 vec_num * ncopies,
                                                 vectype, vec_num * j + i);
              if (vec_mask)
                final_mask = prepare_load_store_mask (mask_vectype, final_mask,
                                                      vec_mask, gsi);

              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt_info, bump);

              /* 2. Create the vector-load in the loop.  */
              gimple *new_stmt = NULL;
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

8358 {
8359 tree scale = size_int (gs_info.scale);
8360 gcall *call;
70088b95 8361 if (loop_masks)
bfaa08b7
RS
8362 call = gimple_build_call_internal
8363 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8364 vec_offset, scale, final_mask);
8365 else
8366 call = gimple_build_call_internal
8367 (IFN_GATHER_LOAD, 3, dataref_ptr,
8368 vec_offset, scale);
8369 gimple_call_set_nothrow (call, true);
8370 new_stmt = call;
8371 data_ref = NULL_TREE;
8372 break;
8373 }
8374
89fa689a 8375 align = DR_TARGET_ALIGNMENT (dr_info);
272c6793
RS
8376 if (alignment_support_scheme == dr_aligned)
8377 {
89fa689a 8378 gcc_assert (aligned_access_p (first_dr_info));
644ffefd 8379 misalign = 0;
272c6793 8380 }
89fa689a 8381 else if (DR_MISALIGNMENT (first_dr_info) == -1)
272c6793 8382 {
89fa689a
RS
8383 align = dr_alignment
8384 (vect_dr_behavior (first_dr_info));
52639a61 8385 misalign = 0;
272c6793
RS
8386 }
8387 else
89fa689a 8388 misalign = DR_MISALIGNMENT (first_dr_info);
aed93b23
RB
8389 if (dataref_offset == NULL_TREE
8390 && TREE_CODE (dataref_ptr) == SSA_NAME)
74bf76ed
JJ
8391 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8392 align, misalign);
c3a8f964 8393
7cfb4d93 8394 if (final_mask)
c3a8f964
RS
8395 {
8396 align = least_bit_hwi (misalign | align);
8397 tree ptr = build_int_cst (ref_type, align);
8398 gcall *call
8399 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8400 dataref_ptr, ptr,
7cfb4d93 8401 final_mask);
c3a8f964
RS
8402 gimple_call_set_nothrow (call, true);
8403 new_stmt = call;
8404 data_ref = NULL_TREE;
8405 }
8406 else
8407 {
8408 data_ref
8409 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8410 dataref_offset
8411 ? dataref_offset
8412 : build_int_cst (ref_type, 0));
8413 if (alignment_support_scheme == dr_aligned)
8414 ;
89fa689a 8415 else if (DR_MISALIGNMENT (first_dr_info) == -1)
c3a8f964
RS
8416 TREE_TYPE (data_ref)
8417 = build_aligned_type (TREE_TYPE (data_ref),
8418 align * BITS_PER_UNIT);
8419 else
8420 TREE_TYPE (data_ref)
8421 = build_aligned_type (TREE_TYPE (data_ref),
8422 TYPE_ALIGN (elem_type));
8423 }
272c6793 8424 break;
be1ac4ec 8425 }
272c6793 8426 case dr_explicit_realign:
267d3070 8427 {
272c6793 8428 tree ptr, bump;
272c6793 8429
d88981fc 8430 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
272c6793
RS
8431
8432 if (compute_in_loop)
bffb8014 8433 msq = vect_setup_realignment (first_stmt_info, gsi,
272c6793
RS
8434 &realignment_token,
8435 dr_explicit_realign,
8436 dataref_ptr, NULL);
8437
aed93b23
RB
8438 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8439 ptr = copy_ssa_name (dataref_ptr);
8440 else
8441 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
89fa689a 8442 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
0d0e4a03
JJ
8443 new_stmt = gimple_build_assign
8444 (ptr, BIT_AND_EXPR, dataref_ptr,
272c6793
RS
8445 build_int_cst
8446 (TREE_TYPE (dataref_ptr),
f702e7d4 8447 -(HOST_WIDE_INT) align));
86a91c0a 8448 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
272c6793
RS
8449 data_ref
8450 = build2 (MEM_REF, vectype, ptr,
44fc7854 8451 build_int_cst (ref_type, 0));
89fa689a 8452 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
272c6793
RS
8453 vec_dest = vect_create_destination_var (scalar_dest,
8454 vectype);
8455 new_stmt = gimple_build_assign (vec_dest, data_ref);
8456 new_temp = make_ssa_name (vec_dest, new_stmt);
8457 gimple_assign_set_lhs (new_stmt, new_temp);
86a91c0a
RS
8458 gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt));
8459 gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt));
8460 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
272c6793
RS
8461 msq = new_temp;
8462
d88981fc 8463 bump = size_binop (MULT_EXPR, vs,
7b7b1813 8464 TYPE_SIZE_UNIT (elem_type));
d88981fc 8465 bump = size_binop (MINUS_EXPR, bump, size_one_node);
86a91c0a
RS
8466 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
8467 stmt_info, bump);
0d0e4a03
JJ
8468 new_stmt = gimple_build_assign
8469 (NULL_TREE, BIT_AND_EXPR, ptr,
272c6793 8470 build_int_cst
f702e7d4 8471 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
aed93b23 8472 ptr = copy_ssa_name (ptr, new_stmt);
272c6793 8473 gimple_assign_set_lhs (new_stmt, ptr);
86a91c0a 8474 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
272c6793
RS
8475 data_ref
8476 = build2 (MEM_REF, vectype, ptr,
44fc7854 8477 build_int_cst (ref_type, 0));
272c6793 8478 break;
267d3070 8479 }
272c6793 8480 case dr_explicit_realign_optimized:
f702e7d4
RS
8481 {
8482 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8483 new_temp = copy_ssa_name (dataref_ptr);
8484 else
8485 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
89fa689a 8486 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
f702e7d4
RS
8487 new_stmt = gimple_build_assign
8488 (new_temp, BIT_AND_EXPR, dataref_ptr,
8489 build_int_cst (TREE_TYPE (dataref_ptr),
8490 -(HOST_WIDE_INT) align));
86a91c0a 8491 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
f702e7d4
RS
8492 data_ref
8493 = build2 (MEM_REF, vectype, new_temp,
8494 build_int_cst (ref_type, 0));
8495 break;
8496 }
272c6793
RS
8497 default:
8498 gcc_unreachable ();
8499 }
ebfd146a 8500 vec_dest = vect_create_destination_var (scalar_dest, vectype);
c3a8f964
RS
8501 /* DATA_REF is null if we've already built the statement. */
8502 if (data_ref)
19986382 8503 {
89fa689a 8504 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
19986382
RB
8505 new_stmt = gimple_build_assign (vec_dest, data_ref);
8506 }
ebfd146a 8507 new_temp = make_ssa_name (vec_dest, new_stmt);
c3a8f964 8508 gimple_set_lhs (new_stmt, new_temp);
e1bd7296 8509 new_stmt_info
86a91c0a 8510 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
ebfd146a 8511
272c6793
RS
8512 /* 3. Handle explicit realignment if necessary/supported.
8513 Create in loop:
8514 vec_dest = realign_load (msq, lsq, realignment_token) */
8515 if (alignment_support_scheme == dr_explicit_realign_optimized
8516 || alignment_support_scheme == dr_explicit_realign)
ebfd146a 8517 {
272c6793
RS
8518 lsq = gimple_assign_lhs (new_stmt);
8519 if (!realignment_token)
8520 realignment_token = dataref_ptr;
8521 vec_dest = vect_create_destination_var (scalar_dest, vectype);
0d0e4a03
JJ
8522 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8523 msq, lsq, realignment_token);
272c6793
RS
8524 new_temp = make_ssa_name (vec_dest, new_stmt);
8525 gimple_assign_set_lhs (new_stmt, new_temp);
e1bd7296 8526 new_stmt_info
86a91c0a 8527 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
272c6793
RS
8528
8529 if (alignment_support_scheme == dr_explicit_realign_optimized)
8530 {
8531 gcc_assert (phi);
8532 if (i == vec_num - 1 && j == ncopies - 1)
8533 add_phi_arg (phi, lsq,
8534 loop_latch_edge (containing_loop),
9e227d60 8535 UNKNOWN_LOCATION);
272c6793
RS
8536 msq = lsq;
8537 }
ebfd146a 8538 }
ebfd146a 8539
62da9e14 8540 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
272c6793 8541 {
aec7ae7d
JJ
8542 tree perm_mask = perm_mask_for_reverse (vectype);
8543 new_temp = permute_vec_elements (new_temp, new_temp,
86a91c0a 8544 perm_mask, stmt_info, gsi);
e1bd7296 8545 new_stmt_info = vinfo->lookup_def (new_temp);
ebfd146a 8546 }
267d3070 8547
272c6793 8548 /* Collect vector loads and later create their permutation in
0d0293ac
MM
8549 vect_transform_grouped_load (). */
8550 if (grouped_load || slp_perm)
9771b263 8551 dr_chain.quick_push (new_temp);
267d3070 8552
272c6793
RS
8553 /* Store vector loads in the corresponding SLP_NODE. */
8554 if (slp && !slp_perm)
e1bd7296 8555 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
b267968e
RB
8556
8557 /* With SLP permutation we load the gaps as well, without
8558 we need to skip the gaps after we manage to fully load
2c53b149 8559 all elements. group_gap_adj is DR_GROUP_SIZE here. */
b267968e 8560 group_elt += nunits;
d9f21f6a
RS
8561 if (maybe_ne (group_gap_adj, 0U)
8562 && !slp_perm
8563 && known_eq (group_elt, group_size - group_gap_adj))
b267968e 8564 {
d9f21f6a
RS
8565 poly_wide_int bump_val
8566 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8567 * group_gap_adj);
8e6cdc90 8568 tree bump = wide_int_to_tree (sizetype, bump_val);
b267968e 8569 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
86a91c0a 8570 stmt_info, bump);
b267968e
RB
8571 group_elt = 0;
8572 }
272c6793 8573 }
9b999e8c
RB
8574 /* Bump the vector pointer to account for a gap or for excess
8575 elements loaded for a permuted SLP load. */
d9f21f6a 8576 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
a64b9c26 8577 {
d9f21f6a
RS
8578 poly_wide_int bump_val
8579 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8580 * group_gap_adj);
8e6cdc90 8581 tree bump = wide_int_to_tree (sizetype, bump_val);
a64b9c26 8582 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
86a91c0a 8583 stmt_info, bump);
a64b9c26 8584 }
ebfd146a
IR
8585 }
8586
8587 if (slp && !slp_perm)
8588 continue;
8589
8590 if (slp_perm)
8591 {
29afecdf 8592 unsigned n_perms;
01d8bf07 8593 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
29afecdf
RB
8594 slp_node_instance, false,
8595 &n_perms))
ebfd146a 8596 {
9771b263 8597 dr_chain.release ();
ebfd146a
IR
8598 return false;
8599 }
8600 }
8601 else
8602 {
0d0293ac 8603 if (grouped_load)
ebfd146a 8604 {
2de001ee 8605 if (memory_access_type != VMAT_LOAD_STORE_LANES)
86a91c0a
RS
8606 vect_transform_grouped_load (stmt_info, dr_chain,
8607 group_size, gsi);
ebfd146a 8608 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
ebfd146a
IR
8609 }
8610 else
8611 {
8612 if (j == 0)
e1bd7296 8613 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
ebfd146a 8614 else
e1bd7296
RS
8615 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
8616 prev_stmt_info = new_stmt_info;
ebfd146a
IR
8617 }
8618 }
9771b263 8619 dr_chain.release ();
ebfd146a
IR
8620 }
8621
ebfd146a
IR
8622 return true;
8623}
8624
8625/* Function vect_is_simple_cond.
b8698a0f 8626
ebfd146a
IR
8627 Input:
8628 LOOP - the loop that is being vectorized.
8629 COND - Condition that is checked for simple use.
8630
e9e1d143
RG
8631 Output:
8632 *COMP_VECTYPE - the vector type for the comparison.
4fc5ebf1 8633 *DTS - The def types for the arguments of the comparison
e9e1d143 8634
ebfd146a
IR
8635 Returns whether a COND can be vectorized. Checks whether
8636 condition operands are supportable using vec_is_simple_use. */
8637
87aab9b2 8638static bool
4fc5ebf1 8639vect_is_simple_cond (tree cond, vec_info *vinfo,
8da4c8d8
RB
8640 tree *comp_vectype, enum vect_def_type *dts,
8641 tree vectype)
ebfd146a
IR
8642{
8643 tree lhs, rhs;
e9e1d143 8644 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
ebfd146a 8645
a414c77f
IE
8646 /* Mask case. */
8647 if (TREE_CODE (cond) == SSA_NAME
2568d8a1 8648 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
a414c77f 8649 {
894dd753 8650 if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype)
a414c77f
IE
8651 || !*comp_vectype
8652 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8653 return false;
8654 return true;
8655 }
8656
ebfd146a
IR
8657 if (!COMPARISON_CLASS_P (cond))
8658 return false;
8659
8660 lhs = TREE_OPERAND (cond, 0);
8661 rhs = TREE_OPERAND (cond, 1);
8662
8663 if (TREE_CODE (lhs) == SSA_NAME)
8664 {
894dd753 8665 if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1))
ebfd146a
IR
8666 return false;
8667 }
4fc5ebf1
JG
8668 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8669 || TREE_CODE (lhs) == FIXED_CST)
8670 dts[0] = vect_constant_def;
8671 else
ebfd146a
IR
8672 return false;
8673
8674 if (TREE_CODE (rhs) == SSA_NAME)
8675 {
894dd753 8676 if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2))
ebfd146a
IR
8677 return false;
8678 }
4fc5ebf1
JG
8679 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8680 || TREE_CODE (rhs) == FIXED_CST)
8681 dts[1] = vect_constant_def;
8682 else
ebfd146a
IR
8683 return false;
8684
28b33016 8685 if (vectype1 && vectype2
928686b1
RS
8686 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8687 TYPE_VECTOR_SUBPARTS (vectype2)))
28b33016
IE
8688 return false;
8689
e9e1d143 8690 *comp_vectype = vectype1 ? vectype1 : vectype2;
8da4c8d8 8691 /* Invariant comparison. */
4515e413 8692 if (! *comp_vectype && vectype)
8da4c8d8
RB
8693 {
8694 tree scalar_type = TREE_TYPE (lhs);
8695 /* If we can widen the comparison to match vectype do so. */
8696 if (INTEGRAL_TYPE_P (scalar_type)
8697 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8698 TYPE_SIZE (TREE_TYPE (vectype))))
8699 scalar_type = build_nonstandard_integer_type
8700 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8701 TYPE_UNSIGNED (scalar_type));
8702 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8703 }
8704
ebfd146a
IR
8705 return true;
8706}
8707
8708/* vectorizable_condition.
8709
32e8e429
RS
8710 Check if STMT_INFO is conditional modify expression that can be vectorized.
8711 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
b8698a0f 8712 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4bbe8262
IR
8713 at GSI.
8714
32e8e429
RS
8715 When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector
8716 variable to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1,
8717 and in else clause if it is 2).
ebfd146a 8718
32e8e429 8719 Return true if STMT_INFO is vectorizable in this way. */
ebfd146a 8720
4bbe8262 8721bool
32e8e429 8722vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
1eede195
RS
8723 stmt_vec_info *vec_stmt, tree reduc_def,
8724 int reduc_index, slp_tree slp_node,
8725 stmt_vector_for_cost *cost_vec)
ebfd146a 8726{
e4057a39 8727 vec_info *vinfo = stmt_info->vinfo;
ebfd146a
IR
8728 tree scalar_dest = NULL_TREE;
8729 tree vec_dest = NULL_TREE;
01216d27
JJ
8730 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8731 tree then_clause, else_clause;
df11cc78 8732 tree comp_vectype = NULL_TREE;
ff802fa1
IR
8733 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8734 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
5958f9e2 8735 tree vec_compare;
ebfd146a
IR
8736 tree new_temp;
8737 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4fc5ebf1
JG
8738 enum vect_def_type dts[4]
8739 = {vect_unknown_def_type, vect_unknown_def_type,
8740 vect_unknown_def_type, vect_unknown_def_type};
8741 int ndts = 4;
f7e531cf 8742 int ncopies;
01216d27 8743 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
a855b1b1 8744 stmt_vec_info prev_stmt_info = NULL;
f7e531cf
IR
8745 int i, j;
8746 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6e1aa848
DN
8747 vec<tree> vec_oprnds0 = vNULL;
8748 vec<tree> vec_oprnds1 = vNULL;
8749 vec<tree> vec_oprnds2 = vNULL;
8750 vec<tree> vec_oprnds3 = vNULL;
74946978 8751 tree vec_cmp_type;
a414c77f 8752 bool masked = false;
b8698a0f 8753
f7e531cf
IR
8754 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8755 return false;
8756
bb6c2b68
RS
8757 vect_reduction_type reduction_type
8758 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8759 if (reduction_type == TREE_CODE_REDUCTION)
af29617a
AH
8760 {
8761 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8762 return false;
ebfd146a 8763
af29617a
AH
8764 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8765 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8766 && reduc_def))
8767 return false;
ebfd146a 8768
af29617a
AH
8769 /* FORNOW: not yet supported. */
8770 if (STMT_VINFO_LIVE_P (stmt_info))
8771 {
8772 if (dump_enabled_p ())
8773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8774 "value used after loop.\n");
8775 return false;
8776 }
ebfd146a
IR
8777 }
8778
8779 /* Is vectorizable conditional operation? */
32e8e429
RS
8780 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
8781 if (!stmt)
ebfd146a
IR
8782 return false;
8783
8784 code = gimple_assign_rhs_code (stmt);
8785
8786 if (code != COND_EXPR)
8787 return false;
8788
465c8c19 8789 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2947d3b2 8790 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
465c8c19 8791
fce57248 8792 if (slp_node)
465c8c19
JJ
8793 ncopies = 1;
8794 else
e8f142e2 8795 ncopies = vect_get_num_copies (loop_vinfo, vectype);
465c8c19
JJ
8796
8797 gcc_assert (ncopies >= 1);
8798 if (reduc_index && ncopies > 1)
8799 return false; /* FORNOW */
8800
4e71066d
RG
8801 cond_expr = gimple_assign_rhs1 (stmt);
8802 then_clause = gimple_assign_rhs2 (stmt);
8803 else_clause = gimple_assign_rhs3 (stmt);
ebfd146a 8804
4fc5ebf1 8805 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
4515e413 8806 &comp_vectype, &dts[0], slp_node ? NULL : vectype)
e9e1d143 8807 || !comp_vectype)
ebfd146a
IR
8808 return false;
8809
894dd753 8810 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1))
2947d3b2 8811 return false;
894dd753 8812 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2))
ebfd146a 8813 return false;
2947d3b2
IE
8814
8815 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8816 return false;
8817
8818 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
ebfd146a
IR
8819 return false;
8820
28b33016
IE
8821 masked = !COMPARISON_CLASS_P (cond_expr);
8822 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8823
74946978
MP
8824 if (vec_cmp_type == NULL_TREE)
8825 return false;
784fb9b3 8826
01216d27
JJ
8827 cond_code = TREE_CODE (cond_expr);
8828 if (!masked)
8829 {
8830 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8831 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8832 }
8833
8834 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8835 {
8836 /* Boolean values may have another representation in vectors
8837 and therefore we prefer bit operations over comparison for
8838 them (which also works for scalar masks). We store opcodes
8839 to use in bitop1 and bitop2. Statement is vectorized as
8840 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8841 depending on bitop1 and bitop2 arity. */
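      /* Concretely, for boolean operands: a > b becomes a & ~b,
         a >= b becomes a | ~b, a < b and a <= b swap the operands
         first, a != b becomes a ^ b, and a == b becomes a ^ b with
         the THEN and ELSE clauses swapped in place of a BIT_NOT.  */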
      switch (cond_code)
        {
        case GT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          break;
        case GE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          break;
        case LT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case LE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case NE_EXPR:
          bitop1 = BIT_XOR_EXPR;
          break;
        case EQ_EXPR:
          bitop1 = BIT_XOR_EXPR;
          bitop2 = BIT_NOT_EXPR;
          break;
        default:
          return false;
        }
      cond_code = SSA_NAME;
    }

  if (!vec_stmt)
    {
      if (bitop1 != NOP_EXPR)
        {
          machine_mode mode = TYPE_MODE (comp_vectype);
          optab optab;

          optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
          if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
            return false;

          if (bitop2 != NOP_EXPR)
            {
              optab = optab_for_tree_code (bitop2, comp_vectype,
                                           optab_default);
              if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
                return false;
            }
        }
      if (expand_vec_cond_expr_p (vectype, comp_vectype,
                                  cond_code))
        {
          STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
          vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
                                  cost_vec);
          return true;
        }
      return false;
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  if (reduction_type != EXTRACT_LAST_REDUCTION)
    vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      stmt_vec_info new_stmt_info = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (cond_expr0);
                  ops.safe_push (cond_expr1);
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt_info,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]);
                }
              else
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr0,
                                                    stmt_info, comp_vectype);
                  vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]);

                  vec_cond_rhs
                    = vect_get_vec_def_for_operand (cond_expr1,
                                                    stmt_info, comp_vectype);
                  vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt_info);
                  vect_is_simple_use (then_clause, loop_vinfo, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt_info);
                  vect_is_simple_use (else_clause, loop_vinfo, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo,
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo,
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          if (masked)
            vec_compare = vec_cond_lhs;
          else
            {
              vec_cond_rhs = vec_oprnds1[i];
              if (bitop1 == NOP_EXPR)
                vec_compare = build2 (cond_code, vec_cmp_type,
                                      vec_cond_lhs, vec_cond_rhs);
              else
                {
                  new_temp = make_ssa_name (vec_cmp_type);
                  gassign *new_stmt;
                  if (bitop1 == BIT_NOT_EXPR)
                    new_stmt = gimple_build_assign (new_temp, bitop1,
                                                    vec_cond_rhs);
                  else
                    new_stmt
                      = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
                                             vec_cond_rhs);
                  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  if (bitop2 == NOP_EXPR)
                    vec_compare = new_temp;
                  else if (bitop2 == BIT_NOT_EXPR)
                    {
                      /* Instead of doing ~x ? y : z do x ? z : y.  */
                      vec_compare = new_temp;
                      std::swap (vec_then_clause, vec_else_clause);
                    }
                  else
                    {
                      vec_compare = make_ssa_name (vec_cmp_type);
                      new_stmt
                        = gimple_build_assign (vec_compare, bitop2,
                                               vec_cond_lhs, new_temp);
                      vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                    }
                }
            }
          if (reduction_type == EXTRACT_LAST_REDUCTION)
            {
              if (!is_gimple_val (vec_compare))
                {
                  tree vec_compare_name = make_ssa_name (vec_cmp_type);
                  gassign *new_stmt = gimple_build_assign (vec_compare_name,
                                                           vec_compare);
                  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  vec_compare = vec_compare_name;
                }
              gcc_assert (reduc_index == 2);
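              /* IFN_FOLD_EXTRACT_LAST yields the last element of
                 VEC_THEN_CLAUSE for which VEC_COMPARE is true, or
                 ELSE_CLAUSE if no element is selected.  */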
              gcall *new_stmt = gimple_build_call_internal
                (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
                 vec_then_clause);
              gimple_call_set_lhs (new_stmt, scalar_dest);
              SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
              if (stmt_info->stmt == gsi_stmt (*gsi))
                new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
              else
                {
                  /* In this case we're moving the definition to later in the
                     block.  That doesn't matter because the only uses of the
                     lhs are in phi statements.  */
                  gimple_stmt_iterator old_gsi
                    = gsi_for_stmt (stmt_info->stmt);
                  gsi_remove (&old_gsi, true);
                  new_stmt_info
                    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }
            }
          else
            {
              new_temp = make_ssa_name (vec_dest);
              gassign *new_stmt
                = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
                                       vec_then_clause, vec_else_clause);
              new_stmt_info
                = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;

      prev_stmt_info = new_stmt_info;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}

/* vectorizable_comparison.

   Check if STMT_INFO is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                         stmt_vec_info *vec_stmt, tree reduc_def,
                         slp_tree slp_node, stmt_vector_for_cost *cost_vec)
{
  vec_info *vinfo = stmt_info->vinfo;
  tree lhs, rhs1, rhs2;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  poly_uint64 nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
                   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
        return false;
    }
  else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
    return false;

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;

  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
9223 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9224 {
9225 if (code == GT_EXPR)
9226 {
9227 bitop1 = BIT_NOT_EXPR;
9228 bitop2 = BIT_AND_EXPR;
9229 }
9230 else if (code == GE_EXPR)
9231 {
9232 bitop1 = BIT_NOT_EXPR;
9233 bitop2 = BIT_IOR_EXPR;
9234 }
9235 else if (code == LT_EXPR)
9236 {
9237 bitop1 = BIT_NOT_EXPR;
9238 bitop2 = BIT_AND_EXPR;
9239 std::swap (rhs1, rhs2);
264d951a 9240 std::swap (dts[0], dts[1]);
49e76ff1
IE
9241 }
9242 else if (code == LE_EXPR)
9243 {
9244 bitop1 = BIT_NOT_EXPR;
9245 bitop2 = BIT_IOR_EXPR;
9246 std::swap (rhs1, rhs2);
264d951a 9247 std::swap (dts[0], dts[1]);
49e76ff1
IE
9248 }
9249 else
9250 {
9251 bitop1 = BIT_XOR_EXPR;
9252 if (code == EQ_EXPR)
9253 bitop2 = BIT_NOT_EXPR;
9254 }
9255 }
9256
42fd8198
IE
9257 if (!vec_stmt)
9258 {
49e76ff1 9259 if (bitop1 == NOP_EXPR)
68435eb2
RB
9260 {
9261 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
9262 return false;
9263 }
49e76ff1
IE
9264 else
9265 {
9266 machine_mode mode = TYPE_MODE (vectype);
9267 optab optab;
9268
9269 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9270 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9271 return false;
9272
9273 if (bitop2 != NOP_EXPR)
9274 {
9275 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9276 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9277 return false;
9278 }
49e76ff1 9279 }
68435eb2
RB
9280
9281 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9282 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9283 dts, ndts, slp_node, cost_vec);
9284 return true;
42fd8198
IE
9285 }
9286
9287 /* Transform. */
9288 if (!slp_node)
9289 {
9290 vec_oprnds0.create (1);
9291 vec_oprnds1.create (1);
9292 }
9293
9294 /* Handle def. */
9295 lhs = gimple_assign_lhs (stmt);
9296 mask = vect_create_destination_var (lhs, mask_type);
9297
9298 /* Handle cmp expr. */
9299 for (j = 0; j < ncopies; j++)
9300 {
e1bd7296 9301 stmt_vec_info new_stmt_info = NULL;
42fd8198
IE
9302 if (j == 0)
9303 {
9304 if (slp_node)
9305 {
9306 auto_vec<tree, 2> ops;
9307 auto_vec<vec<tree>, 2> vec_defs;
9308
9309 ops.safe_push (rhs1);
9310 ops.safe_push (rhs2);
306b0c92 9311 vect_get_slp_defs (ops, slp_node, &vec_defs);
42fd8198
IE
9312 vec_oprnds1 = vec_defs.pop ();
9313 vec_oprnds0 = vec_defs.pop ();
9314 }
9315 else
9316 {
86a91c0a
RS
9317 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
9318 vectype);
9319 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
9320 vectype);
42fd8198
IE
9321 }
9322 }
9323 else
9324 {
e4057a39 9325 vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo,
42fd8198 9326 vec_oprnds0.pop ());
e4057a39 9327 vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo,
42fd8198
IE
9328 vec_oprnds1.pop ());
9329 }
9330
9331 if (!slp_node)
9332 {
9333 vec_oprnds0.quick_push (vec_rhs1);
9334 vec_oprnds1.quick_push (vec_rhs2);
9335 }
9336
9337 /* Arguments are ready. Create the new vector stmt. */
9338 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9339 {
9340 vec_rhs2 = vec_oprnds1[i];
9341
9342 new_temp = make_ssa_name (mask);
49e76ff1
IE
9343 if (bitop1 == NOP_EXPR)
9344 {
e1bd7296
RS
9345 gassign *new_stmt = gimple_build_assign (new_temp, code,
9346 vec_rhs1, vec_rhs2);
9347 new_stmt_info
86a91c0a 9348 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
49e76ff1
IE
9349 }
9350 else
9351 {
e1bd7296 9352 gassign *new_stmt;
49e76ff1
IE
9353 if (bitop1 == BIT_NOT_EXPR)
9354 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9355 else
9356 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9357 vec_rhs2);
e1bd7296 9358 new_stmt_info
86a91c0a 9359 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
49e76ff1
IE
9360 if (bitop2 != NOP_EXPR)
9361 {
9362 tree res = make_ssa_name (mask);
9363 if (bitop2 == BIT_NOT_EXPR)
9364 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9365 else
9366 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9367 new_temp);
e1bd7296 9368 new_stmt_info
86a91c0a 9369 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
49e76ff1
IE
9370 }
9371 }
42fd8198 9372 if (slp_node)
e1bd7296 9373 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
42fd8198
IE
9374 }
9375
9376 if (slp_node)
9377 continue;
9378
9379 if (j == 0)
e1bd7296 9380 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
42fd8198 9381 else
e1bd7296 9382 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
42fd8198 9383
e1bd7296 9384 prev_stmt_info = new_stmt_info;
42fd8198
IE
9385 }
9386
9387 vec_oprnds0.release ();
9388 vec_oprnds1.release ();
9389
9390 return true;
9391}
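
/* Editorial sketch, not part of GCC (hypothetical helper name): a
   standalone check that the bitop1/bitop2 decomposition chosen above
   matches the scalar comparison semantics for single-bit booleans,
   treating true as 1 and false as 0.  */

static void ATTRIBUTE_UNUSED
check_bool_cmp_lowering (void)
{
  for (int a = 0; a <= 1; a++)
    for (int b = 0; b <= 1; b++)
      {
	gcc_checking_assert ((a > b) == ((a & ~b) & 1));   /* NOT, AND */
	gcc_checking_assert ((a >= b) == ((a | ~b) & 1));  /* NOT, IOR */
	gcc_checking_assert ((a < b) == ((b & ~a) & 1));   /* swapped > */
	gcc_checking_assert ((a <= b) == ((b | ~a) & 1));  /* swapped >= */
	gcc_checking_assert ((a == b) == (~(a ^ b) & 1));  /* XOR, NOT */
	gcc_checking_assert ((a != b) == ((a ^ b) & 1));   /* XOR alone */
      }
}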

/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT_INFO is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, stmt_vec_info *vec_stmt,
			  stmt_vector_for_cost *cost_vec)
{
  if (slp_node)
    {
      stmt_vec_info slp_stmt_info;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
	{
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
					       vec_stmt, cost_vec))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (stmt_info)
	   && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
					    vec_stmt, cost_vec))
    return false;

  return true;
}

/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
		   slp_tree node, slp_instance node_instance,
		   stmt_vector_for_cost *cost_vec)
{
  vec_info *vinfo = stmt_info->vinfo;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt_info->stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info pattern_def_stmt_info
	    = vinfo->lookup_stmt (gsi_stmt (si));
	  if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
	      || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
				    pattern_def_stmt_info->stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt_info,
				      need_to_vectorize, node, node_instance,
				      cost_vec))
		return false;
	    }
	}
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance; therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal; don't analyze pattern stmts instead, since the pattern stmts
     will already be part of an SLP instance.  */

  stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt_info
	  && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
	      || STMT_VINFO_LIVE_P (pattern_stmt_info)))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt_info = pattern_stmt_info;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt_info
	   && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
	       || STMT_VINFO_LIVE_P (pattern_stmt_info)))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
			      node_instance, cost_vec))
	return false;
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      tree type = gimple_expr_type (stmt_info->stmt);
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (call && gimple_call_lhs (call) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
				cost_vec)
	  || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_reduction (stmt_info, NULL, NULL, node,
				     node_instance, cost_vec)
	  || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
	  || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
				     cost_vec)
	  || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
				      cost_vec));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
					    cost_vec)
	      || vectorizable_conversion (stmt_info, NULL, NULL, node,
					  cost_vec)
	      || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_assignment (stmt_info, NULL, NULL, node,
					  cost_vec)
	      || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
				    cost_vec)
	      || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
	      || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
					 cost_vec)
	      || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
					  cost_vec));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			    stmt_info->stmt, 0);
	}

      return false;
    }

  /* Stmts that are (also) "live" (i.e., used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (!bb_vinfo
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			    stmt_info->stmt, 0);
	}

      return false;
    }

  return true;
}


/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT_INFO, and insert it at BSI.  */

bool
vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		     slp_tree slp_node, slp_instance slp_node_instance)
{
  vec_info *vinfo = stmt_info->vinfo;
  bool is_store = false;
  stmt_vec_info vec_stmt = NULL;
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);

  bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
		   && nested_in_vect_loop_p
			(LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
			 stmt_info));

  gimple *stmt = stmt_info->stmt;
  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
				      NULL);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
				     NULL);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
				     NULL);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
				      NULL);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
				slp_node_instance, NULL);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
	  if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0,
				     slp_node, NULL);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL,
				      slp_node, NULL);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
      stmt = gsi_stmt (*gsi);
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
					   slp_node, NULL);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
				     slp_node_instance, NULL);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && nested_p
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
	STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_get_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	  {
	    stmt_vec_info exit_phi_info
	      = vinfo->lookup_stmt (USE_STMT (use_p));
	    STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt;
	  }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
				       NULL);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}


/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (stmt_vec_info first_stmt_info)
{
  vec_info *vinfo = first_stmt_info->vinfo;
  stmt_vec_info next_stmt_info = first_stmt_info;

  while (next_stmt_info)
    {
      stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
      next_stmt_info = vect_orig_stmt (next_stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      vinfo->remove_stmt (next_stmt_info);
      next_stmt_info = tmp;
    }
}

/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  poly_uint64 nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied, use the mode the target prefers.  Otherwise
     look up a vector mode of the specified size.  */
  if (known_eq (size, 0U))
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!multiple_p (size, nbytes, &nunits)
	   || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
    return NULL_TREE;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
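
/* Editorial sketch, not part of GCC (hypothetical debug helper): probe
   the vector type chosen for a given scalar type and size.  The resulting
   mode is target-dependent; for example, many targets with 16-byte
   vectors map int to a V4SI-mode vector type.  */

static void ATTRIBUTE_UNUSED
debug_vectype_for_size (tree scalar_type, poly_uint64 size)
{
  tree vectype = get_vectype_for_scalar_type_and_size (scalar_type, size);
  if (vectype)
    fprintf (stderr, "-> mode %s\n", GET_MODE_NAME (TYPE_MODE (vectype)));
  else
    fprintf (stderr, "-> no suitable vector type\n");
}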

poly_uint64 current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && known_eq (current_vector_size, 0U))
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
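
/* Editorial example, not part of GCC: with a 16-byte VECTOR_TYPE such as
   a V8HI vector of shorts, requesting the same-sized vectype for int
   would be expected to yield a V4SI vector -- same total size, wider
   elements -- while a scalar boolean SCALAR_TYPE instead maps to the
   target's same-sized truth (mask) vector type.  */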

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
     case OPERAND is an SSA_NAME that is defined in the vectorizable region
   DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
     the definition could be anywhere in the function
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
		    stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
{
  if (def_stmt_info_out)
    *def_stmt_info_out = NULL;
  if (def_stmt_out)
    *def_stmt_out = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      if (TREE_CODE (operand) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (operand))
	dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
      else
	dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
    }

  if (CONSTANT_CLASS_P (operand))
    *dt = vect_constant_def;
  else if (is_gimple_min_invariant (operand))
    *dt = vect_external_def;
  else if (TREE_CODE (operand) != SSA_NAME)
    *dt = vect_unknown_def_type;
  else if (SSA_NAME_IS_DEFAULT_DEF (operand))
    *dt = vect_external_def;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
      stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
      if (!stmt_vinfo)
	*dt = vect_external_def;
      else
	{
	  stmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
	  def_stmt = stmt_vinfo->stmt;
	  switch (gimple_code (def_stmt))
	    {
	    case GIMPLE_PHI:
	    case GIMPLE_ASSIGN:
	    case GIMPLE_CALL:
	      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
	      break;
	    default:
	      *dt = vect_unknown_def_type;
	      break;
	    }
	  if (def_stmt_info_out)
	    *def_stmt_info_out = stmt_vinfo;
	}
      if (def_stmt_out)
	*def_stmt_out = def_stmt;
    }

  if (dump_enabled_p ())
    {
      dump_printf (MSG_NOTE, ", type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
		    tree *vectype, stmt_vec_info *def_stmt_info_out,
		    gimple **def_stmt_out)
{
  stmt_vec_info def_stmt_info;
  gimple *def_stmt;
  if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
    return false;

  if (def_stmt_out)
    *def_stmt_out = def_stmt;
  if (def_stmt_info_out)
    *def_stmt_info_out = def_stmt_info;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
      gcc_assert (*vectype != NULL_TREE);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_is_simple_use: vectype ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
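
/* Editorial example, not part of GCC: for a loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i] * 4 + x;

   vect_is_simple_use would be expected to classify the operands as
   follows: 4 -> vect_constant_def and the loop-invariant x ->
   vect_external_def (both with *VECTYPE = NULL_TREE, left to the
   caller), b[i] -> vect_internal_def (with the vectype of the load),
   and i -> vect_induction_def (defined by the loop PHI).  */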


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT,
   FIX_TRUNC and WIDEN_MULT.  This function checks if these operations
   are supported by the target platform either directly (via vector
   tree-codes), or via target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt_info)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt_info, vectype_out,
					     vectype_in, code1, code2,
					     multi_step_cvt, interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have the
	     same operation.  One such example is s += a * b, where elements
	     in a and b cannot be reordered.  Here we check if the vector
	     defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt_info->stmt);
	  stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
	  if (use_stmt_info
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
      c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check for the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
			 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type = vect_halve_mask_nunits (prev_type);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
			     TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
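
/* Editorial example, not part of GCC: for a char -> int conversion on a
   target providing only single-step unpacks, the expected outputs are
   *CODE1 = VEC_UNPACK_LO_EXPR, *CODE2 = VEC_UNPACK_HI_EXPR,
   *MULTI_STEP_CVT = 1 and *INTERM_TYPES = { vector of short }: each step
   doubles the element width and therefore splits one input vector into
   a LO and a HI result vector.  */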


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
   and FLOAT.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_PACK_FLOAT_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check for the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
			 TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  if (code == FLOAT_EXPR)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type = vect_double_mask_nunits (prev_type);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
			     TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
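
/* Editorial example, not part of GCC: the mirror case, an int -> char
   conversion, packs through short, giving *CODE1 = VEC_PACK_TRUNC_EXPR,
   *MULTI_STEP_CVT = 1 and *INTERM_TYPES = { vector of short }; each step
   halves the element width and fuses two input vectors into one result
   vector.  */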

/* Generate and return a statement that sets vector mask MASK such that
   MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I.  */

gcall *
vect_gen_while (tree mask, tree start_index, tree end_index)
{
  tree cmp_type = TREE_TYPE (start_index);
  tree mask_type = TREE_TYPE (mask);
  gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
						       cmp_type, mask_type,
						       OPTIMIZE_FOR_SPEED));
  gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
					    start_index, end_index,
					    build_zero_cst (mask_type));
  gimple_call_set_lhs (call, mask);
  return call;
}

/* Generate a vector mask of type MASK_TYPE for which index I is false iff
   J + START_INDEX < END_INDEX for all J <= I.  Add the statements to SEQ.  */

tree
vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
		    tree end_index)
{
  tree tmp = make_ssa_name (mask_type);
  gcall *call = vect_gen_while (tmp, start_index, end_index);
  gimple_seq_add_stmt (seq, call);
  return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
}
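
/* Editorial sketch, not part of GCC (hypothetical helper): a scalar model
   of the IFN_WHILE_ULT semantics used above.  Lane I of the mask is true
   iff START_INDEX + I < END_INDEX, so the mask is a run of true lanes
   followed by false lanes -- exactly the shape needed to mask off the
   scalar tail of a loop.  */

static void ATTRIBUTE_UNUSED
model_while_ult (unsigned HOST_WIDE_INT start_index,
		 unsigned HOST_WIDE_INT end_index,
		 bool *mask, unsigned int nunits)
{
  for (unsigned int i = 0; i < nunits; i++)
    mask[i] = start_index + i < end_index;
}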
1f3cb663
RS
10583
10584/* Try to compute the vector types required to vectorize STMT_INFO,
10585 returning true on success and false if vectorization isn't possible.
10586
10587 On success:
10588
10589 - Set *STMT_VECTYPE_OUT to:
10590 - NULL_TREE if the statement doesn't need to be vectorized;
10591 - boolean_type_node if the statement is a boolean operation whose
10592 vector type can only be determined once all the other vector types
10593 are known; and
10594 - the equivalent of STMT_VINFO_VECTYPE otherwise.
10595
10596 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
10597 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
10598 statement does not help to determine the overall number of units. */
10599
10600bool
10601vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
10602 tree *stmt_vectype_out,
10603 tree *nunits_vectype_out)
10604{
10605 gimple *stmt = stmt_info->stmt;
10606
10607 *stmt_vectype_out = NULL_TREE;
10608 *nunits_vectype_out = NULL_TREE;
10609
10610 if (gimple_get_lhs (stmt) == NULL_TREE
10611 /* MASK_STORE has no lhs, but is ok. */
10612 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
10613 {
10614 if (is_a <gcall *> (stmt))
10615 {
10616 /* Ignore calls with no lhs. These must be calls to
10617 #pragma omp simd functions, and what vectorization factor
10618 it really needs can't be determined until
10619 vectorizable_simd_clone_call. */
10620 if (dump_enabled_p ())
10621 dump_printf_loc (MSG_NOTE, vect_location,
10622 "defer to SIMD clone analysis.\n");
10623 return true;
10624 }
10625
10626 if (dump_enabled_p ())
10627 {
10628 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10629 "not vectorized: irregular stmt.");
10630 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10631 }
10632 return false;
10633 }
10634
10635 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
10636 {
10637 if (dump_enabled_p ())
10638 {
10639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10640 "not vectorized: vector stmt in loop:");
10641 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10642 }
10643 return false;
10644 }
10645
10646 tree vectype;
10647 tree scalar_type = NULL_TREE;
10648 if (STMT_VINFO_VECTYPE (stmt_info))
10649 *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
10650 else
10651 {
10652 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
10653 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
10654 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
10655 else
10656 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
10657
10658 /* Pure bool ops don't participate in number-of-units computation.
10659 For comparisons use the types being compared. */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
          && is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) != COND_EXPR)
        {
          *stmt_vectype_out = boolean_type_node;

          tree rhs1 = gimple_assign_rhs1 (stmt);
          if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
              && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
            scalar_type = TREE_TYPE (rhs1);
          else
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "pure bool operation.\n");
              return true;
            }
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not vectorized: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (!*stmt_vectype_out)
        *stmt_vectype_out = vectype;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  /* Don't try to compute scalar types if the stmt produces a boolean
     vector; use the existing vector type instead.  */
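  /* (A boolean vector's element type, e.g. <signed-boolean:1>, does not
     round-trip through get_vectype_for_scalar_type, so recomputing from
     scalar types could produce a mismatched mask layout.)  */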
  tree nunits_vectype;
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    nunits_vectype = vectype;
  else
    {
      /* The number of units is set according to the smallest scalar
         type (or the largest vector size, but we only support one
         vector size per vectorization).  */
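      /* E.g. for a widening conversion "int_x = (int) short_y"
         (a hypothetical statement), the smallest scalar type is
         "short", so with 128-bit vectors NUNITS_VECTYPE would be
         "vector(8) short int" even though the statement's own
         vector type is "vector(4) int".  */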
      if (*stmt_vectype_out != boolean_type_node)
        {
          HOST_WIDE_INT dummy;
          scalar_type = vect_get_smallest_scalar_type (stmt_info,
                                                       &dummy, &dummy);
        }
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }
      nunits_vectype = get_vectype_for_scalar_type (scalar_type);
    }
  if (!nunits_vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
                GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: different sized vector "
                           "types in statement, ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
      dump_printf (MSG_NOTE, "\n");

      dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
      dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
      dump_printf (MSG_NOTE, "\n");
    }

  *nunits_vectype_out = nunits_vectype;
  return true;
}
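
/* A minimal caller sketch, assuming the analysis phase drives the
   function above once per statement.  The helper below is illustrative
   only: its name and the poly_uint64 accumulator VF are assumptions,
   not part of the vectorizer's interface.  Statements flagged with
   boolean_type_node are left for vect_get_mask_type_for_stmt below.  */

static bool
vect_sketch_update_vf (stmt_vec_info stmt_info, poly_uint64 *vf)
{
  tree stmt_vectype, nunits_vectype;
  if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
                                       &nunits_vectype))
    return false;

  /* Cache the statement's own vector type, except for pure booleans,
     whose real mask type is not known yet.  */
  if (stmt_vectype
      && stmt_vectype != boolean_type_node
      && !STMT_VINFO_VECTYPE (stmt_info))
    STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;

  /* Only the number-of-units type contributes to the vectorization
     factor.  */
  if (nunits_vectype)
    vect_update_max_nunits (vf, nunits_vectype);
  return true;
}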

/* Try to determine the correct vector type for STMT_INFO, which is a
   statement that produces a scalar boolean result.  Return the vector
   type on success, otherwise return NULL_TREE.  */

tree
vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
{
  gimple *stmt = stmt_info->stmt;
  tree mask_type = NULL;
  tree vectype, scalar_type;

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
      && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
    {
      scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
      mask_type = get_mask_type_for_scalar_type (scalar_type);

      if (!mask_type)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported mask\n");
          return NULL_TREE;
        }
    }
  else
    {
      tree rhs;
      ssa_op_iter iter;
      enum vect_def_type dt;

      FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
        {
          if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: can't compute mask type "
                                   "for statement, ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                }
              return NULL_TREE;
            }

          /* No vectype probably means an external definition.  Allow it
             in case there is another operand that allows the mask type
             to be determined.  */
          if (!vectype)
            continue;

          if (!mask_type)
            mask_type = vectype;
          else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
                             TYPE_VECTOR_SUBPARTS (vectype)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized mask "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     mask_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return NULL_TREE;
            }
          else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
                   != VECTOR_BOOLEAN_TYPE_P (vectype))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: mixed mask and "
                                   "nonmask vector types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     mask_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return NULL_TREE;
            }
        }

      /* We may compare a boolean value loaded as a vector of integers.
         Fix mask_type in such a case.  */
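      /* E.g. (hypothetical) "_1 = b_2 != 0" where the _Bool value B_2
         was loaded with a "vector(16) unsigned char" vectype: MASK_TYPE
         is then an integer vector, so switch to the equally-sized truth
         vector type instead.  */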
      if (mask_type
          && !VECTOR_BOOLEAN_TYPE_P (mask_type)
          && gimple_code (stmt) == GIMPLE_ASSIGN
          && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
        mask_type = build_same_sized_truth_vector_type (mask_type);
    }

  /* No mask_type should mean a loop-invariant predicate.
     This is probably a subject for optimization in if-conversion.  */
  if (!mask_type && dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "not vectorized: can't compute mask type "
                       "for statement, ");
      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
    }
  return mask_type;
}
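
/* A minimal caller sketch (illustrative only; the helper name is an
   assumption): once all statements have been analyzed, a mask producer
   that was flagged with boolean_type_node can have its real vector
   type resolved and recorded.  */

static bool
vect_sketch_resolve_mask (stmt_vec_info stmt_info)
{
  tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
  if (!mask_type)
    return false;

  /* The mask producer's vector type is now known.  */
  STMT_VINFO_VECTYPE (stmt_info) = mask_type;
  return true;
}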