/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
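
/* For example (cf. vect_model_simple_cost below), a caller summing
   inside-of-loop costs does

     inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				      stmt_info, 0, vect_body);

   whereas passing a NULL cost vector hands the cost directly to the
   target via add_stmt_cost.  */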

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
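
/* A sketch of how these helpers are meant to combine (their callers,
   e.g. the store-lanes code, live further down in this file; names
   like vec_num and vec_oprnds are illustrative):

     tree vec_array = create_vector_array (vectype, vec_num);
     for (i = 0; i < vec_num; i++)
       write_vector_array (stmt, gsi, vec_oprnds[i], vec_array, i);

   with read_vector_array used analogously on the load side.  */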

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern; in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is an out-of-pattern use; if LHS has other uses that
	     are pattern uses, we should mark the stmt itself, and not the
	     pattern stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}


/* Function vect_stmt_relevant_p.

   Return true if STMT, in a loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form).  */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
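
/* For instance, a store "a[i_1] = x_2" in the loop body is relevant
   because it has a vdef, while a def whose only uses are in the
   loop-closed exit phis makes *LIVE_P true instead.  */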

/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
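
/* For example, given the load "x_1 = a[i_2]", the use of i_2 only
   indexes the array, so this returns false for i_2; for the store
   "a[i_2] = x_3" it returns true for x_3 (the stored value) and
   still false for i_2.  */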


/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if the exist_non_indexing_operands_for_use_p check
     shouldn't be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant"
     will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle)
		     ? vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
		      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
			 == vect_double_reduction_def)
		     ? vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}


/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process worklist.  */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
	 it (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a
	 reduction variable; in this case we set the liveness/relevance
	 as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore
	 the order of the results that they produce does not have to be
	 kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL,
						 &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}


/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
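
/* For instance, a two-step promotion (PWR == 1), e.g. V16QI -> V4SI
   via V8HI, is costed as vect_pow2 (1) + vect_pow2 (2) = 2 + 4
   vec_promote_demote stmts, since each promotion step doubles the
   number of vectors produced; the matching two-step demotion is
   costed as vect_pow2 (0) + vect_pow2 (1) = 1 + 2 stmts.  */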

/* Function vect_cost_group_size

   For a grouped load or store, return the group_size only if it is the
   first load or store of the group, else return 1.  This ensures that
   group size is only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}


/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
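
/* Worked example for the permute count above: a non-strided group of
   GROUP_SIZE == 4 stores with ncopies == 1 is charged
   1 * ceil_log2 (4) * 4 = 8 vec_perm stmts, i.e. two interleave
   rounds over the four vectors.  */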


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0,
					  vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type, a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
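
/* For example (see vect_get_vec_def_for_operand below), an invariant
   or constant operand OP is broadcast in the preheader via

     vec_def = vect_init_vector (stmt, op, vector_type, NULL);

   which returns the SSA name of a new "cst_" vector holding
   { OP, OP, ..., OP }.  */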
1320
a70d6342 1321
ebfd146a
IR
1322/* Function vect_get_vec_def_for_operand.
1323
ff802fa1 1324 OP is an operand in STMT. This function returns a (vector) def that will be
ebfd146a
IR
1325 used in the vectorized stmt for STMT.
1326
1327 In the case that OP is an SSA_NAME which is defined in the loop, then
1328 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1329
1330 In case OP is an invariant or constant, a new stmt that creates a vector def
42fd8198
IE
1331 needs to be introduced. VECTYPE may be used to specify a required type for
1332 vector invariant. */
ebfd146a
IR
1333
1334tree
42fd8198 1335vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
ebfd146a
IR
1336{
1337 tree vec_oprnd;
355fe088
TS
1338 gimple *vec_stmt;
1339 gimple *def_stmt;
ebfd146a
IR
1340 stmt_vec_info def_stmt_info = NULL;
1341 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
42fd8198 1342 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
ebfd146a 1343 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
ebfd146a
IR
1344 enum vect_def_type dt;
1345 bool is_simple_use;
1346 tree vector_type;
1347
73fbfcad 1348 if (dump_enabled_p ())
ebfd146a 1349 {
78c60e3d
SS
1350 dump_printf_loc (MSG_NOTE, vect_location,
1351 "vect_get_vec_def_for_operand: ");
1352 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
e645e942 1353 dump_printf (MSG_NOTE, "\n");
ebfd146a
IR
1354 }
1355
81c40241 1356 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
ebfd146a 1357 gcc_assert (is_simple_use);
73fbfcad 1358 if (dump_enabled_p ())
ebfd146a 1359 {
78c60e3d 1360 int loc_printed = 0;
ebfd146a
IR
1361 if (def_stmt)
1362 {
78c60e3d
SS
1363 if (loc_printed)
1364 dump_printf (MSG_NOTE, " def_stmt = ");
1365 else
1366 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1367 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
ebfd146a
IR
1368 }
1369 }
1370
1371 switch (dt)
1372 {
81c40241 1373 /* operand is a constant or a loop invariant. */
ebfd146a 1374 case vect_constant_def:
81c40241 1375 case vect_external_def:
ebfd146a 1376 {
42fd8198
IE
1377 if (vectype)
1378 vector_type = vectype;
1379 else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
1380 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1381 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1382 else
1383 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1384
7569a6cc 1385 gcc_assert (vector_type);
418b7df3 1386 return vect_init_vector (stmt, op, vector_type, NULL);
ebfd146a
IR
1387 }
1388
81c40241 1389 /* operand is defined inside the loop. */
8644a673 1390 case vect_internal_def:
ebfd146a 1391 {
ebfd146a
IR
1392 /* Get the def from the vectorized stmt. */
1393 def_stmt_info = vinfo_for_stmt (def_stmt);
83197f37 1394
ebfd146a 1395 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
83197f37
IR
1396 /* Get vectorized pattern statement. */
1397 if (!vec_stmt
1398 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1399 && !STMT_VINFO_RELEVANT (def_stmt_info))
1400 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1401 STMT_VINFO_RELATED_STMT (def_stmt_info)));
ebfd146a
IR
1402 gcc_assert (vec_stmt);
1403 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1404 vec_oprnd = PHI_RESULT (vec_stmt);
1405 else if (is_gimple_call (vec_stmt))
1406 vec_oprnd = gimple_call_lhs (vec_stmt);
1407 else
1408 vec_oprnd = gimple_assign_lhs (vec_stmt);
1409 return vec_oprnd;
1410 }
1411
81c40241 1412 /* operand is defined by a loop header phi - reduction */
ebfd146a 1413 case vect_reduction_def:
06066f92 1414 case vect_double_reduction_def:
7c5222ff 1415 case vect_nested_cycle:
81c40241
RB
1416 /* Code should use get_initial_def_for_reduction. */
1417 gcc_unreachable ();
ebfd146a 1418
81c40241 1419 /* operand is defined by loop-header phi - induction. */
ebfd146a
IR
1420 case vect_induction_def:
1421 {
1422 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1423
1424 /* Get the def from the vectorized stmt. */
1425 def_stmt_info = vinfo_for_stmt (def_stmt);
1426 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
6dbbece6
RG
1427 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1428 vec_oprnd = PHI_RESULT (vec_stmt);
1429 else
1430 vec_oprnd = gimple_get_lhs (vec_stmt);
ebfd146a
IR
1431 return vec_oprnd;
1432 }
1433
1434 default:
1435 gcc_unreachable ();
1436 }
1437}
1438
1439
1440/* Function vect_get_vec_def_for_stmt_copy
1441
ff802fa1 1442 Return a vector-def for an operand. This function is used when the
b8698a0f
L
1443 vectorized stmt to be created (by the caller to this function) is a "copy"
1444 created in case the vectorized result cannot fit in one vector, and several
ff802fa1 1445 copies of the vector-stmt are required. In this case the vector-def is
ebfd146a 1446 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
b8698a0f 1447 of the stmt that defines VEC_OPRND.
ebfd146a
IR
1448 DT is the type of the vector def VEC_OPRND.
1449
1450 Context:
1451 In case the vectorization factor (VF) is bigger than the number
1452 of elements that can fit in a vectype (nunits), we have to generate
ff802fa1 1453 more than one vector stmt to vectorize the scalar stmt. This situation
b8698a0f 1454 arises when there are multiple data-types operated upon in the loop; the
ebfd146a
IR
1455 smallest data-type determines the VF, and as a result, when vectorizing
1456 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1457 vector stmt (each computing a vector of 'nunits' results, and together
b8698a0f 1458 computing 'VF' results in each iteration). This function is called when
ebfd146a
IR
1459 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1460 which VF=16 and nunits=4, so the number of copies required is 4):
1461
1462 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
b8698a0f 1463
ebfd146a
IR
1464 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1465 VS1.1: vx.1 = memref1 VS1.2
1466 VS1.2: vx.2 = memref2 VS1.3
b8698a0f 1467 VS1.3: vx.3 = memref3
ebfd146a
IR
1468
1469 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1470 VSnew.1: vz1 = vx.1 + ... VSnew.2
1471 VSnew.2: vz2 = vx.2 + ... VSnew.3
1472 VSnew.3: vz3 = vx.3 + ...
1473
1474 The vectorization of S1 is explained in vectorizable_load.
1475 The vectorization of S2:
b8698a0f
L
1476 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1477 the function 'vect_get_vec_def_for_operand' is called to
ff802fa1 1478 get the relevant vector-def for each operand of S2. For operand x it
ebfd146a
IR
1479 returns the vector-def 'vx.0'.
1480
b8698a0f
L
1481 To create the remaining copies of the vector-stmt (VSnew.j), this
1482 function is called to get the relevant vector-def for each operand. It is
1483 obtained from the respective VS1.j stmt, which is recorded in the
ebfd146a
IR
1484 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1485
b8698a0f
L
1486 For example, to obtain the vector-def 'vx.1' in order to create the
1487 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1488 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
ebfd146a
IR
1489 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1490 and return its def ('vx.1').
1491 Overall, to create the above sequence this function will be called 3 times:
1492 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1493 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1494 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1495
1496tree
1497vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1498{
355fe088 1499 gimple *vec_stmt_for_operand;
ebfd146a
IR
1500 stmt_vec_info def_stmt_info;
1501
1502 /* Do nothing; can reuse same def. */
8644a673 1503 if (dt == vect_external_def || dt == vect_constant_def )
ebfd146a
IR
1504 return vec_oprnd;
1505
1506 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1507 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1508 gcc_assert (def_stmt_info);
1509 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1510 gcc_assert (vec_stmt_for_operand);
ebfd146a
IR
1511 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1512 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1513 else
1514 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1515 return vec_oprnd;
1516}
1517
1518
1519/* Get vectorized definitions for the operands to create a copy of an original
ff802fa1 1520 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
ebfd146a
IR
1521
1522static void
b8698a0f 1523vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
9771b263
DN
1524 vec<tree> *vec_oprnds0,
1525 vec<tree> *vec_oprnds1)
ebfd146a 1526{
9771b263 1527 tree vec_oprnd = vec_oprnds0->pop ();
ebfd146a
IR
1528
1529 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
9771b263 1530 vec_oprnds0->quick_push (vec_oprnd);
ebfd146a 1531
9771b263 1532 if (vec_oprnds1 && vec_oprnds1->length ())
ebfd146a 1533 {
9771b263 1534 vec_oprnd = vec_oprnds1->pop ();
ebfd146a 1535 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
9771b263 1536 vec_oprnds1->quick_push (vec_oprnd);
ebfd146a
IR
1537 }
1538}


/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
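
/* A typical (non-SLP) caller pattern for the two getters above, as a
   hedged sketch (the surrounding declarations are assumed): the first
   copy fetches defs via vect_get_vec_def_for_operand, later copies
   advance along the related-stmt chains:

       for (j = 0; j < ncopies; j++)
         {
           if (j == 0)
             vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                                slp_node, -1);
           else
             vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
           ... build the j-th vector stmt from the refreshed operands ...
         }
*/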


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
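
/* A minimal usage sketch (illustrative only): callers build a vector
   stmt, give it an SSA lhs, then hand it to the routine above, which
   inserts it before GSI and records its stmt_vec_info:

       new_stmt = gimple_build_assign (make_ssa_name (vectype), rhs);
       vect_finish_stmt_generation (stmt, new_stmt, gsi);

   The virtual-operand fixup above only matters when the new stmt
   touches memory and is inserted before a stmt that already carries
   a VUSE.  */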

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (call))
    return NULL_TREE;

  combined_fn fn = gimple_call_combined_fn (call);
  if (fn != CFN_LAST)
    return targetm.vectorize.builtin_vectorized_function
      (fn, vectype_out, vectype_in);

  if (gimple_call_builtin_p (call, BUILT_IN_MD))
    return targetm.vectorize.builtin_md_vectorized_function
      (gimple_call_fndecl (call), vectype_out, vectype_in);

  return NULL_TREE;
}
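
/* For instance (a hedged example; the returned decls are entirely
   target specific): for a call to sqrt in a loop vectorized at V2DF,
   gimple_call_combined_fn yields CFN_SQRT, and a target's
   builtin_vectorized_function hook may hand back the decl of its
   V2DF square-root builtin, which vectorizable_call below then emits
   in place of the scalar call.  */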


static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);


/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
                              gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  bool is_store;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gcc_assert (ncopies >= 1);

  is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
  mask = gimple_call_arg (stmt, 2);

  if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
    return false;

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  if (STMT_VINFO_STRIDED_P (stmt_info))
    return false;

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype)
    return false;

  if (is_store)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
        return false;
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }

      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree masktype
        = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "masked gather with integer mask not supported.\n");
          return false;
        }
    }
  else if (tree_int_cst_compare (nested_in_vect_loop
                                 ? STMT_VINFO_DR_STEP (stmt_info)
                                 : DR_STEP (dr), size_zero_node) <= 0)
    return false;
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
           || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
                                          TYPE_MODE (mask_vectype),
                                          !is_store)
           || (rhs_vectype
               && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (is_store)
        vect_model_store_cost (stmt_info, ncopies, false, dt,
                               NULL, NULL, NULL);
      else
        vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
                           && types_compatible_p (srctype, masktype));

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
          for (i = 0; i < nunits; ++i)
            sel[i] = i | gather_off_nunits;
          mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
        }
      else
        gcc_unreachable ();

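      /* A worked instance of the lane bookkeeping above (lane counts are
         illustrative): with nunits == 2 and gather_off_nunits == 4 the
         modifier is WIDEN and sel == { 2, 3, 2, 3 }, reselecting the
         upper half of the index vector for every odd copy; with
         nunits == 4 and gather_off_nunits == 2 the modifier is NARROW,
         ncopies doubles, and sel == { 0, 1, 4, 5 } concatenates two
         half results into one full data vector.  */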
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      scale = build_int_cst (scaletype, gather_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          if (mask_perm_mask && (j & 1))
            mask_op = permute_vec_elements (mask_op, mask_op,
                                            mask_perm_mask, stmt, gsi);
          else
            {
              if (j == 0)
                vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              else
                {
                  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
                  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
                }

              mask_op = vec_mask;
              if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
                {
                  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
                              == TYPE_VECTOR_SUBPARTS (masktype));
                  var = vect_get_new_ssa_name (masktype, vect_simple_var);
                  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
                  new_stmt
                    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  mask_op = var;
                }
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
                                 scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
         from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
        {
          stmt = STMT_VINFO_RELATED_STMT (stmt_info);
          stmt_info = vinfo_for_stmt (stmt);
        }
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
        {
          unsigned align, misalign;

          if (i == 0)
            {
              tree rhs = gimple_call_arg (stmt, 3);
              vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
              vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              /* We should have caught mismatched types earlier.  */
              gcc_assert (useless_type_conversion_p (vectype,
                                                     TREE_TYPE (vec_rhs)));
              dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
                                                      NULL_TREE, &dummy, gsi,
                                                      &ptr_incr, false, &inv_p);
              gcc_assert (!inv_p);
            }
          else
            {
              vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
              vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
              vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
              vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                             TYPE_SIZE_UNIT (vectype));
            }

          align = TYPE_ALIGN_UNIT (vectype);
          if (aligned_access_p (dr))
            misalign = 0;
          else if (DR_MISALIGNMENT (dr) == -1)
            {
              align = TYPE_ALIGN_UNIT (elem_type);
              misalign = 0;
            }
          else
            misalign = DR_MISALIGNMENT (dr);
          set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                  misalign);
          new_stmt
            = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
                                          gimple_call_arg (stmt, 1),
                                          vec_mask, vec_rhs);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (i == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
        {
          unsigned align, misalign;

          if (i == 0)
            {
              vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
                                                      NULL_TREE, &dummy, gsi,
                                                      &ptr_incr, false, &inv_p);
              gcc_assert (!inv_p);
            }
          else
            {
              vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
              vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                             TYPE_SIZE_UNIT (vectype));
            }

          align = TYPE_ALIGN_UNIT (vectype);
          if (aligned_access_p (dr))
            misalign = 0;
          else if (DR_MISALIGNMENT (dr) == -1)
            {
              align = TYPE_ALIGN_UNIT (elem_type);
              misalign = 0;
            }
          else
            misalign = DR_MISALIGNMENT (dr);
          set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                  misalign);
          new_stmt
            = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
                                          gimple_call_arg (stmt, 1),
                                          vec_mask);
          gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (i == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
         from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
        {
          stmt = STMT_VINFO_RELATED_STMT (stmt_info);
          stmt_info = vinfo_for_stmt (stmt);
        }
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
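
/* For orientation (an illustrative sketch, not tied to a particular
   target): if-conversion turns a guarded access such as

       for (i = 0; i < n; i++)
         if (c[i])
           a[i] = b[i];

   into IFN_MASK_LOAD/IFN_MASK_STORE internal calls, which the routine
   above then widens into one masked vector load and store per copy,
   with the vectorized comparison result supplying the mask.  */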


/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?  */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
          || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
                                         slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument types differ.\n");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument vector types differ.\n");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

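  /* For example (lane counts illustrative): a call converting double to
     int with vectype_in V2DF (2 lanes) and vectype_out V4SI (4 lanes)
     has nunits_in == nunits_out / 2, i.e. NARROW: the NARROW code below
     passes two input vectors to every vectorized call so that each call
     can fill a whole output vector.  WIDEN is the mirror case and is
     rejected later, as no current target implements it.  */
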
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
          && !slp_node
          && loop_vinfo
          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
        {
          /* We can handle IFN_GOMP_SIMD_LANE by returning a
             { 0, 1, 2, ... vf - 1 } vector.  */
          gcc_assert (nargs == 0);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "function is not vectorizable.\n");
          return false;
        }
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
                         "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs.create (nargs);
          else
            vargs.truncate (0);

          if (slp_node)
            {
              auto_vec<vec<tree> > vec_defs (nargs);
              vec<tree> vec_oprnds0;

              for (i = 0; i < nargs; i++)
                vargs.quick_push (gimple_call_arg (stmt, i));
              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
              vec_oprnds0 = vec_defs[0];

              /* Arguments are ready.  Create the new vector stmt.  */
              FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
                {
                  size_t k;
                  for (k = 0; k < nargs; k++)
                    {
                      vec<tree> vec_oprndsk = vec_defs[k];
                      vargs[k] = vec_oprndsk[i];
                    }
                  new_stmt = gimple_build_call_vec (fndecl, vargs);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
                }

              for (i = 0; i < nargs; i++)
                {
                  vec<tree> vec_oprndsi = vec_defs[i];
                  vec_oprndsi.release ();
                }
              continue;
            }

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              vargs.quick_push (vec_oprnd0);
            }

          if (gimple_call_internal_p (stmt)
              && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
            {
              tree *v = XALLOCAVEC (tree, nunits_out);
              int k;
              for (k = 0; k < nunits_out; ++k)
                v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
              tree cst = build_vector (vectype_out, v);
              tree new_var
                = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
              gimple *init_stmt = gimple_build_assign (new_var, cst);
              vect_init_vector_1 (stmt, init_stmt, NULL);
              new_temp = make_ssa_name (vec_dest);
              new_stmt = gimple_build_assign (new_temp, new_var);
            }
          else
            {
              new_stmt = gimple_build_call_vec (fndecl, vargs);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
            }
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs.create (nargs * 2);
          else
            vargs.truncate (0);

          if (slp_node)
            {
              auto_vec<vec<tree> > vec_defs (nargs);
              vec<tree> vec_oprnds0;

              for (i = 0; i < nargs; i++)
                vargs.quick_push (gimple_call_arg (stmt, i));
              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
              vec_oprnds0 = vec_defs[0];

              /* Arguments are ready.  Create the new vector stmt.  */
              for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
                {
                  size_t k;
                  vargs.truncate (0);
                  for (k = 0; k < nargs; k++)
                    {
                      vec<tree> vec_oprndsk = vec_defs[k];
                      vargs.quick_push (vec_oprndsk[i]);
                      vargs.quick_push (vec_oprndsk[i + 1]);
                    }
                  new_stmt = gimple_build_call_vec (fndecl, vargs);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
                }

              for (i = 0; i < nargs; i++)
                {
                  vec<tree> vec_oprndsi = vec_defs[i];
                  vec_oprndsi.release ();
                }
              continue;
            }

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              vargs.quick_push (vec_oprnd0);
              vargs.quick_push (vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
         with vf - 1 rather than 0, that is the last iteration of the
         vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          basic_block use_bb = gimple_bb (use_stmt);
          if (use_bb
              && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
            {
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
                                               ncopies * nunits_out - 1));
              update_stmt (use_stmt);
            }
        }
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
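
/* End-to-end sketch for the routine above (types and target are
   illustrative): a loop statement

       y[i] = sqrtf (x[i]);

   maps to CFN_SQRT; if vectorizable_function returns a V4SF square-root
   builtin, the NONE path emits one vector call per copy, and the scalar
   call is finally replaced by a harmless "lhs = 0.0" assignment so a
   later DCE pass can delete it.  */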


struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
                       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
        switch (gimple_assign_rhs_code (def_stmt))
          {
          case PLUS_EXPR:
            t = gimple_assign_rhs2 (def_stmt);
            if (linear_step || TREE_CODE (t) != INTEGER_CST)
              return;
            base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
            v = gimple_assign_rhs1 (def_stmt);
            continue;
          case MULT_EXPR:
            t = gimple_assign_rhs2 (def_stmt);
            if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
              return;
            linear_step = tree_to_shwi (t);
            v = gimple_assign_rhs1 (def_stmt);
            continue;
          CASE_CONVERT:
            t = gimple_assign_rhs1 (def_stmt);
            if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
                || (TYPE_PRECISION (TREE_TYPE (v))
                    < TYPE_PRECISION (TREE_TYPE (t))))
              return;
            if (!linear_step)
              linear_step = 1;
            v = t;
            continue;
          default:
            return;
          }
      else if (is_gimple_call (def_stmt)
               && gimple_call_internal_p (def_stmt)
               && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
               && loop->simduid
               && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
               && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
                   == loop->simduid))
        {
          if (!linear_step)
            linear_step = 1;
          arginfo->linear_step = linear_step;
          arginfo->op = base;
          arginfo->simd_lane_linear = true;
          return;
        }
    }
}
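
/* Example of the pattern recognized above (a sketch; SSA names made up):

       _1 = GOMP_SIMD_LANE (simduid.0);
       _2 = (sizetype) _1;
       _3 = _2 * 4;
       p_4 = &array + _3;

   Walking p_4's definition chain ends at the IFN_GOMP_SIMD_LANE call,
   so ARGINFO records op == &array, linear_step == 4 and
   simd_lane_linear == true.  */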

/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                              gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
                               &thisarginfo.vectype)
          || thisarginfo.dt == vect_uninitialized_def)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          arginfo.release ();
          return false;
        }

      if (thisarginfo.dt == vect_constant_def
          || thisarginfo.dt == vect_external_def)
        gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
        gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
         the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
          && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
        {
          gcc_assert (vec_stmt);
          thisarginfo.linear_step
            = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
          thisarginfo.op
            = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
          thisarginfo.simd_lane_linear
            = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
               == boolean_true_node);
          /* If loop has been peeled for alignment, we need to adjust it.  */
          tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
          tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
          if (n1 != n2 && !thisarginfo.simd_lane_linear)
            {
              tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
              tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
              tree opt = TREE_TYPE (thisarginfo.op);
              bias = fold_convert (TREE_TYPE (step), bias);
              bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
              thisarginfo.op
                = fold_build2 (POINTER_TYPE_P (opt)
                               ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
                               thisarginfo.op, bias);
            }
        }
      else if (!vec_stmt
               && thisarginfo.dt != vect_constant_def
               && thisarginfo.dt != vect_external_def
               && loop_vinfo
               && TREE_CODE (op) == SSA_NAME
               && simple_iv (loop, loop_containing_stmt (stmt), op,
                             &iv, false)
               && tree_fits_shwi_p (iv.step))
        {
          thisarginfo.linear_step = tree_to_shwi (iv.step);
          thisarginfo.op = iv.base;
        }
      else if ((thisarginfo.dt == vect_constant_def
                || thisarginfo.dt == vect_external_def)
               && POINTER_TYPE_P (TREE_TYPE (op)))
        thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
         linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
          && !thisarginfo.linear_step
          && !vec_stmt
          && thisarginfo.dt != vect_constant_def
          && thisarginfo.dt != vect_external_def
          && loop_vinfo
          && !slp_node
          && TREE_CODE (op) == SSA_NAME)
        vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
         n = n->simdclone->next_clone)
      {
        unsigned int this_badness = 0;
        if (n->simdclone->simdlen
            > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
            || n->simdclone->nargs != nargs)
          continue;
        if (n->simdclone->simdlen
            < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
          this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
                           - exact_log2 (n->simdclone->simdlen)) * 1024;
        if (n->simdclone->inbranch)
          this_badness += 2048;
        int target_badness = targetm.simd_clone.usable (n);
        if (target_badness < 0)
          continue;
        this_badness += target_badness * 512;
        /* FORNOW: Have to add code to add the mask argument.  */
        if (n->simdclone->inbranch)
          continue;
        for (i = 0; i < nargs; i++)
          {
            switch (n->simdclone->args[i].arg_type)
              {
              case SIMD_CLONE_ARG_TYPE_VECTOR:
                if (!useless_type_conversion_p
                        (n->simdclone->args[i].orig_type,
                         TREE_TYPE (gimple_call_arg (stmt, i))))
                  i = -1;
                else if (arginfo[i].dt == vect_constant_def
                         || arginfo[i].dt == vect_external_def
                         || arginfo[i].linear_step)
                  this_badness += 64;
                break;
              case SIMD_CLONE_ARG_TYPE_UNIFORM:
                if (arginfo[i].dt != vect_constant_def
                    && arginfo[i].dt != vect_external_def)
                  i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
                if (arginfo[i].dt == vect_constant_def
                    || arginfo[i].dt == vect_external_def
                    || (arginfo[i].linear_step
                        != n->simdclone->args[i].linear_step))
                  i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
              case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
                /* FORNOW */
                i = -1;
                break;
              case SIMD_CLONE_ARG_TYPE_MASK:
                gcc_unreachable ();
              }
            if (i == (size_t) -1)
              break;
            if (n->simdclone->args[i].alignment > arginfo[i].align)
              {
                i = -1;
                break;
              }
            if (arginfo[i].align)
              this_badness += (exact_log2 (arginfo[i].align)
                               - exact_log2 (n->simdclone->args[i].alignment));
          }
        if (i == (size_t) -1)
          continue;
        if (bestn == NULL || this_badness < badness)
          {
            bestn = n;
            badness = this_badness;
          }
      }
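  /* A worked instance of the scoring above (numbers illustrative): with
     a vectorization factor of 8, a clone with simdlen == 4 starts at
     (exact_log2 (8) - exact_log2 (4)) * 1024 == 1024 badness, an
     inbranch clone pays a further 2048, and any target-reported badness
     is scaled by 512; the clone with the lowest total wins.  */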
2888
2889 if (bestn == NULL)
2890 {
2891 arginfo.release ();
2892 return false;
2893 }
2894
2895 for (i = 0; i < nargs; i++)
2896 if ((arginfo[i].dt == vect_constant_def
2897 || arginfo[i].dt == vect_external_def)
2898 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2899 {
2900 arginfo[i].vectype
2901 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2902 i)));
2903 if (arginfo[i].vectype == NULL
2904 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2905 > bestn->simdclone->simdlen))
2906 {
2907 arginfo.release ();
2908 return false;
2909 }
2910 }
2911
2912 fndecl = bestn->decl;
2913 nunits = bestn->simdclone->simdlen;
2914 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2915
2916 /* If the function isn't const, only allow it in simd loops where user
2917 has asserted that at least nunits consecutive iterations can be
2918 performed using SIMD instructions. */
2919 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2920 && gimple_vuse (stmt))
2921 {
2922 arginfo.release ();
2923 return false;
2924 }
2925
2926 /* Sanity check: make sure that at least one copy of the vectorized stmt
2927 needs to be generated. */
2928 gcc_assert (ncopies >= 1);
2929
2930 if (!vec_stmt) /* transformation not required. */
2931 {
6c9e85fb
JJ
2932 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2933 for (i = 0; i < nargs; i++)
2934 if (bestn->simdclone->args[i].arg_type
2935 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2936 {
17b658af 2937 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
6c9e85fb
JJ
2938 + 1);
2939 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2940 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2941 ? size_type_node : TREE_TYPE (arginfo[i].op);
2942 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2943 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
17b658af
JJ
2944 tree sll = arginfo[i].simd_lane_linear
2945 ? boolean_true_node : boolean_false_node;
2946 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
6c9e85fb 2947 }
0136f8f0
AH
2948 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2949 if (dump_enabled_p ())
2950 dump_printf_loc (MSG_NOTE, vect_location,
2951 "=== vectorizable_simd_clone_call ===\n");
2952/* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2953 arginfo.release ();
2954 return true;
2955 }
2956
2957 /** Transform. **/
2958
2959 if (dump_enabled_p ())
2960 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2961
2962 /* Handle def. */
2963 scalar_dest = gimple_call_lhs (stmt);
2964 vec_dest = NULL_TREE;
2965 rtype = NULL_TREE;
2966 ratype = NULL_TREE;
2967 if (scalar_dest)
2968 {
2969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2970 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2971 if (TREE_CODE (rtype) == ARRAY_TYPE)
2972 {
2973 ratype = rtype;
2974 rtype = TREE_TYPE (ratype);
2975 }
2976 }
2977
2978 prev_stmt_info = NULL;
2979 for (j = 0; j < ncopies; ++j)
2980 {
2981 /* Build argument list for the vectorized call. */
2982 if (j == 0)
2983 vargs.create (nargs);
2984 else
2985 vargs.truncate (0);
2986
2987 for (i = 0; i < nargs; i++)
2988 {
2989 unsigned int k, l, m, o;
2990 tree atype;
2991 op = gimple_call_arg (stmt, i);
2992 switch (bestn->simdclone->args[i].arg_type)
2993 {
2994 case SIMD_CLONE_ARG_TYPE_VECTOR:
2995 atype = bestn->simdclone->args[i].vector_type;
2996 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2997 for (m = j * o; m < (j + 1) * o; m++)
2998 {
2999 if (TYPE_VECTOR_SUBPARTS (atype)
3000 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3001 {
3002 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3003 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3004 / TYPE_VECTOR_SUBPARTS (atype));
3005 gcc_assert ((k & (k - 1)) == 0);
3006 if (m == 0)
3007 vec_oprnd0
81c40241 3008 = vect_get_vec_def_for_operand (op, stmt);
0136f8f0
AH
3009 else
3010 {
3011 vec_oprnd0 = arginfo[i].op;
3012 if ((m & (k - 1)) == 0)
3013 vec_oprnd0
3014 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3015 vec_oprnd0);
3016 }
3017 arginfo[i].op = vec_oprnd0;
3018 vec_oprnd0
3019 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3020 size_int (prec),
3021 bitsize_int ((m & (k - 1)) * prec));
3022 new_stmt
b731b390 3023 = gimple_build_assign (make_ssa_name (atype),
0136f8f0
AH
3024 vec_oprnd0);
3025 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3026 vargs.safe_push (gimple_assign_lhs (new_stmt));
3027 }
3028 else
3029 {
3030 k = (TYPE_VECTOR_SUBPARTS (atype)
3031 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3032 gcc_assert ((k & (k - 1)) == 0);
3033 vec<constructor_elt, va_gc> *ctor_elts;
3034 if (k != 1)
3035 vec_alloc (ctor_elts, k);
3036 else
3037 ctor_elts = NULL;
3038 for (l = 0; l < k; l++)
3039 {
3040 if (m == 0 && l == 0)
3041 vec_oprnd0
81c40241 3042 = vect_get_vec_def_for_operand (op, stmt);
0136f8f0
AH
3043 else
3044 vec_oprnd0
3045 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3046 arginfo[i].op);
3047 arginfo[i].op = vec_oprnd0;
3048 if (k == 1)
3049 break;
3050 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3051 vec_oprnd0);
3052 }
3053 if (k == 1)
3054 vargs.safe_push (vec_oprnd0);
3055 else
3056 {
3057 vec_oprnd0 = build_constructor (atype, ctor_elts);
3058 new_stmt
b731b390 3059 = gimple_build_assign (make_ssa_name (atype),
0136f8f0
AH
3060 vec_oprnd0);
3061 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3062 vargs.safe_push (gimple_assign_lhs (new_stmt));
3063 }
3064 }
3065 }
3066 break;
3067 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3068 vargs.safe_push (op);
3069 break;
3070 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3071 if (j == 0)
3072 {
3073 gimple_seq stmts;
3074 arginfo[i].op
3075 = force_gimple_operand (arginfo[i].op, &stmts, true,
3076 NULL_TREE);
3077 if (stmts != NULL)
3078 {
3079 basic_block new_bb;
3080 edge pe = loop_preheader_edge (loop);
3081 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3082 gcc_assert (!new_bb);
3083 }
17b658af
JJ
3084 if (arginfo[i].simd_lane_linear)
3085 {
3086 vargs.safe_push (arginfo[i].op);
3087 break;
3088 }
b731b390 3089 tree phi_res = copy_ssa_name (op);
538dd0b7 3090 gphi *new_phi = create_phi_node (phi_res, loop->header);
0136f8f0 3091 set_vinfo_for_stmt (new_phi,
310213d4 3092 new_stmt_vec_info (new_phi, loop_vinfo));
0136f8f0
AH
3093 add_phi_arg (new_phi, arginfo[i].op,
3094 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3095 enum tree_code code
3096 = POINTER_TYPE_P (TREE_TYPE (op))
3097 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3098 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3099 ? sizetype : TREE_TYPE (op);
807e902e
KZ
3100 widest_int cst
3101 = wi::mul (bestn->simdclone->args[i].linear_step,
3102 ncopies * nunits);
3103 tree tcst = wide_int_to_tree (type, cst);
b731b390 3104 tree phi_arg = copy_ssa_name (op);
0d0e4a03
JJ
3105 new_stmt
3106 = gimple_build_assign (phi_arg, code, phi_res, tcst);
0136f8f0
AH
3107 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3108 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3109 set_vinfo_for_stmt (new_stmt,
310213d4 3110 new_stmt_vec_info (new_stmt, loop_vinfo));
0136f8f0
AH
3111 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3112 UNKNOWN_LOCATION);
3113 arginfo[i].op = phi_res;
3114 vargs.safe_push (phi_res);
3115 }
3116 else
3117 {
3118 enum tree_code code
3119 = POINTER_TYPE_P (TREE_TYPE (op))
3120 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3121 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3122 ? sizetype : TREE_TYPE (op);
807e902e
KZ
3123 widest_int cst
3124 = wi::mul (bestn->simdclone->args[i].linear_step,
3125 j * nunits);
3126 tree tcst = wide_int_to_tree (type, cst);
b731b390 3127 new_temp = make_ssa_name (TREE_TYPE (op));
0d0e4a03
JJ
3128 new_stmt = gimple_build_assign (new_temp, code,
3129 arginfo[i].op, tcst);
0136f8f0
AH
3130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3131 vargs.safe_push (new_temp);
3132 }
3133 break;
3134 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
e01d41e5
JJ
3135 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3136 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3137 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
0136f8f0
AH
3138 default:
3139 gcc_unreachable ();
3140 }
3141 }
3142
3143 new_stmt = gimple_build_call_vec (fndecl, vargs);
3144 if (vec_dest)
3145 {
3146 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3147 if (ratype)
b731b390 3148 new_temp = create_tmp_var (ratype);
0136f8f0
AH
3149 else if (TYPE_VECTOR_SUBPARTS (vectype)
3150 == TYPE_VECTOR_SUBPARTS (rtype))
3151 new_temp = make_ssa_name (vec_dest, new_stmt);
3152 else
3153 new_temp = make_ssa_name (rtype, new_stmt);
3154 gimple_call_set_lhs (new_stmt, new_temp);
3155 }
3156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3157
3158 if (vec_dest)
3159 {
3160 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3161 {
3162 unsigned int k, l;
3163 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3164 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3165 gcc_assert ((k & (k - 1)) == 0);
3166 for (l = 0; l < k; l++)
3167 {
3168 tree t;
3169 if (ratype)
3170 {
3171 t = build_fold_addr_expr (new_temp);
3172 t = build2 (MEM_REF, vectype, t,
3173 build_int_cst (TREE_TYPE (t),
3174 l * prec / BITS_PER_UNIT));
3175 }
3176 else
3177 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3178 size_int (prec), bitsize_int (l * prec));
3179 new_stmt
b731b390 3180 = gimple_build_assign (make_ssa_name (vectype), t);
0136f8f0
AH
3181 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3182 if (j == 0 && l == 0)
3183 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3184 else
3185 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3186
3187 prev_stmt_info = vinfo_for_stmt (new_stmt);
3188 }
3189
3190 if (ratype)
3191 {
3192 tree clobber = build_constructor (ratype, NULL);
3193 TREE_THIS_VOLATILE (clobber) = 1;
3194 new_stmt = gimple_build_assign (new_temp, clobber);
3195 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3196 }
3197 continue;
3198 }
3199 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3200 {
3201 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3202 / TYPE_VECTOR_SUBPARTS (rtype));
3203 gcc_assert ((k & (k - 1)) == 0);
3204 if ((j & (k - 1)) == 0)
3205 vec_alloc (ret_ctor_elts, k);
3206 if (ratype)
3207 {
3208 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3209 for (m = 0; m < o; m++)
3210 {
3211 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3212 size_int (m), NULL_TREE, NULL_TREE);
3213 new_stmt
b731b390 3214 = gimple_build_assign (make_ssa_name (rtype), tem);
0136f8f0
AH
3215 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3216 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3217 gimple_assign_lhs (new_stmt));
3218 }
3219 tree clobber = build_constructor (ratype, NULL);
3220 TREE_THIS_VOLATILE (clobber) = 1;
3221 new_stmt = gimple_build_assign (new_temp, clobber);
3222 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3223 }
3224 else
3225 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3226 if ((j & (k - 1)) != k - 1)
3227 continue;
3228 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3229 new_stmt
3230 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3231 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3232
3233 if ((unsigned) j == k - 1)
3234 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3235 else
3236 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3237
3238 prev_stmt_info = vinfo_for_stmt (new_stmt);
3239 continue;
3240 }
3241 else if (ratype)
3242 {
3243 tree t = build_fold_addr_expr (new_temp);
3244 t = build2 (MEM_REF, vectype, t,
3245 build_int_cst (TREE_TYPE (t), 0));
3246 new_stmt
3247 = gimple_build_assign (make_ssa_name (vec_dest), t);
3248 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3249 tree clobber = build_constructor (ratype, NULL);
3250 TREE_THIS_VOLATILE (clobber) = 1;
3251 vect_finish_stmt_generation (stmt,
3252 gimple_build_assign (new_temp,
3253 clobber), gsi);
3254 }
3255 }
3256
3257 if (j == 0)
3258 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3259 else
3260 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3261
3262 prev_stmt_info = vinfo_for_stmt (new_stmt);
3263 }
3264
3265 vargs.release ();
3266
3267 /* The call in STMT might prevent it from being removed in dce.
3268 We however cannot remove it here, due to the way the ssa name
3269 it defines is mapped to the new definition. So just replace
3270 rhs of the statement with something harmless. */
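 /* Illustrative note, not in the original source: e.g. a scalar call
    "x = foo (y);" whose result uses were all vectorized is rewritten
    below as "x = 0;" (or as a GIMPLE nop when there is no lhs), and
    dce can then delete it.  */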
3271
3272 if (slp_node)
3273 return true;
3274
3275 if (scalar_dest)
3276 {
3277 type = TREE_TYPE (scalar_dest);
3278 if (is_pattern_stmt_p (stmt_info))
3279 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3280 else
3281 lhs = gimple_call_lhs (stmt);
3282 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3283 }
3284 else
3285 new_stmt = gimple_build_nop ();
3286 set_vinfo_for_stmt (new_stmt, stmt_info);
3287 set_vinfo_for_stmt (stmt, NULL);
3288 STMT_VINFO_STMT (stmt_info) = new_stmt;
3289 gsi_replace (gsi, new_stmt, true);
3290 unlink_stmt_vdef (stmt);
3291
3292 return true;
3293}
3294
3295
3296/* Function vect_gen_widened_results_half
3297
3298 Create a vector stmt whose code, number of arguments, and result
3299 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3300 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3301 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3302 needs to be created (DECL is a function-decl of a target-builtin).
3303 STMT is the original scalar stmt that we are vectorizing. */
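/* Illustrative example, not part of the original source: a widening
   multiply of V8HI operands is emitted as a lo/hi pair such as
   VEC_WIDEN_MULT_LO_EXPR and VEC_WIDEN_MULT_HI_EXPR; this helper is
   called once per half, each call producing one V4SI result.  */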
3304
3305static gimple *
3306vect_gen_widened_results_half (enum tree_code code,
3307 tree decl,
3308 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3309 tree vec_dest, gimple_stmt_iterator *gsi,
3310 gimple *stmt)
3311{
3312 gimple *new_stmt;
3313 tree new_temp;
3314
3315 /* Generate half of the widened result: */
3316 if (code == CALL_EXPR)
3317 {
3318 /* Target specific support */
3319 if (op_type == binary_op)
3320 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3321 else
3322 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3323 new_temp = make_ssa_name (vec_dest, new_stmt);
3324 gimple_call_set_lhs (new_stmt, new_temp);
3325 }
3326 else
3327 {
3328 /* Generic support */
3329 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3330 if (op_type != binary_op)
3331 vec_oprnd1 = NULL;
3332 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3333 new_temp = make_ssa_name (vec_dest, new_stmt);
3334 gimple_assign_set_lhs (new_stmt, new_temp);
3335 }
3336 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3337
3338 return new_stmt;
3339}
3340
3341
3342/* Get vectorized definitions for loop-based vectorization. For the first
3343 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3344 scalar operand), and for the rest we get a copy with
3345 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3346 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3347 The vectors are collected into VEC_OPRNDS. */
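/* Illustrative example, not part of the original source: with
   MULTI_STEP_CVT == 1 the recursion collects four vector defs in
   VEC_OPRNDS -- the def for the scalar operand plus three stmt
   copies -- enough input for a two-step narrowing.  */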
3348
3349static void
3350vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3351 vec<tree> *vec_oprnds, int multi_step_cvt)
3352{
3353 tree vec_oprnd;
3354
3355 /* Get first vector operand. */
3356 /* All the vector operands except the very first one (that is scalar oprnd)
3357 are stmt copies. */
3358 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3359 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3360 else
3361 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3362
3363 vec_oprnds->quick_push (vec_oprnd);
3364
3365 /* Get second vector operand. */
3366 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3367 vec_oprnds->quick_push (vec_oprnd);
3368
3369 *oprnd = vec_oprnd;
3370
3371 /* For conversion in multiple steps, continue to get operands
3372 recursively. */
3373 if (multi_step_cvt)
3374 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3375}
3376
3377
3378/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3379 For multi-step conversions store the resulting vectors and call the function
3380 recursively. */
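/* Illustrative example, not part of the original source: demoting
   four V4SI operands to V8HI pairs them up (e.g. with
   VEC_PACK_TRUNC_EXPR) into two V8HI vectors; one more recursive
   step could pack those into a single V16QI vector.  */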
3381
3382static void
3383vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3384 int multi_step_cvt, gimple *stmt,
3385 vec<tree> vec_dsts,
3386 gimple_stmt_iterator *gsi,
3387 slp_tree slp_node, enum tree_code code,
3388 stmt_vec_info *prev_stmt_info)
3389{
3390 unsigned int i;
3391 tree vop0, vop1, new_tmp, vec_dest;
3392 gimple *new_stmt;
3393 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3394
3395 vec_dest = vec_dsts.pop ();
3396
3397 for (i = 0; i < vec_oprnds->length (); i += 2)
3398 {
3399 /* Create demotion operation. */
3400 vop0 = (*vec_oprnds)[i];
3401 vop1 = (*vec_oprnds)[i + 1];
3402 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3403 new_tmp = make_ssa_name (vec_dest, new_stmt);
3404 gimple_assign_set_lhs (new_stmt, new_tmp);
3405 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3406
3407 if (multi_step_cvt)
3408 /* Store the resulting vector for next recursive call. */
3409 (*vec_oprnds)[i/2] = new_tmp;
3410 else
3411 {
3412 /* This is the last step of the conversion sequence. Store the
3413 vectors in SLP_NODE or in vector info of the scalar statement
3414 (or in STMT_VINFO_RELATED_STMT chain). */
3415 if (slp_node)
3416 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3417 else
3418 {
3419 if (!*prev_stmt_info)
3420 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3421 else
3422 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3423
3424 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3425 }
3426 }
3427 }
3428
3429 /* For multi-step demotion operations we first generate demotion operations
3430 from the source type to the intermediate types, and then combine the
3431 results (stored in VEC_OPRNDS) in demotion operation to the destination
3432 type. */
3433 if (multi_step_cvt)
3434 {
3435 /* At each level of recursion we have half of the operands we had at the
3436 previous level. */
3437 vec_oprnds->truncate ((i+1)/2);
3438 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3439 stmt, vec_dsts, gsi, slp_node,
3440 VEC_PACK_TRUNC_EXPR,
3441 prev_stmt_info);
3442 }
3443
3444 vec_dsts.quick_push (vec_dest);
3445}
3446
3447
3448/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3449 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3450 the resulting vectors and call the function recursively. */
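/* Illustrative example, not part of the original source: promoting
   two V8HI operands yields a lo half and a hi half for each, so
   VEC_OPRNDS0 ends up holding four V4SI vectors for the next step.  */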
3451
3452static void
3453vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3454 vec<tree> *vec_oprnds1,
3455 gimple *stmt, tree vec_dest,
3456 gimple_stmt_iterator *gsi,
3457 enum tree_code code1,
3458 enum tree_code code2, tree decl1,
3459 tree decl2, int op_type)
3460{
3461 int i;
3462 tree vop0, vop1, new_tmp1, new_tmp2;
3463 gimple *new_stmt1, *new_stmt2;
3464 vec<tree> vec_tmp = vNULL;
3465
3466 vec_tmp.create (vec_oprnds0->length () * 2);
3467 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3468 {
3469 if (op_type == binary_op)
3470 vop1 = (*vec_oprnds1)[i];
3471 else
3472 vop1 = NULL_TREE;
3473
3474 /* Generate the two halves of promotion operation. */
3475 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3476 op_type, vec_dest, gsi, stmt);
3477 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3478 op_type, vec_dest, gsi, stmt);
3479 if (is_gimple_call (new_stmt1))
3480 {
3481 new_tmp1 = gimple_call_lhs (new_stmt1);
3482 new_tmp2 = gimple_call_lhs (new_stmt2);
3483 }
3484 else
3485 {
3486 new_tmp1 = gimple_assign_lhs (new_stmt1);
3487 new_tmp2 = gimple_assign_lhs (new_stmt2);
3488 }
3489
3490 /* Store the results for the next step. */
3491 vec_tmp.quick_push (new_tmp1);
3492 vec_tmp.quick_push (new_tmp2);
3493 }
3494
3495 vec_oprnds0->release ();
3496 *vec_oprnds0 = vec_tmp;
3497}
3498
3499
3500/* Check if STMT performs a conversion operation that can be vectorized.
3501 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3502 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3503 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3504
3505static bool
3506vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3507 gimple **vec_stmt, slp_tree slp_node)
3508{
3509 tree vec_dest;
3510 tree scalar_dest;
3511 tree op0, op1 = NULL_TREE;
3512 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3513 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3514 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3515 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3516 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3517 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3518 tree new_temp;
3519 gimple *def_stmt;
3520 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3521 gimple *new_stmt = NULL;
3522 stmt_vec_info prev_stmt_info;
3523 int nunits_in;
3524 int nunits_out;
3525 tree vectype_out, vectype_in;
3526 int ncopies, i, j;
3527 tree lhs_type, rhs_type;
3528 enum { NARROW, NONE, WIDEN } modifier;
3529 vec<tree> vec_oprnds0 = vNULL;
3530 vec<tree> vec_oprnds1 = vNULL;
3531 tree vop0;
3532 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3533 vec_info *vinfo = stmt_info->vinfo;
3534 int multi_step_cvt = 0;
3535 vec<tree> vec_dsts = vNULL;
3536 vec<tree> interm_types = vNULL;
3537 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3538 int op_type;
3539 machine_mode rhs_mode;
3540 unsigned short fltsz;
3541
3542 /* Is STMT a vectorizable conversion? */
3543
3544 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3545 return false;
3546
3547 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3548 return false;
3549
3550 if (!is_gimple_assign (stmt))
3551 return false;
3552
3553 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3554 return false;
3555
3556 code = gimple_assign_rhs_code (stmt);
3557 if (!CONVERT_EXPR_CODE_P (code)
3558 && code != FIX_TRUNC_EXPR
3559 && code != FLOAT_EXPR
3560 && code != WIDEN_MULT_EXPR
3561 && code != WIDEN_LSHIFT_EXPR)
3562 return false;
3563
3564 op_type = TREE_CODE_LENGTH (code);
3565
3566 /* Check types of lhs and rhs. */
3567 scalar_dest = gimple_assign_lhs (stmt);
3568 lhs_type = TREE_TYPE (scalar_dest);
3569 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3570
3571 op0 = gimple_assign_rhs1 (stmt);
3572 rhs_type = TREE_TYPE (op0);
3573
3574 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3575 && !((INTEGRAL_TYPE_P (lhs_type)
3576 && INTEGRAL_TYPE_P (rhs_type))
3577 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3578 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3579 return false;
3580
3581 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
3582 && ((INTEGRAL_TYPE_P (lhs_type)
3583 && (TYPE_PRECISION (lhs_type)
3584 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3585 || (INTEGRAL_TYPE_P (rhs_type)
3586 && (TYPE_PRECISION (rhs_type)
3587 != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
3588 {
3589 if (dump_enabled_p ())
3590 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3591 "type conversion to/from bit-precision unsupported."
3592 "\n");
3593 return false;
3594 }
3595
3596 /* Check the operands of the operation. */
3597 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3598 {
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3601 "use not simple.\n");
3602 return false;
3603 }
3604 if (op_type == binary_op)
3605 {
3606 bool ok;
3607
3608 op1 = gimple_assign_rhs2 (stmt);
3609 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3610 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3611 OP1. */
3612 if (CONSTANT_CLASS_P (op0))
3613 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3614 else
3615 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3616
3617 if (!ok)
3618 {
3619 if (dump_enabled_p ())
3620 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3621 "use not simple.\n");
3622 return false;
3623 }
3624 }
3625
3626 /* If op0 is an external or constant defs use a vector type of
3627 the same size as the output vector type. */
3628 if (!vectype_in)
3629 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3630 if (vec_stmt)
3631 gcc_assert (vectype_in);
3632 if (!vectype_in)
3633 {
3634 if (dump_enabled_p ())
3635 {
3636 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3637 "no vectype for scalar type ");
3638 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3639 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3640 }
3641
3642 return false;
3643 }
3644
3645 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
3646 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
3647 {
3648 if (dump_enabled_p ())
3649 {
3650 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3651 "can't convert between boolean and non "
3652 "boolean vectors");
3653 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3654 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3655 }
3656
3657 return false;
3658 }
3659
3660 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3661 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3662 if (nunits_in < nunits_out)
3663 modifier = NARROW;
3664 else if (nunits_out == nunits_in)
3665 modifier = NONE;
3666 else
3667 modifier = WIDEN;
3668
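  /* Illustration, not in the original source: V4SI -> V4SF keeps
     nunits equal, hence NONE; V8HI -> V4SI has nunits_in 8 >
     nunits_out 4, hence WIDEN; the reverse direction gives NARROW.  */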
3669 /* Multiple types in SLP are handled by creating the appropriate number of
3670 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3671 case of SLP. */
3672 if (slp_node || PURE_SLP_STMT (stmt_info))
3673 ncopies = 1;
3674 else if (modifier == NARROW)
3675 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3676 else
3677 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3678
3679 /* Sanity check: make sure that at least one copy of the vectorized stmt
3680 needs to be generated. */
3681 gcc_assert (ncopies >= 1);
3682
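  /* Illustration, not in the original source: e.g. for a WIDEN
     conversion with VF == 8 and V4SI inputs, ncopies == 8 / 4 == 2
     copies of the vector stmt are generated.  */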
3683 /* Supportable by target? */
3684 switch (modifier)
3685 {
3686 case NONE:
3687 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3688 return false;
3689 if (supportable_convert_operation (code, vectype_out, vectype_in,
3690 &decl1, &code1))
3691 break;
3692 /* FALLTHRU */
3693 unsupported:
3694 if (dump_enabled_p ())
3695 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3696 "conversion not supported by target.\n");
3697 return false;
3698
3699 case WIDEN:
3700 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3701 &code1, &code2, &multi_step_cvt,
3702 &interm_types))
3703 {
3704 /* Binary widening operation can only be supported directly by the
3705 architecture. */
3706 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3707 break;
3708 }
3709
3710 if (code != FLOAT_EXPR
3711 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3712 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3713 goto unsupported;
3714
3715 rhs_mode = TYPE_MODE (rhs_type);
3716 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3717 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3718 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3719 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3720 {
3721 cvt_type
3722 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3723 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3724 if (cvt_type == NULL_TREE)
3725 goto unsupported;
3726
3727 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3728 {
3729 if (!supportable_convert_operation (code, vectype_out,
3730 cvt_type, &decl1, &codecvt1))
3731 goto unsupported;
3732 }
3733 else if (!supportable_widening_operation (code, stmt, vectype_out,
3734 cvt_type, &codecvt1,
3735 &codecvt2, &multi_step_cvt,
3736 &interm_types))
3737 continue;
3738 else
3739 gcc_assert (multi_step_cvt == 0);
3740
3741 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3742 vectype_in, &code1, &code2,
3743 &multi_step_cvt, &interm_types))
3744 break;
3745 }
3746
3747 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3748 goto unsupported;
3749
3750 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3751 codecvt2 = ERROR_MARK;
3752 else
3753 {
3754 multi_step_cvt++;
3755 interm_types.safe_push (cvt_type);
3756 cvt_type = NULL_TREE;
3757 }
3758 break;
3759
3760 case NARROW:
3761 gcc_assert (op_type == unary_op);
3762 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3763 &code1, &multi_step_cvt,
3764 &interm_types))
3765 break;
3766
3767 if (code != FIX_TRUNC_EXPR
3768 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3769 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3770 goto unsupported;
3771
3772 rhs_mode = TYPE_MODE (rhs_type);
3773 cvt_type
3774 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3775 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3776 if (cvt_type == NULL_TREE)
3777 goto unsupported;
3778 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3779 &decl1, &codecvt1))
3780 goto unsupported;
3781 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3782 &code1, &multi_step_cvt,
3783 &interm_types))
3784 break;
3785 goto unsupported;
3786
3787 default:
3788 gcc_unreachable ();
3789 }
3790
3791 if (!vec_stmt) /* transformation not required. */
3792 {
3793 if (dump_enabled_p ())
3794 dump_printf_loc (MSG_NOTE, vect_location,
3795 "=== vectorizable_conversion ===\n");
3796 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3797 {
3798 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3799 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3800 }
3801 else if (modifier == NARROW)
3802 {
3803 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3804 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3805 }
3806 else
3807 {
3808 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3809 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3810 }
3811 interm_types.release ();
3812 return true;
3813 }
3814
3815 /** Transform. **/
3816 if (dump_enabled_p ())
3817 dump_printf_loc (MSG_NOTE, vect_location,
3818 "transform conversion. ncopies = %d.\n", ncopies);
3819
3820 if (op_type == binary_op)
3821 {
3822 if (CONSTANT_CLASS_P (op0))
3823 op0 = fold_convert (TREE_TYPE (op1), op0);
3824 else if (CONSTANT_CLASS_P (op1))
3825 op1 = fold_convert (TREE_TYPE (op0), op1);
3826 }
3827
3828 /* In case of multi-step conversion, we first generate conversion operations
3829 to the intermediate types, and then from those types to the final one.
3830 We create vector destinations for the intermediate type (TYPES) received
3831 from supportable_*_operation, and store them in the correct order
3832 for future use in vect_create_vectorized_*_stmts (). */
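  /* Illustrative example, not part of the original source: a
     FLOAT_EXPR from short to double usually has no direct vector
     form, so the WIDEN analysis above picks an intermediate integer
     type, giving short -> int (widening) followed by int -> double
     (conversion), with one vector destination created per step.  */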
3833 vec_dsts.create (multi_step_cvt + 1);
3834 vec_dest = vect_create_destination_var (scalar_dest,
3835 (cvt_type && modifier == WIDEN)
3836 ? cvt_type : vectype_out);
3837 vec_dsts.quick_push (vec_dest);
3838
3839 if (multi_step_cvt)
3840 {
3841 for (i = interm_types.length () - 1;
3842 interm_types.iterate (i, &intermediate_type); i--)
3843 {
3844 vec_dest = vect_create_destination_var (scalar_dest,
3845 intermediate_type);
3846 vec_dsts.quick_push (vec_dest);
3847 }
3848 }
3849
3850 if (cvt_type)
3851 vec_dest = vect_create_destination_var (scalar_dest,
3852 modifier == WIDEN
3853 ? vectype_out : cvt_type);
3854
3855 if (!slp_node)
3856 {
3857 if (modifier == WIDEN)
3858 {
3859 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3860 if (op_type == binary_op)
3861 vec_oprnds1.create (1);
3862 }
3863 else if (modifier == NARROW)
3864 vec_oprnds0.create (
3865 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3866 }
3867 else if (code == WIDEN_LSHIFT_EXPR)
3868 vec_oprnds1.create (slp_node->vec_stmts_size);
3869
3870 last_oprnd = op0;
3871 prev_stmt_info = NULL;
3872 switch (modifier)
3873 {
3874 case NONE:
3875 for (j = 0; j < ncopies; j++)
3876 {
3877 if (j == 0)
3878 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3879 -1);
3880 else
3881 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3882
3883 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3884 {
3885 /* Arguments are ready, create the new vector stmt. */
3886 if (code1 == CALL_EXPR)
3887 {
3888 new_stmt = gimple_build_call (decl1, 1, vop0);
3889 new_temp = make_ssa_name (vec_dest, new_stmt);
3890 gimple_call_set_lhs (new_stmt, new_temp);
3891 }
3892 else
3893 {
3894 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3895 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3896 new_temp = make_ssa_name (vec_dest, new_stmt);
3897 gimple_assign_set_lhs (new_stmt, new_temp);
3898 }
3899
3900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3901 if (slp_node)
3902 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3903 else
3904 {
3905 if (!prev_stmt_info)
3906 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3907 else
3908 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3909 prev_stmt_info = vinfo_for_stmt (new_stmt);
3910 }
3911 }
3912 }
3913 break;
3914
3915 case WIDEN:
3916 /* In case the vectorization factor (VF) is bigger than the number
3917 of elements that we can fit in a vectype (nunits), we have to
3918 generate more than one vector stmt - i.e - we need to "unroll"
3919 the vector stmt by a factor VF/nunits. */
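      /* Illustration, not in the original source: with VF == 16 and
         V8HI operands, ncopies == 2, and each iteration below widens
         one V8HI input into two V4SI halves.  */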
3920 for (j = 0; j < ncopies; j++)
3921 {
3922 /* Handle uses. */
3923 if (j == 0)
3924 {
3925 if (slp_node)
3926 {
3927 if (code == WIDEN_LSHIFT_EXPR)
3928 {
3929 unsigned int k;
3930
3931 vec_oprnd1 = op1;
3932 /* Store vec_oprnd1 for every vector stmt to be created
3933 for SLP_NODE. We check during the analysis that all
3934 the shift arguments are the same. */
3935 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3936 vec_oprnds1.quick_push (vec_oprnd1);
3937
3938 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3939 slp_node, -1);
3940 }
3941 else
3942 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3943 &vec_oprnds1, slp_node, -1);
3944 }
3945 else
3946 {
3947 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3948 vec_oprnds0.quick_push (vec_oprnd0);
3949 if (op_type == binary_op)
3950 {
3951 if (code == WIDEN_LSHIFT_EXPR)
3952 vec_oprnd1 = op1;
3953 else
3954 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3955 vec_oprnds1.quick_push (vec_oprnd1);
3956 }
3957 }
3958 }
3959 else
3960 {
3961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3962 vec_oprnds0.truncate (0);
3963 vec_oprnds0.quick_push (vec_oprnd0);
3964 if (op_type == binary_op)
3965 {
3966 if (code == WIDEN_LSHIFT_EXPR)
3967 vec_oprnd1 = op1;
3968 else
3969 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3970 vec_oprnd1);
3971 vec_oprnds1.truncate (0);
3972 vec_oprnds1.quick_push (vec_oprnd1);
3973 }
3974 }
3975
3976 /* Arguments are ready. Create the new vector stmts. */
3977 for (i = multi_step_cvt; i >= 0; i--)
3978 {
3979 tree this_dest = vec_dsts[i];
3980 enum tree_code c1 = code1, c2 = code2;
3981 if (i == 0 && codecvt2 != ERROR_MARK)
3982 {
3983 c1 = codecvt1;
3984 c2 = codecvt2;
3985 }
3986 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3987 &vec_oprnds1,
3988 stmt, this_dest, gsi,
3989 c1, c2, decl1, decl2,
3990 op_type);
3991 }
3992
3993 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3994 {
3995 if (cvt_type)
3996 {
3997 if (codecvt1 == CALL_EXPR)
3998 {
3999 new_stmt = gimple_build_call (decl1, 1, vop0);
4000 new_temp = make_ssa_name (vec_dest, new_stmt);
4001 gimple_call_set_lhs (new_stmt, new_temp);
4002 }
4003 else
4004 {
4005 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4006 new_temp = make_ssa_name (vec_dest);
4007 new_stmt = gimple_build_assign (new_temp, codecvt1,
4008 vop0);
4009 }
4010
4011 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4012 }
4013 else
4014 new_stmt = SSA_NAME_DEF_STMT (vop0);
4015
4016 if (slp_node)
4017 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4018 else
4019 {
4020 if (!prev_stmt_info)
4021 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4022 else
4023 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4024 prev_stmt_info = vinfo_for_stmt (new_stmt);
4025 }
4026 }
4027 }
4028
4029 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4030 break;
4031
4032 case NARROW:
4033 /* In case the vectorization factor (VF) is bigger than the number
4034 of elements that we can fit in a vectype (nunits), we have to
4035 generate more than one vector stmt - i.e - we need to "unroll"
4036 the vector stmt by a factor VF/nunits. */
4037 for (j = 0; j < ncopies; j++)
4038 {
4039 /* Handle uses. */
4040 if (slp_node)
4041 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4042 slp_node, -1);
4043 else
4044 {
4045 vec_oprnds0.truncate (0);
4046 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4047 vect_pow2 (multi_step_cvt) - 1);
4048 }
4049
4050 /* Arguments are ready. Create the new vector stmts. */
4051 if (cvt_type)
4052 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4053 {
4054 if (codecvt1 == CALL_EXPR)
4055 {
4056 new_stmt = gimple_build_call (decl1, 1, vop0);
4057 new_temp = make_ssa_name (vec_dest, new_stmt);
4058 gimple_call_set_lhs (new_stmt, new_temp);
4059 }
4060 else
4061 {
4062 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4063 new_temp = make_ssa_name (vec_dest);
4064 new_stmt = gimple_build_assign (new_temp, codecvt1,
4065 vop0);
4066 }
4067
4068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4069 vec_oprnds0[i] = new_temp;
4070 }
4071
4072 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4073 stmt, vec_dsts, gsi,
4074 slp_node, code1,
4075 &prev_stmt_info);
4076 }
4077
4078 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4079 break;
4080 }
4081
4082 vec_oprnds0.release ();
4083 vec_oprnds1.release ();
4084 vec_dsts.release ();
4085 interm_types.release ();
4086
4087 return true;
4088}
4089
4090
4091/* Function vectorizable_assignment.
4092
4093 Check if STMT performs an assignment (copy) that can be vectorized.
4094 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4095 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4096 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
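/* Illustrative example, not part of the original source: a plain copy
   "b_1 = a_2;" or a same-size reinterpreting conversion is handled
   here as a vector copy, wrapping the operand in a VIEW_CONVERT_EXPR
   to the destination vector type when needed.  */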
4097
4098static bool
4099vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4100 gimple **vec_stmt, slp_tree slp_node)
4101{
4102 tree vec_dest;
4103 tree scalar_dest;
4104 tree op;
4105 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4106 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4107 tree new_temp;
4108 gimple *def_stmt;
4109 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4110 int ncopies;
4111 int i, j;
4112 vec<tree> vec_oprnds = vNULL;
4113 tree vop;
4114 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4115 vec_info *vinfo = stmt_info->vinfo;
4116 gimple *new_stmt = NULL;
4117 stmt_vec_info prev_stmt_info = NULL;
4118 enum tree_code code;
4119 tree vectype_in;
4120
4121 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4122 return false;
4123
4124 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4125 return false;
4126
4127 /* Is vectorizable assignment? */
4128 if (!is_gimple_assign (stmt))
4129 return false;
4130
4131 scalar_dest = gimple_assign_lhs (stmt);
4132 if (TREE_CODE (scalar_dest) != SSA_NAME)
4133 return false;
4134
4135 code = gimple_assign_rhs_code (stmt);
4136 if (gimple_assign_single_p (stmt)
4137 || code == PAREN_EXPR
4138 || CONVERT_EXPR_CODE_P (code))
4139 op = gimple_assign_rhs1 (stmt);
4140 else
4141 return false;
4142
4143 if (code == VIEW_CONVERT_EXPR)
4144 op = TREE_OPERAND (op, 0);
4145
4146 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4147 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4148
4149 /* Multiple types in SLP are handled by creating the appropriate number of
4150 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4151 case of SLP. */
4152 if (slp_node || PURE_SLP_STMT (stmt_info))
4153 ncopies = 1;
4154 else
4155 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4156
4157 gcc_assert (ncopies >= 1);
4158
4159 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4160 {
4161 if (dump_enabled_p ())
4162 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4163 "use not simple.\n");
4164 return false;
4165 }
4166
4167 /* We can handle NOP_EXPR conversions that do not change the number
4168 of elements or the vector size. */
4169 if ((CONVERT_EXPR_CODE_P (code)
4170 || code == VIEW_CONVERT_EXPR)
4171 && (!vectype_in
4172 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4173 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4174 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4175 return false;
4176
4177 /* We do not handle bit-precision changes. */
4178 if ((CONVERT_EXPR_CODE_P (code)
4179 || code == VIEW_CONVERT_EXPR)
4180 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4181 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4182 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4183 || ((TYPE_PRECISION (TREE_TYPE (op))
4184 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4185 /* But a conversion that does not change the bit-pattern is ok. */
4186 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4187 > TYPE_PRECISION (TREE_TYPE (op)))
4188 && TYPE_UNSIGNED (TREE_TYPE (op))))
4189 {
4190 if (dump_enabled_p ())
4191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4192 "type conversion to/from bit-precision "
4193 "unsupported.\n");
4194 return false;
4195 }
4196
4197 if (!vec_stmt) /* transformation not required. */
4198 {
4199 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4200 if (dump_enabled_p ())
4201 dump_printf_loc (MSG_NOTE, vect_location,
4202 "=== vectorizable_assignment ===\n");
4203 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4204 return true;
4205 }
4206
4207 /** Transform. **/
4208 if (dump_enabled_p ())
4209 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4210
4211 /* Handle def. */
4212 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4213
4214 /* Handle use. */
4215 for (j = 0; j < ncopies; j++)
4216 {
4217 /* Handle uses. */
4218 if (j == 0)
4219 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4220 else
4221 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4222
4223 /* Arguments are ready. Create the new vector stmt. */
4224 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4225 {
4226 if (CONVERT_EXPR_CODE_P (code)
4227 || code == VIEW_CONVERT_EXPR)
4228 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4229 new_stmt = gimple_build_assign (vec_dest, vop);
4230 new_temp = make_ssa_name (vec_dest, new_stmt);
4231 gimple_assign_set_lhs (new_stmt, new_temp);
4232 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4233 if (slp_node)
4234 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4235 }
4236
4237 if (slp_node)
4238 continue;
4239
4240 if (j == 0)
4241 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4242 else
4243 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4244
4245 prev_stmt_info = vinfo_for_stmt (new_stmt);
4246 }
4247
4248 vec_oprnds.release ();
4249 return true;
4250}
4251
4252
4253/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4254 either as shift by a scalar or by a vector. */
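/* Illustrative note, not in the original source: the form where every
   lane is shifted by the same scalar amount (optab_scalar) is tried
   first; if the target only implements an element-wise
   vector-by-vector shift (optab_vector), that form is accepted
   instead.  */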
4255
4256bool
4257vect_supportable_shift (enum tree_code code, tree scalar_type)
4258{
4259
4260 machine_mode vec_mode;
4261 optab optab;
4262 int icode;
4263 tree vectype;
4264
4265 vectype = get_vectype_for_scalar_type (scalar_type);
4266 if (!vectype)
4267 return false;
4268
4269 optab = optab_for_tree_code (code, vectype, optab_scalar);
4270 if (!optab
4271 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4272 {
4273 optab = optab_for_tree_code (code, vectype, optab_vector);
4274 if (!optab
4275 || (optab_handler (optab, TYPE_MODE (vectype))
4276 == CODE_FOR_nothing))
4277 return false;
4278 }
4279
4280 vec_mode = TYPE_MODE (vectype);
4281 icode = (int) optab_handler (optab, vec_mode);
4282 if (icode == CODE_FOR_nothing)
4283 return false;
4284
4285 return true;
4286}
4287
4288
4289/* Function vectorizable_shift.
4290
4291 Check if STMT performs a shift operation that can be vectorized.
4292 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4293 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4294 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4295
4296static bool
4297vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4298 gimple **vec_stmt, slp_tree slp_node)
4299{
4300 tree vec_dest;
4301 tree scalar_dest;
4302 tree op0, op1 = NULL;
4303 tree vec_oprnd1 = NULL_TREE;
4304 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4305 tree vectype;
4306 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4307 enum tree_code code;
4308 machine_mode vec_mode;
4309 tree new_temp;
4310 optab optab;
4311 int icode;
4312 machine_mode optab_op2_mode;
4313 gimple *def_stmt;
4314 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4315 gimple *new_stmt = NULL;
4316 stmt_vec_info prev_stmt_info;
4317 int nunits_in;
4318 int nunits_out;
4319 tree vectype_out;
4320 tree op1_vectype;
4321 int ncopies;
4322 int j, i;
4323 vec<tree> vec_oprnds0 = vNULL;
4324 vec<tree> vec_oprnds1 = vNULL;
4325 tree vop0, vop1;
4326 unsigned int k;
4327 bool scalar_shift_arg = true;
4328 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4329 vec_info *vinfo = stmt_info->vinfo;
4330 int vf;
4331
4332 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4333 return false;
4334
4335 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4336 return false;
4337
4338 /* Is STMT a vectorizable binary/unary operation? */
4339 if (!is_gimple_assign (stmt))
4340 return false;
4341
4342 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4343 return false;
4344
4345 code = gimple_assign_rhs_code (stmt);
4346
4347 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4348 || code == RROTATE_EXPR))
4349 return false;
4350
4351 scalar_dest = gimple_assign_lhs (stmt);
4352 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4353 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4354 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4355 {
4356 if (dump_enabled_p ())
4357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4358 "bit-precision shifts not supported.\n");
4359 return false;
4360 }
4361
4362 op0 = gimple_assign_rhs1 (stmt);
4363 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4364 {
4365 if (dump_enabled_p ())
4366 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4367 "use not simple.\n");
4368 return false;
4369 }
4370 /* If op0 is an external or constant def use a vector type with
4371 the same size as the output vector type. */
4372 if (!vectype)
4373 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4374 if (vec_stmt)
4375 gcc_assert (vectype);
4376 if (!vectype)
4377 {
4378 if (dump_enabled_p ())
4379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4380 "no vectype for scalar type\n");
4381 return false;
4382 }
4383
4384 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4385 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4386 if (nunits_out != nunits_in)
4387 return false;
4388
4389 op1 = gimple_assign_rhs2 (stmt);
4390 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4391 {
4392 if (dump_enabled_p ())
4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4394 "use not simple.\n");
4395 return false;
4396 }
4397
4398 if (loop_vinfo)
4399 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4400 else
4401 vf = 1;
4402
4403 /* Multiple types in SLP are handled by creating the appropriate number of
4404 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4405 case of SLP. */
4406 if (slp_node || PURE_SLP_STMT (stmt_info))
4407 ncopies = 1;
4408 else
4409 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4410
4411 gcc_assert (ncopies >= 1);
4412
4413 /* Determine whether the shift amount is a vector, or scalar. If the
4414 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4415
4416 if ((dt[1] == vect_internal_def
4417 || dt[1] == vect_induction_def)
4418 && !slp_node)
4419 scalar_shift_arg = false;
4420 else if (dt[1] == vect_constant_def
4421 || dt[1] == vect_external_def
4422 || dt[1] == vect_internal_def)
4423 {
4424 /* In SLP, we need to check whether the shift count is the same
4425 in all the SLP stmts; in loops, a constant or invariant count
4426 is always a scalar shift. */
4427 if (slp_node)
4428 {
4429 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4430 gimple *slpstmt;
4431
4432 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4433 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4434 scalar_shift_arg = false;
4435 }
4436 }
4437 else
4438 {
4439 if (dump_enabled_p ())
4440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4441 "operand mode requires invariant argument.\n");
4442 return false;
4443 }
4444
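  /* Illustration, not in the original source: "x[i] << 3" or
     "x[i] << n" with loop-invariant n are scalar-shift cases, while
     "x[i] << y[i]" makes operand 1 an internal def and hence a
     vector shift.  */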
4445 /* Vector shifted by vector. */
4446 if (!scalar_shift_arg)
4447 {
4448 optab = optab_for_tree_code (code, vectype, optab_vector);
4449 if (dump_enabled_p ())
4450 dump_printf_loc (MSG_NOTE, vect_location,
4451 "vector/vector shift/rotate found.\n");
4452
4453 if (!op1_vectype)
4454 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4455 if (op1_vectype == NULL_TREE
4456 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4457 {
4458 if (dump_enabled_p ())
4459 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4460 "unusable type for last operand in"
4461 " vector/vector shift/rotate.\n");
4462 return false;
4463 }
4464 }
4465 /* See if the machine has a vector shifted by scalar insn and if not
4466 then see if it has a vector shifted by vector insn. */
4467 else
4468 {
4469 optab = optab_for_tree_code (code, vectype, optab_scalar);
4470 if (optab
4471 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4472 {
4473 if (dump_enabled_p ())
4474 dump_printf_loc (MSG_NOTE, vect_location,
4475 "vector/scalar shift/rotate found.\n");
4476 }
4477 else
4478 {
4479 optab = optab_for_tree_code (code, vectype, optab_vector);
4480 if (optab
4481 && (optab_handler (optab, TYPE_MODE (vectype))
4482 != CODE_FOR_nothing))
4483 {
4484 scalar_shift_arg = false;
4485
4486 if (dump_enabled_p ())
4487 dump_printf_loc (MSG_NOTE, vect_location,
4488 "vector/vector shift/rotate found.\n");
4489
4490 /* Unlike the other binary operators, shifts/rotates have
4491 the rhs being int, instead of the same type as the lhs,
4492 so make sure the scalar is the right type if we are
4493 dealing with vectors of long long/long/short/char. */
4494 if (dt[1] == vect_constant_def)
4495 op1 = fold_convert (TREE_TYPE (vectype), op1);
4496 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4497 TREE_TYPE (op1)))
4498 {
4499 if (slp_node
4500 && TYPE_MODE (TREE_TYPE (vectype))
4501 != TYPE_MODE (TREE_TYPE (op1)))
4502 {
4503 if (dump_enabled_p ())
4504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4505 "unusable type for last operand in"
4506 " vector/vector shift/rotate.\n");
4507 return false;
4508 }
4509 if (vec_stmt && !slp_node)
4510 {
4511 op1 = fold_convert (TREE_TYPE (vectype), op1);
4512 op1 = vect_init_vector (stmt, op1,
4513 TREE_TYPE (vectype), NULL);
4514 }
4515 }
4516 }
4517 }
4518 }
4519
4520 /* Supportable by target? */
4521 if (!optab)
4522 {
4523 if (dump_enabled_p ())
4524 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4525 "no optab.\n");
4526 return false;
4527 }
4528 vec_mode = TYPE_MODE (vectype);
4529 icode = (int) optab_handler (optab, vec_mode);
4530 if (icode == CODE_FOR_nothing)
4531 {
4532 if (dump_enabled_p ())
4533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4534 "op not supported by target.\n");
4535 /* Check only during analysis. */
4536 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4537 || (vf < vect_min_worthwhile_factor (code)
4538 && !vec_stmt))
4539 return false;
4540 if (dump_enabled_p ())
4541 dump_printf_loc (MSG_NOTE, vect_location,
4542 "proceeding using word mode.\n");
4543 }
4544
4545 /* Worthwhile without SIMD support? Check only during analysis. */
4546 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4547 && vf < vect_min_worthwhile_factor (code)
4548 && !vec_stmt)
4549 {
4550 if (dump_enabled_p ())
4551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4552 "not worthwhile without SIMD support.\n");
4553 return false;
4554 }
4555
4556 if (!vec_stmt) /* transformation not required. */
4557 {
4558 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4559 if (dump_enabled_p ())
4560 dump_printf_loc (MSG_NOTE, vect_location,
4561 "=== vectorizable_shift ===\n");
4562 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4563 return true;
4564 }
4565
4566 /** Transform. **/
4567
4568 if (dump_enabled_p ())
4569 dump_printf_loc (MSG_NOTE, vect_location,
4570 "transform binary/unary operation.\n");
4571
4572 /* Handle def. */
4573 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4574
4575 prev_stmt_info = NULL;
4576 for (j = 0; j < ncopies; j++)
4577 {
4578 /* Handle uses. */
4579 if (j == 0)
4580 {
4581 if (scalar_shift_arg)
4582 {
4583 /* Vector shl and shr insn patterns can be defined with scalar
4584 operand 2 (shift operand). In this case, use constant or loop
4585 invariant op1 directly, without extending it to vector mode
4586 first. */
4587 optab_op2_mode = insn_data[icode].operand[2].mode;
4588 if (!VECTOR_MODE_P (optab_op2_mode))
4589 {
4590 if (dump_enabled_p ())
4591 dump_printf_loc (MSG_NOTE, vect_location,
4592 "operand 1 using scalar mode.\n");
4593 vec_oprnd1 = op1;
4594 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4595 vec_oprnds1.quick_push (vec_oprnd1);
4596 if (slp_node)
4597 {
4598 /* Store vec_oprnd1 for every vector stmt to be created
4599 for SLP_NODE. We check during the analysis that all
4600 the shift arguments are the same.
4601 TODO: Allow different constants for different vector
4602 stmts generated for an SLP instance. */
4603 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4604 vec_oprnds1.quick_push (vec_oprnd1);
4605 }
4606 }
4607 }
4608
4609 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4610 (a special case for certain kind of vector shifts); otherwise,
4611 operand 1 should be of a vector type (the usual case). */
4612 if (vec_oprnd1)
4613 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4614 slp_node, -1);
4615 else
4616 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4617 slp_node, -1);
4618 }
4619 else
4620 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4621
4622 /* Arguments are ready. Create the new vector stmt. */
4623 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4624 {
4625 vop1 = vec_oprnds1[i];
4626 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4627 new_temp = make_ssa_name (vec_dest, new_stmt);
4628 gimple_assign_set_lhs (new_stmt, new_temp);
4629 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4630 if (slp_node)
4631 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4632 }
4633
4634 if (slp_node)
4635 continue;
4636
4637 if (j == 0)
4638 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4639 else
4640 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4641 prev_stmt_info = vinfo_for_stmt (new_stmt);
4642 }
4643
4644 vec_oprnds0.release ();
4645 vec_oprnds1.release ();
4646
4647 return true;
4648}
4649
4650
4651/* Function vectorizable_operation.
4652
4653 Check if STMT performs a binary, unary or ternary operation that can
4654 be vectorized.
4655 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4656 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4657 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4658
4659static bool
4660vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4661 gimple **vec_stmt, slp_tree slp_node)
4662{
4663 tree vec_dest;
4664 tree scalar_dest;
4665 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4666 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4667 tree vectype;
4668 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4669 enum tree_code code;
4670 machine_mode vec_mode;
4671 tree new_temp;
4672 int op_type;
4673 optab optab;
4674 bool target_support_p;
4675 gimple *def_stmt;
4676 enum vect_def_type dt[3]
4677 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4678 gimple *new_stmt = NULL;
4679 stmt_vec_info prev_stmt_info;
4680 int nunits_in;
4681 int nunits_out;
4682 tree vectype_out;
4683 int ncopies;
4684 int j, i;
4685 vec<tree> vec_oprnds0 = vNULL;
4686 vec<tree> vec_oprnds1 = vNULL;
4687 vec<tree> vec_oprnds2 = vNULL;
4688 tree vop0, vop1, vop2;
4689 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4690 vec_info *vinfo = stmt_info->vinfo;
4691 int vf;
4692
4693 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4694 return false;
4695
4696 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4697 return false;
4698
4699 /* Is STMT a vectorizable binary/unary operation? */
4700 if (!is_gimple_assign (stmt))
4701 return false;
4702
4703 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4704 return false;
4705
4706 code = gimple_assign_rhs_code (stmt);
4707
4708 /* For pointer addition, we should use the normal plus for
4709 the vector addition. */
4710 if (code == POINTER_PLUS_EXPR)
4711 code = PLUS_EXPR;
4712
4713 /* Support only unary or binary operations. */
4714 op_type = TREE_CODE_LENGTH (code);
16949072 4715 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
ebfd146a 4716 {
73fbfcad 4717 if (dump_enabled_p ())
78c60e3d 4718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4719 "num. args = %d (not unary/binary/ternary op).\n",
78c60e3d 4720 op_type);
ebfd146a
IR
4721 return false;
4722 }
4723
4724 scalar_dest = gimple_assign_lhs (stmt);
4725 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4726
4727 /* Most operations cannot handle bit-precision types without extra
4728 truncations. */
4729 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4730 && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4731 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4732 /* Exception are bitwise binary operations. */
4733 && code != BIT_IOR_EXPR
4734 && code != BIT_XOR_EXPR
4735 && code != BIT_AND_EXPR)
4736 {
4737 if (dump_enabled_p ())
4738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4739 "bit-precision arithmetic not supported.\n");
4740 return false;
4741 }
4742
ebfd146a 4743 op0 = gimple_assign_rhs1 (stmt);
81c40241 4744 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
ebfd146a 4745 {
73fbfcad 4746 if (dump_enabled_p ())
78c60e3d 4747 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4748 "use not simple.\n");
ebfd146a
IR
4749 return false;
4750 }
4751 /* If op0 is an external or constant def use a vector type with
4752 the same size as the output vector type. */
4753 if (!vectype)
4754 {
4755 /* For boolean type we cannot determine vectype by
4756 invariant value (don't know whether it is a vector
4757 of booleans or vector of integers). We use output
4758 vectype because operations on boolean don't change
4759 type. */
4760 if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
4761 {
4762 if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
4763 {
4764 if (dump_enabled_p ())
4765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4766 "not supported operation on bool value.\n");
4767 return false;
4768 }
4769 vectype = vectype_out;
4770 }
4771 else
4772 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4773 }
4774 if (vec_stmt)
4775 gcc_assert (vectype);
4776 if (!vectype)
4777 {
4778 if (dump_enabled_p ())
4779 {
4780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4781 "no vectype for scalar type ");
4782 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4783 TREE_TYPE (op0));
4784 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4785 }
4786
4787 return false;
4788 }
4789
4790 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4791 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4792 if (nunits_out != nunits_in)
4793 return false;
4794
4795 if (op_type == binary_op || op_type == ternary_op)
4796 {
4797 op1 = gimple_assign_rhs2 (stmt);
4798 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4799 {
4800 if (dump_enabled_p ())
4801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4802 "use not simple.\n");
4803 return false;
4804 }
4805 }
4806 if (op_type == ternary_op)
4807 {
4808 op2 = gimple_assign_rhs3 (stmt);
4809 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4810 {
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4813 "use not simple.\n");
4814 return false;
4815 }
4816 }
4817
4818 if (loop_vinfo)
4819 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4820 else
4821 vf = 1;
4822
4823 /* Multiple types in SLP are handled by creating the appropriate number of
4824 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4825 case of SLP. */
4826 if (slp_node || PURE_SLP_STMT (stmt_info))
4827 ncopies = 1;
4828 else
4829 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4830
4831 gcc_assert (ncopies >= 1);
4832
4833 /* Shifts are handled in vectorizable_shift (). */
4834 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4835 || code == RROTATE_EXPR)
4836 return false;
4837
4838 /* Supportable by target? */
4839
4840 vec_mode = TYPE_MODE (vectype);
4841 if (code == MULT_HIGHPART_EXPR)
4842 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4843 else
4844 {
4845 optab = optab_for_tree_code (code, vectype, optab_default);
4846 if (!optab)
4847 {
4848 if (dump_enabled_p ())
4849 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4850 "no optab.\n");
4851 return false;
4852 }
4853 target_support_p = (optab_handler (optab, vec_mode)
4854 != CODE_FOR_nothing);
4855 }
4856
523ba738 4857 if (!target_support_p)
ebfd146a 4858 {
73fbfcad 4859 if (dump_enabled_p ())
78c60e3d 4860 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4861 "op not supported by target.\n");
ebfd146a
IR
4862 /* Check only during analysis. */
4863 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5deb57cb 4864 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
ebfd146a 4865 return false;
73fbfcad 4866 if (dump_enabled_p ())
e645e942
TJ
4867 dump_printf_loc (MSG_NOTE, vect_location,
4868 "proceeding using word mode.\n");
383d9c83
IR
4869 }
4870
4a00c761 4871 /* Worthwhile without SIMD support? Check only during analysis. */
5deb57cb
JJ
4872 if (!VECTOR_MODE_P (vec_mode)
4873 && !vec_stmt
4874 && vf < vect_min_worthwhile_factor (code))
7d8930a0 4875 {
73fbfcad 4876 if (dump_enabled_p ())
78c60e3d 4877 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4878 "not worthwhile without SIMD support.\n");
e34842c6 4879 return false;
7d8930a0 4880 }
ebfd146a 4881
ebfd146a
IR
4882 if (!vec_stmt) /* transformation not required. */
4883 {
4a00c761 4884 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
73fbfcad 4885 if (dump_enabled_p ())
78c60e3d 4886 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 4887 "=== vectorizable_operation ===\n");
c3e7ee41 4888 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
ebfd146a
IR
4889 return true;
4890 }
4891
4892 /** Transform. **/
4893
73fbfcad 4894 if (dump_enabled_p ())
78c60e3d 4895 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 4896 "transform binary/unary operation.\n");
383d9c83 4897
ebfd146a 4898 /* Handle def. */
00f07b86 4899 vec_dest = vect_create_destination_var (scalar_dest, vectype);
b8698a0f 4900
ebfd146a
IR
4901 /* In case the vectorization factor (VF) is bigger than the number
4902 of elements that we can fit in a vectype (nunits), we have to generate
4903 more than one vector stmt - i.e. - we need to "unroll" the
4a00c761
JJ
4904 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4905 from one copy of the vector stmt to the next, in the field
4906 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4907 stages to find the correct vector defs to be used when vectorizing
4908 stmts that use the defs of the current stmt. The example below
4909 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4910 we need to create 4 vectorized stmts):
4911
4912 before vectorization:
4913 RELATED_STMT VEC_STMT
4914 S1: x = memref - -
4915 S2: z = x + 1 - -
4916
4917 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4918 there):
4919 RELATED_STMT VEC_STMT
4920 VS1_0: vx0 = memref0 VS1_1 -
4921 VS1_1: vx1 = memref1 VS1_2 -
4922 VS1_2: vx2 = memref2 VS1_3 -
4923 VS1_3: vx3 = memref3 - -
4924 S1: x = load - VS1_0
4925 S2: z = x + 1 - -
4926
4927 step2: vectorize stmt S2 (done here):
4928 To vectorize stmt S2 we first need to find the relevant vector
4929 def for the first operand 'x'. This is, as usual, obtained from
4930 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4931 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4932 relevant vector def 'vx0'. Having found 'vx0' we can generate
4933 the vector stmt VS2_0, and as usual, record it in the
4934 STMT_VINFO_VEC_STMT of stmt S2.
4935 When creating the second copy (VS2_1), we obtain the relevant vector
4936 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4937 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4938 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4939 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4940 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4941 chain of stmts and pointers:
4942 RELATED_STMT VEC_STMT
4943 VS1_0: vx0 = memref0 VS1_1 -
4944 VS1_1: vx1 = memref1 VS1_2 -
4945 VS1_2: vx2 = memref2 VS1_3 -
4946 VS1_3: vx3 = memref3 - -
4947 S1: x = load - VS1_0
4948 VS2_0: vz0 = vx0 + v1 VS2_1 -
4949 VS2_1: vz1 = vx1 + v1 VS2_2 -
4950 VS2_2: vz2 = vx2 + v1 VS2_3 -
4951 VS2_3: vz3 = vx3 + v1 - -
4952 S2: z = x + 1 - VS2_0 */
ebfd146a
IR
4953
4954 prev_stmt_info = NULL;
4955 for (j = 0; j < ncopies; j++)
4956 {
4957 /* Handle uses. */
4958 if (j == 0)
4a00c761
JJ
4959 {
4960 if (op_type == binary_op || op_type == ternary_op)
4961 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4962 slp_node, -1);
4963 else
4964 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4965 slp_node, -1);
4966 if (op_type == ternary_op)
36ba4aae 4967 {
9771b263
DN
4968 vec_oprnds2.create (1);
4969 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
81c40241 4970 stmt));
36ba4aae 4971 }
4a00c761 4972 }
ebfd146a 4973 else
4a00c761
JJ
4974 {
4975 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4976 if (op_type == ternary_op)
4977 {
9771b263
DN
4978 tree vec_oprnd = vec_oprnds2.pop ();
4979 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4980 vec_oprnd));
4a00c761
JJ
4981 }
4982 }
4983
4984 /* Arguments are ready. Create the new vector stmt. */
9771b263 4985 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
ebfd146a 4986 {
4a00c761 4987 vop1 = ((op_type == binary_op || op_type == ternary_op)
9771b263 4988 ? vec_oprnds1[i] : NULL_TREE);
4a00c761 4989 vop2 = ((op_type == ternary_op)
9771b263 4990 ? vec_oprnds2[i] : NULL_TREE);
0d0e4a03 4991 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4a00c761
JJ
4992 new_temp = make_ssa_name (vec_dest, new_stmt);
4993 gimple_assign_set_lhs (new_stmt, new_temp);
4994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4995 if (slp_node)
9771b263 4996 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
ebfd146a
IR
4997 }
4998
4a00c761
JJ
4999 if (slp_node)
5000 continue;
5001
5002 if (j == 0)
5003 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5004 else
5005 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5006 prev_stmt_info = vinfo_for_stmt (new_stmt);
ebfd146a
IR
5007 }
5008
9771b263
DN
5009 vec_oprnds0.release ();
5010 vec_oprnds1.release ();
5011 vec_oprnds2.release ();
ebfd146a 5012
ebfd146a
IR
5013 return true;
5014}
5015
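
The unrolling scheme described in the comment inside vectorizable_operation above can be pictured with a small standalone sketch. This is an illustration only, not vectorizer code: it assumes the GNU C vector extension, VF = 16 and a V4SI vectype (so ncopies = VF/nunits = 4), 16-byte-aligned x and z, and a hypothetical function name; the statement names mirror VS1_j/VS2_j from that comment.

typedef int v4si __attribute__ ((vector_size (16)));

/* Illustrative sketch: the four copies (ncopies = 4) that the
   transform loop above would emit for the scalar stmt z = x + 1.  */
void
unroll_example (int *x, int *z)
{
  v4si v1 = { 1, 1, 1, 1 };
  for (int j = 0; j < 4; j++)
    {
      v4si vx = *(v4si *) (x + 4 * j);   /* VS1_j: vxj = memrefj  */
      *(v4si *) (z + 4 * j) = vx + v1;   /* VS2_j: vzj = vxj + v1 */
    }
}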
c716e67f
XDL
5016/* A helper function to ensure data reference DR's base alignment
5017 for STMT_INFO. */
5018
5019static void
5020ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5021{
5022 if (!dr->aux)
5023 return;
5024
52639a61 5025 if (DR_VECT_AUX (dr)->base_misaligned)
c716e67f
XDL
5026 {
5027 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
52639a61 5028 tree base_decl = DR_VECT_AUX (dr)->base_decl;
c716e67f 5029
428f0c67
JH
5030 if (decl_in_symtab_p (base_decl))
5031 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5032 else
5033 {
5034 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
5035 DECL_USER_ALIGN (base_decl) = 1;
5036 }
52639a61 5037 DR_VECT_AUX (dr)->base_misaligned = false;
c716e67f
XDL
5038 }
5039}
5040
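The effect of ensure_base_align on a misaligned base declaration corresponds, at the source level, to raising the declared alignment of the underlying object. A hypothetical before/after picture, assuming a 16-byte vector type; the array names are made up for illustration:

float a[1024];                                 /* before: ABI default alignment */
float b[1024] __attribute__ ((aligned (16)));  /* after: DECL_ALIGN raised, as if
                                                  the user had written this      */
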
ebfd146a 5041
09dfa495
BM
5042/* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
5043 reversal of the vector elements. If that is impossible to do,
5044 returns NULL. */
5045
5046static tree
5047perm_mask_for_reverse (tree vectype)
5048{
5049 int i, nunits;
5050 unsigned char *sel;
5051
5052 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5053 sel = XALLOCAVEC (unsigned char, nunits);
5054
5055 for (i = 0; i < nunits; ++i)
5056 sel[i] = nunits - 1 - i;
5057
557be5a8
AL
5058 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5059 return NULL_TREE;
5060 return vect_gen_perm_mask_checked (vectype, sel);
09dfa495
BM
5061}
5062
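A standalone computation of the selector built above, for illustration; with nunits == 4 it produces { 3, 2, 1, 0 }, i.e. the element order reversed. The program is self-contained and not part of the vectorizer.

#include <stdio.h>

int
main (void)
{
  int nunits = 4;                    /* e.g. a V4SI vectype */
  unsigned char sel[4];
  for (int i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;         /* same formula as above */
  for (int i = 0; i < nunits; ++i)
    printf ("%d ", sel[i]);          /* prints: 3 2 1 0 */
  printf ("\n");
  return 0;
}
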
ebfd146a
IR
5063/* Function vectorizable_store.
5064
b8698a0f
L
5065 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5066 can be vectorized.
5067 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
ebfd146a
IR
5068 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5069 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5070
5071static bool
355fe088 5072vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
c716e67f 5073 slp_tree slp_node)
ebfd146a
IR
5074{
5075 tree scalar_dest;
5076 tree data_ref;
5077 tree op;
5078 tree vec_oprnd = NULL_TREE;
5079 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5080 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
272c6793 5081 tree elem_type;
ebfd146a 5082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 5083 struct loop *loop = NULL;
ef4bddc2 5084 machine_mode vec_mode;
ebfd146a
IR
5085 tree dummy;
5086 enum dr_alignment_support alignment_support_scheme;
355fe088 5087 gimple *def_stmt;
ebfd146a
IR
5088 enum vect_def_type dt;
5089 stmt_vec_info prev_stmt_info = NULL;
5090 tree dataref_ptr = NULL_TREE;
74bf76ed 5091 tree dataref_offset = NULL_TREE;
355fe088 5092 gimple *ptr_incr = NULL;
ebfd146a
IR
5093 int ncopies;
5094 int j;
355fe088 5095 gimple *next_stmt, *first_stmt = NULL;
0d0293ac 5096 bool grouped_store = false;
272c6793 5097 bool store_lanes_p = false;
ebfd146a 5098 unsigned int group_size, i;
6e1aa848
DN
5099 vec<tree> dr_chain = vNULL;
5100 vec<tree> oprnds = vNULL;
5101 vec<tree> result_chain = vNULL;
ebfd146a 5102 bool inv_p;
09dfa495
BM
5103 bool negative = false;
5104 tree offset = NULL_TREE;
6e1aa848 5105 vec<tree> vec_oprnds = vNULL;
ebfd146a 5106 bool slp = (slp_node != NULL);
ebfd146a 5107 unsigned int vec_num;
a70d6342 5108 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 5109 vec_info *vinfo = stmt_info->vinfo;
272c6793 5110 tree aggr_type;
3bab6342
AT
5111 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5112 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5113 int scatter_scale = 1;
5114 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5115 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
355fe088 5116 gimple *new_stmt;
a70d6342 5117
a70d6342 5118 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
ebfd146a
IR
5119 return false;
5120
8644a673 5121 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
ebfd146a
IR
5122 return false;
5123
5124 /* Is vectorizable store? */
5125
5126 if (!is_gimple_assign (stmt))
5127 return false;
5128
5129 scalar_dest = gimple_assign_lhs (stmt);
ab0ef706
JJ
5130 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5131 && is_pattern_stmt_p (stmt_info))
5132 scalar_dest = TREE_OPERAND (scalar_dest, 0);
ebfd146a 5133 if (TREE_CODE (scalar_dest) != ARRAY_REF
38000232 5134 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
ebfd146a 5135 && TREE_CODE (scalar_dest) != INDIRECT_REF
e9dbe7bb
IR
5136 && TREE_CODE (scalar_dest) != COMPONENT_REF
5137 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
70f34814
RG
5138 && TREE_CODE (scalar_dest) != REALPART_EXPR
5139 && TREE_CODE (scalar_dest) != MEM_REF)
ebfd146a
IR
5140 return false;
5141
5142 gcc_assert (gimple_assign_single_p (stmt));
465c8c19
JJ
5143
5144 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5145 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5146
5147 if (loop_vinfo)
5148 loop = LOOP_VINFO_LOOP (loop_vinfo);
5149
5150 /* Multiple types in SLP are handled by creating the appropriate number of
5151 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5152 case of SLP. */
5153 if (slp || PURE_SLP_STMT (stmt_info))
5154 ncopies = 1;
5155 else
5156 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5157
5158 gcc_assert (ncopies >= 1);
5159
5160 /* FORNOW. This restriction should be relaxed. */
5161 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5162 {
5163 if (dump_enabled_p ())
5164 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5165 "multiple types in nested loop.\n");
5166 return false;
5167 }
5168
ebfd146a 5169 op = gimple_assign_rhs1 (stmt);
81c40241 5170 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
ebfd146a 5171 {
73fbfcad 5172 if (dump_enabled_p ())
78c60e3d 5173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5174 "use not simple.\n");
ebfd146a
IR
5175 return false;
5176 }
5177
272c6793 5178 elem_type = TREE_TYPE (vectype);
ebfd146a 5179 vec_mode = TYPE_MODE (vectype);
7b7b1813 5180
ebfd146a
IR
5181 /* FORNOW. In some cases can vectorize even if data-type not supported
5182 (e.g. - array initialization with 0). */
947131ba 5183 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
ebfd146a
IR
5184 return false;
5185
5186 if (!STMT_VINFO_DATA_REF (stmt_info))
5187 return false;
5188
f2e2a985 5189 if (!STMT_VINFO_STRIDED_P (stmt_info))
09dfa495 5190 {
f2e2a985
MM
5191 negative =
5192 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5193 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5194 size_zero_node) < 0;
5195 if (negative && ncopies > 1)
09dfa495
BM
5196 {
5197 if (dump_enabled_p ())
5198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
f2e2a985 5199 "multiple types with negative step.\n");
09dfa495
BM
5200 return false;
5201 }
f2e2a985 5202 if (negative)
09dfa495 5203 {
f2e2a985
MM
5204 gcc_assert (!grouped_store);
5205 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5206 if (alignment_support_scheme != dr_aligned
5207 && alignment_support_scheme != dr_unaligned_supported)
5208 {
5209 if (dump_enabled_p ())
5210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5211 "negative step but alignment required.\n");
5212 return false;
5213 }
5214 if (dt != vect_constant_def
5215 && dt != vect_external_def
5216 && !perm_mask_for_reverse (vectype))
5217 {
5218 if (dump_enabled_p ())
5219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5220 "negative step and reversing not supported.\n");
5221 return false;
5222 }
09dfa495
BM
5223 }
5224 }
5225
0d0293ac 5226 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
ebfd146a 5227 {
0d0293ac 5228 grouped_store = true;
e14c1050 5229 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
cee62fee
MM
5230 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5231 if (!slp
5232 && !PURE_SLP_STMT (stmt_info)
5233 && !STMT_VINFO_STRIDED_P (stmt_info))
b602d918 5234 {
272c6793
RS
5235 if (vect_store_lanes_supported (vectype, group_size))
5236 store_lanes_p = true;
0d0293ac 5237 else if (!vect_grouped_store_supported (vectype, group_size))
b602d918
RS
5238 return false;
5239 }
b8698a0f 5240
cee62fee
MM
5241 if (STMT_VINFO_STRIDED_P (stmt_info)
5242 && (slp || PURE_SLP_STMT (stmt_info))
5243 && (group_size > nunits
5244 || nunits % group_size != 0))
5245 {
5246 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5247 "unhandled strided group store\n");
5248 return false;
5249 }
5250
ebfd146a
IR
5251 if (first_stmt == stmt)
5252 {
5253 /* STMT is the leader of the group. Check the operands of all the
5254 stmts of the group. */
e14c1050 5255 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
ebfd146a
IR
5256 while (next_stmt)
5257 {
5258 gcc_assert (gimple_assign_single_p (next_stmt));
5259 op = gimple_assign_rhs1 (next_stmt);
81c40241 5260 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
ebfd146a 5261 {
73fbfcad 5262 if (dump_enabled_p ())
78c60e3d 5263 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5264 "use not simple.\n");
ebfd146a
IR
5265 return false;
5266 }
e14c1050 5267 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
ebfd146a
IR
5268 }
5269 }
5270 }
5271
3bab6342
AT
5272 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5273 {
355fe088 5274 gimple *def_stmt;
3bab6342
AT
5275 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5276 &scatter_off, &scatter_scale);
5277 gcc_assert (scatter_decl);
81c40241
RB
5278 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5279 &scatter_off_vectype))
3bab6342
AT
5280 {
5281 if (dump_enabled_p ())
5282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5283 "scatter index use not simple.");
5284 return false;
5285 }
5286 }
5287
ebfd146a
IR
5288 if (!vec_stmt) /* transformation not required. */
5289 {
5290 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
2e8ab70c
RB
5291 /* The SLP costs are calculated during SLP analysis. */
5292 if (!PURE_SLP_STMT (stmt_info))
5293 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5294 NULL, NULL, NULL);
ebfd146a
IR
5295 return true;
5296 }
5297
5298 /** Transform. **/
5299
c716e67f
XDL
5300 ensure_base_align (stmt_info, dr);
5301
3bab6342
AT
5302 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5303 {
5304 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5305 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5306 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5307 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5308 edge pe = loop_preheader_edge (loop);
5309 gimple_seq seq;
5310 basic_block new_bb;
5311 enum { NARROW, NONE, WIDEN } modifier;
5312 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5313
5314 if (nunits == (unsigned int) scatter_off_nunits)
5315 modifier = NONE;
5316 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5317 {
5318 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5319 modifier = WIDEN;
5320
5321 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5322 sel[i] = i | nunits;
5323
5324 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5325 gcc_assert (perm_mask != NULL_TREE);
5326 }
5327 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5328 {
5329 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5330 modifier = NARROW;
5331
5332 for (i = 0; i < (unsigned int) nunits; ++i)
5333 sel[i] = i | scatter_off_nunits;
5334
5335 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5336 gcc_assert (perm_mask != NULL_TREE);
5337 ncopies *= 2;
5338 }
5339 else
5340 gcc_unreachable ();
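      /* For example, with nunits == 4 and scatter_off_nunits == 8
	 (hypothetical types), the WIDEN branch above builds
	 sel = { 4, 5, 6, 7, 4, 5, 6, 7 }, so the odd-numbered copies
	 scatter through the upper half of the offset vector
	 (illustrative values only).  */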
5341
5342 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5343 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5344 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5345 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5346 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5347 scaletype = TREE_VALUE (arglist);
5348
5349 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5350 && TREE_CODE (rettype) == VOID_TYPE);
5351
5352 ptr = fold_convert (ptrtype, scatter_base);
5353 if (!is_gimple_min_invariant (ptr))
5354 {
5355 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5356 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5357 gcc_assert (!new_bb);
5358 }
5359
5360 /* Currently we support only unconditional scatter stores,
5361 so mask should be all ones. */
5362 mask = build_int_cst (masktype, -1);
5363 mask = vect_init_vector (stmt, mask, masktype, NULL);
5364
5365 scale = build_int_cst (scaletype, scatter_scale);
5366
5367 prev_stmt_info = NULL;
5368 for (j = 0; j < ncopies; ++j)
5369 {
5370 if (j == 0)
5371 {
5372 src = vec_oprnd1
81c40241 5373 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
3bab6342 5374 op = vec_oprnd0
81c40241 5375 = vect_get_vec_def_for_operand (scatter_off, stmt);
3bab6342
AT
5376 }
5377 else if (modifier != NONE && (j & 1))
5378 {
5379 if (modifier == WIDEN)
5380 {
5381 src = vec_oprnd1
5382 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5383 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5384 stmt, gsi);
5385 }
5386 else if (modifier == NARROW)
5387 {
5388 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5389 stmt, gsi);
5390 op = vec_oprnd0
5391 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5392 }
5393 else
5394 gcc_unreachable ();
5395 }
5396 else
5397 {
5398 src = vec_oprnd1
5399 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5400 op = vec_oprnd0
5401 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5402 }
5403
5404 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5405 {
5406 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5407 == TYPE_VECTOR_SUBPARTS (srctype));
0e22bb5a 5408 var = vect_get_new_ssa_name (srctype, vect_simple_var);
3bab6342
AT
5409 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5410 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5411 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5412 src = var;
5413 }
5414
5415 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5416 {
5417 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5418 == TYPE_VECTOR_SUBPARTS (idxtype));
0e22bb5a 5419 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
3bab6342
AT
5420 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5421 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5423 op = var;
5424 }
5425
5426 new_stmt
5427 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5428
5429 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5430
5431 if (prev_stmt_info == NULL)
5432 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5433 else
5434 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5435 prev_stmt_info = vinfo_for_stmt (new_stmt);
5436 }
5437 return true;
5438 }
5439
0d0293ac 5440 if (grouped_store)
ebfd146a
IR
5441 {
5442 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
e14c1050 5443 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
ebfd146a 5444
e14c1050 5445 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
ebfd146a
IR
5446
5447 /* FORNOW */
a70d6342 5448 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
ebfd146a
IR
5449
5450 /* We vectorize all the stmts of the interleaving group when we
5451 reach the last stmt in the group. */
e14c1050
IR
5452 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5453 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
ebfd146a
IR
5454 && !slp)
5455 {
5456 *vec_stmt = NULL;
5457 return true;
5458 }
5459
5460 if (slp)
4b5caab7 5461 {
0d0293ac 5462 grouped_store = false;
4b5caab7
IR
5463 /* VEC_NUM is the number of vect stmts to be created for this
5464 group. */
5465 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9771b263 5466 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
52eab378 5467 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
4b5caab7 5468 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
d092494c 5469 op = gimple_assign_rhs1 (first_stmt);
4b5caab7 5470 }
ebfd146a 5471 else
4b5caab7
IR
5472 /* VEC_NUM is the number of vect stmts to be created for this
5473 group. */
ebfd146a
IR
5474 vec_num = group_size;
5475 }
b8698a0f 5476 else
ebfd146a
IR
5477 {
5478 first_stmt = stmt;
5479 first_dr = dr;
5480 group_size = vec_num = 1;
ebfd146a 5481 }
b8698a0f 5482
73fbfcad 5483 if (dump_enabled_p ())
78c60e3d 5484 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 5485 "transform store. ncopies = %d\n", ncopies);
ebfd146a 5486
f2e2a985
MM
5487 if (STMT_VINFO_STRIDED_P (stmt_info))
5488 {
5489 gimple_stmt_iterator incr_gsi;
5490 bool insert_after;
355fe088 5491 gimple *incr;
f2e2a985
MM
5492 tree offvar;
5493 tree ivstep;
5494 tree running_off;
5495 gimple_seq stmts = NULL;
5496 tree stride_base, stride_step, alias_off;
5497 tree vec_oprnd;
f502d50e 5498 unsigned int g;
f2e2a985
MM
5499
5500 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5501
5502 stride_base
5503 = fold_build_pointer_plus
f502d50e 5504 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
f2e2a985 5505 size_binop (PLUS_EXPR,
f502d50e
MM
5506 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5507 convert_to_ptrofftype (DR_INIT (first_dr))));
5508 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
f2e2a985
MM
5509
5510 /* For a store with loop-invariant (but other than power-of-2)
5511 stride (i.e. not a grouped access) like so:
5512
5513 for (i = 0; i < n; i += stride)
5514 array[i] = ...;
5515
5516 we generate a new induction variable and new stores from
5517 the components of the (vectorized) rhs:
5518
5519 for (j = 0; ; j += VF*stride)
5520 vectemp = ...;
5521 tmp1 = vectemp[0];
5522 array[j] = tmp1;
5523 tmp2 = vectemp[1];
5524 array[j + stride] = tmp2;
5525 ...
5526 */
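      /* For example, with a V4SF rhs (nunits == nstores == 4) and
	 stride == 3, one copy of the generated sequence is,
	 illustratively:

	   tmp0 = vectemp[0];  array[j + 0] = tmp0;
	   tmp1 = vectemp[1];  array[j + 3] = tmp1;
	   tmp2 = vectemp[2];  array[j + 6] = tmp2;
	   tmp3 = vectemp[3];  array[j + 9] = tmp3;

	 with the IV advancing by VF * stride per vectorized
	 iteration.  */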
5527
cee62fee
MM
5528 unsigned nstores = nunits;
5529 tree ltype = elem_type;
5530 if (slp)
5531 {
5532 nstores = nunits / group_size;
5533 if (group_size < nunits)
5534 ltype = build_vector_type (elem_type, group_size);
5535 else
5536 ltype = vectype;
5537 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5538 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
f502d50e 5539 group_size = 1;
cee62fee
MM
5540 }
5541
f2e2a985
MM
5542 ivstep = stride_step;
5543 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5544 build_int_cst (TREE_TYPE (ivstep),
cee62fee 5545 ncopies * nstores));
f2e2a985
MM
5546
5547 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5548
5549 create_iv (stride_base, ivstep, NULL,
5550 loop, &incr_gsi, insert_after,
5551 &offvar, NULL);
5552 incr = gsi_stmt (incr_gsi);
310213d4 5553 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
f2e2a985
MM
5554
5555 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5556 if (stmts)
5557 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5558
5559 prev_stmt_info = NULL;
f502d50e
MM
5560 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5561 next_stmt = first_stmt;
5562 for (g = 0; g < group_size; g++)
f2e2a985 5563 {
f502d50e
MM
5564 running_off = offvar;
5565 if (g)
f2e2a985 5566 {
f502d50e
MM
5567 tree size = TYPE_SIZE_UNIT (ltype);
5568 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
f2e2a985 5569 size);
f502d50e 5570 tree newoff = copy_ssa_name (running_off, NULL);
f2e2a985 5571 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
f502d50e 5572 running_off, pos);
f2e2a985 5573 vect_finish_stmt_generation (stmt, incr, gsi);
f2e2a985 5574 running_off = newoff;
f502d50e
MM
5575 }
5576 for (j = 0; j < ncopies; j++)
5577 {
5578 /* We've set op and dt above, from gimple_assign_rhs1 (stmt),
5579 and first_stmt == stmt. */
5580 if (j == 0)
5581 {
5582 if (slp)
5583 {
5584 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5585 slp_node, -1);
5586 vec_oprnd = vec_oprnds[0];
5587 }
5588 else
5589 {
5590 gcc_assert (gimple_assign_single_p (next_stmt));
5591 op = gimple_assign_rhs1 (next_stmt);
81c40241 5592 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
f502d50e
MM
5593 }
5594 }
f2e2a985 5595 else
f502d50e
MM
5596 {
5597 if (slp)
5598 vec_oprnd = vec_oprnds[j];
5599 else
c079cbac 5600 {
81c40241 5601 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
c079cbac
RB
5602 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5603 }
f502d50e
MM
5604 }
5605
5606 for (i = 0; i < nstores; i++)
5607 {
5608 tree newref, newoff;
355fe088 5609 gimple *incr, *assign;
f502d50e
MM
5610 tree size = TYPE_SIZE (ltype);
5611 /* Extract the i'th component. */
5612 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5613 bitsize_int (i), size);
5614 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5615 size, pos);
5616
5617 elem = force_gimple_operand_gsi (gsi, elem, true,
5618 NULL_TREE, true,
5619 GSI_SAME_STMT);
5620
5621 newref = build2 (MEM_REF, ltype,
5622 running_off, alias_off);
5623
5624 /* And store it to *running_off. */
5625 assign = gimple_build_assign (newref, elem);
5626 vect_finish_stmt_generation (stmt, assign, gsi);
5627
5628 newoff = copy_ssa_name (running_off, NULL);
5629 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5630 running_off, stride_step);
5631 vect_finish_stmt_generation (stmt, incr, gsi);
5632
5633 running_off = newoff;
225ce44b
RB
5634 if (g == group_size - 1
5635 && !slp)
f502d50e
MM
5636 {
5637 if (j == 0 && i == 0)
225ce44b
RB
5638 STMT_VINFO_VEC_STMT (stmt_info)
5639 = *vec_stmt = assign;
f502d50e
MM
5640 else
5641 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5642 prev_stmt_info = vinfo_for_stmt (assign);
5643 }
5644 }
f2e2a985 5645 }
f502d50e 5646 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
f2e2a985
MM
5647 }
5648 return true;
5649 }
5650
9771b263
DN
5651 dr_chain.create (group_size);
5652 oprnds.create (group_size);
ebfd146a 5653
720f5239 5654 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a 5655 gcc_assert (alignment_support_scheme);
272c6793
RS
5656 /* Targets with store-lane instructions must not require explicit
5657 realignment. */
5658 gcc_assert (!store_lanes_p
5659 || alignment_support_scheme == dr_aligned
5660 || alignment_support_scheme == dr_unaligned_supported);
5661
09dfa495
BM
5662 if (negative)
5663 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
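    /* For example, for a V4SI vectype this yields offset == -3,
       biasing the data-ref pointer to the lowest-addressed of the
       four elements touched by the reversed access (illustrative).  */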
5664
272c6793
RS
5665 if (store_lanes_p)
5666 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5667 else
5668 aggr_type = vectype;
ebfd146a
IR
5669
5670 /* In case the vectorization factor (VF) is bigger than the number
5671 of elements that we can fit in a vectype (nunits), we have to generate
5672 more than one vector stmt - i.e. - we need to "unroll" the
b8698a0f 5673 vector stmt by a factor VF/nunits. For more details see documentation in
ebfd146a
IR
5674 vect_get_vec_def_for_copy_stmt. */
5675
0d0293ac 5676 /* In case of interleaving (non-unit grouped access):
ebfd146a
IR
5677
5678 S1: &base + 2 = x2
5679 S2: &base = x0
5680 S3: &base + 1 = x1
5681 S4: &base + 3 = x3
5682
5683 We create vectorized stores starting from base address (the access of the
5684 first stmt in the chain (S2 in the above example), when the last store stmt
5685 of the chain (S4) is reached:
5686
5687 VS1: &base = vx2
5688 VS2: &base + vec_size*1 = vx0
5689 VS3: &base + vec_size*2 = vx1
5690 VS4: &base + vec_size*3 = vx3
5691
5692 Then permutation statements are generated:
5693
3fcc1b55
JJ
5694 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5695 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
ebfd146a 5696 ...
b8698a0f 5697
ebfd146a
IR
5698 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5699 (the order of the data-refs in the output of vect_permute_store_chain
5700 corresponds to the order of scalar stmts in the interleaving chain - see
5701 the documentation of vect_permute_store_chain()).
5702
5703 In case of both multiple types and interleaving, above vector stores and
ff802fa1 5704 permutation stmts are created for every copy. The result vector stmts are
ebfd146a 5705 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
b8698a0f 5706 STMT_VINFO_RELATED_STMT for the next copies.
ebfd146a
IR
5707 */
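  /* For the S1..S4 group above, the scalar source would look like
     (illustrative):

       for (i = 0; i < n; i += 4)
	 {
	   base[i + 2] = x2;
	   base[i]     = x0;
	   base[i + 1] = x1;
	   base[i + 3] = x3;
	 }
  */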
5708
5709 prev_stmt_info = NULL;
5710 for (j = 0; j < ncopies; j++)
5711 {
ebfd146a
IR
5712
5713 if (j == 0)
5714 {
5715 if (slp)
5716 {
5717 /* Get vectorized arguments for SLP_NODE. */
d092494c
IR
5718 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5719 NULL, slp_node, -1);
ebfd146a 5720
9771b263 5721 vec_oprnd = vec_oprnds[0];
ebfd146a
IR
5722 }
5723 else
5724 {
b8698a0f
L
5725 /* For interleaved stores we collect vectorized defs for all the
5726 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5727 used as an input to vect_permute_store_chain(), and OPRNDS as
ebfd146a
IR
5728 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5729
0d0293ac 5730 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
ebfd146a 5731 OPRNDS are of size 1. */
b8698a0f 5732 next_stmt = first_stmt;
ebfd146a
IR
5733 for (i = 0; i < group_size; i++)
5734 {
b8698a0f
L
5735 /* Since gaps are not supported for interleaved stores,
5736 GROUP_SIZE is the exact number of stmts in the chain.
5737 Therefore, NEXT_STMT can't be NULL_TREE. If there
5738 is no interleaving, GROUP_SIZE is 1, and only one
ebfd146a
IR
5739 iteration of the loop will be executed. */
5740 gcc_assert (next_stmt
5741 && gimple_assign_single_p (next_stmt));
5742 op = gimple_assign_rhs1 (next_stmt);
5743
81c40241 5744 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
9771b263
DN
5745 dr_chain.quick_push (vec_oprnd);
5746 oprnds.quick_push (vec_oprnd);
e14c1050 5747 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
ebfd146a
IR
5748 }
5749 }
5750
5751 /* We should have caught mismatched types earlier. */
5752 gcc_assert (useless_type_conversion_p (vectype,
5753 TREE_TYPE (vec_oprnd)));
74bf76ed
JJ
5754 bool simd_lane_access_p
5755 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5756 if (simd_lane_access_p
5757 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5758 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5759 && integer_zerop (DR_OFFSET (first_dr))
5760 && integer_zerop (DR_INIT (first_dr))
5761 && alias_sets_conflict_p (get_alias_set (aggr_type),
5762 get_alias_set (DR_REF (first_dr))))
5763 {
5764 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5765 dataref_offset = build_int_cst (reference_alias_ptr_type
5766 (DR_REF (first_dr)), 0);
8928eff3 5767 inv_p = false;
74bf76ed
JJ
5768 }
5769 else
5770 dataref_ptr
5771 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5772 simd_lane_access_p ? loop : NULL,
09dfa495 5773 offset, &dummy, gsi, &ptr_incr,
74bf76ed 5774 simd_lane_access_p, &inv_p);
a70d6342 5775 gcc_assert (bb_vinfo || !inv_p);
ebfd146a 5776 }
b8698a0f 5777 else
ebfd146a 5778 {
b8698a0f
L
5779 /* For interleaved stores we created vectorized defs for all the
5780 defs stored in OPRNDS in the previous iteration (previous copy).
5781 DR_CHAIN is then used as an input to vect_permute_store_chain(),
ebfd146a
IR
5782 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5783 next copy.
0d0293ac 5784 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
ebfd146a
IR
5785 OPRNDS are of size 1. */
5786 for (i = 0; i < group_size; i++)
5787 {
9771b263 5788 op = oprnds[i];
81c40241 5789 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
b8698a0f 5790 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
9771b263
DN
5791 dr_chain[i] = vec_oprnd;
5792 oprnds[i] = vec_oprnd;
ebfd146a 5793 }
74bf76ed
JJ
5794 if (dataref_offset)
5795 dataref_offset
5796 = int_const_binop (PLUS_EXPR, dataref_offset,
5797 TYPE_SIZE_UNIT (aggr_type));
5798 else
5799 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5800 TYPE_SIZE_UNIT (aggr_type));
ebfd146a
IR
5801 }
5802
272c6793 5803 if (store_lanes_p)
ebfd146a 5804 {
272c6793 5805 tree vec_array;
267d3070 5806
272c6793
RS
5807 /* Combine all the vectors into an array. */
5808 vec_array = create_vector_array (vectype, vec_num);
5809 for (i = 0; i < vec_num; i++)
c2d7ab2a 5810 {
9771b263 5811 vec_oprnd = dr_chain[i];
272c6793 5812 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
267d3070 5813 }
b8698a0f 5814
272c6793
RS
5815 /* Emit:
5816 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5817 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5818 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5819 gimple_call_set_lhs (new_stmt, data_ref);
267d3070 5820 vect_finish_stmt_generation (stmt, new_stmt, gsi);
272c6793
RS
5821 }
5822 else
5823 {
5824 new_stmt = NULL;
0d0293ac 5825 if (grouped_store)
272c6793 5826 {
b6b9227d
JJ
5827 if (j == 0)
5828 result_chain.create (group_size);
272c6793
RS
5829 /* Permute. */
5830 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5831 &result_chain);
5832 }
c2d7ab2a 5833
272c6793
RS
5834 next_stmt = first_stmt;
5835 for (i = 0; i < vec_num; i++)
5836 {
644ffefd 5837 unsigned align, misalign;
272c6793
RS
5838
5839 if (i > 0)
5840 /* Bump the vector pointer. */
5841 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5842 stmt, NULL_TREE);
5843
5844 if (slp)
9771b263 5845 vec_oprnd = vec_oprnds[i];
0d0293ac
MM
5846 else if (grouped_store)
5847 /* For grouped stores vectorized defs are interleaved in
272c6793 5848 vect_permute_store_chain(). */
9771b263 5849 vec_oprnd = result_chain[i];
272c6793 5850
aed93b23
RB
5851 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5852 dataref_ptr,
5853 dataref_offset
5854 ? dataref_offset
5855 : build_int_cst (reference_alias_ptr_type
5856 (DR_REF (first_dr)), 0));
644ffefd 5857 align = TYPE_ALIGN_UNIT (vectype);
272c6793 5858 if (aligned_access_p (first_dr))
644ffefd 5859 misalign = 0;
272c6793
RS
5860 else if (DR_MISALIGNMENT (first_dr) == -1)
5861 {
52639a61
RB
5862 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5863 align = TYPE_ALIGN_UNIT (elem_type);
5864 else
5865 align = get_object_alignment (DR_REF (first_dr))
5866 / BITS_PER_UNIT;
5867 misalign = 0;
272c6793
RS
5868 TREE_TYPE (data_ref)
5869 = build_aligned_type (TREE_TYPE (data_ref),
52639a61 5870 align * BITS_PER_UNIT);
272c6793
RS
5871 }
5872 else
5873 {
5874 TREE_TYPE (data_ref)
5875 = build_aligned_type (TREE_TYPE (data_ref),
5876 TYPE_ALIGN (elem_type));
644ffefd 5877 misalign = DR_MISALIGNMENT (first_dr);
272c6793 5878 }
aed93b23
RB
5879 if (dataref_offset == NULL_TREE
5880 && TREE_CODE (dataref_ptr) == SSA_NAME)
74bf76ed
JJ
5881 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5882 misalign);
c2d7ab2a 5883
f234d260
BM
5884 if (negative
5885 && dt != vect_constant_def
5886 && dt != vect_external_def)
09dfa495
BM
5887 {
5888 tree perm_mask = perm_mask_for_reverse (vectype);
5889 tree perm_dest
5890 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5891 vectype);
b731b390 5892 tree new_temp = make_ssa_name (perm_dest);
09dfa495
BM
5893
5894 /* Generate the permute statement. */
355fe088 5895 gimple *perm_stmt
0d0e4a03
JJ
5896 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5897 vec_oprnd, perm_mask);
09dfa495
BM
5898 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5899
5900 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5901 vec_oprnd = new_temp;
5902 }
5903
272c6793
RS
5904 /* Arguments are ready. Create the new vector stmt. */
5905 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5906 vect_finish_stmt_generation (stmt, new_stmt, gsi);
272c6793
RS
5907
5908 if (slp)
5909 continue;
5910
e14c1050 5911 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
272c6793
RS
5912 if (!next_stmt)
5913 break;
5914 }
ebfd146a 5915 }
1da0876c
RS
5916 if (!slp)
5917 {
5918 if (j == 0)
5919 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5920 else
5921 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5922 prev_stmt_info = vinfo_for_stmt (new_stmt);
5923 }
ebfd146a
IR
5924 }
5925
9771b263
DN
5926 dr_chain.release ();
5927 oprnds.release ();
5928 result_chain.release ();
5929 vec_oprnds.release ();
ebfd146a
IR
5930
5931 return true;
5932}
5933
557be5a8
AL
5934/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5935 VECTOR_CST mask. No checks are made that the target platform supports the
5936 mask, so callers may wish to test can_vec_perm_p separately, or use
5937 vect_gen_perm_mask_checked. */
a1e53f3f 5938
3fcc1b55 5939tree
557be5a8 5940vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
a1e53f3f 5941{
d2a12ae7 5942 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
2635892a 5943 int i, nunits;
a1e53f3f 5944
22e4dee7 5945 nunits = TYPE_VECTOR_SUBPARTS (vectype);
22e4dee7 5946
96f9265a
RG
5947 mask_elt_type = lang_hooks.types.type_for_mode
5948 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
22e4dee7 5949 mask_type = get_vectype_for_scalar_type (mask_elt_type);
a1e53f3f 5950
d2a12ae7 5951 mask_elts = XALLOCAVEC (tree, nunits);
aec7ae7d 5952 for (i = nunits - 1; i >= 0; i--)
d2a12ae7
RG
5953 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5954 mask_vec = build_vector (mask_type, mask_elts);
a1e53f3f 5955
2635892a 5956 return mask_vec;
a1e53f3f
L
5957}
5958
cf7aa6a3
AL
5959/* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5960 i.e. that the target supports the pattern _for arbitrary input vectors_. */
557be5a8
AL
5961
5962tree
5963vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5964{
5965 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5966 return vect_gen_perm_mask_any (vectype, sel);
5967}
5968
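A usage sketch for this pair of helpers. The caller is hypothetical and the fragment assumes a V4SI vectype in hand, so it is not runnable on its own; can_vec_perm_p and vect_gen_perm_mask_checked, however, are the real functions used and defined above.

/* Hypothetical caller: select the even elements of the concatenation
   {X, Y} of two V4SI vectors, checking target support first.  */
unsigned char sel[4] = { 0, 2, 4, 6 };
tree mask = NULL_TREE;
if (can_vec_perm_p (TYPE_MODE (vectype), false, sel))
  mask = vect_gen_perm_mask_checked (vectype, sel);
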
aec7ae7d
JJ
5969/* Given vector variables X and Y that were generated for the scalar
5970 STMT, generate instructions to permute the vector elements of X and Y
5971 using permutation mask MASK_VEC, insert them at *GSI and return the
5972 permuted vector variable. */
a1e53f3f
L
5973
5974static tree
355fe088 5975permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
aec7ae7d 5976 gimple_stmt_iterator *gsi)
a1e53f3f
L
5977{
5978 tree vectype = TREE_TYPE (x);
aec7ae7d 5979 tree perm_dest, data_ref;
355fe088 5980 gimple *perm_stmt;
a1e53f3f 5981
acdcd61b 5982 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
b731b390 5983 data_ref = make_ssa_name (perm_dest);
a1e53f3f
L
5984
5985 /* Generate the permute statement. */
0d0e4a03 5986 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
a1e53f3f
L
5987 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5988
5989 return data_ref;
5990}
5991
6b916b36
RB
5992/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5993 inserting them on the loop's preheader edge. Returns true if we
5994 were successful in doing so (and thus STMT can then be moved),
5995 otherwise returns false. */
5996
5997static bool
355fe088 5998hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6b916b36
RB
5999{
6000 ssa_op_iter i;
6001 tree op;
6002 bool any = false;
6003
6004 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6005 {
355fe088 6006 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6b916b36
RB
6007 if (!gimple_nop_p (def_stmt)
6008 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6009 {
6010 /* Make sure we don't need to recurse. While we could do
6011 so in simple cases, for more complex use webs we don't
6012 have an easy way to preserve stmt order so as to fulfil
6013 dependencies within them. */
6014 tree op2;
6015 ssa_op_iter i2;
d1417442
JJ
6016 if (gimple_code (def_stmt) == GIMPLE_PHI)
6017 return false;
6b916b36
RB
6018 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6019 {
355fe088 6020 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6b916b36
RB
6021 if (!gimple_nop_p (def_stmt2)
6022 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6023 return false;
6024 }
6025 any = true;
6026 }
6027 }
6028
6029 if (!any)
6030 return true;
6031
6032 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6033 {
355fe088 6034 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6b916b36
RB
6035 if (!gimple_nop_p (def_stmt)
6036 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6037 {
6038 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6039 gsi_remove (&gsi, false);
6040 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6041 }
6042 }
6043
6044 return true;
6045}
6046
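At the source level, the transformation hoist_defs_of_uses performs corresponds to moving a loop-invariant address computation to the preheader. A rough before/after sketch in plain C (illustrative, not GIMPLE; the function and variable names are made up):

int
hoist_example (int *base, int n)
{
  int sum = 0;
  /* Before the transformation, the invariant def feeding the load
     lived in the loop body; after hoist_defs_of_uses it sits on the
     preheader edge, as written here.  */
  int *p = base + 4;
  for (int i = 0; i < n; i++)
    sum += *p;
  return sum;
}
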
ebfd146a
IR
6047/* vectorizable_load.
6048
b8698a0f
L
6049 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6050 can be vectorized.
6051 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
ebfd146a
IR
6052 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6053 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6054
6055static bool
355fe088 6056vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
c716e67f 6057 slp_tree slp_node, slp_instance slp_node_instance)
ebfd146a
IR
6058{
6059 tree scalar_dest;
6060 tree vec_dest = NULL;
6061 tree data_ref = NULL;
6062 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b8698a0f 6063 stmt_vec_info prev_stmt_info;
ebfd146a 6064 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 6065 struct loop *loop = NULL;
ebfd146a 6066 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
a70d6342 6067 bool nested_in_vect_loop = false;
c716e67f 6068 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
272c6793 6069 tree elem_type;
ebfd146a 6070 tree new_temp;
ef4bddc2 6071 machine_mode mode;
355fe088 6072 gimple *new_stmt = NULL;
ebfd146a
IR
6073 tree dummy;
6074 enum dr_alignment_support alignment_support_scheme;
6075 tree dataref_ptr = NULL_TREE;
74bf76ed 6076 tree dataref_offset = NULL_TREE;
355fe088 6077 gimple *ptr_incr = NULL;
ebfd146a 6078 int ncopies;
9b999e8c 6079 int i, j, group_size = -1, group_gap_adj;
ebfd146a
IR
6080 tree msq = NULL_TREE, lsq;
6081 tree offset = NULL_TREE;
356bbc4c 6082 tree byte_offset = NULL_TREE;
ebfd146a 6083 tree realignment_token = NULL_TREE;
538dd0b7 6084 gphi *phi = NULL;
6e1aa848 6085 vec<tree> dr_chain = vNULL;
0d0293ac 6086 bool grouped_load = false;
272c6793 6087 bool load_lanes_p = false;
355fe088 6088 gimple *first_stmt;
ebfd146a 6089 bool inv_p;
319e6439 6090 bool negative = false;
ebfd146a
IR
6091 bool compute_in_loop = false;
6092 struct loop *at_loop;
6093 int vec_num;
6094 bool slp = (slp_node != NULL);
6095 bool slp_perm = false;
6096 enum tree_code code;
a70d6342
IR
6097 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6098 int vf;
272c6793 6099 tree aggr_type;
aec7ae7d
JJ
6100 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6101 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6102 int gather_scale = 1;
6103 enum vect_def_type gather_dt = vect_unknown_def_type;
310213d4 6104 vec_info *vinfo = stmt_info->vinfo;
a70d6342 6105
465c8c19
JJ
6106 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6107 return false;
6108
6109 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6110 return false;
6111
6112 /* Is vectorizable load? */
6113 if (!is_gimple_assign (stmt))
6114 return false;
6115
6116 scalar_dest = gimple_assign_lhs (stmt);
6117 if (TREE_CODE (scalar_dest) != SSA_NAME)
6118 return false;
6119
6120 code = gimple_assign_rhs_code (stmt);
6121 if (code != ARRAY_REF
6122 && code != BIT_FIELD_REF
6123 && code != INDIRECT_REF
6124 && code != COMPONENT_REF
6125 && code != IMAGPART_EXPR
6126 && code != REALPART_EXPR
6127 && code != MEM_REF
6128 && TREE_CODE_CLASS (code) != tcc_declaration)
6129 return false;
6130
6131 if (!STMT_VINFO_DATA_REF (stmt_info))
6132 return false;
6133
6134 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6135 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6136
a70d6342
IR
6137 if (loop_vinfo)
6138 {
6139 loop = LOOP_VINFO_LOOP (loop_vinfo);
6140 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6141 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6142 }
6143 else
3533e503 6144 vf = 1;
ebfd146a
IR
6145
6146 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 6147 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
ebfd146a 6148 case of SLP. */
437f4a00 6149 if (slp || PURE_SLP_STMT (stmt_info))
ebfd146a
IR
6150 ncopies = 1;
6151 else
6152 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6153
6154 gcc_assert (ncopies >= 1);
6155
6156 /* FORNOW. This restriction should be relaxed. */
6157 if (nested_in_vect_loop && ncopies > 1)
6158 {
73fbfcad 6159 if (dump_enabled_p ())
78c60e3d 6160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6161 "multiple types in nested loop.\n");
ebfd146a
IR
6162 return false;
6163 }
6164
f2556b68
RB
6165 /* Invalidate assumptions made by dependence analysis when vectorization
6166 on the unrolled body effectively re-orders stmts. */
6167 if (ncopies > 1
6168 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6169 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6170 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6171 {
6172 if (dump_enabled_p ())
6173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6174 "cannot perform implicit CSE when unrolling "
6175 "with negative dependence distance\n");
6176 return false;
6177 }
6178
7b7b1813 6179 elem_type = TREE_TYPE (vectype);
947131ba 6180 mode = TYPE_MODE (vectype);
ebfd146a
IR
6181
6182 /* FORNOW. In some cases can vectorize even if data-type not supported
6183 (e.g. - data copies). */
947131ba 6184 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
ebfd146a 6185 {
73fbfcad 6186 if (dump_enabled_p ())
78c60e3d 6187 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6188 "Aligned load, but unsupported type.\n");
ebfd146a
IR
6189 return false;
6190 }
6191
ebfd146a 6192 /* Check if the load is a part of an interleaving chain. */
0d0293ac 6193 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
ebfd146a 6194 {
0d0293ac 6195 grouped_load = true;
ebfd146a 6196 /* FORNOW */
3bab6342 6197 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
ebfd146a 6198
e14c1050 6199 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
d5f035ea
RB
6200
6201 /* If this is single-element interleaving with an element distance
6202 that leaves unused vector loads around, punt - we at least create
6203 very sub-optimal code in that case (and blow up memory,
6204 see PR65518). */
6205 if (first_stmt == stmt
6206 && !GROUP_NEXT_ELEMENT (stmt_info)
6207 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6208 {
6209 if (dump_enabled_p ())
6210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6211 "single-element interleaving not supported "
6212 "for not adjacent vector loads\n");
6213 return false;
6214 }
6215
b1af7da6
RB
6216 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6217 slp_perm = true;
6218
7b5fc413
RB
6219 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6220 if (!slp
6221 && !PURE_SLP_STMT (stmt_info)
f2e2a985 6222 && !STMT_VINFO_STRIDED_P (stmt_info))
b602d918 6223 {
272c6793
RS
6224 if (vect_load_lanes_supported (vectype, group_size))
6225 load_lanes_p = true;
0d0293ac 6226 else if (!vect_grouped_load_supported (vectype, group_size))
b602d918
RS
6227 return false;
6228 }
f2556b68
RB
6229
6230 /* Invalidate assumptions made by dependence analysis when vectorization
6231 on the unrolled body effectively re-orders stmts. */
6232 if (!PURE_SLP_STMT (stmt_info)
6233 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6234 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6235 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6236 {
6237 if (dump_enabled_p ())
6238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6239 "cannot perform implicit CSE when performing "
6240 "group loads with negative dependence distance\n");
6241 return false;
6242 }
96bb56b2
RB
6243
6244 /* Similarly, when the stmt is a load that is both part of an SLP
6245 instance and a loop-vectorized stmt via the same-dr mechanism,
6246 we have to give up. */
6247 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6248 && (STMT_SLP_TYPE (stmt_info)
6249 != STMT_SLP_TYPE (vinfo_for_stmt
6250 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6251 {
6252 if (dump_enabled_p ())
6253 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6254 "conflicting SLP types for CSEd load\n");
6255 return false;
6256 }
ebfd146a
IR
6257 }
6258
a1e53f3f 6259
3bab6342 6260 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
aec7ae7d 6261 {
355fe088 6262 gimple *def_stmt;
3bab6342
AT
6263 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6264 &gather_off, &gather_scale);
aec7ae7d 6265 gcc_assert (gather_decl);
81c40241
RB
6266 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6267 &gather_off_vectype))
aec7ae7d 6268 {
73fbfcad 6269 if (dump_enabled_p ())
78c60e3d 6270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6271 "gather index use not simple.\n");
aec7ae7d
JJ
6272 return false;
6273 }
6274 }
f2e2a985 6275 else if (STMT_VINFO_STRIDED_P (stmt_info))
7b5fc413
RB
6276 {
6277 if ((grouped_load
6278 && (slp || PURE_SLP_STMT (stmt_info)))
6279 && (group_size > nunits
b266b968 6280 || nunits % group_size != 0))
7b5fc413
RB
6281 {
6282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6283 "unhandled strided group load\n");
6284 return false;
6285 }
6286 }
319e6439
RG
6287 else
6288 {
6289 negative = tree_int_cst_compare (nested_in_vect_loop
6290 ? STMT_VINFO_DR_STEP (stmt_info)
6291 : DR_STEP (dr),
6292 size_zero_node) < 0;
6293 if (negative && ncopies > 1)
6294 {
73fbfcad 6295 if (dump_enabled_p ())
78c60e3d 6296 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6297 "multiple types with negative step.\n");
319e6439
RG
6298 return false;
6299 }
6300
6301 if (negative)
6302 {
08940f33
RB
6303 if (grouped_load)
6304 {
6305 if (dump_enabled_p ())
6306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
6307 "negative step for group load not supported"
6308 "\n");
08940f33
RB
6309 return false;
6310 }
319e6439
RG
6311 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6312 if (alignment_support_scheme != dr_aligned
6313 && alignment_support_scheme != dr_unaligned_supported)
6314 {
73fbfcad 6315 if (dump_enabled_p ())
78c60e3d 6316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6317 "negative step but alignment required.\n");
319e6439
RG
6318 return false;
6319 }
6320 if (!perm_mask_for_reverse (vectype))
6321 {
73fbfcad 6322 if (dump_enabled_p ())
78c60e3d 6323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
6324 "negative step and reversing not supported."
6325 "\n");
319e6439
RG
6326 return false;
6327 }
6328 }
7d75abc8 6329 }
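      /* A loop that reaches this point with a negative step, e.g.

	   for (i = n - 1; i >= 0; i--)
	     sum += a[i];

	 is vectorized by loading the vector ending at a[i] and
	 reversing it with the mask from perm_mask_for_reverse
	 (illustrative).  */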
aec7ae7d 6330
ebfd146a
IR
6331 if (!vec_stmt) /* transformation not required. */
6332 {
6333 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
2e8ab70c
RB
6334 /* The SLP costs are calculated during SLP analysis. */
6335 if (!PURE_SLP_STMT (stmt_info))
6336 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6337 NULL, NULL, NULL);
ebfd146a
IR
6338 return true;
6339 }
6340
73fbfcad 6341 if (dump_enabled_p ())
78c60e3d 6342 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 6343 "transform load. ncopies = %d\n", ncopies);
ebfd146a
IR
6344
6345 /** Transform. **/
6346
c716e67f
XDL
6347 ensure_base_align (stmt_info, dr);
6348
3bab6342 6349 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
aec7ae7d
JJ
6350 {
6351 tree vec_oprnd0 = NULL_TREE, op;
6352 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6353 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
d3c2fee0 6354 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
aec7ae7d
JJ
6355 edge pe = loop_preheader_edge (loop);
6356 gimple_seq seq;
6357 basic_block new_bb;
6358 enum { NARROW, NONE, WIDEN } modifier;
6359 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6360
6361 if (nunits == gather_off_nunits)
6362 modifier = NONE;
6363 else if (nunits == gather_off_nunits / 2)
6364 {
6365 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6366 modifier = WIDEN;
6367
6368 for (i = 0; i < gather_off_nunits; ++i)
6369 sel[i] = i | nunits;
6370
557be5a8 6371 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
aec7ae7d
JJ
6372 }
6373 else if (nunits == gather_off_nunits * 2)
6374 {
6375 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6376 modifier = NARROW;
6377
6378 for (i = 0; i < nunits; ++i)
6379 sel[i] = i < gather_off_nunits
6380 ? i : i + nunits - gather_off_nunits;
6381
557be5a8 6382 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
aec7ae7d
JJ
6383 ncopies *= 2;
6384 }
6385 else
6386 gcc_unreachable ();
6387
6388 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6389 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6390 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6391 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6392 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6393 scaletype = TREE_VALUE (arglist);
d3c2fee0 6394 gcc_checking_assert (types_compatible_p (srctype, rettype));
aec7ae7d
JJ
6395
6396 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6397
6398 ptr = fold_convert (ptrtype, gather_base);
6399 if (!is_gimple_min_invariant (ptr))
6400 {
6401 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6402 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6403 gcc_assert (!new_bb);
6404 }
6405
6406 /* Currently we support only unconditional gather loads,
6407 so mask should be all ones. */
d3c2fee0
AI
6408 if (TREE_CODE (masktype) == INTEGER_TYPE)
6409 mask = build_int_cst (masktype, -1);
6410 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6411 {
6412 mask = build_int_cst (TREE_TYPE (masktype), -1);
6413 mask = build_vector_from_val (masktype, mask);
03b9e8e4 6414 mask = vect_init_vector (stmt, mask, masktype, NULL);
d3c2fee0 6415 }
aec7ae7d
JJ
6416 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6417 {
6418 REAL_VALUE_TYPE r;
6419 long tmp[6];
6420 for (j = 0; j < 6; ++j)
6421 tmp[j] = -1;
6422 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6423 mask = build_real (TREE_TYPE (masktype), r);
d3c2fee0 6424 mask = build_vector_from_val (masktype, mask);
03b9e8e4 6425 mask = vect_init_vector (stmt, mask, masktype, NULL);
aec7ae7d
JJ
6426 }
6427 else
6428 gcc_unreachable ();
aec7ae7d
JJ
6429
6430 scale = build_int_cst (scaletype, gather_scale);
6431
d3c2fee0
AI
6432 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6433 merge = build_int_cst (TREE_TYPE (rettype), 0);
6434 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6435 {
6436 REAL_VALUE_TYPE r;
6437 long tmp[6];
6438 for (j = 0; j < 6; ++j)
6439 tmp[j] = 0;
6440 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6441 merge = build_real (TREE_TYPE (rettype), r);
6442 }
6443 else
6444 gcc_unreachable ();
6445 merge = build_vector_from_val (rettype, merge);
6446 merge = vect_init_vector (stmt, merge, rettype, NULL);
6447
6448 prev_stmt_info = NULL;
6449 for (j = 0; j < ncopies; ++j)
6450 {
6451 if (modifier == WIDEN && (j & 1))
6452 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6453 perm_mask, stmt, gsi);
6454 else if (j == 0)
6455 op = vec_oprnd0
81c40241 6456 = vect_get_vec_def_for_operand (gather_off, stmt);
6457 else
6458 op = vec_oprnd0
6459 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6460
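	  /* The builtin may expect the offsets in a different vector type
	     with the same number of elements; if so, reinterpret OP with a
	     VIEW_CONVERT_EXPR.  */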
6461 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6462 {
6463 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6464 == TYPE_VECTOR_SUBPARTS (idxtype));
0e22bb5a 6465 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6466 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6467 new_stmt
0d0e4a03 6468 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6469 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6470 op = var;
6471 }
6472
6473 new_stmt
d3c2fee0 6474 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6475
6476 if (!useless_type_conversion_p (vectype, rettype))
6477 {
6478 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6479 == TYPE_VECTOR_SUBPARTS (rettype));
0e22bb5a 6480 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6481 gimple_call_set_lhs (new_stmt, op);
6482 vect_finish_stmt_generation (stmt, new_stmt, gsi);
b731b390 6483 var = make_ssa_name (vec_dest);
6484 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6485 new_stmt
0d0e4a03 6486 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6487 }
6488 else
6489 {
6490 var = make_ssa_name (vec_dest, new_stmt);
6491 gimple_call_set_lhs (new_stmt, var);
6492 }
6493
6494 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6495
6496 if (modifier == NARROW)
6497 {
6498 if ((j & 1) == 0)
6499 {
6500 prev_res = var;
6501 continue;
6502 }
6503 var = permute_vec_elements (prev_res, var,
6504 perm_mask, stmt, gsi);
6505 new_stmt = SSA_NAME_DEF_STMT (var);
6506 }
6507
6508 if (prev_stmt_info == NULL)
6509 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6510 else
6511 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6512 prev_stmt_info = vinfo_for_stmt (new_stmt);
6513 }
6514 return true;
6515 }
f2e2a985 6516 else if (STMT_VINFO_STRIDED_P (stmt_info))
6517 {
6518 gimple_stmt_iterator incr_gsi;
6519 bool insert_after;
355fe088 6520 gimple *incr;
7d75abc8 6521 tree offvar;
6522 tree ivstep;
6523 tree running_off;
9771b263 6524 vec<constructor_elt, va_gc> *v = NULL;
7d75abc8 6525 gimple_seq stmts = NULL;
6526 tree stride_base, stride_step, alias_off;
6527
6528 gcc_assert (!nested_in_vect_loop);
7d75abc8 6529
f502d50e 6530 if (slp && grouped_load)
6531 first_dr = STMT_VINFO_DATA_REF
6532 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6533 else
6534 first_dr = dr;
6535
6536 stride_base
6537 = fold_build_pointer_plus
ab313a8c 6538 (DR_BASE_ADDRESS (first_dr),
14ac6aa2 6539 size_binop (PLUS_EXPR,
6540 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6541 convert_to_ptrofftype (DR_INIT (first_dr))));
6542 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6543
6544 /* For a load with a loop-invariant stride that is not a power of 2
6545 (i.e. not a grouped access) like so:
6546
6547 for (i = 0; i < n; i += stride)
6548 ... = array[i];
6549
6550 we generate a new induction variable and new accesses to
6551 form a new vector (or vectors, depending on ncopies):
6552
6553 for (j = 0; ; j += VF*stride)
6554 tmp1 = array[j];
6555 tmp2 = array[j + stride];
6556 ...
6557 vectemp = {tmp1, tmp2, ...}
6558 */
6559
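      /* Each vector iteration consumes VF strides' worth of scalar
	 accesses, so the IV step is STRIDE_STEP * VF.  */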
6560 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6561 build_int_cst (TREE_TYPE (stride_step), vf));
6562
6563 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6564
ab313a8c 6565 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6566 loop, &incr_gsi, insert_after,
6567 &offvar, NULL);
6568 incr = gsi_stmt (incr_gsi);
310213d4 6569 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7d75abc8 6570
6571 stride_step = force_gimple_operand (unshare_expr (stride_step),
6572 &stmts, true, NULL_TREE);
6573 if (stmts)
6574 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6575
6576 prev_stmt_info = NULL;
6577 running_off = offvar;
ab313a8c 6578 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6579 int nloads = nunits;
6580 tree ltype = TREE_TYPE (vectype);
b266b968 6581 auto_vec<tree> dr_chain;
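      /* For SLP a vector may have to be assembled from several narrower
	 loads: NLOADS loads of LTYPE, where LTYPE is a GROUP_SIZE-element
	 subvector whenever the group does not fill a whole vector.  */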
6582 if (slp)
6583 {
6584 nloads = nunits / group_size;
6585 if (group_size < nunits)
6586 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6587 else
6588 ltype = vectype;
6589 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6590 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6591 if (slp_perm)
6592 dr_chain.create (ncopies);
7b5fc413 6593 }
6594 for (j = 0; j < ncopies; j++)
6595 {
6596 tree vec_inv;
6597
6598 if (nloads > 1)
6599 {
6600 vec_alloc (v, nloads);
6601 for (i = 0; i < nloads; i++)
6602 {
6603 tree newref, newoff;
355fe088 6604 gimple *incr;
6605 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6606
6607 newref = force_gimple_operand_gsi (gsi, newref, true,
6608 NULL_TREE, true,
6609 GSI_SAME_STMT);
6610 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6611 newoff = copy_ssa_name (running_off);
6612 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6613 running_off, stride_step);
6614 vect_finish_stmt_generation (stmt, incr, gsi);
6615
6616 running_off = newoff;
6617 }
6618
6619 vec_inv = build_constructor (vectype, v);
6620 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6621 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6622 }
6623 else
7d75abc8 6624 {
6625 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6626 build2 (MEM_REF, ltype,
6627 running_off, alias_off));
6628 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6629
6630 tree newoff = copy_ssa_name (running_off);
355fe088 6631 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
0d0e4a03 6632 running_off, stride_step);
6633 vect_finish_stmt_generation (stmt, incr, gsi);
6634
6635 running_off = newoff;
6636 }
6637
7b5fc413 6638 if (slp)
6639 {
6640 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6641 if (slp_perm)
6642 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6643 }
7d75abc8 6644 else
6645 {
6646 if (j == 0)
6647 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6648 else
6649 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6650 prev_stmt_info = vinfo_for_stmt (new_stmt);
6651 }
7d75abc8 6652 }
6653 if (slp_perm)
6654 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6655 slp_node_instance, false);
6656 return true;
6657 }
aec7ae7d 6658
0d0293ac 6659 if (grouped_load)
ebfd146a 6660 {
e14c1050 6661 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6662 /* For BB vectorization we directly vectorize a subchain
6663 without permutation. */
6664 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
9771b263 6665 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6aa904c4 6666
ebfd146a 6667 /* Check if the chain of loads is already vectorized. */
6668 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6669 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6670 ??? But we can only do so if there is exactly one
6671 as we have no way to get at the rest. Leave the CSE
6672 opportunity alone.
6673 ??? With the group load eventually participating
6674 in multiple different permutations (having multiple
6675 slp nodes which refer to the same group) the CSE
6676 is even wrong code. See PR56270. */
6677 && !slp)
6678 {
6679 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6680 return true;
6681 }
6682 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
e14c1050 6683 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
9b999e8c 6684 group_gap_adj = 0;
6685
6686 /* VEC_NUM is the number of vect stmts to be created for this group. */
6687 if (slp)
6688 {
0d0293ac 6689 grouped_load = false;
6690 /* For SLP permutation support we need to load the whole group,
6691 not only the number of vector stmts the permutation result
6692 fits in. */
6693 if (slp_perm)
6694 vec_num = (group_size * vf + nunits - 1) / nunits;
6695 else
6696 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
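	  /* The difference between the scalar elements the group covers per
	     vector iteration (VF * GROUP_SIZE) and the elements actually
	     loaded (NUNITS * VEC_NUM) is a gap the pointer must later be
	     bumped over.  */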
9b999e8c 6697 group_gap_adj = vf * group_size - nunits * vec_num;
a70d6342 6698 }
ebfd146a 6699 else
9b999e8c 6700 vec_num = group_size;
6701 }
6702 else
6703 {
6704 first_stmt = stmt;
6705 first_dr = dr;
6706 group_size = vec_num = 1;
9b999e8c 6707 group_gap_adj = 0;
6708 }
6709
720f5239 6710 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a 6711 gcc_assert (alignment_support_scheme);
6712 /* Targets with load-lane instructions must not require explicit
6713 realignment. */
6714 gcc_assert (!load_lanes_p
6715 || alignment_support_scheme == dr_aligned
6716 || alignment_support_scheme == dr_unaligned_supported);
6717
6718 /* In case the vectorization factor (VF) is bigger than the number
6719 of elements that we can fit in a vectype (nunits), we have to generate
6720 more than one vector stmt, i.e., we need to "unroll" the
ff802fa1 6721 vector stmt by a factor VF/nunits. In doing so, we record a pointer
ebfd146a 6722 from one copy of the vector stmt to the next, in the field
ff802fa1 6723 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
ebfd146a 6724 stages to find the correct vector defs to be used when vectorizing
6725 stmts that use the defs of the current stmt. The example below
6726 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6727 need to create 4 vectorized stmts):
6728
6729 before vectorization:
6730 RELATED_STMT VEC_STMT
6731 S1: x = memref - -
6732 S2: z = x + 1 - -
6733
6734 step 1: vectorize stmt S1:
6735 We first create the vector stmt VS1_0, and, as usual, record a
6736 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6737 Next, we create the vector stmt VS1_1, and record a pointer to
6738 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
ff802fa1 6739 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6740 stmts and pointers:
6741 RELATED_STMT VEC_STMT
6742 VS1_0: vx0 = memref0 VS1_1 -
6743 VS1_1: vx1 = memref1 VS1_2 -
6744 VS1_2: vx2 = memref2 VS1_3 -
6745 VS1_3: vx3 = memref3 - -
6746 S1: x = load - VS1_0
6747 S2: z = x + 1 - -
6748
6749 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6750 information we recorded in RELATED_STMT field is used to vectorize
6751 stmt S2. */
6752
0d0293ac 6753 /* In case of interleaving (non-unit grouped access):
6754
6755 S1: x2 = &base + 2
6756 S2: x0 = &base
6757 S3: x1 = &base + 1
6758 S4: x3 = &base + 3
6759
b8698a0f 6760 Vectorized loads are created in the order of memory accesses
6761 starting from the access of the first stmt of the chain:
6762
6763 VS1: vx0 = &base
6764 VS2: vx1 = &base + vec_size*1
6765 VS3: vx2 = &base + vec_size*2
6766 VS4: vx3 = &base + vec_size*3
6767
6768 Then permutation statements are generated:
6769
6770 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6771 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6772 ...
6773
6774 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6775 (the order of the data-refs in the output of vect_permute_load_chain
6776 corresponds to the order of scalar stmts in the interleaving chain - see
6777 the documentation of vect_permute_load_chain()).
6778 The generation of permutation stmts and recording them in
0d0293ac 6779 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
ebfd146a 6780
b8698a0f 6781 In case of both multiple types and interleaving, the vector loads and
6782 permutation stmts above are created for every copy. The result vector
6783 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6784 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6785
6786 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6787 on a target that supports unaligned accesses (dr_unaligned_supported)
6788 we generate the following code:
6789 p = initial_addr;
6790 indx = 0;
6791 loop {
6792 p = p + indx * vectype_size;
6793 vec_dest = *(p);
6794 indx = indx + 1;
6795 }
6796
6797 Otherwise, the data reference is potentially unaligned on a target that
b8698a0f 6798 does not support unaligned accesses (dr_explicit_realign_optimized) -
6799 then generate the following code, in which the data in each iteration is
6800 obtained by two vector loads, one from the previous iteration, and one
6801 from the current iteration:
6802 p1 = initial_addr;
6803 msq_init = *(floor(p1))
6804 p2 = initial_addr + VS - 1;
6805 realignment_token = call target_builtin;
6806 indx = 0;
6807 loop {
6808 p2 = p2 + indx * vectype_size
6809 lsq = *(floor(p2))
6810 vec_dest = realign_load (msq, lsq, realignment_token)
6811 indx = indx + 1;
6812 msq = lsq;
6813 } */
6814
6815 /* If the misalignment remains the same throughout the execution of the
6816 loop, we can create the init_addr and permutation mask at the loop
ff802fa1 6817 preheader. Otherwise, it needs to be created inside the loop.
6818 This can only occur when vectorizing memory accesses in the inner-loop
6819 nested within an outer-loop that is being vectorized. */
6820
d1e4b493 6821 if (nested_in_vect_loop
211bea38 6822 && (TREE_INT_CST_LOW (DR_STEP (dr))
6823 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6824 {
6825 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6826 compute_in_loop = true;
6827 }
6828
6829 if ((alignment_support_scheme == dr_explicit_realign_optimized
6830 || alignment_support_scheme == dr_explicit_realign)
59fd17e3 6831 && !compute_in_loop)
6832 {
6833 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6834 alignment_support_scheme, NULL_TREE,
6835 &at_loop);
6836 if (alignment_support_scheme == dr_explicit_realign_optimized)
6837 {
538dd0b7 6838 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6839 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6840 size_one_node);
6841 }
6842 }
6843 else
6844 at_loop = loop;
6845
6846 if (negative)
6847 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6848
6849 if (load_lanes_p)
6850 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6851 else
6852 aggr_type = vectype;
6853
6854 prev_stmt_info = NULL;
6855 for (j = 0; j < ncopies; j++)
b8698a0f 6856 {
272c6793 6857 /* 1. Create the vector or array pointer update chain. */
ebfd146a 6858 if (j == 0)
6859 {
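	      /* A simd-lane access whose base is an invariant, suitably
		 aligned variable can address it directly through a constant
		 offset instead of a pointer induction variable.  */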
6860 bool simd_lane_access_p
6861 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6862 if (simd_lane_access_p
6863 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6864 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6865 && integer_zerop (DR_OFFSET (first_dr))
6866 && integer_zerop (DR_INIT (first_dr))
6867 && alias_sets_conflict_p (get_alias_set (aggr_type),
6868 get_alias_set (DR_REF (first_dr)))
6869 && (alignment_support_scheme == dr_aligned
6870 || alignment_support_scheme == dr_unaligned_supported))
6871 {
6872 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6873 dataref_offset = build_int_cst (reference_alias_ptr_type
6874 (DR_REF (first_dr)), 0);
8928eff3 6875 inv_p = false;
6876 }
6877 else
6878 dataref_ptr
6879 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6880 offset, &dummy, gsi, &ptr_incr,
6881 simd_lane_access_p, &inv_p,
6882 byte_offset);
6883 }
6884 else if (dataref_offset)
6885 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6886 TYPE_SIZE_UNIT (aggr_type));
ebfd146a 6887 else
6888 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6889 TYPE_SIZE_UNIT (aggr_type));
ebfd146a 6890
0d0293ac 6891 if (grouped_load || slp_perm)
9771b263 6892 dr_chain.create (vec_num);
5ce1ee7f 6893
272c6793 6894 if (load_lanes_p)
ebfd146a 6895 {
6896 tree vec_array;
6897
6898 vec_array = create_vector_array (vectype, vec_num);
6899
6900 /* Emit:
6901 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6902 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6903 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6904 gimple_call_set_lhs (new_stmt, vec_array);
6905 vect_finish_stmt_generation (stmt, new_stmt, gsi);
ebfd146a 6906
6907 /* Extract each vector into an SSA_NAME. */
6908 for (i = 0; i < vec_num; i++)
ebfd146a 6909 {
6910 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6911 vec_array, i);
9771b263 6912 dr_chain.quick_push (new_temp);
6913 }
6914
6915 /* Record the mapping between SSA_NAMEs and statements. */
0d0293ac 6916 vect_record_grouped_load_vectors (stmt, dr_chain);
6917 }
6918 else
6919 {
6920 for (i = 0; i < vec_num; i++)
6921 {
6922 if (i > 0)
6923 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6924 stmt, NULL_TREE);
6925
6926 /* 2. Create the vector-load in the loop. */
6927 switch (alignment_support_scheme)
6928 {
6929 case dr_aligned:
6930 case dr_unaligned_supported:
be1ac4ec 6931 {
6932 unsigned int align, misalign;
6933
272c6793 6934 data_ref
6935 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6936 dataref_offset
6937 ? dataref_offset
6938 : build_int_cst (reference_alias_ptr_type
6939 (DR_REF (first_dr)), 0));
644ffefd 6940 align = TYPE_ALIGN_UNIT (vectype);
6941 if (alignment_support_scheme == dr_aligned)
6942 {
6943 gcc_assert (aligned_access_p (first_dr));
644ffefd 6944 misalign = 0;
6945 }
6946 else if (DR_MISALIGNMENT (first_dr) == -1)
6947 {
6948 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6949 align = TYPE_ALIGN_UNIT (elem_type);
6950 else
6951 align = (get_object_alignment (DR_REF (first_dr))
6952 / BITS_PER_UNIT);
6953 misalign = 0;
6954 TREE_TYPE (data_ref)
6955 = build_aligned_type (TREE_TYPE (data_ref),
52639a61 6956 align * BITS_PER_UNIT);
6957 }
6958 else
6959 {
6960 TREE_TYPE (data_ref)
6961 = build_aligned_type (TREE_TYPE (data_ref),
6962 TYPE_ALIGN (elem_type));
644ffefd 6963 misalign = DR_MISALIGNMENT (first_dr);
272c6793 6964 }
6965 if (dataref_offset == NULL_TREE
6966 && TREE_CODE (dataref_ptr) == SSA_NAME)
6967 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6968 align, misalign);
272c6793 6969 break;
be1ac4ec 6970 }
272c6793 6971 case dr_explicit_realign:
267d3070 6972 {
272c6793 6973 tree ptr, bump;
272c6793 6974
d88981fc 6975 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6976
6977 if (compute_in_loop)
6978 msq = vect_setup_realignment (first_stmt, gsi,
6979 &realignment_token,
6980 dr_explicit_realign,
6981 dataref_ptr, NULL);
6982
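		/* Load the two aligned vectors that straddle the unaligned
		   address: MSQ from floor (addr) here, and LSQ below from
		   floor (addr + VS * ELEM_SIZE - 1); REALIGN_LOAD combines
		   them after this switch.  */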
6983 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6984 ptr = copy_ssa_name (dataref_ptr);
6985 else
6986 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6987 new_stmt = gimple_build_assign
6988 (ptr, BIT_AND_EXPR, dataref_ptr,
6989 build_int_cst
6990 (TREE_TYPE (dataref_ptr),
6991 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6992 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6993 data_ref
6994 = build2 (MEM_REF, vectype, ptr,
6995 build_int_cst (reference_alias_ptr_type
6996 (DR_REF (first_dr)), 0));
6997 vec_dest = vect_create_destination_var (scalar_dest,
6998 vectype);
6999 new_stmt = gimple_build_assign (vec_dest, data_ref);
7000 new_temp = make_ssa_name (vec_dest, new_stmt);
7001 gimple_assign_set_lhs (new_stmt, new_temp);
7002 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7003 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7004 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7005 msq = new_temp;
7006
d88981fc 7007 bump = size_binop (MULT_EXPR, vs,
7b7b1813 7008 TYPE_SIZE_UNIT (elem_type));
d88981fc 7009 bump = size_binop (MINUS_EXPR, bump, size_one_node);
272c6793 7010 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7011 new_stmt = gimple_build_assign
7012 (NULL_TREE, BIT_AND_EXPR, ptr,
7013 build_int_cst
7014 (TREE_TYPE (ptr),
7015 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
aed93b23 7016 ptr = copy_ssa_name (ptr, new_stmt);
7017 gimple_assign_set_lhs (new_stmt, ptr);
7018 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7019 data_ref
7020 = build2 (MEM_REF, vectype, ptr,
7021 build_int_cst (reference_alias_ptr_type
7022 (DR_REF (first_dr)), 0));
7023 break;
267d3070 7024 }
272c6793 7025 case dr_explicit_realign_optimized:
7026 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7027 new_temp = copy_ssa_name (dataref_ptr);
7028 else
7029 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7030 new_stmt = gimple_build_assign
7031 (new_temp, BIT_AND_EXPR, dataref_ptr,
7032 build_int_cst
7033 (TREE_TYPE (dataref_ptr),
7034 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7035 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7036 data_ref
7037 = build2 (MEM_REF, vectype, new_temp,
7038 build_int_cst (reference_alias_ptr_type
7039 (DR_REF (first_dr)), 0));
7040 break;
7041 default:
7042 gcc_unreachable ();
7043 }
ebfd146a 7044 vec_dest = vect_create_destination_var (scalar_dest, vectype);
272c6793 7045 new_stmt = gimple_build_assign (vec_dest, data_ref);
7046 new_temp = make_ssa_name (vec_dest, new_stmt);
7047 gimple_assign_set_lhs (new_stmt, new_temp);
7048 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7049
7050 /* 3. Handle explicit realignment if necessary/supported.
7051 Create in loop:
7052 vec_dest = realign_load (msq, lsq, realignment_token) */
7053 if (alignment_support_scheme == dr_explicit_realign_optimized
7054 || alignment_support_scheme == dr_explicit_realign)
ebfd146a 7055 {
7056 lsq = gimple_assign_lhs (new_stmt);
7057 if (!realignment_token)
7058 realignment_token = dataref_ptr;
7059 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7060 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7061 msq, lsq, realignment_token);
7062 new_temp = make_ssa_name (vec_dest, new_stmt);
7063 gimple_assign_set_lhs (new_stmt, new_temp);
7064 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7065
7066 if (alignment_support_scheme == dr_explicit_realign_optimized)
7067 {
7068 gcc_assert (phi);
7069 if (i == vec_num - 1 && j == ncopies - 1)
7070 add_phi_arg (phi, lsq,
7071 loop_latch_edge (containing_loop),
9e227d60 7072 UNKNOWN_LOCATION);
7073 msq = lsq;
7074 }
ebfd146a 7075 }
ebfd146a 7076
7077 /* 4. Handle invariant-load. */
7078 if (inv_p && !bb_vinfo)
7079 {
59fd17e3 7080 gcc_assert (!grouped_load);
7081 /* If we have versioned for aliasing or the loop doesn't
7082 have any data dependencies that would preclude this,
7083 then we are sure this is a loop invariant load and
7084 thus we can insert it on the preheader edge. */
7085 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7086 && !nested_in_vect_loop
6b916b36 7087 && hoist_defs_of_uses (stmt, loop))
7088 {
7089 if (dump_enabled_p ())
7090 {
7091 dump_printf_loc (MSG_NOTE, vect_location,
7092 "hoisting out of the vectorized "
7093 "loop: ");
7094 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
a0e35eb0 7095 }
b731b390 7096 tree tem = copy_ssa_name (scalar_dest);
7097 gsi_insert_on_edge_immediate
7098 (loop_preheader_edge (loop),
7099 gimple_build_assign (tem,
7100 unshare_expr
7101 (gimple_assign_rhs1 (stmt))));
7102 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7103 }
7104 else
7105 {
7106 gimple_stmt_iterator gsi2 = *gsi;
7107 gsi_next (&gsi2);
7108 new_temp = vect_init_vector (stmt, scalar_dest,
7109 vectype, &gsi2);
7110 }
59fd17e3 7111 new_stmt = SSA_NAME_DEF_STMT (new_temp);
a0e35eb0 7112 set_vinfo_for_stmt (new_stmt,
310213d4 7113 new_stmt_vec_info (new_stmt, vinfo));
7114 }
7115
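	    /* A negative-step access loads the elements in reverse memory
	       order, so permute the vector back into the scalar order.  */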
7116 if (negative)
7117 {
7118 tree perm_mask = perm_mask_for_reverse (vectype);
7119 new_temp = permute_vec_elements (new_temp, new_temp,
7120 perm_mask, stmt, gsi);
7121 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7122 }
267d3070 7123
272c6793 7124 /* Collect vector loads and later create their permutation in
7125 vect_transform_grouped_load (). */
7126 if (grouped_load || slp_perm)
9771b263 7127 dr_chain.quick_push (new_temp);
267d3070 7128
7129 /* Store vector loads in the corresponding SLP_NODE. */
7130 if (slp && !slp_perm)
9771b263 7131 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
272c6793 7132 }
7133 /* Bump the vector pointer to account for a gap or for excess
7134 elements loaded for a permuted SLP load. */
7135 if (group_gap_adj != 0)
a64b9c26 7136 {
7137 bool ovf;
7138 tree bump
7139 = wide_int_to_tree (sizetype,
7140 wi::smul (TYPE_SIZE_UNIT (elem_type),
7141 group_gap_adj, &ovf));
7142 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7143 stmt, bump);
7144 }
7145 }
7146
7147 if (slp && !slp_perm)
7148 continue;
7149
7150 if (slp_perm)
7151 {
01d8bf07 7152 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7153 slp_node_instance, false))
7154 {
9771b263 7155 dr_chain.release ();
7156 return false;
7157 }
7158 }
7159 else
7160 {
0d0293ac 7161 if (grouped_load)
ebfd146a 7162 {
272c6793 7163 if (!load_lanes_p)
0d0293ac 7164 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
ebfd146a 7165 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7166 }
7167 else
7168 {
7169 if (j == 0)
7170 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7171 else
7172 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7173 prev_stmt_info = vinfo_for_stmt (new_stmt);
7174 }
7175 }
9771b263 7176 dr_chain.release ();
7177 }
7178
7179 return true;
7180}
7181
7182/* Function vect_is_simple_cond.
b8698a0f 7183
7184 Input:
7185 LOOP - the loop that is being vectorized.
7186 COND - Condition that is checked for simple use.
7187
7188 Output:
7189 *COMP_VECTYPE - the vector type for the comparison.
7190
7191 Returns whether a COND can be vectorized. Checks whether
7192 condition operands are supportable using vect_is_simple_use. */
7193
87aab9b2 7194static bool
81c40241 7195vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7196{
7197 tree lhs, rhs;
ebfd146a 7198 enum vect_def_type dt;
e9e1d143 7199 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
ebfd146a 7200
7201 /* Mask case. */
7202 if (TREE_CODE (cond) == SSA_NAME
7203 && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
7204 {
7205 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7206 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7207 &dt, comp_vectype)
7208 || !*comp_vectype
7209 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7210 return false;
7211 return true;
7212 }
7213
7214 if (!COMPARISON_CLASS_P (cond))
7215 return false;
7216
7217 lhs = TREE_OPERAND (cond, 0);
7218 rhs = TREE_OPERAND (cond, 1);
7219
7220 if (TREE_CODE (lhs) == SSA_NAME)
7221 {
355fe088 7222 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
81c40241 7223 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7224 return false;
7225 }
7226 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7227 && TREE_CODE (lhs) != FIXED_CST)
7228 return false;
7229
7230 if (TREE_CODE (rhs) == SSA_NAME)
7231 {
355fe088 7232 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
81c40241 7233 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7234 return false;
7235 }
f7e531cf 7236 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7237 && TREE_CODE (rhs) != FIXED_CST)
7238 return false;
7239
e9e1d143 7240 *comp_vectype = vectype1 ? vectype1 : vectype2;
7241 return true;
7242}
7243
7244/* vectorizable_condition.
7245
7246 Check if STMT is a conditional modify expression that can be vectorized.
7247 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7248 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7249 at GSI.
7250
7251 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7252 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in the
0ad23163 7253 else clause if it is 2).
7254
7255 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7256
4bbe8262 7257bool
7258vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7259 gimple **vec_stmt, tree reduc_def, int reduc_index,
f7e531cf 7260 slp_tree slp_node)
7261{
7262 tree scalar_dest = NULL_TREE;
7263 tree vec_dest = NULL_TREE;
7264 tree cond_expr, then_clause, else_clause;
7265 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
df11cc78 7266 tree comp_vectype = NULL_TREE;
7267 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7268 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7269 tree vec_compare, vec_cond_expr;
7270 tree new_temp;
7271 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a855b1b1 7272 enum vect_def_type dt, dts[4];
f7e531cf 7273 int ncopies;
ebfd146a 7274 enum tree_code code;
a855b1b1 7275 stmt_vec_info prev_stmt_info = NULL;
7276 int i, j;
7277 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7278 vec<tree> vec_oprnds0 = vNULL;
7279 vec<tree> vec_oprnds1 = vNULL;
7280 vec<tree> vec_oprnds2 = vNULL;
7281 vec<tree> vec_oprnds3 = vNULL;
74946978 7282 tree vec_cmp_type;
a414c77f 7283 bool masked = false;
b8698a0f 7284
7285 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7286 return false;
7287
7288 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7289 {
7290 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7291 return false;
ebfd146a 7292
7293 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7294 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7295 && reduc_def))
7296 return false;
ebfd146a 7297
7298 /* FORNOW: not yet supported. */
7299 if (STMT_VINFO_LIVE_P (stmt_info))
7300 {
7301 if (dump_enabled_p ())
7302 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7303 "value used after loop.\n");
7304 return false;
7305 }
7306 }
7307
7308 /* Is vectorizable conditional operation? */
7309 if (!is_gimple_assign (stmt))
7310 return false;
7311
7312 code = gimple_assign_rhs_code (stmt);
7313
7314 if (code != COND_EXPR)
7315 return false;
7316
7317 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7318 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7319
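  /* For SLP the number of copies is accounted for in the SLP instance;
     otherwise VF / NUNITS copies are needed to cover one iteration of the
     vectorized loop.  */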
7320 if (slp_node || PURE_SLP_STMT (stmt_info))
7321 ncopies = 1;
7322 else
7323 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7324
7325 gcc_assert (ncopies >= 1);
7326 if (reduc_index && ncopies > 1)
7327 return false; /* FORNOW */
7328
7329 cond_expr = gimple_assign_rhs1 (stmt);
7330 then_clause = gimple_assign_rhs2 (stmt);
7331 else_clause = gimple_assign_rhs3 (stmt);
ebfd146a 7332
81c40241 7333 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
e9e1d143 7334 || !comp_vectype)
7335 return false;
7336
7337 gimple *def_stmt;
7338 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
ebfd146a 7339 return false;
81c40241 7340 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7341 return false;
7342
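  /* If the condition is already a boolean vector (a mask), the
     VEC_COND_EXPR can consume it directly; otherwise the comparison is
     computed into a same-sized boolean vector first.  */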
7343 if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
7344 {
7345 vec_cmp_type = comp_vectype;
7346 masked = true;
7347 }
7348 else
7349 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7350 if (vec_cmp_type == NULL_TREE)
7351 return false;
784fb9b3 7352
b8698a0f 7353 if (!vec_stmt)
7354 {
7355 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
e9e1d143 7356 return expand_vec_cond_expr_p (vectype, comp_vectype);
ebfd146a
IR
7357 }
7358
7359 /* Transform. */
7360
7361 if (!slp_node)
7362 {
7363 vec_oprnds0.create (1);
7364 vec_oprnds1.create (1);
7365 vec_oprnds2.create (1);
7366 vec_oprnds3.create (1);
f7e531cf 7367 }
7368
7369 /* Handle def. */
7370 scalar_dest = gimple_assign_lhs (stmt);
7371 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7372
7373 /* Handle cond expr. */
7374 for (j = 0; j < ncopies; j++)
7375 {
538dd0b7 7376 gassign *new_stmt = NULL;
7377 if (j == 0)
7378 {
7379 if (slp_node)
7380 {
7381 auto_vec<tree, 4> ops;
7382 auto_vec<vec<tree>, 4> vec_defs;
9771b263 7383
7384 if (masked)
7385 ops.safe_push (cond_expr);
7386 else
7387 {
7388 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7389 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7390 }
7391 ops.safe_push (then_clause);
7392 ops.safe_push (else_clause);
f7e531cf 7393 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7394 vec_oprnds3 = vec_defs.pop ();
7395 vec_oprnds2 = vec_defs.pop ();
7396 if (!masked)
7397 vec_oprnds1 = vec_defs.pop ();
37b5ec8f 7398 vec_oprnds0 = vec_defs.pop ();
f7e531cf 7399
7400 ops.release ();
7401 vec_defs.release ();
7402 }
7403 else
7404 {
355fe088 7405 gimple *gtemp;
7406 if (masked)
7407 {
7408 vec_cond_lhs
7409 = vect_get_vec_def_for_operand (cond_expr, stmt,
7410 comp_vectype);
7411 vect_is_simple_use (cond_expr, stmt_info->vinfo,
7412 &gtemp, &dts[0]);
7413 }
7414 else
7415 {
7416 vec_cond_lhs =
7417 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7418 stmt, comp_vectype);
7419 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7420 loop_vinfo, &gtemp, &dts[0]);
7421
7422 vec_cond_rhs =
7423 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7424 stmt, comp_vectype);
7425 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7426 loop_vinfo, &gtemp, &dts[1]);
7427 }
7428 if (reduc_index == 1)
7429 vec_then_clause = reduc_def;
7430 else
7431 {
7432 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7433 stmt);
7434 vect_is_simple_use (then_clause, loop_vinfo,
7435 &gtemp, &dts[2]);
7436 }
7437 if (reduc_index == 2)
7438 vec_else_clause = reduc_def;
7439 else
7440 {
7441 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7442 stmt);
7443 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
f7e531cf 7444 }
7445 }
7446 }
7447 else
7448 {
7449 vec_cond_lhs
7450 = vect_get_vec_def_for_stmt_copy (dts[0],
7451 vec_oprnds0.pop ());
7452 if (!masked)
7453 vec_cond_rhs
7454 = vect_get_vec_def_for_stmt_copy (dts[1],
7455 vec_oprnds1.pop ());
7456
a855b1b1 7457 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
9771b263 7458 vec_oprnds2.pop ());
a855b1b1 7459 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
9771b263 7460 vec_oprnds3.pop ());
7461 }
7462
7463 if (!slp_node)
7464 {
9771b263 7465 vec_oprnds0.quick_push (vec_cond_lhs);
7466 if (!masked)
7467 vec_oprnds1.quick_push (vec_cond_rhs);
7468 vec_oprnds2.quick_push (vec_then_clause);
7469 vec_oprnds3.quick_push (vec_else_clause);
7470 }
7471
9dc3f7de 7472 /* Arguments are ready. Create the new vector stmt. */
9771b263 7473 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
f7e531cf 7474 {
7475 vec_then_clause = vec_oprnds2[i];
7476 vec_else_clause = vec_oprnds3[i];
a855b1b1 7477
7478 if (masked)
7479 vec_compare = vec_cond_lhs;
7480 else
7481 {
7482 vec_cond_rhs = vec_oprnds1[i];
7483 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7484 vec_cond_lhs, vec_cond_rhs);
7485 }
7486 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7487 vec_compare, vec_then_clause, vec_else_clause);
a855b1b1 7488
7489 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7490 new_temp = make_ssa_name (vec_dest, new_stmt);
7491 gimple_assign_set_lhs (new_stmt, new_temp);
7492 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7493 if (slp_node)
9771b263 7494 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7495 }
7496
7497 if (slp_node)
7498 continue;
7499
7500 if (j == 0)
7501 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7502 else
7503 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7504
7505 prev_stmt_info = vinfo_for_stmt (new_stmt);
a855b1b1 7506 }
b8698a0f 7507
7508 vec_oprnds0.release ();
7509 vec_oprnds1.release ();
7510 vec_oprnds2.release ();
7511 vec_oprnds3.release ();
f7e531cf 7512
7513 return true;
7514}
7515
7516/* vectorizable_comparison.
7517
7518 Check if STMT is a comparison expression that can be vectorized.
7519 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7520 comparison, put it in VEC_STMT, and insert it at GSI.
7521
7522 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7523
7524bool
7525vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
7526 gimple **vec_stmt, tree reduc_def,
7527 slp_tree slp_node)
7528{
7529 tree lhs, rhs1, rhs2;
7530 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7531 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7532 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7533 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
7534 tree new_temp;
7535 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7536 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
7537 unsigned nunits;
7538 int ncopies;
7539 enum tree_code code;
7540 stmt_vec_info prev_stmt_info = NULL;
7541 int i, j;
7542 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7543 vec<tree> vec_oprnds0 = vNULL;
7544 vec<tree> vec_oprnds1 = vNULL;
7545 gimple *def_stmt;
7546 tree mask_type;
7547 tree mask;
7548
7549 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
7550 return false;
7551
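  /* The statement's vectype is the boolean vector produced by the
     comparison; it also serves as the mask type.  */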
7552 mask_type = vectype;
7553 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7554
7555 if (slp_node || PURE_SLP_STMT (stmt_info))
7556 ncopies = 1;
7557 else
7558 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7559
7560 gcc_assert (ncopies >= 1);
7561 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7562 return false;
7563
7564 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7565 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7566 && reduc_def))
7567 return false;
7568
7569 if (STMT_VINFO_LIVE_P (stmt_info))
7570 {
7571 if (dump_enabled_p ())
7572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7573 "value used after loop.\n");
7574 return false;
7575 }
7576
7577 if (!is_gimple_assign (stmt))
7578 return false;
7579
7580 code = gimple_assign_rhs_code (stmt);
7581
7582 if (TREE_CODE_CLASS (code) != tcc_comparison)
7583 return false;
7584
7585 rhs1 = gimple_assign_rhs1 (stmt);
7586 rhs2 = gimple_assign_rhs2 (stmt);
7587
7588 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
7589 &dts[0], &vectype1))
7590 return false;
7591
7592 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
7593 &dts[1], &vectype2))
7594 return false;
7595
7596 if (vectype1 && vectype2
7597 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7598 return false;
7599
7600 vectype = vectype1 ? vectype1 : vectype2;
7601
7602 /* Invariant comparison. */
7603 if (!vectype)
7604 {
7605 vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
7606 if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
7607 return false;
7608 }
7609 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
7610 return false;
7611
7612 if (!vec_stmt)
7613 {
7614 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
7615 vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
7616 return expand_vec_cmp_expr_p (vectype, mask_type);
7617 }
7618
7619 /* Transform. */
7620 if (!slp_node)
7621 {
7622 vec_oprnds0.create (1);
7623 vec_oprnds1.create (1);
7624 }
7625
7626 /* Handle def. */
7627 lhs = gimple_assign_lhs (stmt);
7628 mask = vect_create_destination_var (lhs, mask_type);
7629
7630 /* Handle cmp expr. */
7631 for (j = 0; j < ncopies; j++)
7632 {
7633 gassign *new_stmt = NULL;
7634 if (j == 0)
7635 {
7636 if (slp_node)
7637 {
7638 auto_vec<tree, 2> ops;
7639 auto_vec<vec<tree>, 2> vec_defs;
7640
7641 ops.safe_push (rhs1);
7642 ops.safe_push (rhs2);
7643 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7644 vec_oprnds1 = vec_defs.pop ();
7645 vec_oprnds0 = vec_defs.pop ();
7646 }
7647 else
7648 {
7649 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
7650 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
7651 }
7652 }
7653 else
7654 {
7655 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
7656 vec_oprnds0.pop ());
7657 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
7658 vec_oprnds1.pop ());
7659 }
7660
7661 if (!slp_node)
7662 {
7663 vec_oprnds0.quick_push (vec_rhs1);
7664 vec_oprnds1.quick_push (vec_rhs2);
7665 }
7666
7667 /* Arguments are ready. Create the new vector stmt. */
7668 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
7669 {
7670 vec_rhs2 = vec_oprnds1[i];
7671
7672 new_temp = make_ssa_name (mask);
7673 new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
7674 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7675 if (slp_node)
7676 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7677 }
7678
7679 if (slp_node)
7680 continue;
7681
7682 if (j == 0)
7683 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7684 else
7685 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7686
7687 prev_stmt_info = vinfo_for_stmt (new_stmt);
7688 }
7689
7690 vec_oprnds0.release ();
7691 vec_oprnds1.release ();
7692
7693 return true;
7694}
ebfd146a 7695
8644a673 7696/* Make sure the statement is vectorizable. */
7697
7698bool
355fe088 7699vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
ebfd146a 7700{
8644a673 7701 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
a70d6342 7702 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 7703 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
ebfd146a 7704 bool ok;
a70d6342 7705 tree scalar_type, vectype;
355fe088 7706 gimple *pattern_stmt;
363477c0 7707 gimple_seq pattern_def_seq;
ebfd146a 7708
73fbfcad 7709 if (dump_enabled_p ())
ebfd146a 7710 {
7711 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7712 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8644a673 7713 }
ebfd146a 7714
1825a1f3 7715 if (gimple_has_volatile_ops (stmt))
b8698a0f 7716 {
73fbfcad 7717 if (dump_enabled_p ())
78c60e3d 7718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 7719 "not vectorized: stmt has volatile operands\n");
7720
7721 return false;
7722 }
7723
7724 /* Skip stmts that do not need to be vectorized. In loops this is expected
7725 to include:
7726 - the COND_EXPR which is the loop exit condition
7727 - any LABEL_EXPRs in the loop
b8698a0f 7728 - computations that are used only for array indexing or loop control.
8644a673 7729 In basic blocks we only analyze statements that are a part of some SLP
83197f37 7730 instance, therefore, all the statements are relevant.
ebfd146a 7731
d092494c 7732 Pattern statement needs to be analyzed instead of the original statement
83197f37 7733 if the original statement is not relevant. Otherwise, we analyze both
7734 statements. In basic blocks we are called from some SLP instance
7735 traversal, don't analyze pattern stmts instead, the pattern stmts
7736 already will be part of SLP instance. */
7737
7738 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
b8698a0f 7739 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 7740 && !STMT_VINFO_LIVE_P (stmt_info))
ebfd146a 7741 {
9d5e7640 7742 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
83197f37 7743 && pattern_stmt
7744 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7745 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7746 {
83197f37 7747 /* Analyze PATTERN_STMT instead of the original stmt. */
7748 stmt = pattern_stmt;
7749 stmt_info = vinfo_for_stmt (pattern_stmt);
73fbfcad 7750 if (dump_enabled_p ())
9d5e7640 7751 {
7752 dump_printf_loc (MSG_NOTE, vect_location,
7753 "==> examining pattern statement: ");
7754 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7755 }
7756 }
7757 else
7758 {
73fbfcad 7759 if (dump_enabled_p ())
e645e942 7760 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
ebfd146a 7761
7762 return true;
7763 }
8644a673 7764 }
83197f37 7765 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
079c527f 7766 && node == NULL
7767 && pattern_stmt
7768 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7769 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7770 {
7771 /* Analyze PATTERN_STMT too. */
73fbfcad 7772 if (dump_enabled_p ())
83197f37 7773 {
7774 dump_printf_loc (MSG_NOTE, vect_location,
7775 "==> examining pattern statement: ");
7776 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7777 }
7778
7779 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7780 return false;
7781 }
ebfd146a 7782
1107f3ae 7783 if (is_pattern_stmt_p (stmt_info)
079c527f 7784 && node == NULL
363477c0 7785 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
1107f3ae 7786 {
363477c0 7787 gimple_stmt_iterator si;
1107f3ae 7788
7789 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7790 {
355fe088 7791 gimple *pattern_def_stmt = gsi_stmt (si);
7792 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7793 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7794 {
7795 /* Analyze def stmt of STMT if it's a pattern stmt. */
73fbfcad 7796 if (dump_enabled_p ())
363477c0 7797 {
7798 dump_printf_loc (MSG_NOTE, vect_location,
7799 "==> examining pattern def statement: ");
7800 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
363477c0 7801 }
1107f3ae 7802
7803 if (!vect_analyze_stmt (pattern_def_stmt,
7804 need_to_vectorize, node))
7805 return false;
7806 }
7807 }
7808 }
1107f3ae 7809
7810 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7811 {
7812 case vect_internal_def:
7813 break;
ebfd146a 7814
8644a673 7815 case vect_reduction_def:
7c5222ff 7816 case vect_nested_cycle:
7817 gcc_assert (!bb_vinfo
7818 && (relevance == vect_used_in_outer
7819 || relevance == vect_used_in_outer_by_reduction
7820 || relevance == vect_used_by_reduction
7821 || relevance == vect_unused_in_scope));
7822 break;
7823
7824 case vect_induction_def:
7825 case vect_constant_def:
7826 case vect_external_def:
7827 case vect_unknown_def_type:
7828 default:
7829 gcc_unreachable ();
7830 }
ebfd146a 7831
7832 if (bb_vinfo)
7833 {
7834 gcc_assert (PURE_SLP_STMT (stmt_info));
7835
b690cc0f 7836 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
73fbfcad 7837 if (dump_enabled_p ())
a70d6342 7838 {
7839 dump_printf_loc (MSG_NOTE, vect_location,
7840 "get vectype for scalar type: ");
7841 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
e645e942 7842 dump_printf (MSG_NOTE, "\n");
7843 }
7844
7845 vectype = get_vectype_for_scalar_type (scalar_type);
7846 if (!vectype)
7847 {
73fbfcad 7848 if (dump_enabled_p ())
a70d6342 7849 {
7850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7851 "not SLPed: unsupported data-type ");
7852 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7853 scalar_type);
e645e942 7854 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7855 }
7856 return false;
7857 }
7858
73fbfcad 7859 if (dump_enabled_p ())
a70d6342 7860 {
7861 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7862 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
e645e942 7863 dump_printf (MSG_NOTE, "\n");
7864 }
7865
7866 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7867 }
7868
8644a673 7869 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 7870 {
8644a673 7871 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7872 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7873 || (is_gimple_call (stmt)
7874 && gimple_call_lhs (stmt) == NULL_TREE));
8644a673 7875 *need_to_vectorize = true;
7876 }
7877
7878 if (PURE_SLP_STMT (stmt_info) && !node)
7879 {
7880 dump_printf_loc (MSG_NOTE, vect_location,
7881 "handled only by SLP analysis\n");
7882 return true;
7883 }
7884
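  /* Try each vectorizable_* analysis routine in turn; the one that
     recognizes STMT records its STMT_VINFO_TYPE for the transform phase.  */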
7885 ok = true;
7886 if (!bb_vinfo
7887 && (STMT_VINFO_RELEVANT_P (stmt_info)
7888 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7889 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7890 || vectorizable_conversion (stmt, NULL, NULL, node)
7891 || vectorizable_shift (stmt, NULL, NULL, node)
7892 || vectorizable_operation (stmt, NULL, NULL, node)
7893 || vectorizable_assignment (stmt, NULL, NULL, node)
7894 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7895 || vectorizable_call (stmt, NULL, NULL, node)
7896 || vectorizable_store (stmt, NULL, NULL, node)
7897 || vectorizable_reduction (stmt, NULL, NULL, node)
7898 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
7899 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
7900 else
7901 {
7902 if (bb_vinfo)
7903 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7904 || vectorizable_conversion (stmt, NULL, NULL, node)
7905 || vectorizable_shift (stmt, NULL, NULL, node)
7906 || vectorizable_operation (stmt, NULL, NULL, node)
7907 || vectorizable_assignment (stmt, NULL, NULL, node)
7908 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7909 || vectorizable_call (stmt, NULL, NULL, node)
7910 || vectorizable_store (stmt, NULL, NULL, node)
7911 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
7912 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
b1af7da6 7913 }
7914
7915 if (!ok)
ebfd146a 7916 {
73fbfcad 7917 if (dump_enabled_p ())
8644a673 7918 {
7919 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7920 "not vectorized: relevant stmt not ");
7921 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7922 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8644a673 7923 }
b8698a0f 7924
7925 return false;
7926 }
7927
7928 if (bb_vinfo)
7929 return true;
7930
7931 /* Stmts that are (also) "live" (i.e. used outside the loop)
7932 need extra handling, except for vectorizable reductions. */
7933 if (STMT_VINFO_LIVE_P (stmt_info)
7934 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7935 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 7936
8644a673 7937 if (!ok)
ebfd146a 7938 {
73fbfcad 7939 if (dump_enabled_p ())
8644a673 7940 {
7941 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7942 "not vectorized: live stmt not ");
7943 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7944 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8644a673 7945 }
b8698a0f 7946
8644a673 7947 return false;
7948 }
7949
7950 return true;
7951}
7952
7953
7954/* Function vect_transform_stmt.
7955
7956 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7957
7958bool
355fe088 7959vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
0d0293ac 7960 bool *grouped_store, slp_tree slp_node,
7961 slp_instance slp_node_instance)
7962{
7963 bool is_store = false;
355fe088 7964 gimple *vec_stmt = NULL;
ebfd146a 7965 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
ebfd146a 7966 bool done;
ebfd146a 7967
355fe088 7968 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
225ce44b 7969
7970 switch (STMT_VINFO_TYPE (stmt_info))
7971 {
7972 case type_demotion_vec_info_type:
ebfd146a 7973 case type_promotion_vec_info_type:
7974 case type_conversion_vec_info_type:
7975 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7976 gcc_assert (done);
7977 break;
7978
7979 case induc_vec_info_type:
7980 gcc_assert (!slp_node);
7981 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7982 gcc_assert (done);
7983 break;
7984
7985 case shift_vec_info_type:
7986 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7987 gcc_assert (done);
7988 break;
7989
7990 case op_vec_info_type:
7991 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7992 gcc_assert (done);
7993 break;
7994
7995 case assignment_vec_info_type:
7996 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7997 gcc_assert (done);
7998 break;
7999
8000 case load_vec_info_type:
b8698a0f 8001 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8002 slp_node_instance);
8003 gcc_assert (done);
8004 break;
8005
8006 case store_vec_info_type:
8007 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8008 gcc_assert (done);
0d0293ac 8009 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8010 {
8011 /* In case of interleaving, the whole chain is vectorized when the
ff802fa1 8012 last store in the chain is reached. Store stmts before the last
ebfd146a
IR
8013 one are skipped, and there vec_stmt_info shouldn't be freed
8014 meanwhile. */
0d0293ac 8015 *grouped_store = true;
8016 if (STMT_VINFO_VEC_STMT (stmt_info))
8017 is_store = true;
8018 }
8019 else
8020 is_store = true;
8021 break;
8022
8023 case condition_vec_info_type:
f7e531cf 8024 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8025 gcc_assert (done);
8026 break;
8027
8028 case comparison_vec_info_type:
8029 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8030 gcc_assert (done);
8031 break;
8032
ebfd146a 8033 case call_vec_info_type:
190c2236 8034 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
039d9ea1 8035 stmt = gsi_stmt (*gsi);
8036 if (is_gimple_call (stmt)
8037 && gimple_call_internal_p (stmt)
8038 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
8039 is_store = true;
8040 break;
8041
8042 case call_simd_clone_vec_info_type:
8043 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8044 stmt = gsi_stmt (*gsi);
8045 break;
8046
ebfd146a 8047 case reduc_vec_info_type:
b5aeb3bb 8048 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
8049 gcc_assert (done);
8050 break;
8051
8052 default:
8053 if (!STMT_VINFO_LIVE_P (stmt_info))
8054 {
73fbfcad 8055 if (dump_enabled_p ())
78c60e3d 8056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 8057 "stmt not supported.\n");
8058 gcc_unreachable ();
8059 }
8060 }
8061
8062 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8063 This would break hybrid SLP vectorization. */
8064 if (slp_node)
8065 gcc_assert (!vec_stmt
8066 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
225ce44b 8067
8068 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8069 is being vectorized, but outside the immediately enclosing loop. */
8070 if (vec_stmt
8071 && STMT_VINFO_LOOP_VINFO (stmt_info)
8072 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8073 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8074 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8075 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
b8698a0f 8076 || STMT_VINFO_RELEVANT (stmt_info) ==
a70d6342 8077 vect_used_in_outer_by_reduction))
ebfd146a 8078 {
a70d6342
IR
8079 struct loop *innerloop = LOOP_VINFO_LOOP (
8080 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
ebfd146a
IR
8081 imm_use_iterator imm_iter;
8082 use_operand_p use_p;
8083 tree scalar_dest;
355fe088 8084 gimple *exit_phi;
ebfd146a 8085
73fbfcad 8086 if (dump_enabled_p ())
78c60e3d 8087 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 8088 "Record the vdef for outer-loop vectorization.\n");
ebfd146a
IR
8089
8090 /* Find the relevant loop-exit phi-node, and reord the vec_stmt there
8091 (to be used when vectorizing outer-loop stmts that use the DEF of
8092 STMT). */
8093 if (gimple_code (stmt) == GIMPLE_PHI)
8094 scalar_dest = PHI_RESULT (stmt);
8095 else
8096 scalar_dest = gimple_assign_lhs (stmt);
8097
8098 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8099 {
8100 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8101 {
8102 exit_phi = USE_STMT (use_p);
8103 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8104 }
8105 }
8106 }
8107
8108 /* Handle stmts whose DEF is used outside the loop-nest that is
8109 being vectorized. */
8110 if (STMT_VINFO_LIVE_P (stmt_info)
8111 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8112 {
8113 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
8114 gcc_assert (done);
8115 }
8116
8117 if (vec_stmt)
83197f37 8118 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
ebfd146a 8119
b8698a0f 8120 return is_store;
ebfd146a
IR
8121}


/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}


/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}


/* Create a hash table for stmt_vec_info. */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info. */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}


/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
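
/* Usage sketch for the function above (illustrative only; the result is
   target-dependent): on a target whose preferred SIMD mode for SImode
   is V4SImode,

     tree v16 = get_vectype_for_scalar_type_and_size (intSI_type_node, 16);
     tree vpref = get_vectype_for_scalar_type_and_size (intSI_type_node, 0);

   both yield a "vector(4) int" type; passing a SIZE of 0 defers the
   choice of vector size to targetm.vectorize.preferred_simd_mode.  */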

unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
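
/* Example (illustrative; assumes a target whose preferred SIMD mode for
   SImode is V4SImode): the first successful call, say

     tree v = get_vectype_for_scalar_type (integer_type_node);

   returns a "vector(4) int" type and latches current_vector_size to 16,
   so subsequent calls for other scalar types build 16-byte vector types
   as well, keeping all vector types used in one region the same size.  */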

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
	*dt = vect_external_def;
      else
	*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
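
/* Classification sketch for the function above (hypothetical GIMPLE):
   given  x_3 = _2 + n_1(D)  inside the vectorized region, asking about
   n_1(D) (a default definition) yields *DT == vect_external_def, a
   constant operand yields vect_constant_def, and _2, defined by a stmt
   inside the region, gets that def stmt's STMT_VINFO_DEF_TYPE, e.g.
   vect_internal_def.  */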

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT,
   WIDEN_MULT, WIDEN_LSHIFT, DOT_PROD and SAD.  This function checks if
   these operations are supported by the target platform either directly
   (via vector tree-codes), or via target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
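
/* Illustration (hypothetical scalar loop; A, B and N are assumed names):

     char A[N];  int B[N];
     for (i = 0; i < N; i++)
       B[i] = (int) A[i];		<-- NOP conversion, char -> int

   On a target providing only single-step unpacks this is a two-step
   widening char -> short -> int: MULTI_STEP_CVT is 1, INTERM_TYPES
   holds the short vector type, and VEC_UNPACK_LO/HI_EXPR is applied
   at each step.  */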

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
       || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode,
					  TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
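
/* Illustration (hypothetical scalar loop; A, B and N are assumed names):

     int A[N];  char B[N];
     for (i = 0; i < N; i++)
       B[i] = (char) A[i];		<-- NOP conversion, int -> char

   On a target providing only single-step packs this is a two-step
   narrowing int -> short -> char: MULTI_STEP_CVT is 1, INTERM_TYPES
   holds the short vector type, and VEC_PACK_TRUNC_EXPR is used at
   each step.  */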

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}