gcc/tree-vect-stmts.c

/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p,
                    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern; in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is not a pattern use.  If LHS has other uses that are
             pattern uses, we should mark the stmt itself, and not the pattern
             stmt.  */
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
              {
                if (is_gimple_debug (USE_STMT (use_p)))
                  continue;
                use_stmt = USE_STMT (use_p);

                if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                  continue;

                if (vinfo_for_stmt (use_stmt)
                    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                  {
                    found = true;
                    break;
                  }
              }
        }

      if (!found)
        {
          /* This is the last stmt in a sequence that was detected as a
             pattern that can potentially be vectorized.  Don't mark the stmt
             as relevant/live because it's not going to be vectorized.
             Instead mark the pattern-stmt that replaces it.  */

          pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "last stmt in pattern. don't mark"
                             " relevant/live.\n");
          stmt_info = vinfo_for_stmt (pattern_stmt);
          gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
          save_relevant = STMT_VINFO_RELEVANT (stmt_info);
          save_live_p = STMT_VINFO_LIVE_P (stmt_info);
          stmt = pattern_stmt;
        }
    }

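  /* Note that relevance and liveness are only ever raised below,
     never lowered.  */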
  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
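      /* For internal masked load/store calls the pointer and alignment
         arguments compute the address; the switch below checks the mask
         (argument 2) and, for IFN_MASK_STORE, the stored value
         (argument 3), which are the only operands not used for
         indexing.  */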
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_STORE:
            operand = gimple_call_arg (stmt, 3);
            if (operand == use)
              return true;
            /* FALLTHRU */
          case IFN_MASK_LOAD:
            operand = gimple_call_arg (stmt, 2);
            if (operand == use)
              return true;
            break;
          default:
            break;
          }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, vec<gimple *> *worklist,
             bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
             || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
                      is_pattern_stmt_p (stmt_vinfo));
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p, false);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "unsupported use of reduction.\n");
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of nested cycle.\n");

                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of double reduction.\n");

                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    live_p, relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       live_p, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          tree off;
          tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
          gcc_assert (decl);
          if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

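  /* E.g. a two-step promotion (PWR = 1) is costed below as
     vect_pow2 (1) + vect_pow2 (2) = 6 vec_promote_demote stmts,
     since each step operates on twice as many vectors as the
     previous one.  */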
  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
            (i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,
                                    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}


/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node,
                       stmt_vector_for_cost *prologue_cost_vec,
                       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
         needed permute.  */
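      /* E.g. with ncopies = 1 and group_size = 4 this counts
         ceil_log2 (4) * 4 = 8 vec_perm stmts.  */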
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
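        /* Record a prohibitively high cost so this access scheme is
           not selected.  */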
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
                      bool load_lanes_p, slp_tree slp_node,
                      stmt_vector_for_cost *prologue_cost_vec,
                      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
         for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
                        ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
                         || group_size > 1 || slp_node),
                        &inside_cost, &prologue_cost,
                        prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   a vector type, a vector with all elements equal to VAL is created first.
   Place the initialization at GSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
        {
          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))
            {
              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;
              else
                {
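                  /* Build NEW_TEMP = VAL ? ALL-ONES : 0 in the boolean
                     element type.  */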
1317 new_temp = make_ssa_name (TREE_TYPE (type));
1318 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1319 val, true_val, false_val);
1320 vect_init_vector_1 (stmt, init_stmt, gsi);
1321 val = new_temp;
1322 }
1323 }
1324 else if (CONSTANT_CLASS_P (val))
42fd8198 1325 val = fold_convert (TREE_TYPE (type), val);
418b7df3
RG
1326 else
1327 {
b731b390 1328 new_temp = make_ssa_name (TREE_TYPE (type));
0d0e4a03 1329 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
418b7df3 1330 vect_init_vector_1 (stmt, init_stmt, gsi);
5467ee52 1331 val = new_temp;
418b7df3
RG
1332 }
1333 }
5467ee52 1334 val = build_vector_from_val (type, val);
418b7df3
RG
1335 }
1336
0e22bb5a
RB
1337 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1338 init_stmt = gimple_build_assign (new_temp, val);
418b7df3 1339 vect_init_vector_1 (stmt, init_stmt, gsi);
0e22bb5a 1340 return new_temp;
ebfd146a
IR
1341}
1342
a70d6342 1343
ebfd146a
IR
1344/* Function vect_get_vec_def_for_operand.
1345
ff802fa1 1346 OP is an operand in STMT. This function returns a (vector) def that will be
ebfd146a
IR
1347 used in the vectorized stmt for STMT.
1348
1349 In the case that OP is an SSA_NAME which is defined in the loop, then
1350 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1351
1352 In case OP is an invariant or constant, a new stmt that creates a vector def
42fd8198
IE
1353 needs to be introduced. VECTYPE may be used to specify a required type for
1354 vector invariant. */
ebfd146a
IR
1355
1356tree
42fd8198 1357vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
ebfd146a
IR
1358{
1359 tree vec_oprnd;
355fe088
TS
1360 gimple *vec_stmt;
1361 gimple *def_stmt;
ebfd146a
IR
1362 stmt_vec_info def_stmt_info = NULL;
1363 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
42fd8198 1364 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
ebfd146a 1365 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
ebfd146a
IR
1366 enum vect_def_type dt;
1367 bool is_simple_use;
1368 tree vector_type;
1369
73fbfcad 1370 if (dump_enabled_p ())
ebfd146a 1371 {
78c60e3d
SS
1372 dump_printf_loc (MSG_NOTE, vect_location,
1373 "vect_get_vec_def_for_operand: ");
1374 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
e645e942 1375 dump_printf (MSG_NOTE, "\n");
ebfd146a
IR
1376 }
1377
81c40241 1378 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
ebfd146a 1379 gcc_assert (is_simple_use);
73fbfcad 1380 if (dump_enabled_p ())
ebfd146a 1381 {
78c60e3d 1382 int loc_printed = 0;
ebfd146a
IR
1383 if (def_stmt)
1384 {
78c60e3d
SS
1385 if (loc_printed)
1386 dump_printf (MSG_NOTE, " def_stmt = ");
1387 else
1388 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1389 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
ebfd146a
IR
1390 }
1391 }
1392
1393 switch (dt)
1394 {
81c40241 1395 /* operand is a constant or a loop invariant. */
ebfd146a 1396 case vect_constant_def:
81c40241 1397 case vect_external_def:
ebfd146a 1398 {
42fd8198
IE
1399 if (vectype)
1400 vector_type = vectype;
1401 else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
1402 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1403 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1404 else
1405 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1406
7569a6cc 1407 gcc_assert (vector_type);
418b7df3 1408 return vect_init_vector (stmt, op, vector_type, NULL);
ebfd146a
IR
1409 }
1410
81c40241 1411 /* operand is defined inside the loop. */
8644a673 1412 case vect_internal_def:
ebfd146a 1413 {
ebfd146a
IR
1414 /* Get the def from the vectorized stmt. */
1415 def_stmt_info = vinfo_for_stmt (def_stmt);
83197f37 1416
ebfd146a 1417 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
83197f37
IR
1418 /* Get vectorized pattern statement. */
1419 if (!vec_stmt
1420 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1421 && !STMT_VINFO_RELEVANT (def_stmt_info))
1422 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1423 STMT_VINFO_RELATED_STMT (def_stmt_info)));
ebfd146a
IR
1424 gcc_assert (vec_stmt);
1425 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1426 vec_oprnd = PHI_RESULT (vec_stmt);
1427 else if (is_gimple_call (vec_stmt))
1428 vec_oprnd = gimple_call_lhs (vec_stmt);
1429 else
1430 vec_oprnd = gimple_assign_lhs (vec_stmt);
1431 return vec_oprnd;
1432 }
1433
81c40241 1434 /* operand is defined by a loop header phi - reduction */
ebfd146a 1435 case vect_reduction_def:
06066f92 1436 case vect_double_reduction_def:
7c5222ff 1437 case vect_nested_cycle:
81c40241
RB
1438 /* Code should use get_initial_def_for_reduction. */
1439 gcc_unreachable ();
ebfd146a 1440
81c40241 1441 /* operand is defined by loop-header phi - induction. */
ebfd146a
IR
1442 case vect_induction_def:
1443 {
1444 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1445
1446 /* Get the def from the vectorized stmt. */
1447 def_stmt_info = vinfo_for_stmt (def_stmt);
1448 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
6dbbece6
RG
1449 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1450 vec_oprnd = PHI_RESULT (vec_stmt);
1451 else
1452 vec_oprnd = gimple_get_lhs (vec_stmt);
ebfd146a
IR
1453 return vec_oprnd;
1454 }
1455
1456 default:
1457 gcc_unreachable ();
1458 }
1459}
1460
1461
1462/* Function vect_get_vec_def_for_stmt_copy
1463
ff802fa1 1464 Return a vector-def for an operand. This function is used when the
b8698a0f
L
1465 vectorized stmt to be created (by the caller to this function) is a "copy"
1466 created in case the vectorized result cannot fit in one vector, and several
ff802fa1 1467 copies of the vector-stmt are required. In this case the vector-def is
ebfd146a 1468 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
b8698a0f 1469 of the stmt that defines VEC_OPRND.
ebfd146a
IR
1470 DT is the type of the vector def VEC_OPRND.
1471
1472 Context:
1473 In case the vectorization factor (VF) is bigger than the number
1474 of elements that can fit in a vectype (nunits), we have to generate
ff802fa1 1475 more than one vector stmt to vectorize the scalar stmt. This situation
b8698a0f 1476 arises when there are multiple data-types operated upon in the loop; the
ebfd146a
IR
1477 smallest data-type determines the VF, and as a result, when vectorizing
1478 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1479 vector stmt (each computing a vector of 'nunits' results, and together
b8698a0f 1480 computing 'VF' results in each iteration). This function is called when
ebfd146a
IR
1481 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1482 which VF=16 and nunits=4, so the number of copies required is 4):
1483
1484 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
b8698a0f 1485
ebfd146a
IR
1486 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1487 VS1.1: vx.1 = memref1 VS1.2
1488 VS1.2: vx.2 = memref2 VS1.3
b8698a0f 1489 VS1.3: vx.3 = memref3
ebfd146a
IR
1490
1491 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1492 VSnew.1: vz1 = vx.1 + ... VSnew.2
1493 VSnew.2: vz2 = vx.2 + ... VSnew.3
1494 VSnew.3: vz3 = vx.3 + ...
1495
1496 The vectorization of S1 is explained in vectorizable_load.
1497 The vectorization of S2:
b8698a0f
L
1498 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1499 the function 'vect_get_vec_def_for_operand' is called to
ff802fa1 1500 get the relevant vector-def for each operand of S2. For operand x it
ebfd146a
IR
1501 returns the vector-def 'vx.0'.
1502
b8698a0f
L
1503 To create the remaining copies of the vector-stmt (VSnew.j), this
1504 function is called to get the relevant vector-def for each operand. It is
1505 obtained from the respective VS1.j stmt, which is recorded in the
ebfd146a
IR
1506 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1507
b8698a0f
L
1508 For example, to obtain the vector-def 'vx.1' in order to create the
1509 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1510 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
ebfd146a
IR
1511 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1512 and return its def ('vx.1').
1513 Overall, to create the above sequence this function will be called 3 times:
1514 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1515 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1516 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1517
1518tree
1519vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1520{
355fe088 1521 gimple *vec_stmt_for_operand;
ebfd146a
IR
1522 stmt_vec_info def_stmt_info;
1523
1524 /* Do nothing; can reuse same def. */
8644a673 1525 if (dt == vect_external_def || dt == vect_constant_def )
ebfd146a
IR
1526 return vec_oprnd;
1527
1528 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1529 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1530 gcc_assert (def_stmt_info);
1531 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1532 gcc_assert (vec_stmt_for_operand);
ebfd146a
IR
1533 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1534 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1535 else
1536 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1537 return vec_oprnd;
1538}


/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
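
/* E.g. for a non-SLP binary stmt, a caller typically does (sketch with
   illustrative variable names):

     vec<tree> vec_oprnds0 = vNULL, vec_oprnds1 = vNULL;
     vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, NULL, -1);

   which yields exactly one def per operand; the defs for the remaining
   copies are then fetched with vect_get_vec_defs_for_stmt_copy.  */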


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
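
/* The virtual operand update above, sketched on illustrative SSA names:
   when a vectorized store VS is inserted before a store S that had
   vuse .MEM_5 and vdef .MEM_6, VS gets vuse .MEM_5 and a fresh vdef
   .MEM_7, and S's vuse is rewritten to .MEM_7, so the virtual use-def
   chain stays valid without running the SSA renamer.  */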

/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
        {
          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))
            return ifn;
        }
    }
  return IFN_LAST;
}
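
/* For example, a sqrt call whose combined code maps to IFN_SQRT is
   accepted here iff the target provides the corresponding optab for the
   given vector types, e.g. a V2DF sqrt pattern.  (Illustrative; the exact
   set of supported functions is given by the direct_internal_fn tables.)  */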


static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);


/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
                              gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  bool is_store;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gcc_assert (ncopies >= 1);

  is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
  mask = gimple_call_arg (stmt, 2);

  if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  if (STMT_VINFO_STRIDED_P (stmt_info))
    return false;

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
    return false;

  if (is_store)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
        return false;
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.");
          return false;
        }

      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree masktype
        = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "masked gather with integer mask not supported.");
          return false;
        }
    }
  else if (tree_int_cst_compare (nested_in_vect_loop
                                 ? STMT_VINFO_DR_STEP (stmt_info)
                                 : DR_STEP (dr), size_zero_node) <= 0)
    return false;
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
           || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
                                          TYPE_MODE (mask_vectype),
                                          !is_store)
           || (rhs_vectype
               && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (is_store)
        vect_model_store_cost (stmt_info, ncopies, false, dt,
                               NULL, NULL, NULL);
      else
        vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
                           && types_compatible_p (srctype, masktype));

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
          for (i = 0; i < nunits; ++i)
            sel[i] = i | gather_off_nunits;
          mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
        }
      else
        gcc_unreachable ();
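
      /* Illustration of the selectors just built: with nunits == 4 and
         gather_off_nunits == 8 (WIDEN), sel is { 4, 5, 6, 7, 4, 5, 6, 7 },
         i.e. the high half of the offset vector is duplicated into both
         halves so that the odd-numbered copies can consume it.  In the
         NARROW case with nunits == 8 and gather_off_nunits == 4, perm_mask's
         sel is { 0, 1, 2, 3, 8, 9, 10, 11 }, concatenating the low halves
         of two gather results into one full vector.  */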

      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      scale = build_int_cst (scaletype, gather_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          if (mask_perm_mask && (j & 1))
            mask_op = permute_vec_elements (mask_op, mask_op,
                                            mask_perm_mask, stmt, gsi);
          else
            {
              if (j == 0)
                vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              else
                {
                  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
                  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
                }

              mask_op = vec_mask;
              if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
                {
                  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
                              == TYPE_VECTOR_SUBPARTS (masktype));
                  var = vect_get_new_ssa_name (masktype, vect_simple_var);
                  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
                  new_stmt
                    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  mask_op = var;
                }
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
                                 scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
         from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
        {
          stmt = STMT_VINFO_RELATED_STMT (stmt_info);
          stmt_info = vinfo_for_stmt (stmt);
        }
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
      for (i = 0; i < ncopies; i++)
        {
          unsigned align, misalign;

          if (i == 0)
            {
              tree rhs = gimple_call_arg (stmt, 3);
              vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
              vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              /* We should have caught mismatched types earlier.  */
              gcc_assert (useless_type_conversion_p (vectype,
                                                     TREE_TYPE (vec_rhs)));
              dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
                                                      NULL_TREE, &dummy, gsi,
                                                      &ptr_incr, false, &inv_p);
              gcc_assert (!inv_p);
            }
          else
            {
              vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
              vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
              vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
              vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                             TYPE_SIZE_UNIT (vectype));
            }

          align = TYPE_ALIGN_UNIT (vectype);
          if (aligned_access_p (dr))
            misalign = 0;
          else if (DR_MISALIGNMENT (dr) == -1)
            {
              align = TYPE_ALIGN_UNIT (elem_type);
              misalign = 0;
            }
          else
            misalign = DR_MISALIGNMENT (dr);
          set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                  misalign);
          tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
                                    misalign ? misalign & -misalign : align);
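          /* When the access is misaligned, MISALIGN & -MISALIGN is its
             lowest set bit, i.e. the largest power of two known to divide
             the misalignment, and hence the alignment that can still be
             promised for the pointer.  */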
          new_stmt
            = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
                                          ptr, vec_mask, vec_rhs);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (i == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
        {
          unsigned align, misalign;

          if (i == 0)
            {
              vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
                                                      NULL_TREE, &dummy, gsi,
                                                      &ptr_incr, false, &inv_p);
              gcc_assert (!inv_p);
            }
          else
            {
              vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
              vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                             TYPE_SIZE_UNIT (vectype));
            }

          align = TYPE_ALIGN_UNIT (vectype);
          if (aligned_access_p (dr))
            misalign = 0;
          else if (DR_MISALIGNMENT (dr) == -1)
            {
              align = TYPE_ALIGN_UNIT (elem_type);
              misalign = 0;
            }
          else
            misalign = DR_MISALIGNMENT (dr);
          set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                  misalign);
          tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
                                    misalign ? misalign & -misalign : align);
          new_stmt
            = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
                                          ptr, vec_mask);
          gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (i == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
         from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
        {
          stmt = STMT_VINFO_RELATED_STMT (stmt_info);
          stmt_info = vinfo_for_stmt (stmt);
        }
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
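
/* Net effect (illustrative): a scalar IFN_MASK_LOAD call becomes NCOPIES
   vector MASK_LOAD internal calls whose second argument carries the byte
   alignment computed above, chained through STMT_VINFO_RELATED_STMT; the
   scalar call itself is then replaced by a harmless 'lhs = 0' assignment
   so that even -fno-tree-dce cannot leave it in the IL.  */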

/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
                          tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
                                        &code, &multi_step_cvt,
                                        &interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
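
/* E.g. with VECTYPE_IN V4SI and VECTYPE_OUT V8HI this returns true and
   sets *CONVERT_CODE to the single-step pack code (VEC_PACK_TRUNC_EXPR
   on targets that provide it); a narrowing that needs an intermediate
   type is rejected via MULTI_STEP_CVT.  */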

/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is GS a vectorizable call?  */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
          || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
                                         slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument types differ.\n");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument vector types differ.\n");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
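
  /* E.g. inputs in V2DF and a result in V4SF give nunits_in == 2 and
     nunits_out == 4, hence NARROW: two input vectors are consumed per
     output vector.  The inverse shape is WIDEN; any other ratio is
     rejected for now.  */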

  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
          || (modifier == NARROW
              && simple_integer_narrowing (vectype_out, vectype_in,
                                           &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
                                          vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
        fndecl = targetm.vectorize.builtin_vectorized_function
          (cfn, vectype_out, vectype_in);
      else
        fndecl = targetm.vectorize.builtin_md_vectorized_function
          (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
          && !slp_node
          && loop_vinfo
          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
        {
          /* We can handle IFN_GOMP_SIMD_LANE by returning a
             { 0, 1, 2, ... vf - 1 } vector.  */
          gcc_assert (nargs == 0);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "function is not vectorizable.\n");
          return false;
        }
    }

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
                         "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
        add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
                       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs.create (nargs);
          else
            vargs.truncate (0);

          if (slp_node)
            {
              auto_vec<vec<tree> > vec_defs (nargs);
              vec<tree> vec_oprnds0;

              for (i = 0; i < nargs; i++)
                vargs.quick_push (gimple_call_arg (stmt, i));
              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
              vec_oprnds0 = vec_defs[0];

              /* Arguments are ready.  Create the new vector stmt.  */
              FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
                {
                  size_t k;
                  for (k = 0; k < nargs; k++)
                    {
                      vec<tree> vec_oprndsk = vec_defs[k];
                      vargs[k] = vec_oprndsk[i];
                    }
                  if (modifier == NARROW)
                    {
                      tree half_res = make_ssa_name (vectype_in);
                      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
                      gimple_call_set_lhs (new_stmt, half_res);
                      vect_finish_stmt_generation (stmt, new_stmt, gsi);
                      if ((i & 1) == 0)
                        {
                          prev_res = half_res;
                          continue;
                        }
                      new_temp = make_ssa_name (vec_dest);
                      new_stmt = gimple_build_assign (new_temp, convert_code,
                                                      prev_res, half_res);
                    }
                  else
                    {
                      if (ifn != IFN_LAST)
                        new_stmt = gimple_build_call_internal_vec (ifn, vargs);
                      else
                        new_stmt = gimple_build_call_vec (fndecl, vargs);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_call_set_lhs (new_stmt, new_temp);
                    }
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
                }

              for (i = 0; i < nargs; i++)
                {
                  vec<tree> vec_oprndsi = vec_defs[i];
                  vec_oprndsi.release ();
                }
              continue;
            }

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              vargs.quick_push (vec_oprnd0);
            }

          if (gimple_call_internal_p (stmt)
              && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
            {
              tree *v = XALLOCAVEC (tree, nunits_out);
              int k;
              for (k = 0; k < nunits_out; ++k)
                v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
              tree cst = build_vector (vectype_out, v);
              tree new_var
                = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
              gimple *init_stmt = gimple_build_assign (new_var, cst);
              vect_init_vector_1 (stmt, init_stmt, NULL);
              new_temp = make_ssa_name (vec_dest);
              new_stmt = gimple_build_assign (new_temp, new_var);
            }
          else if (modifier == NARROW)
            {
              tree half_res = make_ssa_name (vectype_in);
              new_stmt = gimple_build_call_internal_vec (ifn, vargs);
              gimple_call_set_lhs (new_stmt, half_res);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if ((j & 1) == 0)
                {
                  prev_res = half_res;
                  continue;
                }
              new_temp = make_ssa_name (vec_dest);
              new_stmt = gimple_build_assign (new_temp, convert_code,
                                              prev_res, half_res);
            }
          else
            {
              if (ifn != IFN_LAST)
                new_stmt = gimple_build_call_internal_vec (ifn, vargs);
              else
                new_stmt = gimple_build_call_vec (fndecl, vargs);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
            }
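
          /* Note that for IFN_GOMP_SIMD_LANE above, copy J received the
             constant { J*nunits_out, ..., J*nunits_out + nunits_out - 1 },
             e.g. { 4, 5, 6, 7 } for nunits_out == 4 and J == 1, so all
             copies together enumerate the lanes 0 .. VF - 1.  */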
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == (modifier == NARROW ? 1 : 0))
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs.create (nargs * 2);
          else
            vargs.truncate (0);

          if (slp_node)
            {
              auto_vec<vec<tree> > vec_defs (nargs);
              vec<tree> vec_oprnds0;

              for (i = 0; i < nargs; i++)
                vargs.quick_push (gimple_call_arg (stmt, i));
              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
              vec_oprnds0 = vec_defs[0];

              /* Arguments are ready.  Create the new vector stmt.  */
              for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
                {
                  size_t k;
                  vargs.truncate (0);
                  for (k = 0; k < nargs; k++)
                    {
                      vec<tree> vec_oprndsk = vec_defs[k];
                      vargs.quick_push (vec_oprndsk[i]);
                      vargs.quick_push (vec_oprndsk[i + 1]);
                    }
                  if (ifn != IFN_LAST)
                    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
                  else
                    new_stmt = gimple_build_call_vec (fndecl, vargs);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
                }

              for (i = 0; i < nargs; i++)
                {
                  vec<tree> vec_oprndsi = vec_defs[i];
                  vec_oprndsi.release ();
                }
              continue;
            }

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              vargs.quick_push (vec_oprnd0);
              vargs.quick_push (vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
         with vf - 1 rather than 0, that is the last iteration of the
         vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          basic_block use_bb = gimple_bb (use_stmt);
          if (use_bb
              && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
            {
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
                                               ncopies * nunits_out - 1));
              update_stmt (use_stmt);
            }
        }
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}


struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
                       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
        switch (gimple_assign_rhs_code (def_stmt))
          {
          case PLUS_EXPR:
            t = gimple_assign_rhs2 (def_stmt);
            if (linear_step || TREE_CODE (t) != INTEGER_CST)
              return;
            base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
            v = gimple_assign_rhs1 (def_stmt);
            continue;
          case MULT_EXPR:
            t = gimple_assign_rhs2 (def_stmt);
            if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
              return;
            linear_step = tree_to_shwi (t);
            v = gimple_assign_rhs1 (def_stmt);
            continue;
          CASE_CONVERT:
            t = gimple_assign_rhs1 (def_stmt);
            if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
                || (TYPE_PRECISION (TREE_TYPE (v))
                    < TYPE_PRECISION (TREE_TYPE (t))))
              return;
            if (!linear_step)
              linear_step = 1;
            v = t;
            continue;
          default:
            return;
          }
      else if (is_gimple_call (def_stmt)
               && gimple_call_internal_p (def_stmt)
               && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
               && loop->simduid
               && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
               && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
                   == loop->simduid))
        {
          if (!linear_step)
            linear_step = 1;
          arginfo->linear_step = linear_step;
          arginfo->op = base;
          arginfo->simd_lane_linear = true;
          return;
        }
    }
}
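
/* For instance (illustrative gimple), given

     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = (sizetype) _1;
     _3 = _2 * 4;
     p_4 = &array + _3;

   the walk above records P_4 as simd-lane-linear with base '&array' and
   linear_step 4.  */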
2771
0136f8f0
AH
2772/* Function vectorizable_simd_clone_call.
2773
2774 Check if STMT performs a function call that can be vectorized
2775 by calling a simd clone of the function.
2776 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2777 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2778 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2779
2780static bool
355fe088
TS
2781vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2782 gimple **vec_stmt, slp_tree slp_node)
0136f8f0
AH
2783{
2784 tree vec_dest;
2785 tree scalar_dest;
2786 tree op, type;
2787 tree vec_oprnd0 = NULL_TREE;
2788 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2789 tree vectype;
2790 unsigned int nunits;
2791 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2792 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 2793 vec_info *vinfo = stmt_info->vinfo;
0136f8f0 2794 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
81c40241 2795 tree fndecl, new_temp;
355fe088
TS
2796 gimple *def_stmt;
2797 gimple *new_stmt = NULL;
0136f8f0
AH
2798 int ncopies, j;
2799 vec<simd_call_arg_info> arginfo = vNULL;
2800 vec<tree> vargs = vNULL;
2801 size_t i, nargs;
2802 tree lhs, rtype, ratype;
2803 vec<constructor_elt, va_gc> *ret_ctor_elts;
2804
2805 /* Is STMT a vectorizable call? */
2806 if (!is_gimple_call (stmt))
2807 return false;
2808
2809 fndecl = gimple_call_fndecl (stmt);
2810 if (fndecl == NULL_TREE)
2811 return false;
2812
d52f5295 2813 struct cgraph_node *node = cgraph_node::get (fndecl);
0136f8f0
AH
2814 if (node == NULL || node->simd_clones == NULL)
2815 return false;
2816
2817 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2818 return false;
2819
66c16fd9
RB
2820 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2821 && ! vec_stmt)
0136f8f0
AH
2822 return false;
2823
2824 if (gimple_call_lhs (stmt)
2825 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2826 return false;
2827
2828 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2829
2830 vectype = STMT_VINFO_VECTYPE (stmt_info);
2831
2832 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2833 return false;
2834
2835 /* FORNOW */
2836 if (slp_node || PURE_SLP_STMT (stmt_info))
2837 return false;
2838
2839 /* Process function arguments. */
2840 nargs = gimple_call_num_args (stmt);
2841
2842 /* Bail out if the function has zero arguments. */
2843 if (nargs == 0)
2844 return false;
2845
2846 arginfo.create (nargs);
2847
2848 for (i = 0; i < nargs; i++)
2849 {
2850 simd_call_arg_info thisarginfo;
2851 affine_iv iv;
2852
2853 thisarginfo.linear_step = 0;
2854 thisarginfo.align = 0;
2855 thisarginfo.op = NULL_TREE;
17b658af 2856 thisarginfo.simd_lane_linear = false;
0136f8f0
AH
2857
2858 op = gimple_call_arg (stmt, i);
81c40241
RB
2859 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2860 &thisarginfo.vectype)
0136f8f0
AH
2861 || thisarginfo.dt == vect_uninitialized_def)
2862 {
2863 if (dump_enabled_p ())
2864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2865 "use not simple.\n");
2866 arginfo.release ();
2867 return false;
2868 }
2869
2870 if (thisarginfo.dt == vect_constant_def
2871 || thisarginfo.dt == vect_external_def)
2872 gcc_assert (thisarginfo.vectype == NULL_TREE);
2873 else
2874 gcc_assert (thisarginfo.vectype != NULL_TREE);
2875
6c9e85fb
JJ
2876 /* For linear arguments, the analyze phase should have saved
2877 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
17b658af
JJ
2878 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2879 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
6c9e85fb
JJ
2880 {
2881 gcc_assert (vec_stmt);
2882 thisarginfo.linear_step
17b658af 2883 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
6c9e85fb 2884 thisarginfo.op
17b658af
JJ
2885 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2886 thisarginfo.simd_lane_linear
2887 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2888 == boolean_true_node);
6c9e85fb
JJ
2889 /* If loop has been peeled for alignment, we need to adjust it. */
2890 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2891 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
17b658af 2892 if (n1 != n2 && !thisarginfo.simd_lane_linear)
6c9e85fb
JJ
2893 {
2894 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
17b658af 2895 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
6c9e85fb
JJ
2896 tree opt = TREE_TYPE (thisarginfo.op);
2897 bias = fold_convert (TREE_TYPE (step), bias);
2898 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2899 thisarginfo.op
2900 = fold_build2 (POINTER_TYPE_P (opt)
2901 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2902 thisarginfo.op, bias);
2903 }
2904 }
2905 else if (!vec_stmt
2906 && thisarginfo.dt != vect_constant_def
2907 && thisarginfo.dt != vect_external_def
2908 && loop_vinfo
2909 && TREE_CODE (op) == SSA_NAME
2910 && simple_iv (loop, loop_containing_stmt (stmt), op,
2911 &iv, false)
2912 && tree_fits_shwi_p (iv.step))
0136f8f0
AH
2913 {
2914 thisarginfo.linear_step = tree_to_shwi (iv.step);
2915 thisarginfo.op = iv.base;
2916 }
2917 else if ((thisarginfo.dt == vect_constant_def
2918 || thisarginfo.dt == vect_external_def)
2919 && POINTER_TYPE_P (TREE_TYPE (op)))
2920 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
17b658af
JJ
2921 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2922 linear too. */
2923 if (POINTER_TYPE_P (TREE_TYPE (op))
2924 && !thisarginfo.linear_step
2925 && !vec_stmt
2926 && thisarginfo.dt != vect_constant_def
2927 && thisarginfo.dt != vect_external_def
2928 && loop_vinfo
2929 && !slp_node
2930 && TREE_CODE (op) == SSA_NAME)
2931 vect_simd_lane_linear (op, loop, &thisarginfo);
0136f8f0
AH
2932
2933 arginfo.quick_push (thisarginfo);
2934 }
2935
2936 unsigned int badness = 0;
2937 struct cgraph_node *bestn = NULL;
6c9e85fb
JJ
2938 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2939 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
0136f8f0
AH
2940 else
2941 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2942 n = n->simdclone->next_clone)
2943 {
2944 unsigned int this_badness = 0;
2945 if (n->simdclone->simdlen
2946 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2947 || n->simdclone->nargs != nargs)
2948 continue;
2949 if (n->simdclone->simdlen
2950 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2951 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2952 - exact_log2 (n->simdclone->simdlen)) * 1024;
2953 if (n->simdclone->inbranch)
2954 this_badness += 2048;
2955 int target_badness = targetm.simd_clone.usable (n);
2956 if (target_badness < 0)
2957 continue;
2958 this_badness += target_badness * 512;
2959 /* FORNOW: Have to add code to add the mask argument. */
2960 if (n->simdclone->inbranch)
2961 continue;
2962 for (i = 0; i < nargs; i++)
2963 {
2964 switch (n->simdclone->args[i].arg_type)
2965 {
2966 case SIMD_CLONE_ARG_TYPE_VECTOR:
2967 if (!useless_type_conversion_p
2968 (n->simdclone->args[i].orig_type,
2969 TREE_TYPE (gimple_call_arg (stmt, i))))
2970 i = -1;
2971 else if (arginfo[i].dt == vect_constant_def
2972 || arginfo[i].dt == vect_external_def
2973 || arginfo[i].linear_step)
2974 this_badness += 64;
2975 break;
2976 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2977 if (arginfo[i].dt != vect_constant_def
2978 && arginfo[i].dt != vect_external_def)
2979 i = -1;
2980 break;
2981 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
d9a6bd32 2982 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
0136f8f0
AH
2983 if (arginfo[i].dt == vect_constant_def
2984 || arginfo[i].dt == vect_external_def
2985 || (arginfo[i].linear_step
2986 != n->simdclone->args[i].linear_step))
2987 i = -1;
2988 break;
2989 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
d9a6bd32
JJ
2990 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2991 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
e01d41e5
JJ
2992 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
2993 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
2994 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
0136f8f0
AH
2995 /* FORNOW */
2996 i = -1;
2997 break;
2998 case SIMD_CLONE_ARG_TYPE_MASK:
2999 gcc_unreachable ();
3000 }
3001 if (i == (size_t) -1)
3002 break;
3003 if (n->simdclone->args[i].alignment > arginfo[i].align)
3004 {
3005 i = -1;
3006 break;
3007 }
3008 if (arginfo[i].align)
3009 this_badness += (exact_log2 (arginfo[i].align)
3010 - exact_log2 (n->simdclone->args[i].alignment));
3011 }
3012 if (i == (size_t) -1)
3013 continue;
3014 if (bestn == NULL || this_badness < badness)
3015 {
3016 bestn = n;
3017 badness = this_badness;
3018 }
3019 }
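
  /* Scoring sketch, using the constants above: with a loop VF of 8, a
     matching simdlen-8 clone scores 0 while a simdlen-4 clone scores
     (log2 (8) - log2 (4)) * 1024 == 1024, so the widest usable clone wins;
     inbranch clones are penalized by 2048 and, for now, skipped entirely
     at the FORNOW check.  */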
3020
3021 if (bestn == NULL)
3022 {
3023 arginfo.release ();
3024 return false;
3025 }
3026
3027 for (i = 0; i < nargs; i++)
3028 if ((arginfo[i].dt == vect_constant_def
3029 || arginfo[i].dt == vect_external_def)
3030 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3031 {
3032 arginfo[i].vectype
3033 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3034 i)));
3035 if (arginfo[i].vectype == NULL
3036 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3037 > bestn->simdclone->simdlen))
3038 {
3039 arginfo.release ();
3040 return false;
3041 }
3042 }
3043
3044 fndecl = bestn->decl;
3045 nunits = bestn->simdclone->simdlen;
3046 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3047
3048 /* If the function isn't const, only allow it in simd loops where user
3049 has asserted that at least nunits consecutive iterations can be
3050 performed using SIMD instructions. */
3051 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3052 && gimple_vuse (stmt))
3053 {
3054 arginfo.release ();
3055 return false;
3056 }
3057
3058 /* Sanity check: make sure that at least one copy of the vectorized stmt
3059 needs to be generated. */
3060 gcc_assert (ncopies >= 1);
3061
3062 if (!vec_stmt) /* transformation not required. */
3063 {
6c9e85fb
JJ
3064 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3065 for (i = 0; i < nargs; i++)
3066 if (bestn->simdclone->args[i].arg_type
3067 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3068 {
17b658af 3069 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
6c9e85fb
JJ
3070 + 1);
3071 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3072 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3073 ? size_type_node : TREE_TYPE (arginfo[i].op);
3074 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3075 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
17b658af
JJ
3076 tree sll = arginfo[i].simd_lane_linear
3077 ? boolean_true_node : boolean_false_node;
3078 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
6c9e85fb 3079 }
0136f8f0
AH
3080 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3081 if (dump_enabled_p ())
3082 dump_printf_loc (MSG_NOTE, vect_location,
3083 "=== vectorizable_simd_clone_call ===\n");
3084/* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3085 arginfo.release ();
3086 return true;
3087 }
3088
3089 /** Transform. **/
3090
3091 if (dump_enabled_p ())
3092 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3093
3094 /* Handle def. */
3095 scalar_dest = gimple_call_lhs (stmt);
3096 vec_dest = NULL_TREE;
3097 rtype = NULL_TREE;
3098 ratype = NULL_TREE;
3099 if (scalar_dest)
3100 {
3101 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3102 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3103 if (TREE_CODE (rtype) == ARRAY_TYPE)
3104 {
3105 ratype = rtype;
3106 rtype = TREE_TYPE (ratype);
3107 }
3108 }
3109
3110 prev_stmt_info = NULL;
3111 for (j = 0; j < ncopies; ++j)
3112 {
3113 /* Build argument list for the vectorized call. */
3114 if (j == 0)
3115 vargs.create (nargs);
3116 else
3117 vargs.truncate (0);
3118
3119 for (i = 0; i < nargs; i++)
3120 {
3121 unsigned int k, l, m, o;
3122 tree atype;
3123 op = gimple_call_arg (stmt, i);
3124 switch (bestn->simdclone->args[i].arg_type)
3125 {
3126 case SIMD_CLONE_ARG_TYPE_VECTOR:
3127 atype = bestn->simdclone->args[i].vector_type;
3128 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3129 for (m = j * o; m < (j + 1) * o; m++)
3130 {
3131 if (TYPE_VECTOR_SUBPARTS (atype)
3132 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3133 {
3134 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3135 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3136 / TYPE_VECTOR_SUBPARTS (atype));
3137 gcc_assert ((k & (k - 1)) == 0);
3138 if (m == 0)
3139 vec_oprnd0
81c40241 3140 = vect_get_vec_def_for_operand (op, stmt);
0136f8f0
AH
3141 else
3142 {
3143 vec_oprnd0 = arginfo[i].op;
3144 if ((m & (k - 1)) == 0)
3145 vec_oprnd0
3146 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3147 vec_oprnd0);
3148 }
3149 arginfo[i].op = vec_oprnd0;
3150 vec_oprnd0
3151 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3152 size_int (prec),
3153 bitsize_int ((m & (k - 1)) * prec));
3154 new_stmt
b731b390 3155 = gimple_build_assign (make_ssa_name (atype),
0136f8f0
AH
3156 vec_oprnd0);
3157 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3158 vargs.safe_push (gimple_assign_lhs (new_stmt));
3159 }
3160 else
3161 {
3162 k = (TYPE_VECTOR_SUBPARTS (atype)
3163 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3164 gcc_assert ((k & (k - 1)) == 0);
3165 vec<constructor_elt, va_gc> *ctor_elts;
3166 if (k != 1)
3167 vec_alloc (ctor_elts, k);
3168 else
3169 ctor_elts = NULL;
3170 for (l = 0; l < k; l++)
3171 {
3172 if (m == 0 && l == 0)
3173 vec_oprnd0
81c40241 3174 = vect_get_vec_def_for_operand (op, stmt);
0136f8f0
AH
3175 else
3176 vec_oprnd0
3177 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3178 arginfo[i].op);
3179 arginfo[i].op = vec_oprnd0;
3180 if (k == 1)
3181 break;
3182 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3183 vec_oprnd0);
3184 }
3185 if (k == 1)
3186 vargs.safe_push (vec_oprnd0);
3187 else
3188 {
3189 vec_oprnd0 = build_constructor (atype, ctor_elts);
3190 new_stmt
b731b390 3191 = gimple_build_assign (make_ssa_name (atype),
0136f8f0
AH
3192 vec_oprnd0);
3193 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3194 vargs.safe_push (gimple_assign_lhs (new_stmt));
3195 }
3196 }
3197 }
3198 break;
3199 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3200 vargs.safe_push (op);
3201 break;
3202 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3203 if (j == 0)
3204 {
3205 gimple_seq stmts;
3206 arginfo[i].op
3207 = force_gimple_operand (arginfo[i].op, &stmts, true,
3208 NULL_TREE);
3209 if (stmts != NULL)
3210 {
3211 basic_block new_bb;
3212 edge pe = loop_preheader_edge (loop);
3213 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3214 gcc_assert (!new_bb);
3215 }
3216 if (arginfo[i].simd_lane_linear)
3217 {
3218 vargs.safe_push (arginfo[i].op);
3219 break;
3220 }
3221 tree phi_res = copy_ssa_name (op);
3222 gphi *new_phi = create_phi_node (phi_res, loop->header);
3223 set_vinfo_for_stmt (new_phi,
3224 new_stmt_vec_info (new_phi, loop_vinfo));
3225 add_phi_arg (new_phi, arginfo[i].op,
3226 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3227 enum tree_code code
3228 = POINTER_TYPE_P (TREE_TYPE (op))
3229 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3230 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3231 ? sizetype : TREE_TYPE (op);
3232 widest_int cst
3233 = wi::mul (bestn->simdclone->args[i].linear_step,
3234 ncopies * nunits);
3235 tree tcst = wide_int_to_tree (type, cst);
3236 tree phi_arg = copy_ssa_name (op);
3237 new_stmt
3238 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3239 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3240 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3241 set_vinfo_for_stmt (new_stmt,
3242 new_stmt_vec_info (new_stmt, loop_vinfo));
3243 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3244 UNKNOWN_LOCATION);
3245 arginfo[i].op = phi_res;
3246 vargs.safe_push (phi_res);
3247 }
3248 else
3249 {
3250 enum tree_code code
3251 = POINTER_TYPE_P (TREE_TYPE (op))
3252 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3253 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3254 ? sizetype : TREE_TYPE (op);
3255 widest_int cst
3256 = wi::mul (bestn->simdclone->args[i].linear_step,
3257 j * nunits);
3258 tree tcst = wide_int_to_tree (type, cst);
3259 new_temp = make_ssa_name (TREE_TYPE (op));
3260 new_stmt = gimple_build_assign (new_temp, code,
3261 arginfo[i].op, tcst);
3262 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3263 vargs.safe_push (new_temp);
3264 }
3265 break;
3266 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3267 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3268 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3269 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3270 default:
3271 gcc_unreachable ();
3272 }
3273 }
3274
3275 new_stmt = gimple_build_call_vec (fndecl, vargs);
3276 if (vec_dest)
3277 {
3278 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3279 if (ratype)
3280 new_temp = create_tmp_var (ratype);
3281 else if (TYPE_VECTOR_SUBPARTS (vectype)
3282 == TYPE_VECTOR_SUBPARTS (rtype))
3283 new_temp = make_ssa_name (vec_dest, new_stmt);
3284 else
3285 new_temp = make_ssa_name (rtype, new_stmt);
3286 gimple_call_set_lhs (new_stmt, new_temp);
3287 }
3288 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3289
3290 if (vec_dest)
3291 {
3292 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3293 {
3294 unsigned int k, l;
3295 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3296 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3297 gcc_assert ((k & (k - 1)) == 0);
3298 for (l = 0; l < k; l++)
3299 {
3300 tree t;
3301 if (ratype)
3302 {
3303 t = build_fold_addr_expr (new_temp);
3304 t = build2 (MEM_REF, vectype, t,
3305 build_int_cst (TREE_TYPE (t),
3306 l * prec / BITS_PER_UNIT));
3307 }
3308 else
3309 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3310 size_int (prec), bitsize_int (l * prec));
3311 new_stmt
3312 = gimple_build_assign (make_ssa_name (vectype), t);
3313 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3314 if (j == 0 && l == 0)
3315 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3316 else
3317 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3318
3319 prev_stmt_info = vinfo_for_stmt (new_stmt);
3320 }
3321
3322 if (ratype)
3323 {
3324 tree clobber = build_constructor (ratype, NULL);
3325 TREE_THIS_VOLATILE (clobber) = 1;
3326 new_stmt = gimple_build_assign (new_temp, clobber);
3327 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3328 }
3329 continue;
3330 }
3331 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3332 {
3333 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3334 / TYPE_VECTOR_SUBPARTS (rtype));
3335 gcc_assert ((k & (k - 1)) == 0);
3336 if ((j & (k - 1)) == 0)
3337 vec_alloc (ret_ctor_elts, k);
3338 if (ratype)
3339 {
3340 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3341 for (m = 0; m < o; m++)
3342 {
3343 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3344 size_int (m), NULL_TREE, NULL_TREE);
3345 new_stmt
3346 = gimple_build_assign (make_ssa_name (rtype), tem);
3347 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3348 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3349 gimple_assign_lhs (new_stmt));
3350 }
3351 tree clobber = build_constructor (ratype, NULL);
3352 TREE_THIS_VOLATILE (clobber) = 1;
3353 new_stmt = gimple_build_assign (new_temp, clobber);
3354 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3355 }
3356 else
3357 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3358 if ((j & (k - 1)) != k - 1)
3359 continue;
3360 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3361 new_stmt
3362 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3363 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3364
3365 if ((unsigned) j == k - 1)
3366 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3367 else
3368 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3369
3370 prev_stmt_info = vinfo_for_stmt (new_stmt);
3371 continue;
3372 }
3373 else if (ratype)
3374 {
3375 tree t = build_fold_addr_expr (new_temp);
3376 t = build2 (MEM_REF, vectype, t,
3377 build_int_cst (TREE_TYPE (t), 0));
3378 new_stmt
3379 = gimple_build_assign (make_ssa_name (vec_dest), t);
3380 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3381 tree clobber = build_constructor (ratype, NULL);
3382 TREE_THIS_VOLATILE (clobber) = 1;
3383 vect_finish_stmt_generation (stmt,
3384 gimple_build_assign (new_temp,
3385 clobber), gsi);
3386 }
3387 }
3388
3389 if (j == 0)
3390 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3391 else
3392 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3393
3394 prev_stmt_info = vinfo_for_stmt (new_stmt);
3395 }
3396
3397 vargs.release ();
3398
3399 /* The call in STMT might prevent it from being removed by DCE.
3400 We however cannot remove it here, due to the way the SSA name
3401 it defines is mapped to the new definition. So just replace
3402 the RHS of the statement with something harmless. */
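/* For example: if the original scalar statement was
     x_1 = foo (y_2);
   then, once every use of x_1 has been rewritten in terms of the
   vector results, the statement becomes
     x_1 = 0;
   (a zero constant of X_1's type, per build_zero_cst below), which
   DCE can remove together with its SSA definition.  */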
3403
3404 if (slp_node)
3405 return true;
3406
3407 if (scalar_dest)
3408 {
3409 type = TREE_TYPE (scalar_dest);
3410 if (is_pattern_stmt_p (stmt_info))
3411 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3412 else
3413 lhs = gimple_call_lhs (stmt);
3414 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3415 }
3416 else
3417 new_stmt = gimple_build_nop ();
3418 set_vinfo_for_stmt (new_stmt, stmt_info);
3419 set_vinfo_for_stmt (stmt, NULL);
3420 STMT_VINFO_STMT (stmt_info) = new_stmt;
3421 gsi_replace (gsi, new_stmt, true);
3422 unlink_stmt_vdef (stmt);
3423
3424 return true;
3425}
3426
3427
3428/* Function vect_gen_widened_results_half
3429
3430 Create a vector stmt whose code, number of arguments, and result
3431 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3432 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3433 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3434 needs to be created (DECL is a function-decl of a target-builtin).
3435 STMT is the original scalar stmt that we are vectorizing. */
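/* As an illustrative sketch (not tied to a particular target): for a
   widening multiply the caller invokes this function twice, once per
   half, producing something like
     vect_lo = VEC_WIDEN_MULT_LO_EXPR <vect_a, vect_b>;
     vect_hi = VEC_WIDEN_MULT_HI_EXPR <vect_a, vect_b>;
   each half holding results twice as wide as its inputs.  */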
3436
3437static gimple *
3438vect_gen_widened_results_half (enum tree_code code,
3439 tree decl,
3440 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3441 tree vec_dest, gimple_stmt_iterator *gsi,
3442 gimple *stmt)
3443{
3444 gimple *new_stmt;
3445 tree new_temp;
3446
3447 /* Generate half of the widened result: */
3448 if (code == CALL_EXPR)
3449 {
3450 /* Target specific support */
3451 if (op_type == binary_op)
3452 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3453 else
3454 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3455 new_temp = make_ssa_name (vec_dest, new_stmt);
3456 gimple_call_set_lhs (new_stmt, new_temp);
3457 }
3458 else
3459 {
3460 /* Generic support */
3461 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3462 if (op_type != binary_op)
3463 vec_oprnd1 = NULL;
3464 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3465 new_temp = make_ssa_name (vec_dest, new_stmt);
3466 gimple_assign_set_lhs (new_stmt, new_temp);
3467 }
3468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3469
3470 return new_stmt;
3471}
3472
3473
3474/* Get vectorized definitions for loop-based vectorization. For the first
3475 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3476 the scalar operand), and for the rest we get a copy with
3477 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3478 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3479 The vectors are collected into VEC_OPRNDS. */
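/* For example, a call with MULTI_STEP_CVT == 1 collects four vector
   defs into VEC_OPRNDS: the def of OPRND itself, one stmt copy of it,
   and two further stmt copies from the single recursive call.  */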
3480
3481static void
3482vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3483 vec<tree> *vec_oprnds, int multi_step_cvt)
3484{
3485 tree vec_oprnd;
3486
3487 /* Get first vector operand. */
3488 /* All the vector operands except the very first one (that is scalar oprnd)
3489 are stmt copies. */
3490 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3491 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3492 else
3493 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3494
3495 vec_oprnds->quick_push (vec_oprnd);
3496
3497 /* Get second vector operand. */
3498 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3499 vec_oprnds->quick_push (vec_oprnd);
3500
3501 *oprnd = vec_oprnd;
3502
3503 /* For conversion in multiple steps, continue to get operands
3504 recursively. */
3505 if (multi_step_cvt)
3506 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3507}
3508
3509
3510/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3511 For multi-step conversions store the resulting vectors and call the function
3512 recursively. */
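/* Sketch of one demotion level: the loop below pairs the input vectors
   up and emits, for each pair, roughly
     vect_res = VEC_PACK_TRUNC_EXPR <vop0, vop1>;
   so each recursion level halves the number of vectors.  */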
3513
3514static void
3515vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3516 int multi_step_cvt, gimple *stmt,
3517 vec<tree> vec_dsts,
3518 gimple_stmt_iterator *gsi,
3519 slp_tree slp_node, enum tree_code code,
3520 stmt_vec_info *prev_stmt_info)
3521{
3522 unsigned int i;
3523 tree vop0, vop1, new_tmp, vec_dest;
3524 gimple *new_stmt;
3525 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3526
3527 vec_dest = vec_dsts.pop ();
3528
3529 for (i = 0; i < vec_oprnds->length (); i += 2)
3530 {
3531 /* Create demotion operation. */
3532 vop0 = (*vec_oprnds)[i];
3533 vop1 = (*vec_oprnds)[i + 1];
3534 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3535 new_tmp = make_ssa_name (vec_dest, new_stmt);
3536 gimple_assign_set_lhs (new_stmt, new_tmp);
3537 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3538
3539 if (multi_step_cvt)
3540 /* Store the resulting vector for next recursive call. */
3541 (*vec_oprnds)[i/2] = new_tmp;
3542 else
3543 {
3544 /* This is the last step of the conversion sequence. Store the
3545 vectors in SLP_NODE or in vector info of the scalar statement
3546 (or in STMT_VINFO_RELATED_STMT chain). */
3547 if (slp_node)
3548 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3549 else
3550 {
3551 if (!*prev_stmt_info)
3552 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3553 else
3554 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3555
3556 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3557 }
3558 }
3559 }
3560
3561 /* For multi-step demotion operations we first generate demotion operations
3562 from the source type to the intermediate types, and then combine the
3563 results (stored in VEC_OPRNDS) with a demotion operation to the
3564 destination type. */
3565 if (multi_step_cvt)
3566 {
3567 /* At each level of recursion we have half of the operands we had at the
3568 previous level. */
3569 vec_oprnds->truncate ((i+1)/2);
3570 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3571 stmt, vec_dsts, gsi, slp_node,
3572 VEC_PACK_TRUNC_EXPR,
3573 prev_stmt_info);
3574 }
3575
3576 vec_dsts.quick_push (vec_dest);
3577}
3578
3579
3580/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3581 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3582 the resulting vectors and call the function recursively. */
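/* Sketch of one promotion level: for each input vector the loop below
   emits a low and a high half,
     new_tmp1 = CODE1 <vop0, vop1>;
     new_tmp2 = CODE2 <vop0, vop1>;
   so on return *VEC_OPRNDS0 holds twice as many (wider) vectors.  */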
3583
3584static void
3585vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3586 vec<tree> *vec_oprnds1,
3587 gimple *stmt, tree vec_dest,
3588 gimple_stmt_iterator *gsi,
3589 enum tree_code code1,
3590 enum tree_code code2, tree decl1,
3591 tree decl2, int op_type)
3592{
3593 int i;
3594 tree vop0, vop1, new_tmp1, new_tmp2;
3595 gimple *new_stmt1, *new_stmt2;
3596 vec<tree> vec_tmp = vNULL;
3597
3598 vec_tmp.create (vec_oprnds0->length () * 2);
3599 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3600 {
3601 if (op_type == binary_op)
3602 vop1 = (*vec_oprnds1)[i];
3603 else
3604 vop1 = NULL_TREE;
3605
3606 /* Generate the two halves of promotion operation. */
3607 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3608 op_type, vec_dest, gsi, stmt);
3609 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3610 op_type, vec_dest, gsi, stmt);
3611 if (is_gimple_call (new_stmt1))
3612 {
3613 new_tmp1 = gimple_call_lhs (new_stmt1);
3614 new_tmp2 = gimple_call_lhs (new_stmt2);
3615 }
3616 else
3617 {
3618 new_tmp1 = gimple_assign_lhs (new_stmt1);
3619 new_tmp2 = gimple_assign_lhs (new_stmt2);
3620 }
3621
3622 /* Store the results for the next step. */
3623 vec_tmp.quick_push (new_tmp1);
3624 vec_tmp.quick_push (new_tmp2);
3625 }
3626
3627 vec_oprnds0->release ();
3628 *vec_oprnds0 = vec_tmp;
3629}
3630
3631
3632/* Check if STMT performs a conversion operation that can be vectorized.
3633 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3634 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3635 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3636
3637static bool
3638vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3639 gimple **vec_stmt, slp_tree slp_node)
3640{
3641 tree vec_dest;
3642 tree scalar_dest;
3643 tree op0, op1 = NULL_TREE;
3644 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3647 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3648 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3649 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3650 tree new_temp;
3651 gimple *def_stmt;
3652 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3653 gimple *new_stmt = NULL;
3654 stmt_vec_info prev_stmt_info;
3655 int nunits_in;
3656 int nunits_out;
3657 tree vectype_out, vectype_in;
3658 int ncopies, i, j;
3659 tree lhs_type, rhs_type;
3660 enum { NARROW, NONE, WIDEN } modifier;
3661 vec<tree> vec_oprnds0 = vNULL;
3662 vec<tree> vec_oprnds1 = vNULL;
3663 tree vop0;
3664 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3665 vec_info *vinfo = stmt_info->vinfo;
3666 int multi_step_cvt = 0;
3667 vec<tree> vec_dsts = vNULL;
3668 vec<tree> interm_types = vNULL;
3669 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3670 int op_type;
3671 machine_mode rhs_mode;
3672 unsigned short fltsz;
3673
3674 /* Is STMT a vectorizable conversion? */
3675
3676 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3677 return false;
3678
3679 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3680 && ! vec_stmt)
3681 return false;
3682
3683 if (!is_gimple_assign (stmt))
3684 return false;
3685
3686 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3687 return false;
3688
3689 code = gimple_assign_rhs_code (stmt);
3690 if (!CONVERT_EXPR_CODE_P (code)
3691 && code != FIX_TRUNC_EXPR
3692 && code != FLOAT_EXPR
3693 && code != WIDEN_MULT_EXPR
3694 && code != WIDEN_LSHIFT_EXPR)
3695 return false;
3696
3697 op_type = TREE_CODE_LENGTH (code);
3698
ebfd146a 3699 /* Check types of lhs and rhs. */
3700 scalar_dest = gimple_assign_lhs (stmt);
3701 lhs_type = TREE_TYPE (scalar_dest);
3702 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3703
3704 op0 = gimple_assign_rhs1 (stmt);
3705 rhs_type = TREE_TYPE (op0);
3706
3707 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3708 && !((INTEGRAL_TYPE_P (lhs_type)
3709 && INTEGRAL_TYPE_P (rhs_type))
3710 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3711 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3712 return false;
3713
3714 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
3715 && ((INTEGRAL_TYPE_P (lhs_type)
3716 && (TYPE_PRECISION (lhs_type)
3717 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3718 || (INTEGRAL_TYPE_P (rhs_type)
3719 && (TYPE_PRECISION (rhs_type)
3720 != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
3721 {
3722 if (dump_enabled_p ())
3723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3724 "type conversion to/from bit-precision unsupported."
3725 "\n");
3726 return false;
3727 }
3728
3729 /* Check the operands of the operation. */
3730 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3731 {
3732 if (dump_enabled_p ())
3733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3734 "use not simple.\n");
3735 return false;
3736 }
3737 if (op_type == binary_op)
3738 {
3739 bool ok;
3740
3741 op1 = gimple_assign_rhs2 (stmt);
3742 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3743 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3744 OP1. */
3745 if (CONSTANT_CLASS_P (op0))
3746 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3747 else
3748 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3749
3750 if (!ok)
3751 {
3752 if (dump_enabled_p ())
3753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3754 "use not simple.\n");
3755 return false;
3756 }
3757 }
3758
3759 /* If op0 is an external or constant def, use a vector type of
3760 the same size as the output vector type. */
3761 if (!vectype_in)
3762 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3763 if (vec_stmt)
3764 gcc_assert (vectype_in);
3765 if (!vectype_in)
3766 {
3767 if (dump_enabled_p ())
3768 {
3769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3770 "no vectype for scalar type ");
3771 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3772 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3773 }
3774
3775 return false;
3776 }
3777
3778 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
3779 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
3780 {
3781 if (dump_enabled_p ())
3782 {
3783 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3784 "can't convert between boolean and non "
3785 "boolean vectors");
3786 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3787 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3788 }
3789
3790 return false;
3791 }
3792
3793 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3794 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3795 if (nunits_in < nunits_out)
3796 modifier = NARROW;
3797 else if (nunits_out == nunits_in)
3798 modifier = NONE;
3799 else
3800 modifier = WIDEN;
3801
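/* For example: V8HI -> V4SI (8 vs. 4 subparts) is a WIDEN,
   V4SI -> V8HI a NARROW, and a conversion between equally wide
   element types (say float <-> int of the same size) a NONE.  */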
3802 /* Multiple types in SLP are handled by creating the appropriate number of
3803 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3804 case of SLP. */
3805 if (slp_node || PURE_SLP_STMT (stmt_info))
3806 ncopies = 1;
3807 else if (modifier == NARROW)
3808 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3809 else
3810 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3811
3812 /* Sanity check: make sure that at least one copy of the vectorized stmt
3813 needs to be generated. */
3814 gcc_assert (ncopies >= 1);
3815
3816 /* Supportable by target? */
3817 switch (modifier)
3818 {
3819 case NONE:
3820 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3821 return false;
3822 if (supportable_convert_operation (code, vectype_out, vectype_in,
3823 &decl1, &code1))
3824 break;
3825 /* FALLTHRU */
3826 unsupported:
3827 if (dump_enabled_p ())
3828 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3829 "conversion not supported by target.\n");
3830 return false;
3831
3832 case WIDEN:
3833 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3834 &code1, &code2, &multi_step_cvt,
3835 &interm_types))
3836 {
3837 /* Binary widening operation can only be supported directly by the
3838 architecture. */
3839 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3840 break;
3841 }
3842
3843 if (code != FLOAT_EXPR
3844 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3845 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3846 goto unsupported;
3847
3848 rhs_mode = TYPE_MODE (rhs_type);
3849 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3850 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3851 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3852 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3853 {
3854 cvt_type
3855 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3856 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3857 if (cvt_type == NULL_TREE)
3858 goto unsupported;
3859
3860 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3861 {
3862 if (!supportable_convert_operation (code, vectype_out,
3863 cvt_type, &decl1, &codecvt1))
3864 goto unsupported;
3865 }
3866 else if (!supportable_widening_operation (code, stmt, vectype_out,
3867 cvt_type, &codecvt1,
3868 &codecvt2, &multi_step_cvt,
3869 &interm_types))
3870 continue;
3871 else
3872 gcc_assert (multi_step_cvt == 0);
3873
3874 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3875 vectype_in, &code1, &code2,
3876 &multi_step_cvt, &interm_types))
3877 break;
3878 }
3879
3880 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3881 goto unsupported;
3882
3883 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3884 codecvt2 = ERROR_MARK;
3885 else
3886 {
3887 multi_step_cvt++;
3888 interm_types.safe_push (cvt_type);
3889 cvt_type = NULL_TREE;
3890 }
3891 break;
3892
3893 case NARROW:
3894 gcc_assert (op_type == unary_op);
3895 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3896 &code1, &multi_step_cvt,
3897 &interm_types))
3898 break;
3899
3900 if (code != FIX_TRUNC_EXPR
3901 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3902 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3903 goto unsupported;
3904
3905 rhs_mode = TYPE_MODE (rhs_type);
3906 cvt_type
3907 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3908 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3909 if (cvt_type == NULL_TREE)
3910 goto unsupported;
3911 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3912 &decl1, &codecvt1))
3913 goto unsupported;
3914 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3915 &code1, &multi_step_cvt,
3916 &interm_types))
3917 break;
3918 goto unsupported;
3919
3920 default:
3921 gcc_unreachable ();
3922 }
3923
3924 if (!vec_stmt) /* transformation not required. */
3925 {
3926 if (dump_enabled_p ())
3927 dump_printf_loc (MSG_NOTE, vect_location,
3928 "=== vectorizable_conversion ===\n");
3929 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3930 {
3931 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3932 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3933 }
3934 else if (modifier == NARROW)
3935 {
3936 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3937 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3938 }
3939 else
3940 {
3941 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3942 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3943 }
3944 interm_types.release ();
3945 return true;
3946 }
3947
3948 /** Transform. **/
3949 if (dump_enabled_p ())
3950 dump_printf_loc (MSG_NOTE, vect_location,
3951 "transform conversion. ncopies = %d.\n", ncopies);
3952
3953 if (op_type == binary_op)
3954 {
3955 if (CONSTANT_CLASS_P (op0))
3956 op0 = fold_convert (TREE_TYPE (op1), op0);
3957 else if (CONSTANT_CLASS_P (op1))
3958 op1 = fold_convert (TREE_TYPE (op0), op1);
3959 }
3960
3961 /* In case of multi-step conversion, we first generate conversion operations
3962 to the intermediate types, and then from those types to the final one.
3963 We create vector destinations for the intermediate type (TYPES) received
3964 from supportable_*_operation, and store them in the correct order
3965 for future use in vect_create_vectorized_*_stmts (). */
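/* For instance, on a target with no direct float -> short pattern a
   FIX_TRUNC_EXPR may be split into float -> int with the conversion
   code and then int -> short via VEC_PACK_TRUNC_EXPR, with the
   intermediate int vector type recorded in INTERM_TYPES above.  */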
3966 vec_dsts.create (multi_step_cvt + 1);
3967 vec_dest = vect_create_destination_var (scalar_dest,
3968 (cvt_type && modifier == WIDEN)
3969 ? cvt_type : vectype_out);
9771b263 3970 vec_dsts.quick_push (vec_dest);
4a00c761
JJ
3971
3972 if (multi_step_cvt)
3973 {
3974 for (i = interm_types.length () - 1;
3975 interm_types.iterate (i, &intermediate_type); i--)
3976 {
3977 vec_dest = vect_create_destination_var (scalar_dest,
3978 intermediate_type);
3979 vec_dsts.quick_push (vec_dest);
3980 }
3981 }
3982
3983 if (cvt_type)
3984 vec_dest = vect_create_destination_var (scalar_dest,
3985 modifier == WIDEN
3986 ? vectype_out : cvt_type);
3987
3988 if (!slp_node)
3989 {
3990 if (modifier == WIDEN)
3991 {
3992 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3993 if (op_type == binary_op)
3994 vec_oprnds1.create (1);
3995 }
3996 else if (modifier == NARROW)
3997 vec_oprnds0.create (
3998 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3999 }
4000 else if (code == WIDEN_LSHIFT_EXPR)
4001 vec_oprnds1.create (slp_node->vec_stmts_size);
4002
4003 last_oprnd = op0;
4004 prev_stmt_info = NULL;
4005 switch (modifier)
4006 {
4007 case NONE:
4008 for (j = 0; j < ncopies; j++)
4009 {
4010 if (j == 0)
4011 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
4012 -1);
4013 else
4014 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4015
4016 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4017 {
4018 /* Arguments are ready, create the new vector stmt. */
4019 if (code1 == CALL_EXPR)
4020 {
4021 new_stmt = gimple_build_call (decl1, 1, vop0);
4022 new_temp = make_ssa_name (vec_dest, new_stmt);
4023 gimple_call_set_lhs (new_stmt, new_temp);
4024 }
4025 else
4026 {
4027 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
0d0e4a03 4028 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4a00c761
JJ
4029 new_temp = make_ssa_name (vec_dest, new_stmt);
4030 gimple_assign_set_lhs (new_stmt, new_temp);
4031 }
4032
4033 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4034 if (slp_node)
4035 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4036 else
4037 {
4038 if (!prev_stmt_info)
4039 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4040 else
4041 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4042 prev_stmt_info = vinfo_for_stmt (new_stmt);
4043 }
4044 }
4045 }
4046 break;
4047
4048 case WIDEN:
4049 /* In case the vectorization factor (VF) is bigger than the number
4050 of elements that we can fit in a vectype (nunits), we have to
4051 generate more than one vector stmt - i.e - we need to "unroll"
4052 the vector stmt by a factor VF/nunits. */
4053 for (j = 0; j < ncopies; j++)
4054 {
4055 /* Handle uses. */
4056 if (j == 0)
4057 {
4058 if (slp_node)
4059 {
4060 if (code == WIDEN_LSHIFT_EXPR)
4061 {
4062 unsigned int k;
4063
4064 vec_oprnd1 = op1;
4065 /* Store vec_oprnd1 for every vector stmt to be created
4066 for SLP_NODE. We check during the analysis that all
4067 the shift arguments are the same. */
4068 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4069 vec_oprnds1.quick_push (vec_oprnd1);
4070
4071 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4072 slp_node, -1);
4073 }
4074 else
4075 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4076 &vec_oprnds1, slp_node, -1);
4077 }
4078 else
4079 {
4080 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4081 vec_oprnds0.quick_push (vec_oprnd0);
4082 if (op_type == binary_op)
4083 {
4084 if (code == WIDEN_LSHIFT_EXPR)
4085 vec_oprnd1 = op1;
4086 else
4087 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4088 vec_oprnds1.quick_push (vec_oprnd1);
4089 }
4090 }
4091 }
4092 else
4093 {
4094 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4095 vec_oprnds0.truncate (0);
4096 vec_oprnds0.quick_push (vec_oprnd0);
4097 if (op_type == binary_op)
4098 {
4099 if (code == WIDEN_LSHIFT_EXPR)
4100 vec_oprnd1 = op1;
4101 else
4102 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4103 vec_oprnd1);
4104 vec_oprnds1.truncate (0);
4105 vec_oprnds1.quick_push (vec_oprnd1);
4106 }
4107 }
4108
4109 /* Arguments are ready. Create the new vector stmts. */
4110 for (i = multi_step_cvt; i >= 0; i--)
4111 {
4112 tree this_dest = vec_dsts[i];
4113 enum tree_code c1 = code1, c2 = code2;
4114 if (i == 0 && codecvt2 != ERROR_MARK)
4115 {
4116 c1 = codecvt1;
4117 c2 = codecvt2;
4118 }
4119 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4120 &vec_oprnds1,
4121 stmt, this_dest, gsi,
4122 c1, c2, decl1, decl2,
4123 op_type);
4124 }
4125
4126 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4127 {
4128 if (cvt_type)
4129 {
4130 if (codecvt1 == CALL_EXPR)
4131 {
4132 new_stmt = gimple_build_call (decl1, 1, vop0);
4133 new_temp = make_ssa_name (vec_dest, new_stmt);
4134 gimple_call_set_lhs (new_stmt, new_temp);
4135 }
4136 else
4137 {
4138 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4139 new_temp = make_ssa_name (vec_dest);
4140 new_stmt = gimple_build_assign (new_temp, codecvt1,
4141 vop0);
4142 }
4143
4144 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4145 }
4146 else
4147 new_stmt = SSA_NAME_DEF_STMT (vop0);
4148
4149 if (slp_node)
9771b263 4150 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4a00c761 4151 else
c689ce1e
RB
4152 {
4153 if (!prev_stmt_info)
4154 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4155 else
4156 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4157 prev_stmt_info = vinfo_for_stmt (new_stmt);
4158 }
4159 }
4160 }
4161
4162 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4163 break;
4164
4165 case NARROW:
4166 /* In case the vectorization factor (VF) is bigger than the number
4167 of elements that we can fit in a vectype (nunits), we have to
4168 generate more than one vector stmt - i.e - we need to "unroll"
4169 the vector stmt by a factor VF/nunits. */
4170 for (j = 0; j < ncopies; j++)
4171 {
4172 /* Handle uses. */
4173 if (slp_node)
4174 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4175 slp_node, -1);
4176 else
4177 {
4178 vec_oprnds0.truncate (0);
4179 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4180 vect_pow2 (multi_step_cvt) - 1);
4181 }
4182
4183 /* Arguments are ready. Create the new vector stmts. */
4184 if (cvt_type)
4185 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4186 {
4187 if (codecvt1 == CALL_EXPR)
4188 {
4189 new_stmt = gimple_build_call (decl1, 1, vop0);
4190 new_temp = make_ssa_name (vec_dest, new_stmt);
4191 gimple_call_set_lhs (new_stmt, new_temp);
4192 }
4193 else
4194 {
4195 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4196 new_temp = make_ssa_name (vec_dest);
4197 new_stmt = gimple_build_assign (new_temp, codecvt1,
4198 vop0);
4199 }
4200
4201 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4202 vec_oprnds0[i] = new_temp;
4203 }
4204
4205 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4206 stmt, vec_dsts, gsi,
4207 slp_node, code1,
4208 &prev_stmt_info);
4209 }
4210
4211 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4212 break;
4213 }
4214
4215 vec_oprnds0.release ();
4216 vec_oprnds1.release ();
4217 vec_dsts.release ();
4218 interm_types.release ();
4219
4220 return true;
4221}
4222
4223
4224/* Function vectorizable_assignment.
4225
4226 Check if STMT performs an assignment (copy) that can be vectorized.
4227 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4228 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4229 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4230
4231static bool
4232vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4233 gimple **vec_stmt, slp_tree slp_node)
4234{
4235 tree vec_dest;
4236 tree scalar_dest;
4237 tree op;
4238 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4239 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4240 tree new_temp;
4241 gimple *def_stmt;
4242 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4243 int ncopies;
4244 int i, j;
4245 vec<tree> vec_oprnds = vNULL;
4246 tree vop;
4247 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4248 vec_info *vinfo = stmt_info->vinfo;
4249 gimple *new_stmt = NULL;
4250 stmt_vec_info prev_stmt_info = NULL;
4251 enum tree_code code;
4252 tree vectype_in;
4253
4254 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4255 return false;
4256
4257 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4258 && ! vec_stmt)
4259 return false;
4260
4261 /* Is vectorizable assignment? */
4262 if (!is_gimple_assign (stmt))
4263 return false;
4264
4265 scalar_dest = gimple_assign_lhs (stmt);
4266 if (TREE_CODE (scalar_dest) != SSA_NAME)
4267 return false;
4268
4269 code = gimple_assign_rhs_code (stmt);
4270 if (gimple_assign_single_p (stmt)
4271 || code == PAREN_EXPR
4272 || CONVERT_EXPR_CODE_P (code))
4273 op = gimple_assign_rhs1 (stmt);
4274 else
4275 return false;
4276
4277 if (code == VIEW_CONVERT_EXPR)
4278 op = TREE_OPERAND (op, 0);
4279
4280 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4281 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4282
4283 /* Multiple types in SLP are handled by creating the appropriate number of
4284 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4285 case of SLP. */
4286 if (slp_node || PURE_SLP_STMT (stmt_info))
4287 ncopies = 1;
4288 else
4289 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4290
4291 gcc_assert (ncopies >= 1);
4292
4293 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4294 {
4295 if (dump_enabled_p ())
4296 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4297 "use not simple.\n");
4298 return false;
4299 }
4300
4301 /* We can handle NOP_EXPR conversions that do not change the number
4302 of elements or the vector size. */
4303 if ((CONVERT_EXPR_CODE_P (code)
4304 || code == VIEW_CONVERT_EXPR)
4305 && (!vectype_in
4306 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4307 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4308 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4309 return false;
4310
4311 /* We do not handle bit-precision changes. */
4312 if ((CONVERT_EXPR_CODE_P (code)
4313 || code == VIEW_CONVERT_EXPR)
4314 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4315 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4316 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4317 || ((TYPE_PRECISION (TREE_TYPE (op))
4318 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4319 /* But a conversion that does not change the bit-pattern is ok. */
4320 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4321 > TYPE_PRECISION (TREE_TYPE (op)))
4322 && TYPE_UNSIGNED (TREE_TYPE (op)))
4323 /* Conversion between boolean types of different sizes is
4324 a simple assignment in case their vectypes are same
4325 boolean vectors. */
4326 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4327 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4328 {
4329 if (dump_enabled_p ())
4330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4331 "type conversion to/from bit-precision "
4332 "unsupported.\n");
4333 return false;
4334 }
4335
ebfd146a
IR
4336 if (!vec_stmt) /* transformation not required. */
4337 {
4338 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4339 if (dump_enabled_p ())
4340 dump_printf_loc (MSG_NOTE, vect_location,
4341 "=== vectorizable_assignment ===\n");
4342 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4343 return true;
4344 }
4345
4346 /** Transform. **/
4347 if (dump_enabled_p ())
4348 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4349
4350 /* Handle def. */
4351 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4352
4353 /* Handle use. */
4354 for (j = 0; j < ncopies; j++)
4355 {
4356 /* Handle uses. */
4357 if (j == 0)
4358 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4359 else
4360 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4361
4362 /* Arguments are ready. Create the new vector stmt. */
4363 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4364 {
4365 if (CONVERT_EXPR_CODE_P (code)
4366 || code == VIEW_CONVERT_EXPR)
4367 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4368 new_stmt = gimple_build_assign (vec_dest, vop);
4369 new_temp = make_ssa_name (vec_dest, new_stmt);
4370 gimple_assign_set_lhs (new_stmt, new_temp);
4371 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4372 if (slp_node)
4373 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4374 }
4375
4376 if (slp_node)
4377 continue;
4378
4379 if (j == 0)
4380 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4381 else
4382 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4383
4384 prev_stmt_info = vinfo_for_stmt (new_stmt);
4385 }
4386
4387 vec_oprnds.release ();
4388 return true;
4389}
4390
4391
4392/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4393 either as shift by a scalar or by a vector. */
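/* For example, vect_supportable_shift (LSHIFT_EXPR, short_integer_type_node)
   returns true iff the target can left-shift a vector of shorts either by
   a single scalar amount (optab_scalar) or by a per-element vector of
   amounts (optab_vector).  */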
4394
4395bool
4396vect_supportable_shift (enum tree_code code, tree scalar_type)
4397{
4398
4399 machine_mode vec_mode;
4400 optab optab;
4401 int icode;
4402 tree vectype;
4403
4404 vectype = get_vectype_for_scalar_type (scalar_type);
4405 if (!vectype)
4406 return false;
4407
4408 optab = optab_for_tree_code (code, vectype, optab_scalar);
4409 if (!optab
4410 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4411 {
4412 optab = optab_for_tree_code (code, vectype, optab_vector);
4413 if (!optab
4414 || (optab_handler (optab, TYPE_MODE (vectype))
4415 == CODE_FOR_nothing))
4416 return false;
4417 }
4418
4419 vec_mode = TYPE_MODE (vectype);
4420 icode = (int) optab_handler (optab, vec_mode);
4421 if (icode == CODE_FOR_nothing)
4422 return false;
4423
4424 return true;
4425}
4426
4427
4428/* Function vectorizable_shift.
4429
4430 Check if STMT performs a shift operation that can be vectorized.
4431 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4432 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4433 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4434
4435static bool
4436vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4437 gimple **vec_stmt, slp_tree slp_node)
4438{
4439 tree vec_dest;
4440 tree scalar_dest;
4441 tree op0, op1 = NULL;
4442 tree vec_oprnd1 = NULL_TREE;
4443 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4444 tree vectype;
4445 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4446 enum tree_code code;
4447 machine_mode vec_mode;
4448 tree new_temp;
4449 optab optab;
4450 int icode;
4451 machine_mode optab_op2_mode;
4452 gimple *def_stmt;
4453 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4454 gimple *new_stmt = NULL;
4455 stmt_vec_info prev_stmt_info;
4456 int nunits_in;
4457 int nunits_out;
4458 tree vectype_out;
4459 tree op1_vectype;
4460 int ncopies;
4461 int j, i;
4462 vec<tree> vec_oprnds0 = vNULL;
4463 vec<tree> vec_oprnds1 = vNULL;
4464 tree vop0, vop1;
4465 unsigned int k;
4466 bool scalar_shift_arg = true;
4467 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4468 vec_info *vinfo = stmt_info->vinfo;
4469 int vf;
4470
4471 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4472 return false;
4473
66c16fd9
RB
4474 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4475 && ! vec_stmt)
9dc3f7de
IR
4476 return false;
4477
4478 /* Is STMT a vectorizable binary/unary operation? */
4479 if (!is_gimple_assign (stmt))
4480 return false;
4481
4482 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4483 return false;
4484
4485 code = gimple_assign_rhs_code (stmt);
4486
4487 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4488 || code == RROTATE_EXPR))
4489 return false;
4490
4491 scalar_dest = gimple_assign_lhs (stmt);
4492 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4493 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4494 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4495 {
4496 if (dump_enabled_p ())
4497 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4498 "bit-precision shifts not supported.\n");
4499 return false;
4500 }
4501
4502 op0 = gimple_assign_rhs1 (stmt);
4503 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4504 {
4505 if (dump_enabled_p ())
4506 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4507 "use not simple.\n");
4508 return false;
4509 }
4510 /* If op0 is an external or constant def, use a vector type with
4511 the same size as the output vector type. */
4512 if (!vectype)
4513 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4514 if (vec_stmt)
4515 gcc_assert (vectype);
4516 if (!vectype)
4517 {
4518 if (dump_enabled_p ())
4519 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4520 "no vectype for scalar type\n");
4521 return false;
4522 }
4523
4524 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4525 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4526 if (nunits_out != nunits_in)
4527 return false;
4528
4529 op1 = gimple_assign_rhs2 (stmt);
4530 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4531 {
4532 if (dump_enabled_p ())
4533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4534 "use not simple.\n");
4535 return false;
4536 }
4537
4538 if (loop_vinfo)
4539 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4540 else
4541 vf = 1;
4542
4543 /* Multiple types in SLP are handled by creating the appropriate number of
4544 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4545 case of SLP. */
4546 if (slp_node || PURE_SLP_STMT (stmt_info))
4547 ncopies = 1;
4548 else
4549 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4550
4551 gcc_assert (ncopies >= 1);
4552
4553 /* Determine whether the shift amount is a vector or a scalar. If the
4554 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4555
4556 if ((dt[1] == vect_internal_def
4557 || dt[1] == vect_induction_def)
4558 && !slp_node)
4559 scalar_shift_arg = false;
4560 else if (dt[1] == vect_constant_def
4561 || dt[1] == vect_external_def
4562 || dt[1] == vect_internal_def)
4563 {
4564 /* In SLP, need to check whether the shift count is the same,
4565 in loops if it is a constant or invariant, it is always
4566 a scalar shift. */
4567 if (slp_node)
4568 {
4569 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4570 gimple *slpstmt;
4571
4572 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4573 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4574 scalar_shift_arg = false;
4575 }
4576 }
4577 else
4578 {
4579 if (dump_enabled_p ())
4580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4581 "operand mode requires invariant argument.\n");
4582 return false;
4583 }
4584
4585 /* Vector shifted by vector. */
4586 if (!scalar_shift_arg)
4587 {
4588 optab = optab_for_tree_code (code, vectype, optab_vector);
4589 if (dump_enabled_p ())
4590 dump_printf_loc (MSG_NOTE, vect_location,
4591 "vector/vector shift/rotate found.\n");
4592
4593 if (!op1_vectype)
4594 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4595 if (op1_vectype == NULL_TREE
4596 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4597 {
4598 if (dump_enabled_p ())
4599 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4600 "unusable type for last operand in"
e645e942 4601 " vector/vector shift/rotate.\n");
4602 return false;
4603 }
4604 }
4605 /* See if the machine has a vector shifted by scalar insn and if not
4606 then see if it has a vector shifted by vector insn. */
4607 else
4608 {
4609 optab = optab_for_tree_code (code, vectype, optab_scalar);
4610 if (optab
4611 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4612 {
4613 if (dump_enabled_p ())
4614 dump_printf_loc (MSG_NOTE, vect_location,
4615 "vector/scalar shift/rotate found.\n");
4616 }
4617 else
4618 {
4619 optab = optab_for_tree_code (code, vectype, optab_vector);
4620 if (optab
4621 && (optab_handler (optab, TYPE_MODE (vectype))
4622 != CODE_FOR_nothing))
4623 {
4624 scalar_shift_arg = false;
4625
4626 if (dump_enabled_p ())
4627 dump_printf_loc (MSG_NOTE, vect_location,
4628 "vector/vector shift/rotate found.\n");
4629
4630 /* Unlike the other binary operators, shifts/rotates have
4631 the rhs being int, instead of the same type as the lhs,
4632 so make sure the scalar is the right type if we are
4633 dealing with vectors of long long/long/short/char. */
9dc3f7de
IR
4634 if (dt[1] == vect_constant_def)
4635 op1 = fold_convert (TREE_TYPE (vectype), op1);
aa948027
JJ
4636 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4637 TREE_TYPE (op1)))
4638 {
4639 if (slp_node
4640 && TYPE_MODE (TREE_TYPE (vectype))
4641 != TYPE_MODE (TREE_TYPE (op1)))
4642 {
4643 if (dump_enabled_p ())
4644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4645 "unusable type for last operand in"
4646 " vector/vector shift/rotate.\n");
4647 return false;
4648 }
4649 if (vec_stmt && !slp_node)
4650 {
4651 op1 = fold_convert (TREE_TYPE (vectype), op1);
4652 op1 = vect_init_vector (stmt, op1,
4653 TREE_TYPE (vectype), NULL);
4654 }
4655 }
4656 }
4657 }
4658 }
4659
4660 /* Supportable by target? */
4661 if (!optab)
4662 {
4663 if (dump_enabled_p ())
4664 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4665 "no optab.\n");
4666 return false;
4667 }
4668 vec_mode = TYPE_MODE (vectype);
4669 icode = (int) optab_handler (optab, vec_mode);
4670 if (icode == CODE_FOR_nothing)
4671 {
4672 if (dump_enabled_p ())
4673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4674 "op not supported by target.\n");
4675 /* Check only during analysis. */
4676 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4677 || (vf < vect_min_worthwhile_factor (code)
4678 && !vec_stmt))
4679 return false;
4680 if (dump_enabled_p ())
4681 dump_printf_loc (MSG_NOTE, vect_location,
4682 "proceeding using word mode.\n");
4683 }
4684
4685 /* Worthwhile without SIMD support? Check only during analysis. */
4686 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4687 && vf < vect_min_worthwhile_factor (code)
4688 && !vec_stmt)
4689 {
4690 if (dump_enabled_p ())
4691 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4692 "not worthwhile without SIMD support.\n");
4693 return false;
4694 }
4695
4696 if (!vec_stmt) /* transformation not required. */
4697 {
4698 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4699 if (dump_enabled_p ())
4700 dump_printf_loc (MSG_NOTE, vect_location,
4701 "=== vectorizable_shift ===\n");
4702 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4703 return true;
4704 }
4705
4706 /** Transform. **/
4707
4708 if (dump_enabled_p ())
4709 dump_printf_loc (MSG_NOTE, vect_location,
4710 "transform binary/unary operation.\n");
4711
4712 /* Handle def. */
4713 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4714
4715 prev_stmt_info = NULL;
4716 for (j = 0; j < ncopies; j++)
4717 {
4718 /* Handle uses. */
4719 if (j == 0)
4720 {
4721 if (scalar_shift_arg)
4722 {
4723 /* Vector shl and shr insn patterns can be defined with scalar
4724 operand 2 (shift operand). In this case, use constant or loop
4725 invariant op1 directly, without extending it to vector mode
4726 first. */
4727 optab_op2_mode = insn_data[icode].operand[2].mode;
4728 if (!VECTOR_MODE_P (optab_op2_mode))
4729 {
4730 if (dump_enabled_p ())
4731 dump_printf_loc (MSG_NOTE, vect_location,
4732 "operand 1 using scalar mode.\n");
4733 vec_oprnd1 = op1;
4734 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4735 vec_oprnds1.quick_push (vec_oprnd1);
4736 if (slp_node)
4737 {
4738 /* Store vec_oprnd1 for every vector stmt to be created
4739 for SLP_NODE. We check during the analysis that all
4740 the shift arguments are the same.
4741 TODO: Allow different constants for different vector
4742 stmts generated for an SLP instance. */
4743 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4744 vec_oprnds1.quick_push (vec_oprnd1);
4745 }
4746 }
4747 }
4748
4749 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4750 (a special case for certain kind of vector shifts); otherwise,
4751 operand 1 should be of a vector type (the usual case). */
4752 if (vec_oprnd1)
4753 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4754 slp_node, -1);
4755 else
4756 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4757 slp_node, -1);
4758 }
4759 else
4760 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4761
4762 /* Arguments are ready. Create the new vector stmt. */
4763 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4764 {
4765 vop1 = vec_oprnds1[i];
4766 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4767 new_temp = make_ssa_name (vec_dest, new_stmt);
4768 gimple_assign_set_lhs (new_stmt, new_temp);
4769 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4770 if (slp_node)
4771 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4772 }
4773
4774 if (slp_node)
4775 continue;
4776
4777 if (j == 0)
4778 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4779 else
4780 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4781 prev_stmt_info = vinfo_for_stmt (new_stmt);
4782 }
4783
4784 vec_oprnds0.release ();
4785 vec_oprnds1.release ();
4786
4787 return true;
4788}
4789
4790
4791/* Function vectorizable_operation.
4792
4793 Check if STMT performs a binary, unary or ternary operation that can
4794 be vectorized.
4795 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4796 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4797 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4798
4799static bool
4800vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4801 gimple **vec_stmt, slp_tree slp_node)
4802{
4803 tree vec_dest;
4804 tree scalar_dest;
4805 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4806 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4807 tree vectype;
4808 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4809 enum tree_code code;
4810 machine_mode vec_mode;
4811 tree new_temp;
4812 int op_type;
4813 optab optab;
4814 bool target_support_p;
4815 gimple *def_stmt;
4816 enum vect_def_type dt[3]
4817 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4818 gimple *new_stmt = NULL;
4819 stmt_vec_info prev_stmt_info;
4820 int nunits_in;
4821 int nunits_out;
4822 tree vectype_out;
4823 int ncopies;
4824 int j, i;
4825 vec<tree> vec_oprnds0 = vNULL;
4826 vec<tree> vec_oprnds1 = vNULL;
4827 vec<tree> vec_oprnds2 = vNULL;
4828 tree vop0, vop1, vop2;
4829 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4830 vec_info *vinfo = stmt_info->vinfo;
4831 int vf;
4832
4833 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4834 return false;
4835
4836 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4837 && ! vec_stmt)
4838 return false;
4839
4840 /* Is STMT a vectorizable binary/unary operation? */
4841 if (!is_gimple_assign (stmt))
4842 return false;
4843
4844 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4845 return false;
4846
4847 code = gimple_assign_rhs_code (stmt);
4848
4849 /* For pointer addition, we should use the normal plus for
4850 the vector addition. */
4851 if (code == POINTER_PLUS_EXPR)
4852 code = PLUS_EXPR;
4853
 4854 /* Support only unary, binary or ternary operations. */
4855 op_type = TREE_CODE_LENGTH (code);
16949072 4856 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
ebfd146a 4857 {
73fbfcad 4858 if (dump_enabled_p ())
78c60e3d 4859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4860 "num. args = %d (not unary/binary/ternary op).\n",
78c60e3d 4861 op_type);
4862 return false;
4863 }
4864
4865 scalar_dest = gimple_assign_lhs (stmt);
4866 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4867
4868 /* Most operations cannot handle bit-precision types without extra
4869 truncations. */
4870 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4871 && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4872 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
 4873 /* Exceptions are bitwise binary operations. */
4874 && code != BIT_IOR_EXPR
4875 && code != BIT_XOR_EXPR
4876 && code != BIT_AND_EXPR)
4877 {
73fbfcad 4878 if (dump_enabled_p ())
78c60e3d 4879 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4880 "bit-precision arithmetic not supported.\n");
4881 return false;
4882 }
4883
ebfd146a 4884 op0 = gimple_assign_rhs1 (stmt);
81c40241 4885 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
ebfd146a 4886 {
73fbfcad 4887 if (dump_enabled_p ())
78c60e3d 4888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4889 "use not simple.\n");
4890 return false;
4891 }
4892 /* If op0 is an external or constant def use a vector type with
4893 the same size as the output vector type. */
4894 if (!vectype)
4895 {
 4896 /* For a boolean type we cannot determine the vectype from an
 4897 invariant value (we don't know whether it is a vector
 4898 of booleans or a vector of integers). We use the output
 4899 vectype because operations on booleans don't change
 4900 the type. */
4901 if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
4902 {
4903 if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
4904 {
4905 if (dump_enabled_p ())
4906 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4907 "not supported operation on bool value.\n");
4908 return false;
4909 }
4910 vectype = vectype_out;
4911 }
4912 else
4913 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4914 }
4915 if (vec_stmt)
4916 gcc_assert (vectype);
4917 if (!vectype)
4918 {
73fbfcad 4919 if (dump_enabled_p ())
7d8930a0 4920 {
4921 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4922 "no vectype for scalar type ");
4923 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4924 TREE_TYPE (op0));
e645e942 4925 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4926 }
4927
4928 return false;
4929 }
4930
4931 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4932 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4933 if (nunits_out != nunits_in)
4934 return false;
ebfd146a 4935
16949072 4936 if (op_type == binary_op || op_type == ternary_op)
4937 {
4938 op1 = gimple_assign_rhs2 (stmt);
81c40241 4939 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
ebfd146a 4940 {
73fbfcad 4941 if (dump_enabled_p ())
78c60e3d 4942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4943 "use not simple.\n");
4944 return false;
4945 }
4946 }
4947 if (op_type == ternary_op)
4948 {
4949 op2 = gimple_assign_rhs3 (stmt);
81c40241 4950 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
16949072 4951 {
73fbfcad 4952 if (dump_enabled_p ())
78c60e3d 4953 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4954 "use not simple.\n");
4955 return false;
4956 }
4957 }
ebfd146a 4958
4959 if (loop_vinfo)
4960 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4961 else
4962 vf = 1;
4963
4964 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 4965 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
b690cc0f 4966 case of SLP. */
437f4a00 4967 if (slp_node || PURE_SLP_STMT (stmt_info))
4968 ncopies = 1;
4969 else
4970 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4971
4972 gcc_assert (ncopies >= 1);
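
 /* A worked instance of the NCOPIES computation (illustrative only):
    with LOOP_VINFO_VECT_FACTOR == 8 and a V4SI vectype (nunits_in == 4)
    the loop path needs ncopies = 8 / 4 = 2 copies of each vector stmt,
    whereas the SLP path always uses ncopies == 1 and instead scales the
    number of vector stmts per SLP node.  */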
4973
9dc3f7de 4974 /* Shifts are handled in vectorizable_shift (). */
4975 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4976 || code == RROTATE_EXPR)
9dc3f7de 4977 return false;
ebfd146a 4978
ebfd146a 4979 /* Supportable by target? */
4980
4981 vec_mode = TYPE_MODE (vectype);
4982 if (code == MULT_HIGHPART_EXPR)
523ba738 4983 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4984 else
4985 {
4986 optab = optab_for_tree_code (code, vectype, optab_default);
4987 if (!optab)
5deb57cb 4988 {
73fbfcad 4989 if (dump_enabled_p ())
78c60e3d 4990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 4991 "no optab.\n");
00f07b86 4992 return false;
5deb57cb 4993 }
4994 target_support_p = (optab_handler (optab, vec_mode)
4995 != CODE_FOR_nothing);
4996 }
4997
523ba738 4998 if (!target_support_p)
ebfd146a 4999 {
73fbfcad 5000 if (dump_enabled_p ())
78c60e3d 5001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5002 "op not supported by target.\n");
5003 /* Check only during analysis. */
5004 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5deb57cb 5005 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
ebfd146a 5006 return false;
73fbfcad 5007 if (dump_enabled_p ())
5008 dump_printf_loc (MSG_NOTE, vect_location,
5009 "proceeding using word mode.\n");
5010 }
5011
4a00c761 5012 /* Worthwhile without SIMD support? Check only during analysis. */
5013 if (!VECTOR_MODE_P (vec_mode)
5014 && !vec_stmt
5015 && vf < vect_min_worthwhile_factor (code))
7d8930a0 5016 {
73fbfcad 5017 if (dump_enabled_p ())
78c60e3d 5018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5019 "not worthwhile without SIMD support.\n");
e34842c6 5020 return false;
7d8930a0 5021 }
ebfd146a 5022
5023 if (!vec_stmt) /* transformation not required. */
5024 {
4a00c761 5025 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
73fbfcad 5026 if (dump_enabled_p ())
78c60e3d 5027 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 5028 "=== vectorizable_operation ===\n");
c3e7ee41 5029 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
5030 return true;
5031 }
5032
5033 /** Transform. **/
5034
73fbfcad 5035 if (dump_enabled_p ())
78c60e3d 5036 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 5037 "transform binary/unary operation.\n");
383d9c83 5038
ebfd146a 5039 /* Handle def. */
00f07b86 5040 vec_dest = vect_create_destination_var (scalar_dest, vectype);
b8698a0f 5041
5042 /* In case the vectorization factor (VF) is bigger than the number
5043 of elements that we can fit in a vectype (nunits), we have to generate
 5044 more than one vector stmt - i.e. - we need to "unroll" the
5045 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5046 from one copy of the vector stmt to the next, in the field
5047 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5048 stages to find the correct vector defs to be used when vectorizing
5049 stmts that use the defs of the current stmt. The example below
5050 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5051 we need to create 4 vectorized stmts):
5052
5053 before vectorization:
5054 RELATED_STMT VEC_STMT
5055 S1: x = memref - -
5056 S2: z = x + 1 - -
5057
5058 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5059 there):
5060 RELATED_STMT VEC_STMT
5061 VS1_0: vx0 = memref0 VS1_1 -
5062 VS1_1: vx1 = memref1 VS1_2 -
5063 VS1_2: vx2 = memref2 VS1_3 -
5064 VS1_3: vx3 = memref3 - -
5065 S1: x = load - VS1_0
5066 S2: z = x + 1 - -
5067
5068 step2: vectorize stmt S2 (done here):
5069 To vectorize stmt S2 we first need to find the relevant vector
5070 def for the first operand 'x'. This is, as usual, obtained from
5071 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5072 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5073 relevant vector def 'vx0'. Having found 'vx0' we can generate
5074 the vector stmt VS2_0, and as usual, record it in the
5075 STMT_VINFO_VEC_STMT of stmt S2.
5076 When creating the second copy (VS2_1), we obtain the relevant vector
5077 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5078 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5079 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5080 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5081 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5082 chain of stmts and pointers:
5083 RELATED_STMT VEC_STMT
5084 VS1_0: vx0 = memref0 VS1_1 -
5085 VS1_1: vx1 = memref1 VS1_2 -
5086 VS1_2: vx2 = memref2 VS1_3 -
5087 VS1_3: vx3 = memref3 - -
5088 S1: x = load - VS1_0
5089 VS2_0: vz0 = vx0 + v1 VS2_1 -
5090 VS2_1: vz1 = vx1 + v1 VS2_2 -
5091 VS2_2: vz2 = vx2 + v1 VS2_3 -
5092 VS2_3: vz3 = vx3 + v1 - -
5093 S2: z = x + 1 - VS2_0 */
5094
5095 prev_stmt_info = NULL;
5096 for (j = 0; j < ncopies; j++)
5097 {
5098 /* Handle uses. */
5099 if (j == 0)
5100 {
5101 if (op_type == binary_op || op_type == ternary_op)
5102 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5103 slp_node, -1);
5104 else
5105 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5106 slp_node, -1);
5107 if (op_type == ternary_op)
36ba4aae 5108 {
5109 vec_oprnds2.create (1);
5110 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
81c40241 5111 stmt));
36ba4aae 5112 }
4a00c761 5113 }
ebfd146a 5114 else
5115 {
5116 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5117 if (op_type == ternary_op)
5118 {
5119 tree vec_oprnd = vec_oprnds2.pop ();
5120 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5121 vec_oprnd));
5122 }
5123 }
5124
5125 /* Arguments are ready. Create the new vector stmt. */
9771b263 5126 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
ebfd146a 5127 {
4a00c761 5128 vop1 = ((op_type == binary_op || op_type == ternary_op)
9771b263 5129 ? vec_oprnds1[i] : NULL_TREE);
4a00c761 5130 vop2 = ((op_type == ternary_op)
9771b263 5131 ? vec_oprnds2[i] : NULL_TREE);
0d0e4a03 5132 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5133 new_temp = make_ssa_name (vec_dest, new_stmt);
5134 gimple_assign_set_lhs (new_stmt, new_temp);
5135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5136 if (slp_node)
9771b263 5137 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5138 }
5139
5140 if (slp_node)
5141 continue;
5142
5143 if (j == 0)
5144 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5145 else
5146 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5147 prev_stmt_info = vinfo_for_stmt (new_stmt);
5148 }
5149
5150 vec_oprnds0.release ();
5151 vec_oprnds1.release ();
5152 vec_oprnds2.release ();
ebfd146a 5153
5154 return true;
5155}
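
/* A minimal sketch of the transformation performed above (illustrative
   only): a scalar binary op such as

     S2: z_1 = x_2 + y_3

   with a V4SI vectype becomes

     vect_z = vect_x + vect_y;

   built by gimple_build_assign (vec_dest, code, vop0, vop1, vop2) with
   the unused trailing operands passed as NULL_TREE, once per copy, the
   copies being chained through STMT_VINFO_RELATED_STMT.  */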
5156
5157/* A helper function to ensure data reference DR's base alignment
5158 for STMT_INFO. */
5159
5160static void
5161ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5162{
5163 if (!dr->aux)
5164 return;
5165
52639a61 5166 if (DR_VECT_AUX (dr)->base_misaligned)
5167 {
5168 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
52639a61 5169 tree base_decl = DR_VECT_AUX (dr)->base_decl;
c716e67f 5170
5171 if (decl_in_symtab_p (base_decl))
5172 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5173 else
5174 {
5175 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
5176 DECL_USER_ALIGN (base_decl) = 1;
5177 }
52639a61 5178 DR_VECT_AUX (dr)->base_misaligned = false;
5179 }
5180}
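
/* For illustration (assumed example, not from the original sources):
   if the data reference is based on a decl such as

     static float a[256];     (DECL_ALIGN of, say, 32 bits)

   and the vectype is V4SF with TYPE_ALIGN == 128, the decl's alignment
   is raised to 128 bits - through the symbol table for decls visible
   there, otherwise via DECL_ALIGN and DECL_USER_ALIGN directly.  */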
5181
ebfd146a 5182
5183/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
5184 reversal of the vector elements. If that is impossible to do,
5185 returns NULL. */
5186
5187static tree
5188perm_mask_for_reverse (tree vectype)
5189{
5190 int i, nunits;
5191 unsigned char *sel;
5192
5193 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5194 sel = XALLOCAVEC (unsigned char, nunits);
5195
5196 for (i = 0; i < nunits; ++i)
5197 sel[i] = nunits - 1 - i;
5198
5199 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5200 return NULL_TREE;
5201 return vect_gen_perm_mask_checked (vectype, sel);
5202}
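
/* For example (illustrative): with a V4SI vectype the selector built
   above is {3, 2, 1, 0}, a full element reversal; when can_vec_perm_p
   reports that the target cannot perform this permutation, NULL_TREE
   is returned and callers must avoid the reversing strategy.  */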
5203
5204/* Function vectorizable_store.
5205
 5206 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5207 can be vectorized.
5208 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5209 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5210 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5211
5212static bool
355fe088 5213vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
c716e67f 5214 slp_tree slp_node)
5215{
5216 tree scalar_dest;
5217 tree data_ref;
5218 tree op;
5219 tree vec_oprnd = NULL_TREE;
5220 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5221 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
272c6793 5222 tree elem_type;
ebfd146a 5223 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 5224 struct loop *loop = NULL;
ef4bddc2 5225 machine_mode vec_mode;
5226 tree dummy;
5227 enum dr_alignment_support alignment_support_scheme;
355fe088 5228 gimple *def_stmt;
5229 enum vect_def_type dt;
5230 stmt_vec_info prev_stmt_info = NULL;
5231 tree dataref_ptr = NULL_TREE;
74bf76ed 5232 tree dataref_offset = NULL_TREE;
355fe088 5233 gimple *ptr_incr = NULL;
5234 int ncopies;
5235 int j;
355fe088 5236 gimple *next_stmt, *first_stmt = NULL;
0d0293ac 5237 bool grouped_store = false;
272c6793 5238 bool store_lanes_p = false;
ebfd146a 5239 unsigned int group_size, i;
5240 vec<tree> dr_chain = vNULL;
5241 vec<tree> oprnds = vNULL;
5242 vec<tree> result_chain = vNULL;
ebfd146a 5243 bool inv_p;
5244 bool negative = false;
5245 tree offset = NULL_TREE;
6e1aa848 5246 vec<tree> vec_oprnds = vNULL;
ebfd146a 5247 bool slp = (slp_node != NULL);
ebfd146a 5248 unsigned int vec_num;
a70d6342 5249 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
310213d4 5250 vec_info *vinfo = stmt_info->vinfo;
272c6793 5251 tree aggr_type;
5252 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5253 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5254 int scatter_scale = 1;
5255 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5256 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
355fe088 5257 gimple *new_stmt;
a70d6342 5258
a70d6342 5259 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5260 return false;
5261
5262 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5263 && ! vec_stmt)
5264 return false;
5265
5266 /* Is vectorizable store? */
5267
5268 if (!is_gimple_assign (stmt))
5269 return false;
5270
5271 scalar_dest = gimple_assign_lhs (stmt);
5272 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5273 && is_pattern_stmt_p (stmt_info))
5274 scalar_dest = TREE_OPERAND (scalar_dest, 0);
ebfd146a 5275 if (TREE_CODE (scalar_dest) != ARRAY_REF
38000232 5276 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
ebfd146a 5277 && TREE_CODE (scalar_dest) != INDIRECT_REF
5278 && TREE_CODE (scalar_dest) != COMPONENT_REF
5279 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5280 && TREE_CODE (scalar_dest) != REALPART_EXPR
5281 && TREE_CODE (scalar_dest) != MEM_REF)
5282 return false;
5283
5284 gcc_assert (gimple_assign_single_p (stmt));
465c8c19 5285
f4d09712 5286 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5287 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5288
5289 if (loop_vinfo)
5290 loop = LOOP_VINFO_LOOP (loop_vinfo);
5291
5292 /* Multiple types in SLP are handled by creating the appropriate number of
5293 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5294 case of SLP. */
5295 if (slp || PURE_SLP_STMT (stmt_info))
5296 ncopies = 1;
5297 else
5298 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5299
5300 gcc_assert (ncopies >= 1);
5301
5302 /* FORNOW. This restriction should be relaxed. */
5303 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5304 {
5305 if (dump_enabled_p ())
5306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5307 "multiple types in nested loop.\n");
5308 return false;
5309 }
5310
ebfd146a 5311 op = gimple_assign_rhs1 (stmt);
5312
5313 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
ebfd146a 5314 {
73fbfcad 5315 if (dump_enabled_p ())
78c60e3d 5316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5317 "use not simple.\n");
5318 return false;
5319 }
5320
5321 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5322 return false;
5323
272c6793 5324 elem_type = TREE_TYPE (vectype);
ebfd146a 5325 vec_mode = TYPE_MODE (vectype);
7b7b1813 5326
5327 /* FORNOW. In some cases can vectorize even if data-type not supported
5328 (e.g. - array initialization with 0). */
947131ba 5329 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5330 return false;
5331
5332 if (!STMT_VINFO_DATA_REF (stmt_info))
5333 return false;
5334
f2e2a985 5335 if (!STMT_VINFO_STRIDED_P (stmt_info))
09dfa495 5336 {
5337 negative =
5338 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5339 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5340 size_zero_node) < 0;
5341 if (negative && ncopies > 1)
5342 {
5343 if (dump_enabled_p ())
5344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
f2e2a985 5345 "multiple types with negative step.\n");
5346 return false;
5347 }
f2e2a985 5348 if (negative)
09dfa495 5349 {
5350 gcc_assert (!grouped_store);
5351 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5352 if (alignment_support_scheme != dr_aligned
5353 && alignment_support_scheme != dr_unaligned_supported)
5354 {
5355 if (dump_enabled_p ())
5356 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5357 "negative step but alignment required.\n");
5358 return false;
5359 }
5360 if (dt != vect_constant_def
5361 && dt != vect_external_def
5362 && !perm_mask_for_reverse (vectype))
5363 {
5364 if (dump_enabled_p ())
5365 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5366 "negative step and reversing not supported.\n");
5367 return false;
5368 }
5369 }
5370 }
5371
0d0293ac 5372 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
ebfd146a 5373 {
0d0293ac 5374 grouped_store = true;
e14c1050 5375 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5376 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5377 if (!slp
5378 && !PURE_SLP_STMT (stmt_info)
5379 && !STMT_VINFO_STRIDED_P (stmt_info))
b602d918 5380 {
5381 if (vect_store_lanes_supported (vectype, group_size))
5382 store_lanes_p = true;
0d0293ac 5383 else if (!vect_grouped_store_supported (vectype, group_size))
5384 return false;
5385 }
b8698a0f 5386
5387 if (STMT_VINFO_STRIDED_P (stmt_info)
5388 && (slp || PURE_SLP_STMT (stmt_info))
5389 && (group_size > nunits
5390 || nunits % group_size != 0))
5391 {
5392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5393 "unhandled strided group store\n");
5394 return false;
5395 }
5396
5397 if (first_stmt == stmt)
5398 {
5399 /* STMT is the leader of the group. Check the operands of all the
5400 stmts of the group. */
e14c1050 5401 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5402 while (next_stmt)
5403 {
5404 gcc_assert (gimple_assign_single_p (next_stmt));
5405 op = gimple_assign_rhs1 (next_stmt);
81c40241 5406 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
ebfd146a 5407 {
73fbfcad 5408 if (dump_enabled_p ())
78c60e3d 5409 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 5410 "use not simple.\n");
5411 return false;
5412 }
e14c1050 5413 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5414 }
5415 }
5416 }
5417
5418 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5419 {
355fe088 5420 gimple *def_stmt;
5421 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5422 &scatter_off, &scatter_scale);
5423 gcc_assert (scatter_decl);
5424 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5425 &scatter_off_vectype))
5426 {
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5429 "scatter index use not simple.");
5430 return false;
5431 }
5432 }
5433
5434 if (!vec_stmt) /* transformation not required. */
5435 {
5436 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5437 /* The SLP costs are calculated during SLP analysis. */
5438 if (!PURE_SLP_STMT (stmt_info))
5439 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5440 NULL, NULL, NULL);
5441 return true;
5442 }
5443
5444 /** Transform. **/
5445
5446 ensure_base_align (stmt_info, dr);
5447
5448 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5449 {
5450 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5451 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5452 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5453 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5454 edge pe = loop_preheader_edge (loop);
5455 gimple_seq seq;
5456 basic_block new_bb;
5457 enum { NARROW, NONE, WIDEN } modifier;
5458 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5459
5460 if (nunits == (unsigned int) scatter_off_nunits)
5461 modifier = NONE;
5462 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5463 {
5464 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5465 modifier = WIDEN;
5466
5467 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5468 sel[i] = i | nunits;
5469
5470 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5471 gcc_assert (perm_mask != NULL_TREE);
5472 }
5473 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5474 {
5475 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5476 modifier = NARROW;
5477
5478 for (i = 0; i < (unsigned int) nunits; ++i)
5479 sel[i] = i | scatter_off_nunits;
5480
5481 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5482 gcc_assert (perm_mask != NULL_TREE);
5483 ncopies *= 2;
5484 }
5485 else
5486 gcc_unreachable ();
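
 /* A worked instance of the masks built above (illustrative only):
    for V4DI data (nunits == 4) and V8SI offsets (scatter_off_nunits
    == 8) we get modifier == WIDEN and sel == {4, 5, 6, 7, 4, 5, 6, 7},
    which moves the upper half of the offset vector into place for the
    second scatter call; the NARROW case builds the analogous mask over
    the data vector and doubles NCOPIES instead.  */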
5487
5488 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5489 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5490 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5491 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5492 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5493 scaletype = TREE_VALUE (arglist);
5494
5495 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5496 && TREE_CODE (rettype) == VOID_TYPE);
5497
5498 ptr = fold_convert (ptrtype, scatter_base);
5499 if (!is_gimple_min_invariant (ptr))
5500 {
5501 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5502 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5503 gcc_assert (!new_bb);
5504 }
5505
5506 /* Currently we support only unconditional scatter stores,
5507 so mask should be all ones. */
5508 mask = build_int_cst (masktype, -1);
5509 mask = vect_init_vector (stmt, mask, masktype, NULL);
5510
5511 scale = build_int_cst (scaletype, scatter_scale);
5512
5513 prev_stmt_info = NULL;
5514 for (j = 0; j < ncopies; ++j)
5515 {
5516 if (j == 0)
5517 {
5518 src = vec_oprnd1
81c40241 5519 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
3bab6342 5520 op = vec_oprnd0
81c40241 5521 = vect_get_vec_def_for_operand (scatter_off, stmt);
5522 }
5523 else if (modifier != NONE && (j & 1))
5524 {
5525 if (modifier == WIDEN)
5526 {
5527 src = vec_oprnd1
5528 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5529 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5530 stmt, gsi);
5531 }
5532 else if (modifier == NARROW)
5533 {
5534 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5535 stmt, gsi);
5536 op = vec_oprnd0
5537 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5538 }
5539 else
5540 gcc_unreachable ();
5541 }
5542 else
5543 {
5544 src = vec_oprnd1
5545 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5546 op = vec_oprnd0
5547 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5548 }
5549
5550 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5551 {
5552 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5553 == TYPE_VECTOR_SUBPARTS (srctype));
0e22bb5a 5554 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5555 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5556 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5558 src = var;
5559 }
5560
5561 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5562 {
5563 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5564 == TYPE_VECTOR_SUBPARTS (idxtype));
0e22bb5a 5565 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5566 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5567 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5568 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5569 op = var;
5570 }
5571
5572 new_stmt
5573 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5574
5575 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5576
5577 if (prev_stmt_info == NULL)
5578 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5579 else
5580 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5581 prev_stmt_info = vinfo_for_stmt (new_stmt);
5582 }
5583 return true;
5584 }
5585
0d0293ac 5586 if (grouped_store)
5587 {
5588 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
e14c1050 5589 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
ebfd146a 5590
e14c1050 5591 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5592
5593 /* FORNOW */
a70d6342 5594 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5595
5596 /* We vectorize all the stmts of the interleaving group when we
5597 reach the last stmt in the group. */
5598 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5599 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5600 && !slp)
5601 {
5602 *vec_stmt = NULL;
5603 return true;
5604 }
5605
5606 if (slp)
4b5caab7
IR
5609 /* VEC_NUM is the number of vect stmts to be created for this
5610 group. */
5611 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9771b263 5612 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
52eab378 5613 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
4b5caab7 5614 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
d092494c 5615 op = gimple_assign_rhs1 (first_stmt);
4b5caab7 5616 }
ebfd146a 5617 else
5618 /* VEC_NUM is the number of vect stmts to be created for this
5619 group. */
5620 vec_num = group_size;
5621 }
b8698a0f 5622 else
5623 {
5624 first_stmt = stmt;
5625 first_dr = dr;
5626 group_size = vec_num = 1;
ebfd146a 5627 }
b8698a0f 5628
73fbfcad 5629 if (dump_enabled_p ())
78c60e3d 5630 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 5631 "transform store. ncopies = %d\n", ncopies);
ebfd146a 5632
5633 if (STMT_VINFO_STRIDED_P (stmt_info))
5634 {
5635 gimple_stmt_iterator incr_gsi;
5636 bool insert_after;
355fe088 5637 gimple *incr;
5638 tree offvar;
5639 tree ivstep;
5640 tree running_off;
5641 gimple_seq stmts = NULL;
5642 tree stride_base, stride_step, alias_off;
5643 tree vec_oprnd;
f502d50e 5644 unsigned int g;
f2e2a985
MM
5645
5646 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5647
5648 stride_base
5649 = fold_build_pointer_plus
f502d50e 5650 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
f2e2a985 5651 size_binop (PLUS_EXPR,
5652 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5653 convert_to_ptrofftype (DR_INIT(first_dr))));
5654 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5655
5656 /* For a store with loop-invariant (but other than power-of-2)
5657 stride (i.e. not a grouped access) like so:
5658
5659 for (i = 0; i < n; i += stride)
5660 array[i] = ...;
5661
5662 we generate a new induction variable and new stores from
5663 the components of the (vectorized) rhs:
5664
5665 for (j = 0; ; j += VF*stride)
5666 vectemp = ...;
5667 tmp1 = vectemp[0];
5668 array[j] = tmp1;
5669 tmp2 = vectemp[1];
5670 array[j + stride] = tmp2;
5671 ...
5672 */
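
 /* As a concrete instance of the scheme above (illustrative only):
    for a V4SI rhs and a loop-invariant byte stride s, each copy
    extracts the four lanes with BIT_FIELD_REFs and stores them at
    byte offsets 0, s, 2*s and 3*s from running_off, and the induction
    variable advances by ncopies * 4 * s per vectorized iteration.  */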
5673
5674 unsigned nstores = nunits;
5675 tree ltype = elem_type;
5676 if (slp)
5677 {
5678 nstores = nunits / group_size;
5679 if (group_size < nunits)
5680 ltype = build_vector_type (elem_type, group_size);
5681 else
5682 ltype = vectype;
5683 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5684 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
f502d50e 5685 group_size = 1;
5686 }
5687
5688 ivstep = stride_step;
5689 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5690 build_int_cst (TREE_TYPE (ivstep),
cee62fee 5691 ncopies * nstores));
5692
5693 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5694
5695 create_iv (stride_base, ivstep, NULL,
5696 loop, &incr_gsi, insert_after,
5697 &offvar, NULL);
5698 incr = gsi_stmt (incr_gsi);
310213d4 5699 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5700
5701 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5702 if (stmts)
5703 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5704
5705 prev_stmt_info = NULL;
5706 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5707 next_stmt = first_stmt;
5708 for (g = 0; g < group_size; g++)
f2e2a985 5709 {
5710 running_off = offvar;
5711 if (g)
f2e2a985 5712 {
5713 tree size = TYPE_SIZE_UNIT (ltype);
5714 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
f2e2a985 5715 size);
f502d50e 5716 tree newoff = copy_ssa_name (running_off, NULL);
f2e2a985 5717 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
f502d50e 5718 running_off, pos);
f2e2a985 5719 vect_finish_stmt_generation (stmt, incr, gsi);
f2e2a985 5720 running_off = newoff;
5721 }
5722 for (j = 0; j < ncopies; j++)
5723 {
5724 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5725 and first_stmt == stmt. */
5726 if (j == 0)
5727 {
5728 if (slp)
5729 {
5730 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5731 slp_node, -1);
5732 vec_oprnd = vec_oprnds[0];
5733 }
5734 else
5735 {
5736 gcc_assert (gimple_assign_single_p (next_stmt));
5737 op = gimple_assign_rhs1 (next_stmt);
81c40241 5738 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5739 }
5740 }
f2e2a985 5741 else
5742 {
5743 if (slp)
5744 vec_oprnd = vec_oprnds[j];
5745 else
c079cbac 5746 {
81c40241 5747 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5748 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5749 }
5750 }
5751
5752 for (i = 0; i < nstores; i++)
5753 {
5754 tree newref, newoff;
355fe088 5755 gimple *incr, *assign;
5756 tree size = TYPE_SIZE (ltype);
5757 /* Extract the i'th component. */
5758 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5759 bitsize_int (i), size);
5760 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5761 size, pos);
5762
5763 elem = force_gimple_operand_gsi (gsi, elem, true,
5764 NULL_TREE, true,
5765 GSI_SAME_STMT);
5766
5767 newref = build2 (MEM_REF, ltype,
5768 running_off, alias_off);
5769
5770 /* And store it to *running_off. */
5771 assign = gimple_build_assign (newref, elem);
5772 vect_finish_stmt_generation (stmt, assign, gsi);
5773
5774 newoff = copy_ssa_name (running_off, NULL);
5775 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5776 running_off, stride_step);
5777 vect_finish_stmt_generation (stmt, incr, gsi);
5778
5779 running_off = newoff;
5780 if (g == group_size - 1
5781 && !slp)
5782 {
5783 if (j == 0 && i == 0)
5784 STMT_VINFO_VEC_STMT (stmt_info)
5785 = *vec_stmt = assign;
5786 else
5787 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5788 prev_stmt_info = vinfo_for_stmt (assign);
5789 }
5790 }
f2e2a985 5791 }
f502d50e 5792 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5793 }
5794 return true;
5795 }
5796
5797 dr_chain.create (group_size);
5798 oprnds.create (group_size);
ebfd146a 5799
720f5239 5800 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a 5801 gcc_assert (alignment_support_scheme);
5802 /* Targets with store-lane instructions must not require explicit
5803 realignment. */
5804 gcc_assert (!store_lanes_p
5805 || alignment_support_scheme == dr_aligned
5806 || alignment_support_scheme == dr_unaligned_supported);
5807
5808 if (negative)
5809 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5810
5811 if (store_lanes_p)
5812 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5813 else
5814 aggr_type = vectype;
5815
5816 /* In case the vectorization factor (VF) is bigger than the number
5817 of elements that we can fit in a vectype (nunits), we have to generate
 5818 more than one vector stmt - i.e. - we need to "unroll" the
b8698a0f 5819 vector stmt by a factor VF/nunits. For more details see documentation in
5820 vect_get_vec_def_for_copy_stmt. */
5821
0d0293ac 5822 /* In case of interleaving (non-unit grouped access):
5823
5824 S1: &base + 2 = x2
5825 S2: &base = x0
5826 S3: &base + 1 = x1
5827 S4: &base + 3 = x3
5828
5829 We create vectorized stores starting from base address (the access of the
5830 first stmt in the chain (S2 in the above example), when the last store stmt
5831 of the chain (S4) is reached:
5832
5833 VS1: &base = vx2
5834 VS2: &base + vec_size*1 = vx0
5835 VS3: &base + vec_size*2 = vx1
5836 VS4: &base + vec_size*3 = vx3
5837
5838 Then permutation statements are generated:
5839
5840 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5841 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
ebfd146a 5842 ...
b8698a0f 5843
5844 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5845 (the order of the data-refs in the output of vect_permute_store_chain
5846 corresponds to the order of scalar stmts in the interleaving chain - see
5847 the documentation of vect_permute_store_chain()).
5848
5849 In case of both multiple types and interleaving, above vector stores and
ff802fa1 5850 permutation stmts are created for every copy. The result vector stmts are
ebfd146a 5851 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
b8698a0f 5852 STMT_VINFO_RELATED_STMT for the next copies.
5853 */
5854
5855 prev_stmt_info = NULL;
5856 for (j = 0; j < ncopies; j++)
5857 {
5858
5859 if (j == 0)
5860 {
5861 if (slp)
5862 {
5863 /* Get vectorized arguments for SLP_NODE. */
5864 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5865 NULL, slp_node, -1);
ebfd146a 5866
9771b263 5867 vec_oprnd = vec_oprnds[0];
5868 }
5869 else
5870 {
5871 /* For interleaved stores we collect vectorized defs for all the
5872 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5873 used as an input to vect_permute_store_chain(), and OPRNDS as
5874 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5875
0d0293ac 5876 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
ebfd146a 5877 OPRNDS are of size 1. */
b8698a0f 5878 next_stmt = first_stmt;
ebfd146a
IR
5879 for (i = 0; i < group_size; i++)
5880 {
5881 /* Since gaps are not supported for interleaved stores,
5882 GROUP_SIZE is the exact number of stmts in the chain.
 5883 Therefore, NEXT_STMT can't be NULL_TREE. In case
 5884 there is no interleaving, GROUP_SIZE is 1, and only one
5885 iteration of the loop will be executed. */
5886 gcc_assert (next_stmt
5887 && gimple_assign_single_p (next_stmt));
5888 op = gimple_assign_rhs1 (next_stmt);
5889
81c40241 5890 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5891 dr_chain.quick_push (vec_oprnd);
5892 oprnds.quick_push (vec_oprnd);
e14c1050 5893 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5894 }
5895 }
5896
 5897 /* We should have caught mismatched types earlier. */
5898 gcc_assert (useless_type_conversion_p (vectype,
5899 TREE_TYPE (vec_oprnd)));
5900 bool simd_lane_access_p
5901 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5902 if (simd_lane_access_p
5903 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5904 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5905 && integer_zerop (DR_OFFSET (first_dr))
5906 && integer_zerop (DR_INIT (first_dr))
5907 && alias_sets_conflict_p (get_alias_set (aggr_type),
5908 get_alias_set (DR_REF (first_dr))))
5909 {
5910 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5911 dataref_offset = build_int_cst (reference_alias_ptr_type
5912 (DR_REF (first_dr)), 0);
8928eff3 5913 inv_p = false;
5914 }
5915 else
5916 dataref_ptr
5917 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5918 simd_lane_access_p ? loop : NULL,
09dfa495 5919 offset, &dummy, gsi, &ptr_incr,
74bf76ed 5920 simd_lane_access_p, &inv_p);
a70d6342 5921 gcc_assert (bb_vinfo || !inv_p);
ebfd146a 5922 }
b8698a0f 5923 else
ebfd146a 5924 {
5925 /* For interleaved stores we created vectorized defs for all the
5926 defs stored in OPRNDS in the previous iteration (previous copy).
5927 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5928 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5929 next copy.
0d0293ac 5930 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5931 OPRNDS are of size 1. */
5932 for (i = 0; i < group_size; i++)
5933 {
9771b263 5934 op = oprnds[i];
81c40241 5935 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
b8698a0f 5936 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5937 dr_chain[i] = vec_oprnd;
5938 oprnds[i] = vec_oprnd;
ebfd146a 5939 }
5940 if (dataref_offset)
5941 dataref_offset
5942 = int_const_binop (PLUS_EXPR, dataref_offset,
5943 TYPE_SIZE_UNIT (aggr_type));
5944 else
5945 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5946 TYPE_SIZE_UNIT (aggr_type));
5947 }
5948
272c6793 5949 if (store_lanes_p)
ebfd146a 5950 {
272c6793 5951 tree vec_array;
267d3070 5952
5953 /* Combine all the vectors into an array. */
5954 vec_array = create_vector_array (vectype, vec_num);
5955 for (i = 0; i < vec_num; i++)
c2d7ab2a 5956 {
9771b263 5957 vec_oprnd = dr_chain[i];
272c6793 5958 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
267d3070 5959 }
b8698a0f 5960
5961 /* Emit:
5962 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5963 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5964 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5965 gimple_call_set_lhs (new_stmt, data_ref);
267d3070 5966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5967 }
5968 else
5969 {
5970 new_stmt = NULL;
0d0293ac 5971 if (grouped_store)
272c6793 5972 {
5973 if (j == 0)
5974 result_chain.create (group_size);
5975 /* Permute. */
5976 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5977 &result_chain);
5978 }
c2d7ab2a 5979
5980 next_stmt = first_stmt;
5981 for (i = 0; i < vec_num; i++)
5982 {
644ffefd 5983 unsigned align, misalign;
5984
5985 if (i > 0)
5986 /* Bump the vector pointer. */
5987 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5988 stmt, NULL_TREE);
5989
5990 if (slp)
9771b263 5991 vec_oprnd = vec_oprnds[i];
5992 else if (grouped_store)
5993 /* For grouped stores vectorized defs are interleaved in
272c6793 5994 vect_permute_store_chain(). */
9771b263 5995 vec_oprnd = result_chain[i];
272c6793 5996
5997 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5998 dataref_ptr,
5999 dataref_offset
6000 ? dataref_offset
6001 : build_int_cst (reference_alias_ptr_type
6002 (DR_REF (first_dr)), 0));
644ffefd 6003 align = TYPE_ALIGN_UNIT (vectype);
272c6793 6004 if (aligned_access_p (first_dr))
644ffefd 6005 misalign = 0;
6006 else if (DR_MISALIGNMENT (first_dr) == -1)
6007 {
6008 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6009 align = TYPE_ALIGN_UNIT (elem_type);
6010 else
6011 align = get_object_alignment (DR_REF (first_dr))
6012 / BITS_PER_UNIT;
6013 misalign = 0;
6014 TREE_TYPE (data_ref)
6015 = build_aligned_type (TREE_TYPE (data_ref),
52639a61 6016 align * BITS_PER_UNIT);
6017 }
6018 else
6019 {
6020 TREE_TYPE (data_ref)
6021 = build_aligned_type (TREE_TYPE (data_ref),
6022 TYPE_ALIGN (elem_type));
644ffefd 6023 misalign = DR_MISALIGNMENT (first_dr);
272c6793 6024 }
6025 if (dataref_offset == NULL_TREE
6026 && TREE_CODE (dataref_ptr) == SSA_NAME)
6027 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6028 misalign);
c2d7ab2a 6029
6030 if (negative
6031 && dt != vect_constant_def
6032 && dt != vect_external_def)
6033 {
6034 tree perm_mask = perm_mask_for_reverse (vectype);
6035 tree perm_dest
6036 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6037 vectype);
b731b390 6038 tree new_temp = make_ssa_name (perm_dest);
6039
6040 /* Generate the permute statement. */
355fe088 6041 gimple *perm_stmt
6042 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6043 vec_oprnd, perm_mask);
6044 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6045
6046 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6047 vec_oprnd = new_temp;
6048 }
6049
6050 /* Arguments are ready. Create the new vector stmt. */
6051 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6052 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6053
6054 if (slp)
6055 continue;
6056
e14c1050 6057 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6058 if (!next_stmt)
6059 break;
6060 }
ebfd146a 6061 }
6062 if (!slp)
6063 {
6064 if (j == 0)
6065 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6066 else
6067 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6068 prev_stmt_info = vinfo_for_stmt (new_stmt);
6069 }
6070 }
6071
6072 dr_chain.release ();
6073 oprnds.release ();
6074 result_chain.release ();
6075 vec_oprnds.release ();
6076
6077 return true;
6078}
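
/* A sketch of the two grouped-store strategies used above
   (illustrative): targets with store-lane support emit a single

     MEM_REF[...all elements...] = IFN_STORE_LANES (vec_array);

   while other targets first interleave the group via
   vect_permute_store_chain and then emit one plain vector store per
   vector in the permuted chain.  */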
6079
6080/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6081 VECTOR_CST mask. No checks are made that the target platform supports the
6082 mask, so callers may wish to test can_vec_perm_p separately, or use
6083 vect_gen_perm_mask_checked. */
a1e53f3f 6084
3fcc1b55 6085tree
557be5a8 6086vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
a1e53f3f 6087{
d2a12ae7 6088 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
2635892a 6089 int i, nunits;
a1e53f3f 6090
22e4dee7 6091 nunits = TYPE_VECTOR_SUBPARTS (vectype);
22e4dee7 6092
96f9265a
RG
6093 mask_elt_type = lang_hooks.types.type_for_mode
6094 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
22e4dee7 6095 mask_type = get_vectype_for_scalar_type (mask_elt_type);
a1e53f3f 6096
d2a12ae7 6097 mask_elts = XALLOCAVEC (tree, nunits);
aec7ae7d 6098 for (i = nunits - 1; i >= 0; i--)
d2a12ae7
RG
6099 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
6100 mask_vec = build_vector (mask_type, mask_elts);
a1e53f3f 6101
2635892a 6102 return mask_vec;
6103}
6104
6105/* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
6106 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6107
6108tree
6109vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
6110{
6111 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
6112 return vect_gen_perm_mask_any (vectype, sel);
6113}
6114
 6115 /* Given vector variables X and Y that were generated for the scalar
6116 STMT, generate instructions to permute the vector elements of X and Y
6117 using permutation mask MASK_VEC, insert them at *GSI and return the
6118 permuted vector variable. */
6119
6120static tree
355fe088 6121permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
aec7ae7d 6122 gimple_stmt_iterator *gsi)
6123{
6124 tree vectype = TREE_TYPE (x);
aec7ae7d 6125 tree perm_dest, data_ref;
355fe088 6126 gimple *perm_stmt;
a1e53f3f 6127
acdcd61b 6128 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
b731b390 6129 data_ref = make_ssa_name (perm_dest);
6130
6131 /* Generate the permute statement. */
0d0e4a03 6132 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6133 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6134
6135 return data_ref;
6136}
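
/* A minimal usage sketch combining the two helpers above (illustrative
   only, assuming a V4SI vectype):

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);
     tree rev = permute_vec_elements (x, x, mask, stmt, gsi);

   reverses the four elements of X by emitting a single VEC_PERM_EXPR
   at *GSI; vect_gen_perm_mask_any skips the can_vec_perm_p assertion
   for callers that have already verified target support.  */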
6137
6138/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
 6139 inserting them on the loop's preheader edge. Returns true if we
6140 were successful in doing so (and thus STMT can be moved then),
6141 otherwise returns false. */
6142
6143static bool
355fe088 6144hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6145{
6146 ssa_op_iter i;
6147 tree op;
6148 bool any = false;
6149
6150 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6151 {
355fe088 6152 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6153 if (!gimple_nop_p (def_stmt)
6154 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6155 {
 6156 /* Make sure we don't need to recurse. While we could handle
 6157 simple cases, for more complex use webs we don't have an easy
 6158 way to preserve stmt order to fulfil dependencies within
 6159 them. */
6160 tree op2;
6161 ssa_op_iter i2;
6162 if (gimple_code (def_stmt) == GIMPLE_PHI)
6163 return false;
6164 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6165 {
355fe088 6166 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6167 if (!gimple_nop_p (def_stmt2)
6168 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6169 return false;
6170 }
6171 any = true;
6172 }
6173 }
6174
6175 if (!any)
6176 return true;
6177
6178 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6179 {
355fe088 6180 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6181 if (!gimple_nop_p (def_stmt)
6182 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6183 {
6184 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6185 gsi_remove (&gsi, false);
6186 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6187 }
6188 }
6189
6190 return true;
6191}
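
/* For illustration (assumed example, not from the original sources):
   given

     loop:
       a_1 = b_2 + 4;      (b_2 defined before the loop)
       x_3 = MEM[a_1];

   hoisting the defs of x_3's stmt moves the definition of a_1 to the
   loop preheader, since its only use operand is defined outside the
   loop; a def that is a PHI, or whose own operands are defined inside
   the loop, makes the function give up rather than recurse.  */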
6192
6193/* vectorizable_load.
6194
 6195 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6196 can be vectorized.
6197 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6198 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6199 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6200
6201static bool
355fe088 6202vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
c716e67f 6203 slp_tree slp_node, slp_instance slp_node_instance)
6204{
6205 tree scalar_dest;
6206 tree vec_dest = NULL;
6207 tree data_ref = NULL;
6208 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b8698a0f 6209 stmt_vec_info prev_stmt_info;
ebfd146a 6210 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 6211 struct loop *loop = NULL;
ebfd146a 6212 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
a70d6342 6213 bool nested_in_vect_loop = false;
c716e67f 6214 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
272c6793 6215 tree elem_type;
ebfd146a 6216 tree new_temp;
ef4bddc2 6217 machine_mode mode;
355fe088 6218 gimple *new_stmt = NULL;
6219 tree dummy;
6220 enum dr_alignment_support alignment_support_scheme;
6221 tree dataref_ptr = NULL_TREE;
74bf76ed 6222 tree dataref_offset = NULL_TREE;
355fe088 6223 gimple *ptr_incr = NULL;
ebfd146a 6224 int ncopies;
9b999e8c 6225 int i, j, group_size = -1, group_gap_adj;
6226 tree msq = NULL_TREE, lsq;
6227 tree offset = NULL_TREE;
356bbc4c 6228 tree byte_offset = NULL_TREE;
ebfd146a 6229 tree realignment_token = NULL_TREE;
538dd0b7 6230 gphi *phi = NULL;
6e1aa848 6231 vec<tree> dr_chain = vNULL;
0d0293ac 6232 bool grouped_load = false;
272c6793 6233 bool load_lanes_p = false;
355fe088 6234 gimple *first_stmt;
4f0a0218 6235 gimple *first_stmt_for_drptr = NULL;
ebfd146a 6236 bool inv_p;
319e6439 6237 bool negative = false;
6238 bool compute_in_loop = false;
6239 struct loop *at_loop;
6240 int vec_num;
6241 bool slp = (slp_node != NULL);
6242 bool slp_perm = false;
6243 enum tree_code code;
6244 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6245 int vf;
272c6793 6246 tree aggr_type;
6247 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6248 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6249 int gather_scale = 1;
6250 enum vect_def_type gather_dt = vect_unknown_def_type;
310213d4 6251 vec_info *vinfo = stmt_info->vinfo;
a70d6342 6252
6253 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6254 return false;
6255
6256 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6257 && ! vec_stmt)
6258 return false;
6259
6260 /* Is vectorizable load? */
6261 if (!is_gimple_assign (stmt))
6262 return false;
6263
6264 scalar_dest = gimple_assign_lhs (stmt);
6265 if (TREE_CODE (scalar_dest) != SSA_NAME)
6266 return false;
6267
6268 code = gimple_assign_rhs_code (stmt);
6269 if (code != ARRAY_REF
6270 && code != BIT_FIELD_REF
6271 && code != INDIRECT_REF
6272 && code != COMPONENT_REF
6273 && code != IMAGPART_EXPR
6274 && code != REALPART_EXPR
6275 && code != MEM_REF
6276 && TREE_CODE_CLASS (code) != tcc_declaration)
6277 return false;
6278
6279 if (!STMT_VINFO_DATA_REF (stmt_info))
6280 return false;
6281
6282 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6283 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6284
6285 if (loop_vinfo)
6286 {
6287 loop = LOOP_VINFO_LOOP (loop_vinfo);
6288 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6289 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6290 }
6291 else
3533e503 6292 vf = 1;
6293
6294 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 6295 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
ebfd146a 6296 case of SLP. */
437f4a00 6297 if (slp || PURE_SLP_STMT (stmt_info))
6298 ncopies = 1;
6299 else
6300 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6301
6302 gcc_assert (ncopies >= 1);
6303
6304 /* FORNOW. This restriction should be relaxed. */
6305 if (nested_in_vect_loop && ncopies > 1)
6306 {
73fbfcad 6307 if (dump_enabled_p ())
78c60e3d 6308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6309 "multiple types in nested loop.\n");
6310 return false;
6311 }
6312
6313 /* Invalidate assumptions made by dependence analysis when vectorization
6314 on the unrolled body effectively re-orders stmts. */
6315 if (ncopies > 1
6316 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6317 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6318 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6319 {
6320 if (dump_enabled_p ())
6321 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6322 "cannot perform implicit CSE when unrolling "
6323 "with negative dependence distance\n");
6324 return false;
6325 }
6326
7b7b1813 6327 elem_type = TREE_TYPE (vectype);
947131ba 6328 mode = TYPE_MODE (vectype);
6329
6330 /* FORNOW. In some cases can vectorize even if data-type not supported
6331 (e.g. - data copies). */
947131ba 6332 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
ebfd146a 6333 {
73fbfcad 6334 if (dump_enabled_p ())
78c60e3d 6335 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6336 "Aligned load, but unsupported type.\n");
6337 return false;
6338 }
6339
ebfd146a 6340 /* Check if the load is a part of an interleaving chain. */
0d0293ac 6341 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
ebfd146a 6342 {
0d0293ac 6343 grouped_load = true;
ebfd146a 6344 /* FORNOW */
3bab6342 6345 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
ebfd146a 6346
e14c1050 6347 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6348
6349 /* If this is single-element interleaving with an element distance
 6350 that leaves unused vector loads around, punt - we at least create
6351 very sub-optimal code in that case (and blow up memory,
6352 see PR65518). */
72c0f643 6353 bool force_peeling = false;
d5f035ea 6354 if (first_stmt == stmt
6355 && !GROUP_NEXT_ELEMENT (stmt_info))
6356 {
6357 if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6358 {
6359 if (dump_enabled_p ())
6360 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6361 "single-element interleaving not supported "
6362 "for not adjacent vector loads\n");
6363 return false;
6364 }
6365
6366 /* Single-element interleaving requires peeling for gaps. */
6367 force_peeling = true;
6368 }
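
 /* An illustrative reading of the check above: for a strided load
    such as ... = a[4*i] each vector load also fetches elements that
    are never used; that is tolerable only when the excess iterations
    can be peeled off (hence force_peeling), and not at all once the
    group spans more elements than one vector holds, since then whole
    vector loads would go unused.  */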
6369
 6370 /* If there is a gap at the end of the group or the group size cannot
6371 be made a multiple of the vector element count then we access excess
6372 elements in the last iteration and thus need to peel that off. */
6373 if (loop_vinfo
6374 && ! STMT_VINFO_STRIDED_P (stmt_info)
6375 && (force_peeling
6376 || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
6377 || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
6378 {
6379 if (dump_enabled_p ())
6380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6381 "Data access with gaps requires scalar "
6382 "epilogue loop\n");
6383 if (loop->inner)
6384 {
6385 if (dump_enabled_p ())
6386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6387 "Peeling for outer loop is not supported\n");
6388 return false;
6389 }
6390
6391 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
6392 }
6393
6394 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6395 slp_perm = true;
6396
7b5fc413 6397 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6398
6399 /* ??? The following is overly pessimistic (as well as the loop
6400 case above) in the case we can statically determine the excess
6401 elements loaded are within the bounds of a decl that is accessed.
 6402 Likewise, for BB vectorization, using masked loads is a possibility. */
6403 if (bb_vinfo && slp_perm && group_size % nunits != 0)
6404 {
6405 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6406 "BB vectorization with gaps at the end of a load "
6407 "is not supported\n");
6408 return false;
6409 }
6410
6411 if (!slp
6412 && !PURE_SLP_STMT (stmt_info)
f2e2a985 6413 && !STMT_VINFO_STRIDED_P (stmt_info))
b602d918 6414 {
6415 if (vect_load_lanes_supported (vectype, group_size))
6416 load_lanes_p = true;
0d0293ac 6417 else if (!vect_grouped_load_supported (vectype, group_size))
6418 return false;
6419 }
6420
6421 /* Invalidate assumptions made by dependence analysis when vectorization
6422 on the unrolled body effectively re-orders stmts. */
6423 if (!PURE_SLP_STMT (stmt_info)
6424 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6425 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6426 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6427 {
6428 if (dump_enabled_p ())
6429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6430 "cannot perform implicit CSE when performing "
6431 "group loads with negative dependence distance\n");
6432 return false;
6433 }
96bb56b2
RB
6434
6435	      /* Similarly, when the stmt is a load that is both part of an SLP
6436	         instance and a loop-vectorized stmt via the same-dr mechanism,
6437	         we have to give up.  */
6438 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6439 && (STMT_SLP_TYPE (stmt_info)
6440 != STMT_SLP_TYPE (vinfo_for_stmt
6441 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6442 {
6443 if (dump_enabled_p ())
6444 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6445 "conflicting SLP types for CSEd load\n");
6446 return false;
6447 }
ebfd146a
IR
6448 }
6449
a1e53f3f 6450
3bab6342 6451 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
aec7ae7d 6452 {
355fe088 6453 gimple *def_stmt;
3bab6342
AT
6454 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6455 &gather_off, &gather_scale);
aec7ae7d 6456 gcc_assert (gather_decl);
81c40241
RB
6457 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6458 &gather_off_vectype))
aec7ae7d 6459 {
73fbfcad 6460 if (dump_enabled_p ())
78c60e3d 6461 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6462 "gather index use not simple.\n");
aec7ae7d
JJ
6463 return false;
6464 }
6465 }
f2e2a985 6466 else if (STMT_VINFO_STRIDED_P (stmt_info))
7b5fc413
RB
6467 {
6468 if ((grouped_load
6469 && (slp || PURE_SLP_STMT (stmt_info)))
6470 && (group_size > nunits
b266b968 6471 || nunits % group_size != 0))
7b5fc413
RB
6472 {
6473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6474 "unhandled strided group load\n");
6475 return false;
6476 }
6477 }
319e6439
RG
6478 else
6479 {
6480 negative = tree_int_cst_compare (nested_in_vect_loop
6481 ? STMT_VINFO_DR_STEP (stmt_info)
6482 : DR_STEP (dr),
6483 size_zero_node) < 0;
6484 if (negative && ncopies > 1)
6485 {
73fbfcad 6486 if (dump_enabled_p ())
78c60e3d 6487 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6488 "multiple types with negative step.\n");
319e6439
RG
6489 return false;
6490 }
6491
6492 if (negative)
6493 {
08940f33
RB
6494 if (grouped_load)
6495 {
6496 if (dump_enabled_p ())
6497 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
6498 "negative step for group load not supported"
6499 "\n");
08940f33
RB
6500 return false;
6501 }
319e6439
RG
6502 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6503 if (alignment_support_scheme != dr_aligned
6504 && alignment_support_scheme != dr_unaligned_supported)
6505 {
73fbfcad 6506 if (dump_enabled_p ())
78c60e3d 6507 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 6508 "negative step but alignment required.\n");
319e6439
RG
6509 return false;
6510 }
6511 if (!perm_mask_for_reverse (vectype))
6512 {
73fbfcad 6513 if (dump_enabled_p ())
78c60e3d 6514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
6515 "negative step and reversing not supported."
6516 "\n");
319e6439
RG
6517 return false;
6518 }
6519 }
7d75abc8 6520 }
aec7ae7d 6521
ebfd146a
IR
6522 if (!vec_stmt) /* transformation not required. */
6523 {
6524 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
2e8ab70c
RB
6525 /* The SLP costs are calculated during SLP analysis. */
6526 if (!PURE_SLP_STMT (stmt_info))
6527 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6528 NULL, NULL, NULL);
ebfd146a
IR
6529 return true;
6530 }
6531
73fbfcad 6532 if (dump_enabled_p ())
78c60e3d 6533 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 6534 "transform load. ncopies = %d\n", ncopies);
ebfd146a
IR
6535
6536 /** Transform. **/
6537
c716e67f
XDL
6538 ensure_base_align (stmt_info, dr);
6539
3bab6342 6540 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
aec7ae7d
JJ
6541 {
6542 tree vec_oprnd0 = NULL_TREE, op;
6543 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6544 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
d3c2fee0 6545 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
aec7ae7d
JJ
6546 edge pe = loop_preheader_edge (loop);
6547 gimple_seq seq;
6548 basic_block new_bb;
6549 enum { NARROW, NONE, WIDEN } modifier;
6550 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6551
6552 if (nunits == gather_off_nunits)
6553 modifier = NONE;
6554 else if (nunits == gather_off_nunits / 2)
6555 {
6556 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6557 modifier = WIDEN;
6558
6559 for (i = 0; i < gather_off_nunits; ++i)
6560 sel[i] = i | nunits;
6561
557be5a8 6562 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
aec7ae7d
JJ
6563 }
6564 else if (nunits == gather_off_nunits * 2)
6565 {
6566 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6567 modifier = NARROW;
6568
6569 for (i = 0; i < nunits; ++i)
6570 sel[i] = i < gather_off_nunits
6571 ? i : i + nunits - gather_off_nunits;
6572
557be5a8 6573 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
aec7ae7d
JJ
6574 ncopies *= 2;
6575 }
6576 else
6577 gcc_unreachable ();
6578
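	  /* Illustrative example (comment added for clarity; the numbers are
	     not from the original source): with nunits == 4 and
	     gather_off_nunits == 8, the WIDEN path builds
	     sel = { 4, 5, 6, 7, 4, 5, 6, 7 } (sel[i] = i | nunits), so the
	     odd-numbered copies replicate the upper half of the offset
	     vector into the used positions.  With nunits == 8 and
	     gather_off_nunits == 4, the NARROW path builds
	     sel = { 0, 1, 2, 3, 8, 9, 10, 11 }, which concatenates the low
	     halves of two consecutive gather results into one full vector.  */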
6579 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6580 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6581 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6582 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6583 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6584 scaletype = TREE_VALUE (arglist);
d3c2fee0 6585 gcc_checking_assert (types_compatible_p (srctype, rettype));
aec7ae7d
JJ
6586
6587 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6588
6589 ptr = fold_convert (ptrtype, gather_base);
6590 if (!is_gimple_min_invariant (ptr))
6591 {
6592 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6593 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6594 gcc_assert (!new_bb);
6595 }
6596
6597 /* Currently we support only unconditional gather loads,
6598 so mask should be all ones. */
d3c2fee0
AI
6599 if (TREE_CODE (masktype) == INTEGER_TYPE)
6600 mask = build_int_cst (masktype, -1);
6601 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6602 {
6603 mask = build_int_cst (TREE_TYPE (masktype), -1);
6604 mask = build_vector_from_val (masktype, mask);
03b9e8e4 6605 mask = vect_init_vector (stmt, mask, masktype, NULL);
d3c2fee0 6606 }
aec7ae7d
JJ
6607 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6608 {
6609 REAL_VALUE_TYPE r;
6610 long tmp[6];
6611 for (j = 0; j < 6; ++j)
6612 tmp[j] = -1;
6613 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6614 mask = build_real (TREE_TYPE (masktype), r);
d3c2fee0 6615 mask = build_vector_from_val (masktype, mask);
03b9e8e4 6616 mask = vect_init_vector (stmt, mask, masktype, NULL);
aec7ae7d
JJ
6617 }
6618 else
6619 gcc_unreachable ();
aec7ae7d
JJ
6620
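	  /* Note added for clarity: for a floating-point MASKTYPE the
	     all-ones mask cannot be written as an integer constant, so the
	     code above reinterprets the bit pattern { -1, -1, ... } through
	     real_from_target to obtain a REAL_CST whose target encoding is
	     all ones, and then broadcasts that scalar to the mask vector.  */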
6621 scale = build_int_cst (scaletype, gather_scale);
6622
d3c2fee0
AI
6623 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6624 merge = build_int_cst (TREE_TYPE (rettype), 0);
6625 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6626 {
6627 REAL_VALUE_TYPE r;
6628 long tmp[6];
6629 for (j = 0; j < 6; ++j)
6630 tmp[j] = 0;
6631 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6632 merge = build_real (TREE_TYPE (rettype), r);
6633 }
6634 else
6635 gcc_unreachable ();
6636 merge = build_vector_from_val (rettype, merge);
6637 merge = vect_init_vector (stmt, merge, rettype, NULL);
6638
aec7ae7d
JJ
6639 prev_stmt_info = NULL;
6640 for (j = 0; j < ncopies; ++j)
6641 {
6642 if (modifier == WIDEN && (j & 1))
6643 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6644 perm_mask, stmt, gsi);
6645 else if (j == 0)
6646 op = vec_oprnd0
81c40241 6647 = vect_get_vec_def_for_operand (gather_off, stmt);
aec7ae7d
JJ
6648 else
6649 op = vec_oprnd0
6650 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6651
6652 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6653 {
6654 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6655 == TYPE_VECTOR_SUBPARTS (idxtype));
0e22bb5a 6656 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
aec7ae7d
JJ
6657 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6658 new_stmt
0d0e4a03 6659 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
aec7ae7d
JJ
6660 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6661 op = var;
6662 }
6663
6664 new_stmt
d3c2fee0 6665 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
aec7ae7d
JJ
6666
6667 if (!useless_type_conversion_p (vectype, rettype))
6668 {
6669 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6670 == TYPE_VECTOR_SUBPARTS (rettype));
0e22bb5a 6671 op = vect_get_new_ssa_name (rettype, vect_simple_var);
aec7ae7d
JJ
6672 gimple_call_set_lhs (new_stmt, op);
6673 vect_finish_stmt_generation (stmt, new_stmt, gsi);
b731b390 6674 var = make_ssa_name (vec_dest);
aec7ae7d
JJ
6675 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6676 new_stmt
0d0e4a03 6677 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
aec7ae7d
JJ
6678 }
6679 else
6680 {
6681 var = make_ssa_name (vec_dest, new_stmt);
6682 gimple_call_set_lhs (new_stmt, var);
6683 }
6684
6685 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6686
6687 if (modifier == NARROW)
6688 {
6689 if ((j & 1) == 0)
6690 {
6691 prev_res = var;
6692 continue;
6693 }
6694 var = permute_vec_elements (prev_res, var,
6695 perm_mask, stmt, gsi);
6696 new_stmt = SSA_NAME_DEF_STMT (var);
6697 }
6698
6699 if (prev_stmt_info == NULL)
6700 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6701 else
6702 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6703 prev_stmt_info = vinfo_for_stmt (new_stmt);
6704 }
6705 return true;
6706 }
f2e2a985 6707 else if (STMT_VINFO_STRIDED_P (stmt_info))
7d75abc8
MM
6708 {
6709 gimple_stmt_iterator incr_gsi;
6710 bool insert_after;
355fe088 6711 gimple *incr;
7d75abc8 6712 tree offvar;
7d75abc8
MM
6713 tree ivstep;
6714 tree running_off;
9771b263 6715 vec<constructor_elt, va_gc> *v = NULL;
7d75abc8 6716 gimple_seq stmts = NULL;
14ac6aa2
RB
6717 tree stride_base, stride_step, alias_off;
6718
6719 gcc_assert (!nested_in_vect_loop);
7d75abc8 6720
f502d50e 6721 if (slp && grouped_load)
ab313a8c
RB
6722 first_dr = STMT_VINFO_DATA_REF
6723 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6724 else
6725 first_dr = dr;
6726
14ac6aa2
RB
6727 stride_base
6728 = fold_build_pointer_plus
ab313a8c 6729 (DR_BASE_ADDRESS (first_dr),
14ac6aa2 6730 size_binop (PLUS_EXPR,
ab313a8c
RB
6731 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6732 convert_to_ptrofftype (DR_INIT (first_dr))));
6733 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7d75abc8
MM
6734
6735 /* For a load with loop-invariant (but other than power-of-2)
6736 stride (i.e. not a grouped access) like so:
6737
6738 for (i = 0; i < n; i += stride)
6739 ... = array[i];
6740
6741 we generate a new induction variable and new accesses to
6742 form a new vector (or vectors, depending on ncopies):
6743
6744 for (j = 0; ; j += VF*stride)
6745 tmp1 = array[j];
6746 tmp2 = array[j + stride];
6747 ...
6748 vectemp = {tmp1, tmp2, ...}
6749 */
6750
ab313a8c
RB
6751 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6752 build_int_cst (TREE_TYPE (stride_step), vf));
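	  /* E.g. (illustrative numbers): with a scalar byte step of 12 and
	     vf == 4, the induction variable advances by 48 bytes per vector
	     loop iteration, matching the j += VF*stride loop in the sketch
	     above.  */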
7d75abc8
MM
6753
6754 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6755
ab313a8c 6756 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7d75abc8
MM
6757 loop, &incr_gsi, insert_after,
6758 &offvar, NULL);
6759 incr = gsi_stmt (incr_gsi);
310213d4 6760 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7d75abc8 6761
ab313a8c
RB
6762 stride_step = force_gimple_operand (unshare_expr (stride_step),
6763 &stmts, true, NULL_TREE);
7d75abc8
MM
6764 if (stmts)
6765 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6766
6767 prev_stmt_info = NULL;
6768 running_off = offvar;
ab313a8c 6769 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
7b5fc413
RB
6770 int nloads = nunits;
6771 tree ltype = TREE_TYPE (vectype);
b266b968 6772 auto_vec<tree> dr_chain;
7b5fc413
RB
6773 if (slp)
6774 {
6775 nloads = nunits / group_size;
6776 if (group_size < nunits)
6777 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6778 else
6779 ltype = vectype;
6780 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
66c16fd9
RB
6781	      /* For SLP permutation support we need to load the whole group,
6782	         not only the number of vector stmts that the permutation result
6783	         fits in.  */
b266b968 6784 if (slp_perm)
66c16fd9
RB
6785 {
6786 ncopies = (group_size * vf + nunits - 1) / nunits;
6787 dr_chain.create (ncopies);
6788 }
6789 else
6790 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7b5fc413 6791 }
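	  /* Worked example (illustrative): with V8SI vectors (nunits == 8)
	     and group_size == 2 we emit nloads == 4 loads of two-element
	     sub-vectors per constructed vector; with slp_perm and vf == 4
	     the whole group is loaded, giving
	     ncopies == (2*4 + 8 - 1) / 8 == 1.  */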
7d75abc8
MM
6792 for (j = 0; j < ncopies; j++)
6793 {
6794 tree vec_inv;
6795
7b5fc413
RB
6796 if (nloads > 1)
6797 {
6798 vec_alloc (v, nloads);
6799 for (i = 0; i < nloads; i++)
6800 {
6801 tree newref, newoff;
355fe088 6802 gimple *incr;
7b5fc413
RB
6803 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6804
6805 newref = force_gimple_operand_gsi (gsi, newref, true,
6806 NULL_TREE, true,
6807 GSI_SAME_STMT);
6808 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6809 newoff = copy_ssa_name (running_off);
6810 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6811 running_off, stride_step);
6812 vect_finish_stmt_generation (stmt, incr, gsi);
6813
6814 running_off = newoff;
6815 }
6816
6817 vec_inv = build_constructor (vectype, v);
6818 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6819 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6820 }
6821 else
7d75abc8 6822 {
7b5fc413
RB
6823 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6824 build2 (MEM_REF, ltype,
6825 running_off, alias_off));
6826 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6827
6828 tree newoff = copy_ssa_name (running_off);
355fe088 6829 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
0d0e4a03 6830 running_off, stride_step);
7d75abc8
MM
6831 vect_finish_stmt_generation (stmt, incr, gsi);
6832
6833 running_off = newoff;
6834 }
6835
7b5fc413 6836 if (slp)
b266b968 6837 {
b266b968
RB
6838 if (slp_perm)
6839 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
66c16fd9
RB
6840 else
6841 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
b266b968 6842 }
7d75abc8 6843 else
225ce44b
RB
6844 {
6845 if (j == 0)
6846 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6847 else
6848 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6849 prev_stmt_info = vinfo_for_stmt (new_stmt);
6850 }
7d75abc8 6851 }
b266b968
RB
6852 if (slp_perm)
6853 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6854 slp_node_instance, false);
7d75abc8
MM
6855 return true;
6856 }
aec7ae7d 6857
0d0293ac 6858 if (grouped_load)
ebfd146a 6859 {
e14c1050 6860 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4f0a0218 6861 /* For SLP vectorization we directly vectorize a subchain
52eab378
RB
6862 without permutation. */
6863 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
4f0a0218
RB
6864 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6865 /* For BB vectorization always use the first stmt to base
6866 the data ref pointer on. */
6867 if (bb_vinfo)
6868 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6aa904c4 6869
ebfd146a 6870 /* Check if the chain of loads is already vectorized. */
01d8bf07
RB
6871 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6872 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6873 ??? But we can only do so if there is exactly one
6874 as we have no way to get at the rest. Leave the CSE
6875 opportunity alone.
6876 ??? With the group load eventually participating
6877 in multiple different permutations (having multiple
6878 slp nodes which refer to the same group) the CSE
6879 is even wrong code. See PR56270. */
6880 && !slp)
ebfd146a
IR
6881 {
6882 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6883 return true;
6884 }
6885 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
e14c1050 6886 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
9b999e8c 6887 group_gap_adj = 0;
ebfd146a
IR
6888
6889 /* VEC_NUM is the number of vect stmts to be created for this group. */
6890 if (slp)
6891 {
0d0293ac 6892 grouped_load = false;
91ff1504
RB
6893	      /* For SLP permutation support we need to load the whole group,
6894	         not only the number of vector stmts that the permutation result
6895	         fits in.  */
6896 if (slp_perm)
6897 vec_num = (group_size * vf + nunits - 1) / nunits;
6898 else
6899 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9b999e8c 6900 group_gap_adj = vf * group_size - nunits * vec_num;
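	      /* For example (illustrative): group_size == 3, vf == 2 and
	         nunits == 4 with a load permutation give
	         vec_num == (3*2 + 4 - 1) / 4 == 2 and
	         group_gap_adj == 2*3 - 4*2 == -2, i.e. two excess elements
	         were loaded and the pointer bump at the end of the copy
	         loop is reduced accordingly.  */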
a70d6342 6901 }
ebfd146a 6902 else
9b999e8c 6903 vec_num = group_size;
ebfd146a
IR
6904 }
6905 else
6906 {
6907 first_stmt = stmt;
6908 first_dr = dr;
6909 group_size = vec_num = 1;
9b999e8c 6910 group_gap_adj = 0;
ebfd146a
IR
6911 }
6912
720f5239 6913 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a 6914 gcc_assert (alignment_support_scheme);
272c6793
RS
6915 /* Targets with load-lane instructions must not require explicit
6916 realignment. */
6917 gcc_assert (!load_lanes_p
6918 || alignment_support_scheme == dr_aligned
6919 || alignment_support_scheme == dr_unaligned_supported);
ebfd146a
IR
6920
6921 /* In case the vectorization factor (VF) is bigger than the number
6922 of elements that we can fit in a vectype (nunits), we have to generate
6923	     more than one vector stmt - i.e., we need to "unroll" the
ff802fa1 6924 vector stmt by a factor VF/nunits. In doing so, we record a pointer
ebfd146a 6925 from one copy of the vector stmt to the next, in the field
ff802fa1 6926 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
ebfd146a 6927 stages to find the correct vector defs to be used when vectorizing
ff802fa1
IR
6928 stmts that use the defs of the current stmt. The example below
6929 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6930 need to create 4 vectorized stmts):
ebfd146a
IR
6931
6932 before vectorization:
6933 RELATED_STMT VEC_STMT
6934 S1: x = memref - -
6935 S2: z = x + 1 - -
6936
6937 step 1: vectorize stmt S1:
6938 We first create the vector stmt VS1_0, and, as usual, record a
6939 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6940 Next, we create the vector stmt VS1_1, and record a pointer to
6941 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
ff802fa1 6942 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
ebfd146a
IR
6943 stmts and pointers:
6944 RELATED_STMT VEC_STMT
6945 VS1_0: vx0 = memref0 VS1_1 -
6946 VS1_1: vx1 = memref1 VS1_2 -
6947 VS1_2: vx2 = memref2 VS1_3 -
6948 VS1_3: vx3 = memref3 - -
6949 S1: x = load - VS1_0
6950 S2: z = x + 1 - -
6951
b8698a0f
L
6952	     See the documentation of vect_get_vec_def_for_stmt_copy for how the
6953	     information we recorded in the RELATED_STMT field is used to vectorize
ebfd146a
IR
6954 stmt S2. */
6955
0d0293ac 6956 /* In case of interleaving (non-unit grouped access):
ebfd146a
IR
6957
6958 S1: x2 = &base + 2
6959 S2: x0 = &base
6960 S3: x1 = &base + 1
6961 S4: x3 = &base + 3
6962
b8698a0f 6963 Vectorized loads are created in the order of memory accesses
ebfd146a
IR
6964 starting from the access of the first stmt of the chain:
6965
6966 VS1: vx0 = &base
6967 VS2: vx1 = &base + vec_size*1
6968	     VS3: vx2 = &base + vec_size*2
6969	     VS4: vx3 = &base + vec_size*3
6970
6971 Then permutation statements are generated:
6972
e2c83630
RH
6973 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6974 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
ebfd146a
IR
6975 ...
6976
6977 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6978 (the order of the data-refs in the output of vect_permute_load_chain
6979 corresponds to the order of scalar stmts in the interleaving chain - see
6980 the documentation of vect_permute_load_chain()).
6981 The generation of permutation stmts and recording them in
0d0293ac 6982 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
ebfd146a 6983
b8698a0f 6984 In case of both multiple types and interleaving, the vector loads and
ff802fa1
IR
6985 permutation stmts above are created for every copy. The result vector
6986 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6987 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
ebfd146a
IR
6988
6989 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6990 on a target that supports unaligned accesses (dr_unaligned_supported)
6991 we generate the following code:
6992 p = initial_addr;
6993 indx = 0;
6994 loop {
6995 p = p + indx * vectype_size;
6996 vec_dest = *(p);
6997 indx = indx + 1;
6998 }
6999
7000 Otherwise, the data reference is potentially unaligned on a target that
b8698a0f 7001 does not support unaligned accesses (dr_explicit_realign_optimized) -
ebfd146a
IR
7002 then generate the following code, in which the data in each iteration is
7003 obtained by two vector loads, one from the previous iteration, and one
7004 from the current iteration:
7005 p1 = initial_addr;
7006 msq_init = *(floor(p1))
7007 p2 = initial_addr + VS - 1;
7008 realignment_token = call target_builtin;
7009 indx = 0;
7010 loop {
7011 p2 = p2 + indx * vectype_size
7012 lsq = *(floor(p2))
7013 vec_dest = realign_load (msq, lsq, realignment_token)
7014 indx = indx + 1;
7015 msq = lsq;
7016 } */
7017
7018 /* If the misalignment remains the same throughout the execution of the
7019 loop, we can create the init_addr and permutation mask at the loop
ff802fa1 7020	     preheader. Otherwise, they need to be created inside the loop.
ebfd146a
IR
7021 This can only occur when vectorizing memory accesses in the inner-loop
7022 nested within an outer-loop that is being vectorized. */
7023
d1e4b493 7024 if (nested_in_vect_loop
211bea38 7025 && (TREE_INT_CST_LOW (DR_STEP (dr))
ebfd146a
IR
7026 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
7027 {
7028 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7029 compute_in_loop = true;
7030 }
7031
7032 if ((alignment_support_scheme == dr_explicit_realign_optimized
7033 || alignment_support_scheme == dr_explicit_realign)
59fd17e3 7034 && !compute_in_loop)
ebfd146a
IR
7035 {
7036 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7037 alignment_support_scheme, NULL_TREE,
7038 &at_loop);
7039 if (alignment_support_scheme == dr_explicit_realign_optimized)
7040 {
538dd0b7 7041 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
356bbc4c
JJ
7042 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7043 size_one_node);
ebfd146a
IR
7044 }
7045 }
7046 else
7047 at_loop = loop;
7048
a1e53f3f
L
7049 if (negative)
7050 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
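  /* E.g. (comment added for clarity): with four-element vectors the
     offset is -(4 - 1) == -3 elements, so each vector load reads the
     region [addr - 3*elem, addr] in ascending memory order; the elements
     are then reversed with perm_mask_for_reverse in the copy loop
     below.  */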
7051
272c6793
RS
7052 if (load_lanes_p)
7053 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7054 else
7055 aggr_type = vectype;
7056
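  /* Sketch (illustrative): for a group of three int loads with V4SI
     vectors, load-lanes uses an int[12] aggregate so that a single
     IFN_LOAD_LANES call fills all three vectors at once; otherwise the
     aggregate is simply one vectype and each vector is loaded
     separately.  */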
ebfd146a
IR
7057 prev_stmt_info = NULL;
7058 for (j = 0; j < ncopies; j++)
b8698a0f 7059 {
272c6793 7060 /* 1. Create the vector or array pointer update chain. */
ebfd146a 7061 if (j == 0)
74bf76ed
JJ
7062 {
7063 bool simd_lane_access_p
7064 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7065 if (simd_lane_access_p
7066 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7067 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7068 && integer_zerop (DR_OFFSET (first_dr))
7069 && integer_zerop (DR_INIT (first_dr))
7070 && alias_sets_conflict_p (get_alias_set (aggr_type),
7071 get_alias_set (DR_REF (first_dr)))
7072 && (alignment_support_scheme == dr_aligned
7073 || alignment_support_scheme == dr_unaligned_supported))
7074 {
7075 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7076 dataref_offset = build_int_cst (reference_alias_ptr_type
7077 (DR_REF (first_dr)), 0);
8928eff3 7078 inv_p = false;
74bf76ed 7079 }
4f0a0218
RB
7080 else if (first_stmt_for_drptr
7081 && first_stmt != first_stmt_for_drptr)
7082 {
7083 dataref_ptr
7084 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7085 at_loop, offset, &dummy, gsi,
7086 &ptr_incr, simd_lane_access_p,
7087 &inv_p, byte_offset);
7088 /* Adjust the pointer by the difference to first_stmt. */
7089 data_reference_p ptrdr
7090 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7091 tree diff = fold_convert (sizetype,
7092 size_binop (MINUS_EXPR,
7093 DR_INIT (first_dr),
7094 DR_INIT (ptrdr)));
7095 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7096 stmt, diff);
7097 }
74bf76ed
JJ
7098 else
7099 dataref_ptr
7100 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7101 offset, &dummy, gsi, &ptr_incr,
356bbc4c
JJ
7102 simd_lane_access_p, &inv_p,
7103 byte_offset);
74bf76ed
JJ
7104 }
7105 else if (dataref_offset)
7106 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7107 TYPE_SIZE_UNIT (aggr_type));
ebfd146a 7108 else
272c6793
RS
7109 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7110 TYPE_SIZE_UNIT (aggr_type));
ebfd146a 7111
0d0293ac 7112 if (grouped_load || slp_perm)
9771b263 7113 dr_chain.create (vec_num);
5ce1ee7f 7114
272c6793 7115 if (load_lanes_p)
ebfd146a 7116 {
272c6793
RS
7117 tree vec_array;
7118
7119 vec_array = create_vector_array (vectype, vec_num);
7120
7121 /* Emit:
7122 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7123 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
7124 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
7125 gimple_call_set_lhs (new_stmt, vec_array);
7126 vect_finish_stmt_generation (stmt, new_stmt, gsi);
ebfd146a 7127
272c6793
RS
7128 /* Extract each vector into an SSA_NAME. */
7129 for (i = 0; i < vec_num; i++)
ebfd146a 7130 {
272c6793
RS
7131 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7132 vec_array, i);
9771b263 7133 dr_chain.quick_push (new_temp);
272c6793
RS
7134 }
7135
7136 /* Record the mapping between SSA_NAMEs and statements. */
0d0293ac 7137 vect_record_grouped_load_vectors (stmt, dr_chain);
272c6793
RS
7138 }
7139 else
7140 {
7141 for (i = 0; i < vec_num; i++)
7142 {
7143 if (i > 0)
7144 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7145 stmt, NULL_TREE);
7146
7147 /* 2. Create the vector-load in the loop. */
7148 switch (alignment_support_scheme)
7149 {
7150 case dr_aligned:
7151 case dr_unaligned_supported:
be1ac4ec 7152 {
644ffefd
MJ
7153 unsigned int align, misalign;
7154
272c6793 7155 data_ref
aed93b23
RB
7156 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7157 dataref_offset
7158 ? dataref_offset
7159 : build_int_cst (reference_alias_ptr_type
7160 (DR_REF (first_dr)), 0));
644ffefd 7161 align = TYPE_ALIGN_UNIT (vectype);
272c6793
RS
7162 if (alignment_support_scheme == dr_aligned)
7163 {
7164 gcc_assert (aligned_access_p (first_dr));
644ffefd 7165 misalign = 0;
272c6793
RS
7166 }
7167 else if (DR_MISALIGNMENT (first_dr) == -1)
7168 {
52639a61
RB
7169 if (DR_VECT_AUX (first_dr)->base_element_aligned)
7170 align = TYPE_ALIGN_UNIT (elem_type);
7171 else
7172 align = (get_object_alignment (DR_REF (first_dr))
7173 / BITS_PER_UNIT);
7174 misalign = 0;
272c6793
RS
7175 TREE_TYPE (data_ref)
7176 = build_aligned_type (TREE_TYPE (data_ref),
52639a61 7177 align * BITS_PER_UNIT);
272c6793
RS
7178 }
7179 else
7180 {
7181 TREE_TYPE (data_ref)
7182 = build_aligned_type (TREE_TYPE (data_ref),
7183 TYPE_ALIGN (elem_type));
644ffefd 7184 misalign = DR_MISALIGNMENT (first_dr);
272c6793 7185 }
aed93b23
RB
7186 if (dataref_offset == NULL_TREE
7187 && TREE_CODE (dataref_ptr) == SSA_NAME)
74bf76ed
JJ
7188 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7189 align, misalign);
272c6793 7190 break;
be1ac4ec 7191 }
272c6793 7192 case dr_explicit_realign:
267d3070 7193 {
272c6793 7194 tree ptr, bump;
272c6793 7195
d88981fc 7196 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
272c6793
RS
7197
7198 if (compute_in_loop)
7199 msq = vect_setup_realignment (first_stmt, gsi,
7200 &realignment_token,
7201 dr_explicit_realign,
7202 dataref_ptr, NULL);
7203
aed93b23
RB
7204 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7205 ptr = copy_ssa_name (dataref_ptr);
7206 else
7207 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
0d0e4a03
JJ
7208 new_stmt = gimple_build_assign
7209 (ptr, BIT_AND_EXPR, dataref_ptr,
272c6793
RS
7210 build_int_cst
7211 (TREE_TYPE (dataref_ptr),
7212 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
272c6793
RS
7213 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7214 data_ref
7215 = build2 (MEM_REF, vectype, ptr,
7216 build_int_cst (reference_alias_ptr_type
7217 (DR_REF (first_dr)), 0));
7218 vec_dest = vect_create_destination_var (scalar_dest,
7219 vectype);
7220 new_stmt = gimple_build_assign (vec_dest, data_ref);
7221 new_temp = make_ssa_name (vec_dest, new_stmt);
7222 gimple_assign_set_lhs (new_stmt, new_temp);
7223 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7224 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7225 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7226 msq = new_temp;
7227
d88981fc 7228 bump = size_binop (MULT_EXPR, vs,
7b7b1813 7229 TYPE_SIZE_UNIT (elem_type));
d88981fc 7230 bump = size_binop (MINUS_EXPR, bump, size_one_node);
272c6793 7231 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
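		  /* Note added for clarity: BUMP is VS * elem_size - 1
		     bytes, i.e. the last byte of the unaligned access, so
		     flooring PTR to the vector alignment below yields the
		     second aligned load ("lsq") of the realign scheme
		     sketched before the copy loop.  */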
0d0e4a03
JJ
7232 new_stmt = gimple_build_assign
7233 (NULL_TREE, BIT_AND_EXPR, ptr,
272c6793
RS
7234 build_int_cst
7235 (TREE_TYPE (ptr),
7236 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
aed93b23 7237 ptr = copy_ssa_name (ptr, new_stmt);
272c6793
RS
7238 gimple_assign_set_lhs (new_stmt, ptr);
7239 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7240 data_ref
7241 = build2 (MEM_REF, vectype, ptr,
7242 build_int_cst (reference_alias_ptr_type
7243 (DR_REF (first_dr)), 0));
7244 break;
267d3070 7245 }
272c6793 7246 case dr_explicit_realign_optimized:
aed93b23
RB
7247 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7248 new_temp = copy_ssa_name (dataref_ptr);
7249 else
7250 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
0d0e4a03
JJ
7251 new_stmt = gimple_build_assign
7252 (new_temp, BIT_AND_EXPR, dataref_ptr,
272c6793
RS
7253 build_int_cst
7254 (TREE_TYPE (dataref_ptr),
7255 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
272c6793
RS
7256 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7257 data_ref
7258 = build2 (MEM_REF, vectype, new_temp,
7259 build_int_cst (reference_alias_ptr_type
7260 (DR_REF (first_dr)), 0));
7261 break;
7262 default:
7263 gcc_unreachable ();
7264 }
ebfd146a 7265 vec_dest = vect_create_destination_var (scalar_dest, vectype);
272c6793 7266 new_stmt = gimple_build_assign (vec_dest, data_ref);
ebfd146a
IR
7267 new_temp = make_ssa_name (vec_dest, new_stmt);
7268 gimple_assign_set_lhs (new_stmt, new_temp);
7269 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7270
272c6793
RS
7271 /* 3. Handle explicit realignment if necessary/supported.
7272 Create in loop:
7273 vec_dest = realign_load (msq, lsq, realignment_token) */
7274 if (alignment_support_scheme == dr_explicit_realign_optimized
7275 || alignment_support_scheme == dr_explicit_realign)
ebfd146a 7276 {
272c6793
RS
7277 lsq = gimple_assign_lhs (new_stmt);
7278 if (!realignment_token)
7279 realignment_token = dataref_ptr;
7280 vec_dest = vect_create_destination_var (scalar_dest, vectype);
0d0e4a03
JJ
7281 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7282 msq, lsq, realignment_token);
272c6793
RS
7283 new_temp = make_ssa_name (vec_dest, new_stmt);
7284 gimple_assign_set_lhs (new_stmt, new_temp);
7285 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7286
7287 if (alignment_support_scheme == dr_explicit_realign_optimized)
7288 {
7289 gcc_assert (phi);
7290 if (i == vec_num - 1 && j == ncopies - 1)
7291 add_phi_arg (phi, lsq,
7292 loop_latch_edge (containing_loop),
9e227d60 7293 UNKNOWN_LOCATION);
272c6793
RS
7294 msq = lsq;
7295 }
ebfd146a 7296 }
ebfd146a 7297
59fd17e3
RB
7298 /* 4. Handle invariant-load. */
7299 if (inv_p && !bb_vinfo)
7300 {
59fd17e3 7301 gcc_assert (!grouped_load);
d1417442
JJ
7302 /* If we have versioned for aliasing or the loop doesn't
7303 have any data dependencies that would preclude this,
7304 then we are sure this is a loop invariant load and
7305 thus we can insert it on the preheader edge. */
7306 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7307 && !nested_in_vect_loop
6b916b36 7308 && hoist_defs_of_uses (stmt, loop))
a0e35eb0
RB
7309 {
7310 if (dump_enabled_p ())
7311 {
7312 dump_printf_loc (MSG_NOTE, vect_location,
7313 "hoisting out of the vectorized "
7314 "loop: ");
7315 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
a0e35eb0 7316 }
b731b390 7317 tree tem = copy_ssa_name (scalar_dest);
a0e35eb0
RB
7318 gsi_insert_on_edge_immediate
7319 (loop_preheader_edge (loop),
7320 gimple_build_assign (tem,
7321 unshare_expr
7322 (gimple_assign_rhs1 (stmt))));
7323 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
34cd48e5
RB
7324 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7325 set_vinfo_for_stmt (new_stmt,
7326 new_stmt_vec_info (new_stmt, vinfo));
a0e35eb0
RB
7327 }
7328 else
7329 {
7330 gimple_stmt_iterator gsi2 = *gsi;
7331 gsi_next (&gsi2);
7332 new_temp = vect_init_vector (stmt, scalar_dest,
7333 vectype, &gsi2);
34cd48e5 7334 new_stmt = SSA_NAME_DEF_STMT (new_temp);
a0e35eb0 7335 }
59fd17e3
RB
7336 }
7337
272c6793
RS
7338 if (negative)
7339 {
aec7ae7d
JJ
7340 tree perm_mask = perm_mask_for_reverse (vectype);
7341 new_temp = permute_vec_elements (new_temp, new_temp,
7342 perm_mask, stmt, gsi);
ebfd146a
IR
7343 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7344 }
267d3070 7345
272c6793 7346 /* Collect vector loads and later create their permutation in
0d0293ac
MM
7347 vect_transform_grouped_load (). */
7348 if (grouped_load || slp_perm)
9771b263 7349 dr_chain.quick_push (new_temp);
267d3070 7350
272c6793
RS
7351 /* Store vector loads in the corresponding SLP_NODE. */
7352 if (slp && !slp_perm)
9771b263 7353 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
272c6793 7354 }
9b999e8c
RB
7355 /* Bump the vector pointer to account for a gap or for excess
7356 elements loaded for a permuted SLP load. */
7357 if (group_gap_adj != 0)
a64b9c26 7358 {
9b999e8c
RB
7359 bool ovf;
7360 tree bump
7361 = wide_int_to_tree (sizetype,
7362 wi::smul (TYPE_SIZE_UNIT (elem_type),
7363 group_gap_adj, &ovf));
a64b9c26
RB
7364 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7365 stmt, bump);
7366 }
ebfd146a
IR
7367 }
7368
7369 if (slp && !slp_perm)
7370 continue;
7371
7372 if (slp_perm)
7373 {
01d8bf07 7374 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
ebfd146a
IR
7375 slp_node_instance, false))
7376 {
9771b263 7377 dr_chain.release ();
ebfd146a
IR
7378 return false;
7379 }
7380 }
7381 else
7382 {
0d0293ac 7383 if (grouped_load)
ebfd146a 7384 {
272c6793 7385 if (!load_lanes_p)
0d0293ac 7386 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
ebfd146a 7387 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
ebfd146a
IR
7388 }
7389 else
7390 {
7391 if (j == 0)
7392 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7393 else
7394 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7395 prev_stmt_info = vinfo_for_stmt (new_stmt);
7396 }
7397 }
9771b263 7398 dr_chain.release ();
ebfd146a
IR
7399 }
7400
ebfd146a
IR
7401 return true;
7402}
7403
7404/* Function vect_is_simple_cond.
b8698a0f 7405
ebfd146a
IR
7406 Input:
7407	   VINFO - the vectorization info for the loop or basic block being vectorized.
7408 COND - Condition that is checked for simple use.
7409
e9e1d143
RG
7410 Output:
7411 *COMP_VECTYPE - the vector type for the comparison.
7412
ebfd146a
IR
7413 Returns whether a COND can be vectorized. Checks whether
7414	   condition operands are supportable using vect_is_simple_use.  */
7415
87aab9b2 7416static bool
81c40241 7417vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
ebfd146a
IR
7418{
7419 tree lhs, rhs;
ebfd146a 7420 enum vect_def_type dt;
e9e1d143 7421 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
ebfd146a 7422
a414c77f
IE
7423 /* Mask case. */
7424 if (TREE_CODE (cond) == SSA_NAME
7425 && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
7426 {
7427 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7428 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7429 &dt, comp_vectype)
7430 || !*comp_vectype
7431 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7432 return false;
7433 return true;
7434 }
7435
ebfd146a
IR
7436 if (!COMPARISON_CLASS_P (cond))
7437 return false;
7438
7439 lhs = TREE_OPERAND (cond, 0);
7440 rhs = TREE_OPERAND (cond, 1);
7441
7442 if (TREE_CODE (lhs) == SSA_NAME)
7443 {
355fe088 7444 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
81c40241 7445 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
ebfd146a
IR
7446 return false;
7447 }
7448 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7449 && TREE_CODE (lhs) != FIXED_CST)
7450 return false;
7451
7452 if (TREE_CODE (rhs) == SSA_NAME)
7453 {
355fe088 7454 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
81c40241 7455 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
ebfd146a
IR
7456 return false;
7457 }
f7e531cf 7458 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
ebfd146a
IR
7459 && TREE_CODE (rhs) != FIXED_CST)
7460 return false;
7461
28b33016
IE
7462 if (vectype1 && vectype2
7463 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7464 return false;
7465
e9e1d143 7466 *comp_vectype = vectype1 ? vectype1 : vectype2;
ebfd146a
IR
7467 return true;
7468}
7469
7470/* vectorizable_condition.
7471
b8698a0f
L
7472	   Check if STMT is a conditional modify expression that can be vectorized.
7473 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7474 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4bbe8262
IR
7475 at GSI.
7476
7477	   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7478	   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
0ad23163 7479	   the else clause if it is 2).
ebfd146a
IR
7480
7481 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7482
4bbe8262 7483bool
355fe088
TS
7484vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7485 gimple **vec_stmt, tree reduc_def, int reduc_index,
f7e531cf 7486 slp_tree slp_node)
ebfd146a
IR
7487{
7488 tree scalar_dest = NULL_TREE;
7489 tree vec_dest = NULL_TREE;
ebfd146a
IR
7490 tree cond_expr, then_clause, else_clause;
7491 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
df11cc78 7492 tree comp_vectype = NULL_TREE;
ff802fa1
IR
7493 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7494 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
5958f9e2 7495 tree vec_compare;
ebfd146a
IR
7496 tree new_temp;
7497 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a855b1b1 7498 enum vect_def_type dt, dts[4];
f7e531cf 7499 int ncopies;
ebfd146a 7500 enum tree_code code;
a855b1b1 7501 stmt_vec_info prev_stmt_info = NULL;
f7e531cf
IR
7502 int i, j;
7503 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6e1aa848
DN
7504 vec<tree> vec_oprnds0 = vNULL;
7505 vec<tree> vec_oprnds1 = vNULL;
7506 vec<tree> vec_oprnds2 = vNULL;
7507 vec<tree> vec_oprnds3 = vNULL;
74946978 7508 tree vec_cmp_type;
a414c77f 7509 bool masked = false;
b8698a0f 7510
f7e531cf
IR
7511 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7512 return false;
7513
af29617a
AH
7514 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7515 {
7516 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7517 return false;
ebfd146a 7518
af29617a
AH
7519 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7520 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7521 && reduc_def))
7522 return false;
ebfd146a 7523
af29617a
AH
7524 /* FORNOW: not yet supported. */
7525 if (STMT_VINFO_LIVE_P (stmt_info))
7526 {
7527 if (dump_enabled_p ())
7528 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7529 "value used after loop.\n");
7530 return false;
7531 }
ebfd146a
IR
7532 }
7533
7534 /* Is vectorizable conditional operation? */
7535 if (!is_gimple_assign (stmt))
7536 return false;
7537
7538 code = gimple_assign_rhs_code (stmt);
7539
7540 if (code != COND_EXPR)
7541 return false;
7542
465c8c19
JJ
7543 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7544 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2947d3b2 7545 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
465c8c19
JJ
7546
7547 if (slp_node || PURE_SLP_STMT (stmt_info))
7548 ncopies = 1;
7549 else
7550 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7551
7552 gcc_assert (ncopies >= 1);
7553 if (reduc_index && ncopies > 1)
7554 return false; /* FORNOW */
7555
4e71066d
RG
7556 cond_expr = gimple_assign_rhs1 (stmt);
7557 then_clause = gimple_assign_rhs2 (stmt);
7558 else_clause = gimple_assign_rhs3 (stmt);
ebfd146a 7559
81c40241 7560 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
e9e1d143 7561 || !comp_vectype)
ebfd146a
IR
7562 return false;
7563
81c40241 7564 gimple *def_stmt;
2947d3b2
IE
7565 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt,
7566 &vectype1))
7567 return false;
7568 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt,
7569 &vectype2))
ebfd146a 7570 return false;
2947d3b2
IE
7571
7572 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
7573 return false;
7574
7575 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
ebfd146a
IR
7576 return false;
7577
28b33016
IE
7578 masked = !COMPARISON_CLASS_P (cond_expr);
7579 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7580
74946978
MP
7581 if (vec_cmp_type == NULL_TREE)
7582 return false;
784fb9b3 7583
b8698a0f 7584 if (!vec_stmt)
ebfd146a
IR
7585 {
7586 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
e9e1d143 7587 return expand_vec_cond_expr_p (vectype, comp_vectype);
ebfd146a
IR
7588 }
7589
f7e531cf
IR
7590 /* Transform. */
7591
7592 if (!slp_node)
7593 {
9771b263
DN
7594 vec_oprnds0.create (1);
7595 vec_oprnds1.create (1);
7596 vec_oprnds2.create (1);
7597 vec_oprnds3.create (1);
f7e531cf 7598 }
ebfd146a
IR
7599
7600 /* Handle def. */
7601 scalar_dest = gimple_assign_lhs (stmt);
7602 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7603
7604 /* Handle cond expr. */
a855b1b1
MM
7605 for (j = 0; j < ncopies; j++)
7606 {
538dd0b7 7607 gassign *new_stmt = NULL;
a855b1b1
MM
7608 if (j == 0)
7609 {
f7e531cf
IR
7610 if (slp_node)
7611 {
00f96dc9
TS
7612 auto_vec<tree, 4> ops;
7613 auto_vec<vec<tree>, 4> vec_defs;
9771b263 7614
a414c77f
IE
7615 if (masked)
7616 ops.safe_push (cond_expr);
7617 else
7618 {
7619 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7620 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7621 }
9771b263
DN
7622 ops.safe_push (then_clause);
7623 ops.safe_push (else_clause);
f7e531cf 7624 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
37b5ec8f
JJ
7625 vec_oprnds3 = vec_defs.pop ();
7626 vec_oprnds2 = vec_defs.pop ();
a414c77f
IE
7627 if (!masked)
7628 vec_oprnds1 = vec_defs.pop ();
37b5ec8f 7629 vec_oprnds0 = vec_defs.pop ();
f7e531cf 7630
9771b263
DN
7631 ops.release ();
7632 vec_defs.release ();
f7e531cf
IR
7633 }
7634 else
7635 {
355fe088 7636 gimple *gtemp;
a414c77f
IE
7637 if (masked)
7638 {
7639 vec_cond_lhs
7640 = vect_get_vec_def_for_operand (cond_expr, stmt,
7641 comp_vectype);
7642 vect_is_simple_use (cond_expr, stmt_info->vinfo,
7643 &gtemp, &dts[0]);
7644 }
7645 else
7646 {
7647 vec_cond_lhs =
7648 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7649 stmt, comp_vectype);
7650 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7651 loop_vinfo, &gtemp, &dts[0]);
7652
7653 vec_cond_rhs =
7654 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7655 stmt, comp_vectype);
7656 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7657 loop_vinfo, &gtemp, &dts[1]);
7658 }
f7e531cf
IR
7659 if (reduc_index == 1)
7660 vec_then_clause = reduc_def;
7661 else
7662 {
7663 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
81c40241
RB
7664 stmt);
7665 vect_is_simple_use (then_clause, loop_vinfo,
7666 &gtemp, &dts[2]);
f7e531cf
IR
7667 }
7668 if (reduc_index == 2)
7669 vec_else_clause = reduc_def;
7670 else
7671 {
7672 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
81c40241
RB
7673 stmt);
7674 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
f7e531cf 7675 }
a855b1b1
MM
7676 }
7677 }
7678 else
7679 {
a414c77f
IE
7680 vec_cond_lhs
7681 = vect_get_vec_def_for_stmt_copy (dts[0],
7682 vec_oprnds0.pop ());
7683 if (!masked)
7684 vec_cond_rhs
7685 = vect_get_vec_def_for_stmt_copy (dts[1],
7686 vec_oprnds1.pop ());
7687
a855b1b1 7688 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
9771b263 7689 vec_oprnds2.pop ());
a855b1b1 7690 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
9771b263 7691 vec_oprnds3.pop ());
f7e531cf
IR
7692 }
7693
7694 if (!slp_node)
7695 {
9771b263 7696 vec_oprnds0.quick_push (vec_cond_lhs);
a414c77f
IE
7697 if (!masked)
7698 vec_oprnds1.quick_push (vec_cond_rhs);
9771b263
DN
7699 vec_oprnds2.quick_push (vec_then_clause);
7700 vec_oprnds3.quick_push (vec_else_clause);
a855b1b1
MM
7701 }
7702
9dc3f7de 7703 /* Arguments are ready. Create the new vector stmt. */
9771b263 7704 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
f7e531cf 7705 {
9771b263
DN
7706 vec_then_clause = vec_oprnds2[i];
7707 vec_else_clause = vec_oprnds3[i];
a855b1b1 7708
a414c77f
IE
7709 if (masked)
7710 vec_compare = vec_cond_lhs;
7711 else
7712 {
7713 vec_cond_rhs = vec_oprnds1[i];
7714 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7715 vec_cond_lhs, vec_cond_rhs);
7716 }
5958f9e2
JJ
7717 new_temp = make_ssa_name (vec_dest);
7718 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
7719 vec_compare, vec_then_clause,
7720 vec_else_clause);
f7e531cf
IR
7721 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7722 if (slp_node)
9771b263 7723 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
f7e531cf
IR
7724 }
7725
7726 if (slp_node)
7727 continue;
7728
7729 if (j == 0)
7730 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7731 else
7732 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7733
7734 prev_stmt_info = vinfo_for_stmt (new_stmt);
a855b1b1 7735 }
b8698a0f 7736
9771b263
DN
7737 vec_oprnds0.release ();
7738 vec_oprnds1.release ();
7739 vec_oprnds2.release ();
7740 vec_oprnds3.release ();
f7e531cf 7741
ebfd146a
IR
7742 return true;
7743}
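/* Illustrative example (added; the SSA names are hypothetical): a scalar
   statement like
     x_1 = a_2 < b_3 ? c_4 : d_5;
   is vectorized by the function above into
     vx_6 = VEC_COND_EXPR <va_7 < vb_8, vc_9, vd_10>;
   on targets where expand_vec_cond_expr_p confirmed support.  */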
7744
42fd8198
IE
7745/* vectorizable_comparison.
7746
7747	   Check if STMT is a comparison expression that can be vectorized.
7748 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7749 comparison, put it in VEC_STMT, and insert it at GSI.
7750
7751 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7752
7753bool
7754vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
7755 gimple **vec_stmt, tree reduc_def,
7756 slp_tree slp_node)
7757{
7758 tree lhs, rhs1, rhs2;
7759 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7760 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7761 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7762 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
7763 tree new_temp;
7764 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7765 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
7766 unsigned nunits;
7767 int ncopies;
7768 enum tree_code code;
7769 stmt_vec_info prev_stmt_info = NULL;
7770 int i, j;
7771 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7772 vec<tree> vec_oprnds0 = vNULL;
7773 vec<tree> vec_oprnds1 = vNULL;
7774 gimple *def_stmt;
7775 tree mask_type;
7776 tree mask;
7777
c245362b
IE
7778 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7779 return false;
7780
30480bcd 7781 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
42fd8198
IE
7782 return false;
7783
7784 mask_type = vectype;
7785 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7786
7787 if (slp_node || PURE_SLP_STMT (stmt_info))
7788 ncopies = 1;
7789 else
7790 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7791
7792 gcc_assert (ncopies >= 1);
42fd8198
IE
7793 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7794 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7795 && reduc_def))
7796 return false;
7797
7798 if (STMT_VINFO_LIVE_P (stmt_info))
7799 {
7800 if (dump_enabled_p ())
7801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7802 "value used after loop.\n");
7803 return false;
7804 }
7805
7806 if (!is_gimple_assign (stmt))
7807 return false;
7808
7809 code = gimple_assign_rhs_code (stmt);
7810
7811 if (TREE_CODE_CLASS (code) != tcc_comparison)
7812 return false;
7813
7814 rhs1 = gimple_assign_rhs1 (stmt);
7815 rhs2 = gimple_assign_rhs2 (stmt);
7816
7817 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
7818 &dts[0], &vectype1))
7819 return false;
7820
7821 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
7822 &dts[1], &vectype2))
7823 return false;
7824
7825 if (vectype1 && vectype2
7826 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7827 return false;
7828
7829 vectype = vectype1 ? vectype1 : vectype2;
7830
7831 /* Invariant comparison. */
7832 if (!vectype)
7833 {
7834 vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
7835 if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
7836 return false;
7837 }
7838 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
7839 return false;
7840
7841 if (!vec_stmt)
7842 {
7843 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
7844 vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
7845 return expand_vec_cmp_expr_p (vectype, mask_type);
7846 }
7847
7848 /* Transform. */
7849 if (!slp_node)
7850 {
7851 vec_oprnds0.create (1);
7852 vec_oprnds1.create (1);
7853 }
7854
7855 /* Handle def. */
7856 lhs = gimple_assign_lhs (stmt);
7857 mask = vect_create_destination_var (lhs, mask_type);
7858
7859 /* Handle cmp expr. */
7860 for (j = 0; j < ncopies; j++)
7861 {
7862 gassign *new_stmt = NULL;
7863 if (j == 0)
7864 {
7865 if (slp_node)
7866 {
7867 auto_vec<tree, 2> ops;
7868 auto_vec<vec<tree>, 2> vec_defs;
7869
7870 ops.safe_push (rhs1);
7871 ops.safe_push (rhs2);
7872 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7873 vec_oprnds1 = vec_defs.pop ();
7874 vec_oprnds0 = vec_defs.pop ();
7875 }
7876 else
7877 {
e4af0bc4
IE
7878 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
7879 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
42fd8198
IE
7880 }
7881 }
7882 else
7883 {
7884 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
7885 vec_oprnds0.pop ());
7886 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
7887 vec_oprnds1.pop ());
7888 }
7889
7890 if (!slp_node)
7891 {
7892 vec_oprnds0.quick_push (vec_rhs1);
7893 vec_oprnds1.quick_push (vec_rhs2);
7894 }
7895
7896 /* Arguments are ready. Create the new vector stmt. */
7897 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
7898 {
7899 vec_rhs2 = vec_oprnds1[i];
7900
7901 new_temp = make_ssa_name (mask);
7902 new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
7903 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7904 if (slp_node)
7905 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7906 }
7907
7908 if (slp_node)
7909 continue;
7910
7911 if (j == 0)
7912 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7913 else
7914 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7915
7916 prev_stmt_info = vinfo_for_stmt (new_stmt);
7917 }
7918
7919 vec_oprnds0.release ();
7920 vec_oprnds1.release ();
7921
7922 return true;
7923}
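/* Illustrative example (added; the SSA names are hypothetical): a scalar
   mask-producing comparison like
     k_1 = a_2 < b_3;
   becomes a vector comparison
     vk_4 = va_5 < vb_6;
   with a vector boolean result type, on targets where
   expand_vec_cmp_expr_p holds.  */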
ebfd146a 7924
8644a673 7925/* Make sure the statement is vectorizable. */
ebfd146a
IR
7926
7927bool
355fe088 7928vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
ebfd146a 7929{
8644a673 7930 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
a70d6342 7931 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 7932 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
ebfd146a 7933 bool ok;
a70d6342 7934 tree scalar_type, vectype;
355fe088 7935 gimple *pattern_stmt;
363477c0 7936 gimple_seq pattern_def_seq;
ebfd146a 7937
73fbfcad 7938 if (dump_enabled_p ())
ebfd146a 7939 {
78c60e3d
SS
7940 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7941 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8644a673 7942 }
ebfd146a 7943
1825a1f3 7944 if (gimple_has_volatile_ops (stmt))
b8698a0f 7945 {
73fbfcad 7946 if (dump_enabled_p ())
78c60e3d 7947 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 7948 "not vectorized: stmt has volatile operands\n");
1825a1f3
IR
7949
7950 return false;
7951 }
b8698a0f
L
7952
7953 /* Skip stmts that do not need to be vectorized. In loops this is expected
8644a673
IR
7954 to include:
7955 - the COND_EXPR which is the loop exit condition
7956 - any LABEL_EXPRs in the loop
b8698a0f 7957 - computations that are used only for array indexing or loop control.
8644a673 7958 In basic blocks we only analyze statements that are a part of some SLP
83197f37 7959 instance, therefore, all the statements are relevant.
ebfd146a 7960
d092494c 7961 Pattern statement needs to be analyzed instead of the original statement
83197f37 7962 if the original statement is not relevant. Otherwise, we analyze both
079c527f
JJ
7963	     statements.  In basic blocks we are called from some SLP instance
7964	     traversal; there we don't analyze pattern stmts instead, as the
7965	     pattern stmts will already be part of the SLP instance.  */
83197f37
IR
7966
7967 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
b8698a0f 7968 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 7969 && !STMT_VINFO_LIVE_P (stmt_info))
ebfd146a 7970 {
9d5e7640 7971 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
83197f37 7972 && pattern_stmt
9d5e7640
IR
7973 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7974 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7975 {
83197f37 7976 /* Analyze PATTERN_STMT instead of the original stmt. */
9d5e7640
IR
7977 stmt = pattern_stmt;
7978 stmt_info = vinfo_for_stmt (pattern_stmt);
73fbfcad 7979 if (dump_enabled_p ())
9d5e7640 7980 {
78c60e3d
SS
7981 dump_printf_loc (MSG_NOTE, vect_location,
7982 "==> examining pattern statement: ");
7983 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9d5e7640
IR
7984 }
7985 }
7986 else
7987 {
73fbfcad 7988 if (dump_enabled_p ())
e645e942 7989 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
ebfd146a 7990
9d5e7640
IR
7991 return true;
7992 }
8644a673 7993 }
83197f37 7994 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
079c527f 7995 && node == NULL
83197f37
IR
7996 && pattern_stmt
7997 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7998 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7999 {
8000 /* Analyze PATTERN_STMT too. */
73fbfcad 8001 if (dump_enabled_p ())
83197f37 8002 {
78c60e3d
SS
8003 dump_printf_loc (MSG_NOTE, vect_location,
8004 "==> examining pattern statement: ");
8005 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
83197f37
IR
8006 }
8007
8008 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
8009 return false;
8010 }
ebfd146a 8011
1107f3ae 8012 if (is_pattern_stmt_p (stmt_info)
079c527f 8013 && node == NULL
363477c0 8014 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
1107f3ae 8015 {
363477c0 8016 gimple_stmt_iterator si;
1107f3ae 8017
363477c0
JJ
8018 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8019 {
355fe088 8020 gimple *pattern_def_stmt = gsi_stmt (si);
363477c0
JJ
8021 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8022 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8023 {
8024 /* Analyze def stmt of STMT if it's a pattern stmt. */
73fbfcad 8025 if (dump_enabled_p ())
363477c0 8026 {
78c60e3d
SS
8027 dump_printf_loc (MSG_NOTE, vect_location,
8028 "==> examining pattern def statement: ");
8029 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
363477c0 8030 }
1107f3ae 8031
363477c0
JJ
8032 if (!vect_analyze_stmt (pattern_def_stmt,
8033 need_to_vectorize, node))
8034 return false;
8035 }
8036 }
8037 }
1107f3ae 8038
8644a673
IR
8039 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8040 {
8041 case vect_internal_def:
8042 break;
ebfd146a 8043
8644a673 8044 case vect_reduction_def:
7c5222ff 8045 case vect_nested_cycle:
14a61437
RB
8046 gcc_assert (!bb_vinfo
8047 && (relevance == vect_used_in_outer
8048 || relevance == vect_used_in_outer_by_reduction
8049 || relevance == vect_used_by_reduction
8050 || relevance == vect_unused_in_scope));
8644a673
IR
8051 break;
8052
8053 case vect_induction_def:
8054 case vect_constant_def:
8055 case vect_external_def:
8056 case vect_unknown_def_type:
8057 default:
8058 gcc_unreachable ();
8059 }
ebfd146a 8060
a70d6342
IR
8061 if (bb_vinfo)
8062 {
8063 gcc_assert (PURE_SLP_STMT (stmt_info));
8064
b690cc0f 8065 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
73fbfcad 8066 if (dump_enabled_p ())
a70d6342 8067 {
78c60e3d
SS
8068 dump_printf_loc (MSG_NOTE, vect_location,
8069 "get vectype for scalar type: ");
8070 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
e645e942 8071 dump_printf (MSG_NOTE, "\n");
a70d6342
IR
8072 }
8073
8074 vectype = get_vectype_for_scalar_type (scalar_type);
8075 if (!vectype)
8076 {
73fbfcad 8077 if (dump_enabled_p ())
a70d6342 8078 {
78c60e3d
SS
8079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8080 "not SLPed: unsupported data-type ");
8081 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
8082 scalar_type);
e645e942 8083 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
a70d6342
IR
8084 }
8085 return false;
8086 }
8087
73fbfcad 8088 if (dump_enabled_p ())
a70d6342 8089 {
78c60e3d
SS
8090 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
8091 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
e645e942 8092 dump_printf (MSG_NOTE, "\n");
a70d6342
IR
8093 }
8094
8095 STMT_VINFO_VECTYPE (stmt_info) = vectype;
8096 }
8097
8644a673 8098 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 8099 {
8644a673 8100 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
0136f8f0
AH
8101 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8102 || (is_gimple_call (stmt)
8103 && gimple_call_lhs (stmt) == NULL_TREE));
8644a673 8104 *need_to_vectorize = true;
ebfd146a
IR
8105 }
8106
b1af7da6
RB
8107 if (PURE_SLP_STMT (stmt_info) && !node)
8108 {
8109 dump_printf_loc (MSG_NOTE, vect_location,
8110 "handled only by SLP analysis\n");
8111 return true;
8112 }
8113
8114 ok = true;
8115 if (!bb_vinfo
8116 && (STMT_VINFO_RELEVANT_P (stmt_info)
8117 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8118 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8119 || vectorizable_conversion (stmt, NULL, NULL, node)
8120 || vectorizable_shift (stmt, NULL, NULL, node)
8121 || vectorizable_operation (stmt, NULL, NULL, node)
8122 || vectorizable_assignment (stmt, NULL, NULL, node)
8123 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8124 || vectorizable_call (stmt, NULL, NULL, node)
8125 || vectorizable_store (stmt, NULL, NULL, node)
8126 || vectorizable_reduction (stmt, NULL, NULL, node)
42fd8198
IE
8127 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8128 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
b1af7da6
RB
8129 else
8130 {
8131 if (bb_vinfo)
8132 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8133 || vectorizable_conversion (stmt, NULL, NULL, node)
8134 || vectorizable_shift (stmt, NULL, NULL, node)
8135 || vectorizable_operation (stmt, NULL, NULL, node)
8136 || vectorizable_assignment (stmt, NULL, NULL, node)
8137 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8138 || vectorizable_call (stmt, NULL, NULL, node)
8139 || vectorizable_store (stmt, NULL, NULL, node)
42fd8198
IE
8140 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8141 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
b1af7da6 8142 }
8644a673
IR
8143
8144 if (!ok)
ebfd146a 8145 {
73fbfcad 8146 if (dump_enabled_p ())
8644a673 8147 {
78c60e3d
SS
8148 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8149 "not vectorized: relevant stmt not ");
8150 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8151 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8644a673 8152 }
b8698a0f 8153
ebfd146a
IR
8154 return false;
8155 }
8156
a70d6342
IR
8157 if (bb_vinfo)
8158 return true;
8159
8644a673
IR
8160 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
8161 need extra handling, except for vectorizable reductions. */
8162 if (STMT_VINFO_LIVE_P (stmt_info)
8163 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8164 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 8165
8644a673 8166 if (!ok)
ebfd146a 8167 {
73fbfcad 8168 if (dump_enabled_p ())
8644a673 8169 {
78c60e3d
SS
8170 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8171 "not vectorized: live stmt not ");
8172 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8173 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8644a673 8174 }
b8698a0f 8175
8644a673 8176 return false;
ebfd146a
IR
8177 }
8178
ebfd146a
IR
8179 return true;
8180}
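
/* Illustrative sketch, not compiler code: for a source loop such as

     void
     scale (int *restrict a, int *restrict b, int n)
     {
       for (int i = 0; i < n; i++)
         b[i] = 2 * a[i];
     }

   vect_analyze_stmt runs each statement through the vectorizable_*
   checks above: the load from a[i] is claimed by vectorizable_load,
   the multiplication by vectorizable_operation, and the store to b[i]
   by vectorizable_store.  The names 'scale', 'a', 'b' and 'n' are made
   up for this example.  */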


/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info)
             == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
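
/* Sketch of the interleaved-store case handled above (the source code
   below is made up for illustration):

     void
     fill_pairs (int *restrict p, int x, int y, int n)
     {
       for (int i = 0; i < n; i++)
         {
           p[2 * i] = x;
           p[2 * i + 1] = y;
         }
     }

   The two stores form one interleaving group.  Outside of SLP,
   vect_transform_stmt sets *GROUPED_STORE when it reaches either of
   them, but the vectorized stores for the whole chain are only emitted
   once the last store of the group has been transformed.  */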


/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}


/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}


/* Create the vector used to look up stmt_vec_info structs.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free the vector used to look up stmt_vec_info structs.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}


/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}

unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
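
/* Usage sketch (hypothetical values; the actual modes depend on the
   target): on a target whose preferred SIMD width is 16 bytes, the
   first successful call latches current_vector_size at 16 and later
   calls return 16-byte vector types:

     tree vi = get_vectype_for_scalar_type (integer_type_node);
     tree vs = get_vectype_for_scalar_type (short_integer_type_node);

   Here VI would be a 4-element int vector (e.g. V4SImode) and VS an
   8-element short vector (e.g. V8HImode).  */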

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
                                  current_vector_size);
}
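
/* For example (a sketch; how the mask is represented is entirely
   target-dependent): with current_vector_size == 16, comparing two
   vector(4) int operands yields the mask type produced by
   build_truth_vector_type (4, 16), i.e. a 4-element boolean vector.  */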

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
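
/* Classification sketch (the variables are made up for illustration):
   given

     int c = init ();
     for (int i = 0; i < n; i++)
       {
         int x = a[i];
         b[i] = x + c + 1;
       }

   vect_is_simple_use classifies the use of 'x' as vect_internal_def
   (defined by a statement inside the vectorized region), 'c' as
   vect_external_def (defined before the loop), and the literal 1 as
   vect_constant_def.  */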


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT,
   WIDEN_MULT, WIDEN_LSHIFT, DOT_PROD and SAD.  This function checks if
   these operations are supported by the target platform either directly
   (via vector tree-codes), or via target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
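      /* A concrete instance of the reduction special case above
         (illustrative source, not compiler code):

            int s = 0;
            for (int i = 0; i < n; i++)
              s += (int) a[i] * (int) b[i];

         Only the final sum of all lanes is observable, so producing the
         products in even/odd lane order via {mult_even,mult_odd} is just
         as correct as the in-order lo/hi pair.  */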
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
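
/* Worked example (a sketch; whether each step is supported is
   target-dependent): for the char -> int widening in

     for (int i = 0; i < n; i++)
       out[i] = (int) in[i];

   with VECTYPE_IN holding 16 chars and VECTYPE_OUT holding 4 ints, the
   conversion is performed in two doubling steps, char -> short -> int.
   On success the function returns CODE1/CODE2 ==
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT == 1, and
   INTERM_TYPES containing the single intermediate vector type of 8
   shorts.  */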


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For a multi-step FIX_TRUNC_EXPR prefer a signed floating-to-integer
     conversion over an unsigned one, as unsigned FIX_TRUNC_EXPR is often
     more costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the demotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
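
/* Worked example (a sketch; support for each step is target-dependent):
   for the int -> char narrowing in

     for (int i = 0; i < n; i++)
       out[i] = (char) in[i];

   with VECTYPE_IN holding 4 ints and VECTYPE_OUT holding 16 chars, two
   VEC_PACK_TRUNC_EXPR steps are needed (int -> short -> char), so on
   success CODE1 == VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT == 1, and
   INTERM_TYPES contains the intermediate vector type of shorts.  */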