/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  struct ptr_info_def *pi;
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  pi = get_ptr_info (ptr);
  pi->align = TYPE_ALIGN_UNIT (type);
  pi->misalign = 0;
  return mem_ref;
}
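
/* The three helpers above cooperate when a strided access is vectorized
   with the target's load-lanes/store-lanes support.  A rough sketch of
   the store side (names illustrative, not actual callers):

     array = create_vector_array (vectype, nelems);
     for (i = 0; i < nelems; i++)
       write_vector_array (stmt, gsi, vec_oprnds[i], array, i);
     data_ref = create_array_ref (TREE_TYPE (array), ptr, first_dr);
     ... emit the store-lanes statement from ARRAY to DATA_REF ...

   The load side runs in the opposite direction, reading each vector
   back out with read_vector_array.  */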

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}


/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}


/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
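
/* For example, in "x_3 = a[i_5]" the only use of i_5 feeds the array
   index, so the function returns false and the stmt defining i_5 need
   not be vectorized; but in "a[i_5] = i_5" the stored value is a real
   (non-indexing) use of i_5 and the function returns true.  */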


/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}


/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "unsupported use of reduction.");

                  VEC_free (gimple, heap, worklist);
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of nested cycle.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of double reduction.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
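
/* For instance, in the example above, if T1 feeds a store that is
   vectorized, popping that store from the worklist marks stmt 2 as
   relevant via process_use.  Stmt 2's use of T0 is an array index only,
   so exist_non_indexing_operands_for_use_p stops the propagation there,
   and stmts 1 and 3 are never marked and remain scalar.  */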


/* Get the cost of a statement of kind TYPE_OF_COST by calling the
   target's cost hook.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}

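/* The values returned above are abstract, relative cost units; they are
   only ever compared against each other when deciding whether
   vectorization is profitable, so their absolute magnitudes do not
   matter.  A target that does not override the hook typically charges a
   small constant for most statement kinds and more for unaligned
   accesses.  */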

/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming a maximum of 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}


/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}


/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
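
/* As a worked example of the interleave term above: for a store group
   with GROUP_SIZE == 4 and NCOPIES == 1, exact_log2 (4) == 2 levels of
   high/low interleaving over 4 vectors give 2 * 4 == 8 permute
   statements, so the term adds 8 * vect_get_stmt_cost (vector_stmt)
   to the inside cost.  */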


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                vectype, DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
                      slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
                      ((!STMT_VINFO_STRIDED_ACCESS (stmt_info))
                       || group_size > 1 || slp_node),
                      &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                vectype, DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
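
/* For example, vect_get_vec_def_for_operand below uses this to turn a
   scalar constant C into a statement of the form

     vect_cst_.N = { C, C, ..., C };

   inserted on the loop preheader edge (GSI passed as NULL), so that the
   vector definition is available across the whole loop.  */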


/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        vec_cst = build_vector_from_val (vector_type, op);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}


/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
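
/* For example, with a loop body containing "y[i] = sqrtf (x[i])", the
   call resolves to the BUILT_IN_SQRTF fndecl, and a target such as x86
   can return a vector single-precision sqrt builtin from the hook
   above; if the hook returns NULL_TREE the call is not vectorized.  */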

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
1675
1676
1677/* Function vect_gen_widened_results_half
1678
1679 Create a vector stmt whose code, type, number of arguments, and result
1680   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1681   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
1682 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1683 needs to be created (DECL is a function-decl of a target-builtin).
1684 STMT is the original scalar stmt that we are vectorizing. */
1685
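/* Usage sketch (editorial): vectorizable_type_promotion calls this twice,
   once with CODE1/DECL1 for the low half and once with CODE2/DECL2 for the
   high half of a widening operation, each call yielding one vector stmt
   that holds half of the widened results.  */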
1686static gimple
1687vect_gen_widened_results_half (enum tree_code code,
1688 tree decl,
1689 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1690 tree vec_dest, gimple_stmt_iterator *gsi,
1691 gimple stmt)
1692{
1693  gimple new_stmt;
1694 tree new_temp;
1695
1696 /* Generate half of the widened result: */
1697 if (code == CALL_EXPR)
1698 {
1699 /* Target specific support */
1700 if (op_type == binary_op)
1701 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1702 else
1703 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1704 new_temp = make_ssa_name (vec_dest, new_stmt);
1705 gimple_call_set_lhs (new_stmt, new_temp);
1706 }
1707 else
1708    {
1709 /* Generic support */
1710 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1711 if (op_type != binary_op)
1712 vec_oprnd1 = NULL;
1713 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1714 vec_oprnd1);
1715 new_temp = make_ssa_name (vec_dest, new_stmt);
1716 gimple_assign_set_lhs (new_stmt, new_temp);
1717    }
1718 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1719
1720 return new_stmt;
1721}
1722
1723
1724/* Check if STMT performs a conversion operation that can be vectorized.
1725 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1726 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1727 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1728
1729static bool
1730vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1731 gimple *vec_stmt, slp_tree slp_node)
1732{
1733 tree vec_dest;
1734 tree scalar_dest;
1735 tree op0;
1736 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1737 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1738 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1739 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1740 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1741 tree new_temp;
1742 tree def;
1743 gimple def_stmt;
1744 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1745 gimple new_stmt = NULL;
1746 stmt_vec_info prev_stmt_info;
1747 int nunits_in;
1748 int nunits_out;
1749 tree vectype_out, vectype_in;
1750 int ncopies, j;
1751  tree rhs_type;
1752 tree builtin_decl;
1753 enum { NARROW, NONE, WIDEN } modifier;
1754 int i;
1755 VEC(tree,heap) *vec_oprnds0 = NULL;
1756 tree vop0;
ebfd146a
IR
1757 VEC(tree,heap) *dummy = NULL;
1758 int dummy_int;
1759
1760 /* Is STMT a vectorizable conversion? */
1761
1762 /* FORNOW: unsupported in basic block SLP. */
1763 gcc_assert (loop_vinfo);
1764
1765 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1766 return false;
1767
1768  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1769 return false;
1770
1771 if (!is_gimple_assign (stmt))
1772 return false;
1773
1774 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1775 return false;
1776
1777 code = gimple_assign_rhs_code (stmt);
1778 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1779 return false;
1780
1781 /* Check types of lhs and rhs. */
1782 scalar_dest = gimple_assign_lhs (stmt);
1783 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1784
1785 op0 = gimple_assign_rhs1 (stmt);
1786 rhs_type = TREE_TYPE (op0);
1787 /* Check the operands of the operation. */
1788 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1789 &def_stmt, &def, &dt[0], &vectype_in))
1790 {
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "use not simple.");
1793 return false;
1794 }
1795  /* If op0 is an external or constant def use a vector type of
1796 the same size as the output vector type. */
1797  if (!vectype_in)
1798    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1799 if (vec_stmt)
1800 gcc_assert (vectype_in);
1801 if (!vectype_in)
1802 {
1803 if (vect_print_dump_info (REPORT_DETAILS))
1804 {
1805 fprintf (vect_dump, "no vectype for scalar type ");
1806 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1807 }
1808
1809 return false;
1810 }
1811
1812 /* FORNOW */
1813 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1814 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1815 if (nunits_in == nunits_out / 2)
1816 modifier = NARROW;
1817 else if (nunits_out == nunits_in)
1818 modifier = NONE;
1819 else if (nunits_out == nunits_in / 2)
1820 modifier = WIDEN;
1821 else
1822 return false;
1823
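  /* Illustrative example (editorial): with 128-bit vectors, float -> double
     has vectype_in = V4SF (nunits_in = 4) and vectype_out = V2DF
     (nunits_out = 2), so nunits_out == nunits_in / 2 and modifier = WIDEN;
     double -> float is the mirror image and selects NARROW.  */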
1824 if (modifier == NARROW)
1825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1826 else
1827 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1828
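  /* E.g. (editorial sketch): for the NARROW double -> float case above with
     vectorization factor 8, ncopies = 8 / 4 = 2; each copy consumes two
     input vectors and emits one narrowed vector stmt.  */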
1829 /* Multiple types in SLP are handled by creating the appropriate number of
1830 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1831 case of SLP. */
1832  if (slp_node || PURE_SLP_STMT (stmt_info))
1833    ncopies = 1;
1834
1835 /* Sanity check: make sure that at least one copy of the vectorized stmt
1836 needs to be generated. */
1837 gcc_assert (ncopies >= 1);
1838
1839 /* Supportable by target? */
1840 if ((modifier == NONE
1841       && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1842      || (modifier == WIDEN
1843 && !supportable_widening_operation (code, stmt,
1844 vectype_out, vectype_in,
1845 &decl1, &decl2,
1846 &code1, &code2,
1847 &dummy_int, &dummy))
1848 || (modifier == NARROW
1849	  && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1850 &code1, &dummy_int, &dummy)))
1851 {
1852 if (vect_print_dump_info (REPORT_DETAILS))
1853 fprintf (vect_dump, "conversion not supported by target.");
1854 return false;
1855 }
1856
1857 if (modifier != NONE)
1858 {
1859 /* FORNOW: SLP not supported. */
1860 if (STMT_SLP_TYPE (stmt_info))
1861	return false;
1862 }
1863
1864 if (!vec_stmt) /* transformation not required. */
1865 {
1866 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1867 return true;
1868 }
1869
1870 /** Transform. **/
1871 if (vect_print_dump_info (REPORT_DETAILS))
1872 fprintf (vect_dump, "transform conversion.");
1873
1874 /* Handle def. */
1875 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1876
1877 if (modifier == NONE && !slp_node)
1878 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1879
1880 prev_stmt_info = NULL;
1881 switch (modifier)
1882 {
1883 case NONE:
1884 for (j = 0; j < ncopies; j++)
1885 {
1886	  if (j == 0)
1887	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1888 else
1889 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1890
1891 builtin_decl =
1892 targetm.vectorize.builtin_conversion (code,
1893 vectype_out, vectype_in);
1894	  FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1895	    {
1896	      /* Arguments are ready.  Create the new vector stmt.  */
1897 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1898 new_temp = make_ssa_name (vec_dest, new_stmt);
1899 gimple_call_set_lhs (new_stmt, new_temp);
1900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1901 if (slp_node)
1902 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1903 }
1904
1905 if (j == 0)
1906 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1907 else
1908 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1909 prev_stmt_info = vinfo_for_stmt (new_stmt);
1910 }
1911 break;
1912
1913 case WIDEN:
1914 /* In case the vectorization factor (VF) is bigger than the number
1915 of elements that we can fit in a vectype (nunits), we have to
1916 generate more than one vector stmt - i.e - we need to "unroll"
1917 the vector stmt by a factor VF/nunits. */
1918 for (j = 0; j < ncopies; j++)
1919 {
1920 if (j == 0)
1921 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1922 else
1923 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1924
1925 /* Generate first half of the widened result: */
1926 new_stmt
1927	    = vect_gen_widened_results_half (code1, decl1,
1928 vec_oprnd0, vec_oprnd1,
1929 unary_op, vec_dest, gsi, stmt);
1930 if (j == 0)
1931 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1932 else
1933 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1934 prev_stmt_info = vinfo_for_stmt (new_stmt);
1935
1936 /* Generate second half of the widened result: */
1937 new_stmt
1938 = vect_gen_widened_results_half (code2, decl2,
1939 vec_oprnd0, vec_oprnd1,
1940 unary_op, vec_dest, gsi, stmt);
1941 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1942 prev_stmt_info = vinfo_for_stmt (new_stmt);
1943 }
1944 break;
1945
1946 case NARROW:
1947 /* In case the vectorization factor (VF) is bigger than the number
1948 of elements that we can fit in a vectype (nunits), we have to
1949 generate more than one vector stmt - i.e - we need to "unroll"
1950 the vector stmt by a factor VF/nunits. */
1951 for (j = 0; j < ncopies; j++)
1952 {
1953 /* Handle uses. */
1954 if (j == 0)
1955 {
1956 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1957 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1958 }
1959 else
1960 {
1961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1962 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1963 }
1964
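	  /* Chaining sketch (editorial): copy j thus narrows defs 2*j and
	     2*j + 1 of the chain built by vect_get_vec_def_for_stmt_copy,
	     so consecutive copies consume consecutive pairs of inputs.  */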
1965	  /* Arguments are ready.  Create the new vector stmt.  */
1966 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1967 vec_oprnd1);
1968 new_temp = make_ssa_name (vec_dest, new_stmt);
1969 gimple_assign_set_lhs (new_stmt, new_temp);
1970 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1971
1972 if (j == 0)
1973 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1974 else
1975 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1976
1977 prev_stmt_info = vinfo_for_stmt (new_stmt);
1978 }
1979
1980 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1981 }
1982
1983 if (vec_oprnds0)
1984    VEC_free (tree, heap, vec_oprnds0);
1985
1986 return true;
1987}
1988
1989
1990/* Function vectorizable_assignment.
1991
1992 Check if STMT performs an assignment (copy) that can be vectorized.
1993 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1994 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1995 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1996
1997static bool
1998vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1999 gimple *vec_stmt, slp_tree slp_node)
2000{
2001 tree vec_dest;
2002 tree scalar_dest;
2003 tree op;
2004 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2007 tree new_temp;
2008 tree def;
2009 gimple def_stmt;
2010 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2011  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2012  int ncopies;
2013  int i, j;
2014 VEC(tree,heap) *vec_oprnds = NULL;
2015 tree vop;
2016  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2017 gimple new_stmt = NULL;
2018 stmt_vec_info prev_stmt_info = NULL;
2019 enum tree_code code;
2020 tree vectype_in;
2021
2022 /* Multiple types in SLP are handled by creating the appropriate number of
2023 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2024 case of SLP. */
2025  if (slp_node || PURE_SLP_STMT (stmt_info))
2026 ncopies = 1;
2027 else
2028 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2029
2030 gcc_assert (ncopies >= 1);
2031
2032  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2033 return false;
2034
2035  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2036 return false;
2037
2038 /* Is vectorizable assignment? */
2039 if (!is_gimple_assign (stmt))
2040 return false;
2041
2042 scalar_dest = gimple_assign_lhs (stmt);
2043 if (TREE_CODE (scalar_dest) != SSA_NAME)
2044 return false;
2045
2046  code = gimple_assign_rhs_code (stmt);
2047  if (gimple_assign_single_p (stmt)
2048 || code == PAREN_EXPR
2049 || CONVERT_EXPR_CODE_P (code))
2050 op = gimple_assign_rhs1 (stmt);
2051 else
2052 return false;
2053
2054 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
2055 &def_stmt, &def, &dt[0], &vectype_in))
2056 {
2057 if (vect_print_dump_info (REPORT_DETAILS))
2058 fprintf (vect_dump, "use not simple.");
2059 return false;
2060 }
2061
2062 /* We can handle NOP_EXPR conversions that do not change the number
2063 of elements or the vector size. */
2064 if (CONVERT_EXPR_CODE_P (code)
2065 && (!vectype_in
2066 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
2067 || (GET_MODE_SIZE (TYPE_MODE (vectype))
2068 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
2069 return false;
2070
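  /* Sketch (editorial): a signed -> unsigned int conversion keeps both the
     element count and the vector size, passes this check, and is emitted
     below as a VIEW_CONVERT_EXPR; int -> char changes the element count and
     is rejected here because it is a demotion, not a plain copy.  */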
2071 if (!vec_stmt) /* transformation not required. */
2072 {
2073 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2074 if (vect_print_dump_info (REPORT_DETAILS))
2075 fprintf (vect_dump, "=== vectorizable_assignment ===");
2076 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2077 return true;
2078 }
2079
2080 /** Transform. **/
2081 if (vect_print_dump_info (REPORT_DETAILS))
2082 fprintf (vect_dump, "transform assignment.");
2083
2084 /* Handle def. */
2085 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2086
2087 /* Handle use. */
2088  for (j = 0; j < ncopies; j++)
2089    {
2090 /* Handle uses. */
2091 if (j == 0)
2092 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2093 else
2094 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2095
2096      /* Arguments are ready.  Create the new vector stmt.  */
2097      FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2098       {
2099	 if (CONVERT_EXPR_CODE_P (code))
2100	   vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2101 new_stmt = gimple_build_assign (vec_dest, vop);
2102 new_temp = make_ssa_name (vec_dest, new_stmt);
2103 gimple_assign_set_lhs (new_stmt, new_temp);
2104 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2105 if (slp_node)
2106 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2107 }
2108
2109 if (slp_node)
2110 continue;
2111
2112 if (j == 0)
2113 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2114 else
2115 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2116
2117 prev_stmt_info = vinfo_for_stmt (new_stmt);
2118 }
2119
2120 VEC_free (tree, heap, vec_oprnds);
2121 return true;
2122}
2123
2124
2125/* Function vectorizable_shift.
2126
2127 Check if STMT performs a shift operation that can be vectorized.
2128 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2129 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2130 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2131
2132static bool
2133vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2134 gimple *vec_stmt, slp_tree slp_node)
2135{
2136 tree vec_dest;
2137 tree scalar_dest;
2138 tree op0, op1 = NULL;
2139 tree vec_oprnd1 = NULL_TREE;
2140 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2141 tree vectype;
2142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2143 enum tree_code code;
2144 enum machine_mode vec_mode;
2145 tree new_temp;
2146 optab optab;
2147 int icode;
2148 enum machine_mode optab_op2_mode;
2149 tree def;
2150 gimple def_stmt;
2151 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2152 gimple new_stmt = NULL;
2153 stmt_vec_info prev_stmt_info;
2154 int nunits_in;
2155 int nunits_out;
2156 tree vectype_out;
2157 int ncopies;
2158 int j, i;
2159 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2160 tree vop0, vop1;
2161 unsigned int k;
2162  bool scalar_shift_arg = true;
2163 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2164 int vf;
2165
2166 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2167 return false;
2168
2169 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2170 return false;
2171
2172 /* Is STMT a vectorizable binary/unary operation? */
2173 if (!is_gimple_assign (stmt))
2174 return false;
2175
2176 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2177 return false;
2178
2179 code = gimple_assign_rhs_code (stmt);
2180
2181 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2182 || code == RROTATE_EXPR))
2183 return false;
2184
2185 scalar_dest = gimple_assign_lhs (stmt);
2186 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2187
2188 op0 = gimple_assign_rhs1 (stmt);
2189 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2190 &def_stmt, &def, &dt[0], &vectype))
2191 {
2192 if (vect_print_dump_info (REPORT_DETAILS))
2193 fprintf (vect_dump, "use not simple.");
2194 return false;
2195 }
2196 /* If op0 is an external or constant def use a vector type with
2197 the same size as the output vector type. */
2198 if (!vectype)
2199 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2200 if (vec_stmt)
2201 gcc_assert (vectype);
2202 if (!vectype)
2203 {
2204 if (vect_print_dump_info (REPORT_DETAILS))
2205 {
2206 fprintf (vect_dump, "no vectype for scalar type ");
2207 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2208 }
2209
2210 return false;
2211 }
2212
2213 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2214 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2215 if (nunits_out != nunits_in)
2216 return false;
2217
2218 op1 = gimple_assign_rhs2 (stmt);
2219 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2220 {
2221 if (vect_print_dump_info (REPORT_DETAILS))
2222 fprintf (vect_dump, "use not simple.");
2223 return false;
2224 }
2225
2226 if (loop_vinfo)
2227 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2228 else
2229 vf = 1;
2230
2231 /* Multiple types in SLP are handled by creating the appropriate number of
2232 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2233 case of SLP. */
2234  if (slp_node || PURE_SLP_STMT (stmt_info))
2235 ncopies = 1;
2236 else
2237 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2238
2239 gcc_assert (ncopies >= 1);
2240
2241 /* Determine whether the shift amount is a vector, or scalar. If the
2242 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2243
2244 if (dt[1] == vect_internal_def && !slp_node)
2245 scalar_shift_arg = false;
2246 else if (dt[1] == vect_constant_def
2247 || dt[1] == vect_external_def
2248 || dt[1] == vect_internal_def)
2249 {
2250 /* In SLP, need to check whether the shift count is the same,
2251 in loops if it is a constant or invariant, it is always
2252 a scalar shift. */
2253 if (slp_node)
2254 {
2255 VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2256 gimple slpstmt;
2257
2258 FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
2259 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
2260 scalar_shift_arg = false;
2261 }
2262 }
2263 else
2264 {
2265 if (vect_print_dump_info (REPORT_DETAILS))
2266 fprintf (vect_dump, "operand mode requires invariant argument.");
2267 return false;
2268 }
2269
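  /* For instance (editorial): in 'x[i] << 3' the count is a constant, so
     scalar_shift_arg stays true and the vector/scalar optab is preferred;
     in 'x[i] << y[i]' dt[1] is vect_internal_def and the vector/vector
     optab must be used.  */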
2270  /* Vector shifted by vector.  */
2271  if (!scalar_shift_arg)
2272 {
2273 optab = optab_for_tree_code (code, vectype, optab_vector);
2274 if (vect_print_dump_info (REPORT_DETAILS))
2275 fprintf (vect_dump, "vector/vector shift/rotate found.");
2276 }
2277 /* See if the machine has a vector shifted by scalar insn and if not
2278 then see if it has a vector shifted by vector insn. */
2279  else
2280 {
2281 optab = optab_for_tree_code (code, vectype, optab_scalar);
2282 if (optab
2283 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2284 {
2285 if (vect_print_dump_info (REPORT_DETAILS))
2286 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2287 }
2288 else
2289 {
2290 optab = optab_for_tree_code (code, vectype, optab_vector);
2291 if (optab
2292 && (optab_handler (optab, TYPE_MODE (vectype))
2293 != CODE_FOR_nothing))
2294 {
2295 scalar_shift_arg = false;
2296
2297 if (vect_print_dump_info (REPORT_DETAILS))
2298 fprintf (vect_dump, "vector/vector shift/rotate found.");
2299
2300 /* Unlike the other binary operators, shifts/rotates have
2301 the rhs being int, instead of the same type as the lhs,
2302 so make sure the scalar is the right type if we are
2303 dealing with vectors of short/char. */
2304 if (dt[1] == vect_constant_def)
2305 op1 = fold_convert (TREE_TYPE (vectype), op1);
2306 }
2307 }
2308 }
2309
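  /* Flow sketch (editorial): even when the vector/scalar optab was chosen
     above, the icode lookup below still decides, via insn_data's operand[2]
     mode, whether the shift count may stay scalar or must be turned into a
     vector operand during the transformation.  */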
2310 /* Supportable by target? */
2311 if (!optab)
2312 {
2313 if (vect_print_dump_info (REPORT_DETAILS))
2314 fprintf (vect_dump, "no optab.");
2315 return false;
2316 }
2317 vec_mode = TYPE_MODE (vectype);
2318 icode = (int) optab_handler (optab, vec_mode);
2319 if (icode == CODE_FOR_nothing)
2320 {
2321 if (vect_print_dump_info (REPORT_DETAILS))
2322 fprintf (vect_dump, "op not supported by target.");
2323 /* Check only during analysis. */
2324 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2325 || (vf < vect_min_worthwhile_factor (code)
2326 && !vec_stmt))
2327 return false;
2328 if (vect_print_dump_info (REPORT_DETAILS))
2329 fprintf (vect_dump, "proceeding using word mode.");
2330 }
2331
2332 /* Worthwhile without SIMD support? Check only during analysis. */
2333 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2334 && vf < vect_min_worthwhile_factor (code)
2335 && !vec_stmt)
2336 {
2337 if (vect_print_dump_info (REPORT_DETAILS))
2338 fprintf (vect_dump, "not worthwhile without SIMD support.");
2339 return false;
2340 }
2341
2342 if (!vec_stmt) /* transformation not required. */
2343 {
2344 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2345 if (vect_print_dump_info (REPORT_DETAILS))
2346 fprintf (vect_dump, "=== vectorizable_shift ===");
2347 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2348 return true;
2349 }
2350
2351 /** Transform. **/
2352
2353 if (vect_print_dump_info (REPORT_DETAILS))
2354 fprintf (vect_dump, "transform binary/unary operation.");
2355
2356 /* Handle def. */
2357 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2358
2359 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2360 created in the previous stages of the recursion, so no allocation is
2361 needed, except for the case of shift with scalar shift argument. In that
2362 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2363 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2364 In case of loop-based vectorization we allocate VECs of size 1. We
2365 allocate VEC_OPRNDS1 only in case of binary operation. */
2366 if (!slp_node)
2367 {
2368 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2369 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2370 }
2371 else if (scalar_shift_arg)
2372 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2373
2374 prev_stmt_info = NULL;
2375 for (j = 0; j < ncopies; j++)
2376 {
2377 /* Handle uses. */
2378 if (j == 0)
2379 {
2380 if (scalar_shift_arg)
2381 {
2382 /* Vector shl and shr insn patterns can be defined with scalar
2383 operand 2 (shift operand). In this case, use constant or loop
2384 invariant op1 directly, without extending it to vector mode
2385 first. */
2386 optab_op2_mode = insn_data[icode].operand[2].mode;
2387 if (!VECTOR_MODE_P (optab_op2_mode))
2388 {
2389 if (vect_print_dump_info (REPORT_DETAILS))
2390 fprintf (vect_dump, "operand 1 using scalar mode.");
2391 vec_oprnd1 = op1;
2392 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2393 if (slp_node)
2394 {
2395 /* Store vec_oprnd1 for every vector stmt to be created
2396 for SLP_NODE. We check during the analysis that all
2397 the shift arguments are the same.
2398 TODO: Allow different constants for different vector
2399 stmts generated for an SLP instance. */
2400 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2401 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2402 }
2403 }
2404 }
2405
2406 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2407 (a special case for certain kind of vector shifts); otherwise,
2408 operand 1 should be of a vector type (the usual case). */
2409 if (vec_oprnd1)
2410 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2411 slp_node);
2412 else
2413 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2414 slp_node);
2415 }
2416 else
2417 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2418
2419 /* Arguments are ready. Create the new vector stmt. */
2420 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2421 {
2422 vop1 = VEC_index (tree, vec_oprnds1, i);
2423 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2424 new_temp = make_ssa_name (vec_dest, new_stmt);
2425 gimple_assign_set_lhs (new_stmt, new_temp);
2426 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2427 if (slp_node)
2428 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2429 }
2430
2431 if (slp_node)
2432 continue;
2433
2434 if (j == 0)
2435 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2436 else
2437 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2438 prev_stmt_info = vinfo_for_stmt (new_stmt);
2439 }
2440
2441 VEC_free (tree, heap, vec_oprnds0);
2442 VEC_free (tree, heap, vec_oprnds1);
2443
2444 return true;
2445}
2446
2447
2448/* Function vectorizable_operation.
2449
2450 Check if STMT performs a binary, unary or ternary operation that can
2451 be vectorized.
2452   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2453 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2454 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2455
2456static bool
2457vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2458 gimple *vec_stmt, slp_tree slp_node)
2459{
2460 tree vec_dest;
2461 tree scalar_dest;
2462  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2463  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2464  tree vectype;
2465 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2466 enum tree_code code;
2467 enum machine_mode vec_mode;
2468 tree new_temp;
2469 int op_type;
2470 optab optab;
2471 int icode;
2472 tree def;
2473 gimple def_stmt;
2474 enum vect_def_type dt[3]
2475 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2476 gimple new_stmt = NULL;
2477 stmt_vec_info prev_stmt_info;
2478  int nunits_in;
2479 int nunits_out;
2480 tree vectype_out;
2481 int ncopies;
2482 int j, i;
2483 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2484 tree vop0, vop1, vop2;
2485 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2486 int vf;
2487
2488  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2489 return false;
2490
2491  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2492 return false;
2493
2494 /* Is STMT a vectorizable binary/unary operation? */
2495 if (!is_gimple_assign (stmt))
2496 return false;
2497
2498 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2499 return false;
2500
2501 code = gimple_assign_rhs_code (stmt);
2502
2503 /* For pointer addition, we should use the normal plus for
2504 the vector addition. */
2505 if (code == POINTER_PLUS_EXPR)
2506 code = PLUS_EXPR;
2507
2508 /* Support only unary or binary operations. */
2509 op_type = TREE_CODE_LENGTH (code);
2510  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2511 {
2512 if (vect_print_dump_info (REPORT_DETAILS))
2513 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2514 op_type);
2515 return false;
2516 }
2517
2518 scalar_dest = gimple_assign_lhs (stmt);
2519 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2520
2521  op0 = gimple_assign_rhs1 (stmt);
2522 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2523 &def_stmt, &def, &dt[0], &vectype))
2524 {
2525 if (vect_print_dump_info (REPORT_DETAILS))
2526 fprintf (vect_dump, "use not simple.");
2527 return false;
2528 }
2529 /* If op0 is an external or constant def use a vector type with
2530 the same size as the output vector type. */
2531 if (!vectype)
2532 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
7d8930a0
IR
2533 if (vec_stmt)
2534 gcc_assert (vectype);
2535 if (!vectype)
2536 {
2537 if (vect_print_dump_info (REPORT_DETAILS))
2538 {
2539 fprintf (vect_dump, "no vectype for scalar type ");
2540 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2541 }
2542
2543 return false;
2544 }
2545
2546 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2547 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2548 if (nunits_out != nunits_in)
2549 return false;
2550
2551  if (op_type == binary_op || op_type == ternary_op)
2552 {
2553 op1 = gimple_assign_rhs2 (stmt);
2554      if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2555			       &dt[1]))
2556 {
2557 if (vect_print_dump_info (REPORT_DETAILS))
2558 fprintf (vect_dump, "use not simple.");
2559 return false;
2560 }
2561 }
2562 if (op_type == ternary_op)
2563 {
2564 op2 = gimple_assign_rhs3 (stmt);
2565 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2566 &dt[2]))
2567 {
2568 if (vect_print_dump_info (REPORT_DETAILS))
2569 fprintf (vect_dump, "use not simple.");
2570 return false;
2571 }
2572 }
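  /* Example of the ternary case (editorial): a fused multiply-add such as
     FMA_EXPR carries three operands, so op0, op1 and op2 must each be a
     simple use before vectorization can proceed.  */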
2573
2574 if (loop_vinfo)
2575 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2576 else
2577 vf = 1;
2578
2579 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 2580 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
b690cc0f 2581 case of SLP. */
437f4a00 2582 if (slp_node || PURE_SLP_STMT (stmt_info))
b690cc0f
RG
2583 ncopies = 1;
2584 else
2585 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2586
2587 gcc_assert (ncopies >= 1);
2588
2589  /* Shifts are handled in vectorizable_shift ().  */
2590 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2591 || code == RROTATE_EXPR)
2592    return false;
2593
2594  optab = optab_for_tree_code (code, vectype, optab_default);
2595
2596 /* Supportable by target? */
2597 if (!optab)
2598 {
2599 if (vect_print_dump_info (REPORT_DETAILS))
2600 fprintf (vect_dump, "no optab.");
2601 return false;
2602 }
2603 vec_mode = TYPE_MODE (vectype);
2604  icode = (int) optab_handler (optab, vec_mode);
2605 if (icode == CODE_FOR_nothing)
2606 {
2607 if (vect_print_dump_info (REPORT_DETAILS))
2608 fprintf (vect_dump, "op not supported by target.");
2609 /* Check only during analysis. */
2610 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2611	  || (vf < vect_min_worthwhile_factor (code)
2612 && !vec_stmt))
2613 return false;
2614 if (vect_print_dump_info (REPORT_DETAILS))
2615 fprintf (vect_dump, "proceeding using word mode.");
2616 }
2617
ff802fa1 2618 /* Worthwhile without SIMD support? Check only during analysis. */
ebfd146a 2619 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
a70d6342 2620 && vf < vect_min_worthwhile_factor (code)
ebfd146a
IR
2621 && !vec_stmt)
2622 {
2623 if (vect_print_dump_info (REPORT_DETAILS))
2624 fprintf (vect_dump, "not worthwhile without SIMD support.");
2625 return false;
2626 }
2627
2628 if (!vec_stmt) /* transformation not required. */
2629 {
2630 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2631 if (vect_print_dump_info (REPORT_DETAILS))
2632 fprintf (vect_dump, "=== vectorizable_operation ===");
2633 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2634 return true;
2635 }
2636
2637 /** Transform. **/
2638
2639 if (vect_print_dump_info (REPORT_DETAILS))
2640 fprintf (vect_dump, "transform binary/unary operation.");
2641
2642 /* Handle def. */
2643 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2644
2645  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
2646     created in the previous stages of the recursion, so no allocation is
2647     needed, except for the case of shift with scalar shift argument.  In that
2648 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2649 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2650     In case of loop-based vectorization we allocate VECs of size 1.  We
2651     allocate VEC_OPRNDS1 only in case of binary operation.  */
2652 if (!slp_node)
2653 {
2654 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2655      if (op_type == binary_op || op_type == ternary_op)
2656	vec_oprnds1 = VEC_alloc (tree, heap, 1);
2657 if (op_type == ternary_op)
2658 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2659    }
2660
2661 /* In case the vectorization factor (VF) is bigger than the number
2662 of elements that we can fit in a vectype (nunits), we have to generate
2663 more than one vector stmt - i.e - we need to "unroll" the
2664     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
2665     from one copy of the vector stmt to the next, in the field
2666     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
2667     stages to find the correct vector defs to be used when vectorizing
2668 stmts that use the defs of the current stmt. The example below
2669 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2670 we need to create 4 vectorized stmts):
2671
2672 before vectorization:
2673 RELATED_STMT VEC_STMT
2674 S1: x = memref - -
2675 S2: z = x + 1 - -
2676
2677 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2678 there):
2679 RELATED_STMT VEC_STMT
2680 VS1_0: vx0 = memref0 VS1_1 -
2681 VS1_1: vx1 = memref1 VS1_2 -
2682 VS1_2: vx2 = memref2 VS1_3 -
2683 VS1_3: vx3 = memref3 - -
2684 S1: x = load - VS1_0
2685 S2: z = x + 1 - -
2686
2687 step2: vectorize stmt S2 (done here):
2688 To vectorize stmt S2 we first need to find the relevant vector
2689        def for the first operand 'x'.  This is, as usual, obtained from
2690        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2691 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2692 relevant vector def 'vx0'. Having found 'vx0' we can generate
2693 the vector stmt VS2_0, and as usual, record it in the
2694 STMT_VINFO_VEC_STMT of stmt S2.
2695 When creating the second copy (VS2_1), we obtain the relevant vector
2696 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2697 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2698 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2699        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2700        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
2701 chain of stmts and pointers:
2702 RELATED_STMT VEC_STMT
2703 VS1_0: vx0 = memref0 VS1_1 -
2704 VS1_1: vx1 = memref1 VS1_2 -
2705 VS1_2: vx2 = memref2 VS1_3 -
2706 VS1_3: vx3 = memref3 - -
2707 S1: x = load - VS1_0
2708 VS2_0: vz0 = vx0 + v1 VS2_1 -
2709 VS2_1: vz1 = vx1 + v1 VS2_2 -
2710 VS2_2: vz2 = vx2 + v1 VS2_3 -
2711 VS2_3: vz3 = vx3 + v1 - -
2712 S2: z = x + 1 - VS2_0 */
2713
2714 prev_stmt_info = NULL;
2715 for (j = 0; j < ncopies; j++)
2716 {
2717 /* Handle uses. */
2718 if (j == 0)
2719 {
2720	  if (op_type == binary_op || op_type == ternary_op)
2721	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2722 slp_node);
2723 else
2724	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2725			       slp_node);
2726 if (op_type == ternary_op)
2727 {
2728 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2729 VEC_quick_push (tree, vec_oprnds2,
2730 vect_get_vec_def_for_operand (op2, stmt, NULL));
2731 }
2732 }
2733 else
2734 {
2735 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2736 if (op_type == ternary_op)
2737 {
2738 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2739 VEC_quick_push (tree, vec_oprnds2,
2740 vect_get_vec_def_for_stmt_copy (dt[2],
2741 vec_oprnd));
2742 }
2743 }
2744
2745      /* Arguments are ready.  Create the new vector stmt.  */
2746      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2747        {
2748 vop1 = ((op_type == binary_op || op_type == ternary_op)
2749 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2750 vop2 = ((op_type == ternary_op)
2751 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2752 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2753 vop0, vop1, vop2);
2754 new_temp = make_ssa_name (vec_dest, new_stmt);
2755 gimple_assign_set_lhs (new_stmt, new_temp);
2756 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2757 if (slp_node)
2758 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2759 }
2760
2761 if (slp_node)
2762 continue;
2763
2764 if (j == 0)
2765 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2766 else
2767 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2768 prev_stmt_info = vinfo_for_stmt (new_stmt);
2769 }
2770
2771 VEC_free (tree, heap, vec_oprnds0);
2772 if (vec_oprnds1)
2773 VEC_free (tree, heap, vec_oprnds1);
2774 if (vec_oprnds2)
2775 VEC_free (tree, heap, vec_oprnds2);
2776
2777 return true;
2778}
2779
2780
2781/* Get vectorized definitions for loop-based vectorization.  For the first
2782 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2783 scalar operand), and for the rest we get a copy with
2784 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2785 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2786 The vectors are collected into VEC_OPRNDS. */
2787
2788static void
2789vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2790 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2791{
2792 tree vec_oprnd;
2793
2794 /* Get first vector operand. */
2795 /* All the vector operands except the very first one (that is scalar oprnd)
2796 are stmt copies. */
2797  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2798 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2799 else
2800 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2801
2802 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2803
2804 /* Get second vector operand. */
2805 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2806 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2807
2808 *oprnd = vec_oprnd;
2809
2810  /* For conversion in multiple steps, continue to get operands
2811 recursively. */
2812 if (multi_step_cvt)
2813    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2814}
2815
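/* Count sketch (editorial): each invocation pushes two defs and recurses
   MULTI_STEP_CVT more times, so a call with vect_pow2 (multi_step_cvt) - 1
   collects vect_pow2 (multi_step_cvt) * 2 defs -- exactly the allocation
   made by the caller in vectorizable_type_demotion below.  */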
2816
2817/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2818   For multi-step conversions store the resulting vectors and call the function
2819 recursively. */
2820
2821static void
2822vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2823 int multi_step_cvt, gimple stmt,
2824 VEC (tree, heap) *vec_dsts,
2825 gimple_stmt_iterator *gsi,
2826 slp_tree slp_node, enum tree_code code,
2827 stmt_vec_info *prev_stmt_info)
2828{
2829 unsigned int i;
2830 tree vop0, vop1, new_tmp, vec_dest;
2831 gimple new_stmt;
2832 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2833
2834  vec_dest = VEC_pop (tree, vec_dsts);
2835
2836 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2837 {
2838 /* Create demotion operation. */
2839 vop0 = VEC_index (tree, *vec_oprnds, i);
2840 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2841 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2842 new_tmp = make_ssa_name (vec_dest, new_stmt);
2843 gimple_assign_set_lhs (new_stmt, new_tmp);
2844 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2845
2846 if (multi_step_cvt)
2847 /* Store the resulting vector for next recursive call. */
2848	VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2849 else
2850 {
2851	  /* This is the last step of the conversion sequence.  Store the
2852 vectors in SLP_NODE or in vector info of the scalar statement
2853 (or in STMT_VINFO_RELATED_STMT chain). */
2854 if (slp_node)
2855 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2856 else
2857 {
2858 if (!*prev_stmt_info)
2859 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2860 else
2861 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2862
2863 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2864 }
2865 }
2866 }
2867
2868 /* For multi-step demotion operations we first generate demotion operations
2869     from the source type to the intermediate types, and then combine the
2870 results (stored in VEC_OPRNDS) in demotion operation to the destination
2871 type. */
2872 if (multi_step_cvt)
2873 {
2874      /* At each level of recursion we have half of the operands we had at the
2875 previous level. */
2876 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2877      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2878 stmt, vec_dsts, gsi, slp_node,
2879 code, prev_stmt_info);
2880 }
2881}
2882
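/* Worked example (editorial): narrowing int -> char in two steps with
   128-bit vectors first pairs four V4SI operands into two V8HI vectors,
   then pairs those into one V16QI result; each recursion level halves the
   operand count, as noted above.  */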
2883
2884/* Function vectorizable_type_demotion
2885
2886 Check if STMT performs a binary or unary operation that involves
2887 type demotion, and if it can be vectorized.
2888 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2889 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2890 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2891
2892static bool
2893vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2894 gimple *vec_stmt, slp_tree slp_node)
2895{
2896 tree vec_dest;
2897 tree scalar_dest;
2898 tree op0;
2899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2900 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2901 enum tree_code code, code1 = ERROR_MARK;
2902 tree def;
2903 gimple def_stmt;
2904 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2905 stmt_vec_info prev_stmt_info;
2906 int nunits_in;
2907 int nunits_out;
2908 tree vectype_out;
2909 int ncopies;
2910 int j, i;
2911 tree vectype_in;
2912 int multi_step_cvt = 0;
2913 VEC (tree, heap) *vec_oprnds0 = NULL;
2914 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2915 tree last_oprnd, intermediate_type;
2916
2917 /* FORNOW: not supported by basic block SLP vectorization. */
2918 gcc_assert (loop_vinfo);
2919
2920 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2921 return false;
2922
2923  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2924 return false;
2925
2926 /* Is STMT a vectorizable type-demotion operation? */
2927 if (!is_gimple_assign (stmt))
2928 return false;
2929
2930 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2931 return false;
2932
2933 code = gimple_assign_rhs_code (stmt);
2934 if (!CONVERT_EXPR_CODE_P (code))
2935 return false;
2936
2937 scalar_dest = gimple_assign_lhs (stmt);
2938 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2939
2940 /* Check the operands of the operation. */
2941  op0 = gimple_assign_rhs1 (stmt);
2942 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2943 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2944 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2945 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2946 && CONVERT_EXPR_CODE_P (code))))
2947 return false;
2948 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2949 &def_stmt, &def, &dt[0], &vectype_in))
2950 {
2951 if (vect_print_dump_info (REPORT_DETAILS))
2952 fprintf (vect_dump, "use not simple.");
2953 return false;
2954 }
2955 /* If op0 is an external def use a vector type with the
2956 same size as the output vector type if possible. */
2957 if (!vectype_in)
2958 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2959 if (vec_stmt)
2960 gcc_assert (vectype_in);
2961  if (!vectype_in)
2962 {
2963 if (vect_print_dump_info (REPORT_DETAILS))
2964 {
2965 fprintf (vect_dump, "no vectype for scalar type ");
2966 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2967 }
2968
2969 return false;
2970 }
2971
2972  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2973 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2974 if (nunits_in >= nunits_out)
2975 return false;
2976
2977 /* Multiple types in SLP are handled by creating the appropriate number of
2978     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
2979     case of SLP.  */
2980  if (slp_node || PURE_SLP_STMT (stmt_info))
2981 ncopies = 1;
2982 else
2983 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2984 gcc_assert (ncopies >= 1);
2985
2986  /* Supportable by target?  */
2987 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2988 &code1, &multi_step_cvt, &interm_types))
2989 return false;
2990
2991 if (!vec_stmt) /* transformation not required. */
2992 {
2993 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2994 if (vect_print_dump_info (REPORT_DETAILS))
2995 fprintf (vect_dump, "=== vectorizable_demotion ===");
2996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2997 return true;
2998 }
2999
3000 /** Transform. **/
3001 if (vect_print_dump_info (REPORT_DETAILS))
3002 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3003 ncopies);
3004
3005  /* In case of multi-step demotion, we first generate demotion operations to
3006     the intermediate types, and then from those types to the final one.
ebfd146a 3007 We create vector destinations for the intermediate type (TYPES) received
3008     from supportable_narrowing_operation, and store them in the correct order
3009 for future use in vect_create_vectorized_demotion_stmts(). */
3010 if (multi_step_cvt)
3011 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3012 else
3013 vec_dsts = VEC_alloc (tree, heap, 1);
3014
3015 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3016 VEC_quick_push (tree, vec_dsts, vec_dest);
3017
3018 if (multi_step_cvt)
3019 {
3020      for (i = VEC_length (tree, interm_types) - 1;
3021 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3022 {
3023	  vec_dest = vect_create_destination_var (scalar_dest,
3024 intermediate_type);
3025 VEC_quick_push (tree, vec_dsts, vec_dest);
3026 }
3027 }
3028
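  /* Ordering sketch (editorial): VEC_pop in
     vect_create_vectorized_demotion_stmts therefore sees the destination of
     the first demotion step (the widest intermediate type) first and the
     final output type last.  */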
3029 /* In case the vectorization factor (VF) is bigger than the number
3030 of elements that we can fit in a vectype (nunits), we have to generate
3031 more than one vector stmt - i.e - we need to "unroll" the
3032 vector stmt by a factor VF/nunits. */
3033 last_oprnd = op0;
3034 prev_stmt_info = NULL;
3035 for (j = 0; j < ncopies; j++)
3036 {
3037 /* Handle uses. */
3038 if (slp_node)
3039        vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
3040 else
3041 {
3042 VEC_free (tree, heap, vec_oprnds0);
3043 vec_oprnds0 = VEC_alloc (tree, heap,
3044 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
3045          vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3046 vect_pow2 (multi_step_cvt) - 1);
3047 }
3048
3049      /* Arguments are ready.  Create the new vector stmts.  */
3050      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3051      vect_create_vectorized_demotion_stmts (&vec_oprnds0,
3052					     multi_step_cvt, stmt, tmp_vec_dsts,
3053					     gsi, slp_node, code1,
3054 &prev_stmt_info);
3055 }
3056
3057 VEC_free (tree, heap, vec_oprnds0);
3058 VEC_free (tree, heap, vec_dsts);
3059 VEC_free (tree, heap, tmp_vec_dsts);
3060 VEC_free (tree, heap, interm_types);
3061
3062 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3063 return true;
3064}
3065
3066
3067/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3068   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
3069 the resulting vectors and call the function recursively. */
3070
3071static void
3072vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
3073 VEC (tree, heap) **vec_oprnds1,
3074 int multi_step_cvt, gimple stmt,
3075 VEC (tree, heap) *vec_dsts,
3076 gimple_stmt_iterator *gsi,
3077 slp_tree slp_node, enum tree_code code1,
3078                                        enum tree_code code2, tree decl1,
3079 tree decl2, int op_type,
3080 stmt_vec_info *prev_stmt_info)
3081{
3082 int i;
3083 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
3084 gimple new_stmt1, new_stmt2;
3085 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3086 VEC (tree, heap) *vec_tmp;
3087
3088 vec_dest = VEC_pop (tree, vec_dsts);
3089 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
3090
3091  FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
3092 {
3093 if (op_type == binary_op)
3094 vop1 = VEC_index (tree, *vec_oprnds1, i);
3095 else
3096 vop1 = NULL_TREE;
3097
3098 /* Generate the two halves of promotion operation. */
3099      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3100 op_type, vec_dest, gsi, stmt);
3101 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3102 op_type, vec_dest, gsi, stmt);
3103 if (is_gimple_call (new_stmt1))
3104 {
3105 new_tmp1 = gimple_call_lhs (new_stmt1);
3106 new_tmp2 = gimple_call_lhs (new_stmt2);
3107 }
3108 else
3109 {
3110 new_tmp1 = gimple_assign_lhs (new_stmt1);
3111 new_tmp2 = gimple_assign_lhs (new_stmt2);
3112 }
3113
3114 if (multi_step_cvt)
3115 {
3116 /* Store the results for the recursive call. */
3117 VEC_quick_push (tree, vec_tmp, new_tmp1);
3118 VEC_quick_push (tree, vec_tmp, new_tmp2);
3119 }
3120 else
3121 {
3122	  /* Last step of promotion sequence - store the results.  */
3123 if (slp_node)
3124 {
3125 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3126 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3127 }
3128 else
3129 {
3130 if (!*prev_stmt_info)
3131 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3132 else
3133 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3134
3135 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3136 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3137 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3138 }
3139 }
3140 }
3141
3142 if (multi_step_cvt)
3143 {
3144      /* For a multi-step promotion operation we call the function
3145         recursively for every stage.  We start from the input type,
3146 create promotion operations to the intermediate types, and then
3147 create promotions to the output type. */
3148 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3149 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3150 multi_step_cvt - 1, stmt,
3151 vec_dsts, gsi, slp_node, code1,
3152 code2, decl2, decl2, op_type,
3153 prev_stmt_info);
3154 }
3155
3156 VEC_free (tree, heap, vec_tmp);
3157}
3158
3159
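/* Editorial note: the promotion helpers above mirror the demotion case --
   e.g. promoting char -> int in two steps doubles the vector count at each
   level, one V16QI operand yielding two V8HI temporaries and finally four
   V4SI results.  */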
3160/* Function vectorizable_type_promotion
3161
3162 Check if STMT performs a binary or unary operation that involves
3163 type promotion, and if it can be vectorized.
3164 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3165 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3166 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3167
3168static bool
3169vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3170 gimple *vec_stmt, slp_tree slp_node)
3171{
3172 tree vec_dest;
3173 tree scalar_dest;
3174 tree op0, op1 = NULL;
3175 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3177 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3178 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3179 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3180  int op_type;
3181 tree def;
3182 gimple def_stmt;
3183 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3184 stmt_vec_info prev_stmt_info;
3185 int nunits_in;
3186 int nunits_out;
3187 tree vectype_out;
3188 int ncopies;
3189 int j, i;
3190 tree vectype_in;
3191 tree intermediate_type = NULL_TREE;
3192 int multi_step_cvt = 0;
3193 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3194 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3195
3196 /* FORNOW: not supported by basic block SLP vectorization. */
3197 gcc_assert (loop_vinfo);
3198
3199 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3200 return false;
3201
3202  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3203 return false;
3204
3205 /* Is STMT a vectorizable type-promotion operation? */
3206 if (!is_gimple_assign (stmt))
3207 return false;
3208
3209 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3210 return false;
3211
3212 code = gimple_assign_rhs_code (stmt);
3213 if (!CONVERT_EXPR_CODE_P (code)
3214 && code != WIDEN_MULT_EXPR)
3215 return false;
3216
3217 scalar_dest = gimple_assign_lhs (stmt);
3218 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3219
3220 /* Check the operands of the operation. */
3221  op0 = gimple_assign_rhs1 (stmt);
3222 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3223 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3224 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3225 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3226 && CONVERT_EXPR_CODE_P (code))))
3227 return false;
3228 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3229 &def_stmt, &def, &dt[0], &vectype_in))
3230 {
3231 if (vect_print_dump_info (REPORT_DETAILS))
3232 fprintf (vect_dump, "use not simple.");
3233 return false;
3234 }
3235 /* If op0 is an external or constant def use a vector type with
3236 the same size as the output vector type. */
3237 if (!vectype_in)
3238 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3239 if (vec_stmt)
3240 gcc_assert (vectype_in);
3241  if (!vectype_in)
3242 {
3243 if (vect_print_dump_info (REPORT_DETAILS))
3244 {
3245 fprintf (vect_dump, "no vectype for scalar type ");
3246 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3247 }
3248
3249 return false;
3250 }
3251
3252  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3253 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3254 if (nunits_in <= nunits_out)
3255 return false;
3256
3257 /* Multiple types in SLP are handled by creating the appropriate number of
3258     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
3259     case of SLP.  */
3260  if (slp_node || PURE_SLP_STMT (stmt_info))
3261 ncopies = 1;
3262 else
3263 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3264
3265 gcc_assert (ncopies >= 1);
3266
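  /* Illustrative example: with VF = 8 and a V8HI input type, NCOPIES is
     1, but a widening operation still produces two output vectors per
     copy - supportable_widening_operation() returns a lo/hi pair of
     codes (e.g. VEC_WIDEN_MULT_LO_EXPR and VEC_WIDEN_MULT_HI_EXPR),
     each yielding one V4SI vector from one half of the input.  */
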
  op_type = TREE_CODE_LENGTH (code);
  if (op_type == binary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }
    }

  /* Supportable by target?  */
  if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                       &decl1, &decl2, &code1, &code2,
                                       &multi_step_cvt, &interm_types))
    return false;

  /* Binary widening operation can only be supported directly by the
     architecture.  */
  gcc_assert (!(multi_step_cvt && op_type == binary_op));

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_promotion ===");
      vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
             ncopies);

  /* Handle def.  */
  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from those types to the final one.
     We store vector destination in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are
     created according to TYPES received from
     supportable_widening_operation().  */
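
  /* Illustrative example: a char -> int promotion with V16QI inputs is
     a two-step conversion, QI -> HI -> SI.  INTERM_TYPES then holds the
     single intermediate vector type (V8HI), and each input vector is
     widened into two V8HI vectors, each of which is widened again into
     two V4SI vectors.  */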
  if (multi_step_cvt)
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
  else
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

  if (multi_step_cvt)
    {
      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          VEC_quick_push (tree, vec_dsts, vec_dest);
        }
    }

  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap,
                               (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
      if (op_type == binary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (slp_node)
            vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
                               &vec_oprnds1, -1);
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
              if (op_type == binary_op)
                {
                  vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                }
            }
        }
      else
        {
          vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
          VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
          if (op_type == binary_op)
            {
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
              VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
            }
        }

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
                                              multi_step_cvt, stmt,
                                              tmp_vec_dsts,
                                              gsi, slp_node, code1, code2,
                                              decl1, decl2, op_type,
                                              &prev_stmt_info);
    }

  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);
  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  return true;
}


/* Function vectorizable_store.

   Check if STMT defines a non-scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool strided_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
  bool inv_p;
  VEC(tree,heap) *vec_oprnds = NULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree aggr_type;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* The scalar rhs type needs to be trivially convertible to the vector
     component type.  This should always be the case.  */
  elem_type = TREE_TYPE (vectype);
  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");
      return false;
    }

  vec_mode = TYPE_MODE (vectype);
  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "negative step for store.");
      return false;
    }

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_strided_store_supported (vectype, group_size))
            return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
                                       &def, &dt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "use not simple.");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }
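
  /* Illustrative example: the two stores in

       for (i = 0; i < n; i++)
         {
           a[2*i] = x[i];
           a[2*i+1] = y[i];
         }

     form one interleaving chain with GROUP_SIZE 2; the store with the
     lowest offset (a[2*i]) is GROUP_FIRST_ELEMENT, and the leader checks
     the operands of the whole group above.  */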

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (strided_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          strided_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform store. ncopies = %d", ncopies);

  dr_chain = VEC_alloc (tree, heap, group_size);
  oprnds = VEC_alloc (tree, heap, group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;
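
  /* Illustrative example: for a group of two V4SI vectors, AGGR_TYPE is
     the array type int[8], and the IFN_STORE_LANES call emitted below is
     expected to map to a single interleaving store-multiple instruction
     (such as NEON's vst2) on targets that provide one.  */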

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit strided access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
        VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
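
  /* Illustrative example: for GROUP_SIZE 2, interleaving {x0,x1,x2,x3}
     with {y0,y1,y2,y3} yields {x0,y0,x1,y1} and {x2,y2,x3,y3}; which of
     the high/low permutes produces which half depends on the target's
     endianness.  */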

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;
      gimple ptr_incr;

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
                                 NULL, -1);

              vec_oprnd = VEC_index (tree, vec_oprnds, 0);
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  VEC_quick_push (tree, dr_chain, vec_oprnd);
                  VEC_quick_push (tree, oprnds, vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
                                                  NULL_TREE, &dummy, gsi,
                                                  &ptr_incr, false, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = VEC_index (tree, oprnds, i);
              vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
                                  &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              VEC_replace (tree, dr_chain, i, vec_oprnd);
              VEC_replace (tree, oprnds, i, vec_oprnd);
            }
          dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                         TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = VEC_index (tree, dr_chain, i);
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);
        }
      else
        {
          new_stmt = NULL;
          if (strided_store)
            {
              result_chain = VEC_alloc (tree, heap, group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              struct ptr_info_def *pi;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = VEC_index (tree, vec_oprnds, i);
              else if (strided_store)
                /* For strided stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = VEC_index (tree, result_chain, i);

              data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
                                 build_int_cst (reference_alias_ptr_type
                                                (DR_REF (first_dr)), 0));
              pi = get_ptr_info (dataref_ptr);
              pi->align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                pi->misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  pi->align = TYPE_ALIGN_UNIT (elem_type);
                  pi->misalign = 0;
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  pi->misalign = DR_MISALIGNMENT (first_dr);
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              mark_symbols_for_renaming (new_stmt);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  VEC_free (tree, heap, dr_chain);
  VEC_free (tree, heap, oprnds);
  if (result_chain)
    VEC_free (tree, heap, result_chain);
  if (vec_oprnds)
    VEC_free (tree, heap, vec_oprnds);

  return true;
}

/* Given a vector type VECTYPE returns a builtin DECL to be used
   for vector permutation and stores a mask into *MASK that implements
   reversal of the vector elements.  If that is impossible to do
   returns NULL (and *MASK is unchanged).  */

static tree
perm_mask_for_reverse (tree vectype, tree *mask)
{
  tree builtin_decl;
  tree mask_element_type, mask_type;
  tree mask_vec = NULL;
  int i;
  int nunits;

  if (!targetm.vectorize.builtin_vec_perm)
    return NULL;

  builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
                                                     &mask_element_type);
  if (!builtin_decl || !mask_element_type)
    return NULL;

  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (!mask_type
      || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
    return NULL;

  for (i = 0; i < nunits; i++)
    mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
  mask_vec = build_vector (mask_type, mask_vec);

  if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
    return NULL;

  if (mask)
    *mask = mask_vec;
  return builtin_decl;
}
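
/* Illustrative example: for V4SI the loop above builds the mask
   {3, 2, 1, 0} (tree_cons prepends, so the indices end up reversed),
   which makes the target's vec_perm builtin select the elements of its
   input back to front.  */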

/* Given a vector variable X, that was generated for the scalar LHS of
   STMT, generate instructions to reverse the vector elements of X,
   insert them at *GSI and return the permuted vector variable.  */

static tree
reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree mask_vec, builtin_decl;
  tree perm_dest, data_ref;
  gimple perm_stmt;

  builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
  if (!useless_type_conversion_p (vectype,
                                  TREE_TYPE (TREE_TYPE (builtin_decl))))
    {
      tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
      tem = make_ssa_name (tem, perm_stmt);
      gimple_call_set_lhs (perm_stmt, tem);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
      perm_stmt = gimple_build_assign (NULL_TREE,
                                       build1 (VIEW_CONVERT_EXPR,
                                               vectype, tem));
    }
  data_ref = make_ssa_name (perm_dest, perm_stmt);
  gimple_set_lhs (perm_stmt, data_ref);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}

/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  tree new_temp;
  enum machine_mode mode;
  gimple new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  gimple ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j, group_size;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gimple phi = NULL;
  VEC(tree,heap) *dr_chain = NULL;
  bool strided_load = false;
  bool load_lanes_p = false;
  gimple first_stmt;
  tree scalar_type;
  bool inv_p;
  bool negative;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
  if (negative && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types with negative step.");
      return false;
    }

  scalar_type = TREE_TYPE (DR_REF (dr));
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Aligned load, but unsupported type.");
      return false;
    }

  /* The vector component type needs to be trivially convertible to the
     scalar lhs.  This should always be the case.  */
  elem_type = TREE_TYPE (vectype);
  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_load = true;
      /* FORNOW */
      gcc_assert (! nested_in_vect_loop);

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_strided_load_supported (vectype, group_size))
            return false;
        }
    }

  if (negative)
    {
      gcc_assert (!strided_load);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step but alignment required.");
          return false;
        }
      if (!perm_mask_for_reverse (vectype, NULL))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step and reversing not supported.");
          return false;
        }
    }
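
  /* Illustrative example: in

       for (i = 0; i < n; i++)
         a[i] = b[n-1-i];

     the load from b has a negative step.  The vectorized code loads one
     whole vector per iteration (offset by nunits-1 elements, see the
     OFFSET adjustment below) and then reverses its elements with
     reverse_vec_elements().  */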

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
      return true;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load. ncopies = %d", ncopies);

  /** Transform.  **/

  if (strided_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          strided_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
            slp_perm = true;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit strided access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
     VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

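  /* Illustrative example: for a group of two V4SI loads, AGGR_TYPE is
     int[8]; the IFN_LOAD_LANES call emitted below reads the whole array
     and de-interleaves it into two vectors in one instruction (such as
     NEON's vld2) on targets that provide one.  */
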
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                                offset, &dummy, gsi,
                                                &ptr_incr, false, &inv_p);
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (strided_load || slp_perm)
        dr_chain = VEC_alloc (tree, heap, vec_num);

      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              VEC_quick_push (tree, dr_chain, new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_strided_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    struct ptr_info_def *pi;
                    data_ref
                      = build2 (MEM_REF, vectype, dataref_ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    pi = get_ptr_info (dataref_ptr);
                    pi->align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        pi->misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->align = TYPE_ALIGN_UNIT (elem_type);
                        pi->misalign = 0;
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->misalign = DR_MISALIGNMENT (first_dr);
                      }
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;
                    tree vs_minus_1;

                    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                  build_int_cst
                                    (TREE_TYPE (dataref_ptr),
                                     -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs_minus_1,
                                       TYPE_SIZE_UNIT (scalar_type));
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, ptr,
                                  build_int_cst
                                    (TREE_TYPE (ptr),
                                     -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  new_stmt = gimple_build_assign_with_ops
                               (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
                                            new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                             (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              mark_symbols_for_renaming (new_stmt);

              /* 3. Handle explicit realignment if necessary/supported.
                    Create in loop:
                      vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt
                    = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
                                                     vec_dest, msq, lsq,
                                                     realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!strided_load);
                  gcc_assert (nested_in_vect_loop_p (loop, stmt));
                  if (j == 0)
                    {
                      int k;
                      tree t = NULL_TREE;
                      tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);

                      /* CHECKME: bitpos depends on endianness?  */
                      bitpos = bitsize_zero_node;
                      vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                                        bitsize, bitpos);
                      vec_dest = vect_create_destination_var (scalar_dest,
                                                              NULL_TREE);
                      new_stmt = gimple_build_assign (vec_dest, vec_inv);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_assign_set_lhs (new_stmt, new_temp);
                      vect_finish_stmt_generation (stmt, new_stmt, gsi);

                      for (k = nunits - 1; k >= 0; --k)
                        t = tree_cons (NULL_TREE, new_temp, t);
                      /* FIXME: use build_constructor directly.  */
                      vec_inv = build_constructor_from_list (vectype, t);
                      new_temp = vect_init_vector (stmt, vec_inv,
                                                   vectype, gsi);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                  else
                    gcc_unreachable (); /* FORNOW.  */
                }

              if (negative)
                {
                  new_temp = reverse_vec_elements (new_temp, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_strided_load ().  */
              if (strided_load || slp_perm)
                VEC_quick_push (tree, dr_chain, new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              VEC_free (tree, heap, dr_chain);
              return false;
            }
        }
      else
        {
          if (strided_load)
            {
              if (!load_lanes_p)
                vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (dr_chain)
        VEC_free (tree, heap, dr_chain);
    }

  return true;
}

/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  return true;
}

/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

ebfd146a 4609vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4bbe8262 4610 gimple *vec_stmt, tree reduc_def, int reduc_index)
ebfd146a
IR
4611{
4612 tree scalar_dest = NULL_TREE;
4613 tree vec_dest = NULL_TREE;
4614 tree op = NULL_TREE;
4615 tree cond_expr, then_clause, else_clause;
4616 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4617 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
ff802fa1
IR
4618 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4619 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
ebfd146a
IR
4620 tree vec_compare, vec_cond_expr;
4621 tree new_temp;
4622 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4623 enum machine_mode vec_mode;
4624 tree def;
a855b1b1 4625 enum vect_def_type dt, dts[4];
ebfd146a
IR
4626 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4627 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4628 enum tree_code code;
a855b1b1
MM
4629 stmt_vec_info prev_stmt_info = NULL;
4630 int j;
ebfd146a 4631
a70d6342
IR
4632 /* FORNOW: unsupported in basic block SLP. */
4633 gcc_assert (loop_vinfo);
b8698a0f 4634
437f4a00
IR
4635 /* FORNOW: SLP not supported. */
4636 if (STMT_SLP_TYPE (stmt_info))
4637 return false;
4638
ebfd146a 4639 gcc_assert (ncopies >= 1);
a855b1b1 4640 if (reduc_index && ncopies > 1)
ebfd146a
IR
4641 return false; /* FORNOW */
4642
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false;

  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
                           TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }

  /* Transform */

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;
      if (j == 0)
        {
          gimple gtemp;
          vec_cond_lhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
                              NULL, &gtemp, &def, &dts[0]);
          vec_cond_rhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
                              NULL, &gtemp, &def, &dts[1]);
          if (reduc_index == 1)
            vec_then_clause = reduc_def;
          else
            {
              vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                              stmt, NULL);
              vect_is_simple_use (then_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[2]);
            }
          if (reduc_index == 2)
            vec_else_clause = reduc_def;
          else
            {
              vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                              stmt, NULL);
              vect_is_simple_use (else_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[3]);
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_then_clause);
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      vec_compare = build2 (TREE_CODE (cond_expr), vectype,
                            vec_cond_lhs, vec_cond_rhs);
      vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                              vec_compare, vec_then_clause, vec_else_clause);

      new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  return true;
}

/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: stmt has volatile operands");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */
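
  /* Illustrative example: in

       for (i = 0; i < n; i++)
         a[i] = b[i] + 1;

     the increment of i and the exit test i < n are irrelevant for
     vectorization and are skipped here; only the load, the addition and
     the store are vectorized.  */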
ebfd146a 4822
b8698a0f 4823 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 4824 && !STMT_VINFO_LIVE_P (stmt_info))
ebfd146a
IR
4825 {
4826 if (vect_print_dump_info (REPORT_DETAILS))
8644a673 4827 fprintf (vect_dump, "irrelevant.");
ebfd146a 4828
8644a673
IR
4829 return true;
4830 }
ebfd146a 4831
8644a673
IR
4832 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4833 {
4834 case vect_internal_def:
4835 break;
ebfd146a 4836
8644a673 4837 case vect_reduction_def:
7c5222ff 4838 case vect_nested_cycle:
a70d6342 4839 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
8644a673 4840 || relevance == vect_used_in_outer_by_reduction
a70d6342 4841 || relevance == vect_unused_in_scope));
8644a673
IR
4842 break;
4843
4844 case vect_induction_def:
4845 case vect_constant_def:
4846 case vect_external_def:
4847 case vect_unknown_def_type:
4848 default:
4849 gcc_unreachable ();
4850 }
ebfd146a 4851
a70d6342
IR
4852 if (bb_vinfo)
4853 {
4854 gcc_assert (PURE_SLP_STMT (stmt_info));
4855
b690cc0f 4856 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
a70d6342
IR
4857 if (vect_print_dump_info (REPORT_DETAILS))
4858 {
4859 fprintf (vect_dump, "get vectype for scalar type: ");
4860 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4861 }
4862
4863 vectype = get_vectype_for_scalar_type (scalar_type);
4864 if (!vectype)
4865 {
4866 if (vect_print_dump_info (REPORT_DETAILS))
4867 {
4868 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4869 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4870 }
4871 return false;
4872 }
4873
4874 if (vect_print_dump_info (REPORT_DETAILS))
4875 {
4876 fprintf (vect_dump, "vectype: ");
4877 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4878 }
4879
4880 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4881 }
4882
8644a673 4883 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 4884 {
8644a673
IR
4885 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4886 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4887 *need_to_vectorize = true;
ebfd146a
IR
4888 }
4889
8644a673 4890 ok = true;
b8698a0f 4891 if (!bb_vinfo
a70d6342
IR
4892 && (STMT_VINFO_RELEVANT_P (stmt_info)
4893 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8644a673
IR
4894 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4895 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4896 || vectorizable_conversion (stmt, NULL, NULL, NULL)
9dc3f7de 4897 || vectorizable_shift (stmt, NULL, NULL, NULL)
8644a673
IR
4898 || vectorizable_operation (stmt, NULL, NULL, NULL)
4899 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4900 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4901 || vectorizable_call (stmt, NULL, NULL)
4902 || vectorizable_store (stmt, NULL, NULL, NULL)
b5aeb3bb 4903 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4bbe8262 4904 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
a70d6342
IR
4905 else
4906 {
4907 if (bb_vinfo)
57416708 4908 ok = (vectorizable_shift (stmt, NULL, NULL, node)
9dc3f7de 4909 || vectorizable_operation (stmt, NULL, NULL, node)
a70d6342
IR
4910 || vectorizable_assignment (stmt, NULL, NULL, node)
4911 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4912 || vectorizable_store (stmt, NULL, NULL, node));
b8698a0f 4913 }
8644a673
IR
4914
4915 if (!ok)
ebfd146a 4916 {
8644a673
IR
4917 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4918 {
4919 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4920 fprintf (vect_dump, "supported: ");
4921 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4922 }
b8698a0f 4923
ebfd146a
IR
4924 return false;
4925 }
4926
a70d6342
IR
4927 if (bb_vinfo)
4928 return true;
4929
8644a673
IR
4930 /* Stmts that are (also) "live" (i.e., used outside the loop)
4931 need extra handling, except for vectorizable reductions. */
4932 if (STMT_VINFO_LIVE_P (stmt_info)
4933 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4934 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 4935
8644a673 4936 if (!ok)
ebfd146a 4937 {
8644a673
IR
4938 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4939 {
4940 fprintf (vect_dump, "not vectorized: live stmt not ");
4941 fprintf (vect_dump, "supported: ");
4942 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4943 }
b8698a0f 4944
8644a673 4945 return false;
ebfd146a
IR
4946 }
4947
ebfd146a
IR
4948 return true;
4949}
4950
4951
4952/* Function vect_transform_stmt.
4953
4954 Create a vectorized stmt to replace STMT, and insert it at GSI. */
4955
4956bool
4957vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
b8698a0f 4958 bool *strided_store, slp_tree slp_node,
ebfd146a
IR
4959 slp_instance slp_node_instance)
4960{
4961 bool is_store = false;
4962 gimple vec_stmt = NULL;
4963 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
039d9ea1 4964 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
ebfd146a 4965 bool done;
ebfd146a
IR
4966
4967 switch (STMT_VINFO_TYPE (stmt_info))
4968 {
4969 case type_demotion_vec_info_type:
4970 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4971 gcc_assert (done);
4972 break;
4973
4974 case type_promotion_vec_info_type:
4975 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4976 gcc_assert (done);
4977 break;
4978
4979 case type_conversion_vec_info_type:
4980 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4981 gcc_assert (done);
4982 break;
4983
4984 case induc_vec_info_type:
4985 gcc_assert (!slp_node);
4986 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4987 gcc_assert (done);
4988 break;
4989
9dc3f7de
IR
4990 case shift_vec_info_type:
4991 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4992 gcc_assert (done);
4993 break;
4994
ebfd146a
IR
4995 case op_vec_info_type:
4996 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4997 gcc_assert (done);
4998 break;
4999
5000 case assignment_vec_info_type:
5001 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
5002 gcc_assert (done);
5003 break;
5004
5005 case load_vec_info_type:
b8698a0f 5006 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
ebfd146a
IR
5007 slp_node_instance);
5008 gcc_assert (done);
5009 break;
5010
5011 case store_vec_info_type:
5012 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
5013 gcc_assert (done);
5014 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
5015 {
5016 /* In case of interleaving, the whole chain is vectorized when the
ff802fa1 5017 last store in the chain is reached. Store stmts before the last
ebfd146a
IR
5018 one are skipped, and their vec_stmt_info shouldn't be freed
5019 meanwhile. */
5020 *strided_store = true;
5021 if (STMT_VINFO_VEC_STMT (stmt_info))
5022 is_store = true;
5023 }
5024 else
5025 is_store = true;
5026 break;
5027
5028 case condition_vec_info_type:
5029 gcc_assert (!slp_node);
4bbe8262 5030 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
ebfd146a
IR
5031 gcc_assert (done);
5032 break;
5033
5034 case call_vec_info_type:
5035 gcc_assert (!slp_node);
5036 done = vectorizable_call (stmt, gsi, &vec_stmt);
039d9ea1 5037 stmt = gsi_stmt (*gsi);
ebfd146a
IR
5038 break;
5039
5040 case reduc_vec_info_type:
b5aeb3bb 5041 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
ebfd146a
IR
5042 gcc_assert (done);
5043 break;
5044
5045 default:
5046 if (!STMT_VINFO_LIVE_P (stmt_info))
5047 {
5048 if (vect_print_dump_info (REPORT_DETAILS))
5049 fprintf (vect_dump, "stmt not supported.");
5050 gcc_unreachable ();
5051 }
5052 }
5053
5054 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
5055 is being vectorized, but outside the immediately enclosing loop. */
5056 if (vec_stmt
a70d6342
IR
5057 && STMT_VINFO_LOOP_VINFO (stmt_info)
5058 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
5059 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
ebfd146a
IR
5060 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
5061 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
b8698a0f 5062 || STMT_VINFO_RELEVANT (stmt_info) ==
a70d6342 5063 vect_used_in_outer_by_reduction))
ebfd146a 5064 {
a70d6342
IR
5065 struct loop *innerloop = LOOP_VINFO_LOOP (
5066 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
ebfd146a
IR
5067 imm_use_iterator imm_iter;
5068 use_operand_p use_p;
5069 tree scalar_dest;
5070 gimple exit_phi;
5071
5072 if (vect_print_dump_info (REPORT_DETAILS))
a70d6342 5073 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
ebfd146a
IR
5074
5075 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
5076 (to be used when vectorizing outer-loop stmts that use the DEF of
5077 STMT). */
5078 if (gimple_code (stmt) == GIMPLE_PHI)
5079 scalar_dest = PHI_RESULT (stmt);
5080 else
5081 scalar_dest = gimple_assign_lhs (stmt);
5082
5083 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5084 {
5085 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
5086 {
5087 exit_phi = USE_STMT (use_p);
5088 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
5089 }
5090 }
5091 }
5092
5093 /* Handle stmts whose DEF is used outside the loop-nest that is
5094 being vectorized. */
5095 if (STMT_VINFO_LIVE_P (stmt_info)
5096 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5097 {
5098 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
5099 gcc_assert (done);
5100 }
5101
5102 if (vec_stmt)
5103 {
5104 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5105 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5106 if (orig_stmt_in_pattern)
5107 {
5108 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5109 /* STMT was inserted by the vectorizer to replace a computation idiom.
b8698a0f
L
5110 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5111 computed this idiom. We need to record a pointer to VEC_STMT in
5112 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
ebfd146a
IR
5113 documentation of vect_pattern_recog. */
5114 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5115 {
039d9ea1
IR
5116 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
5117 == orig_scalar_stmt);
ebfd146a
IR
5118 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5119 }
5120 }
5121 }
5122
b8698a0f 5123 return is_store;
ebfd146a
IR
5124}
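/* Simplified usage sketch (an assumed view of the driver flow, not
   verbatim from the vectorizer driver):

     bool strided_store = false;
     bool is_store = vect_transform_stmt (stmt, &gsi, &strided_store,
                                          NULL, NULL);
     if (is_store)
       ... remove the now-dead scalar store(s), e.g. with
           vect_remove_stores ...

   Stores are signalled through the return value because removing them
   must be deferred until the whole interleaving chain is emitted.  */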
5125
5126
b8698a0f 5127/* Remove a group of stores (for SLP or interleaving) and free their
ebfd146a
IR
5128 stmt_vec_info. */
5129
5130void
5131vect_remove_stores (gimple first_stmt)
5132{
5133 gimple next = first_stmt;
5134 gimple tmp;
5135 gimple_stmt_iterator next_si;
5136
5137 while (next)
5138 {
5139 /* Free the attached stmt_vec_info and remove the stmt. */
5140 next_si = gsi_for_stmt (next);
5141 gsi_remove (&next_si, true);
e14c1050 5142 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
ebfd146a
IR
5143 free_stmt_vec_info (next);
5144 next = tmp;
5145 }
5146}
5147
5148
5149/* Function new_stmt_vec_info.
5150
5151 Create and initialize a new stmt_vec_info struct for STMT. */
5152
5153stmt_vec_info
b8698a0f 5154new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
a70d6342 5155 bb_vec_info bb_vinfo)
ebfd146a
IR
5156{
5157 stmt_vec_info res;
5158 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
5159
5160 STMT_VINFO_TYPE (res) = undef_vec_info_type;
5161 STMT_VINFO_STMT (res) = stmt;
5162 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
a70d6342 5163 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
8644a673 5164 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
ebfd146a
IR
5165 STMT_VINFO_LIVE_P (res) = false;
5166 STMT_VINFO_VECTYPE (res) = NULL;
5167 STMT_VINFO_VEC_STMT (res) = NULL;
4b5caab7 5168 STMT_VINFO_VECTORIZABLE (res) = true;
ebfd146a
IR
5169 STMT_VINFO_IN_PATTERN_P (res) = false;
5170 STMT_VINFO_RELATED_STMT (res) = NULL;
5171 STMT_VINFO_DATA_REF (res) = NULL;
5172
5173 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
5174 STMT_VINFO_DR_OFFSET (res) = NULL;
5175 STMT_VINFO_DR_INIT (res) = NULL;
5176 STMT_VINFO_DR_STEP (res) = NULL;
5177 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5178
5179 if (gimple_code (stmt) == GIMPLE_PHI
5180 && is_loop_header_bb_p (gimple_bb (stmt)))
5181 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5182 else
8644a673
IR
5183 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5184
ebfd146a
IR
5185 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5186 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5187 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
32e8bb8e 5188 STMT_SLP_TYPE (res) = loop_vect;
e14c1050
IR
5189 GROUP_FIRST_ELEMENT (res) = NULL;
5190 GROUP_NEXT_ELEMENT (res) = NULL;
5191 GROUP_SIZE (res) = 0;
5192 GROUP_STORE_COUNT (res) = 0;
5193 GROUP_GAP (res) = 0;
5194 GROUP_SAME_DR_STMT (res) = NULL;
5195 GROUP_READ_WRITE_DEPENDENCE (res) = false;
ebfd146a
IR
5196
5197 return res;
5198}
5199
5200
5201 /* Create a vector to hold stmt_vec_info structs.  */
5202
5203void
5204init_stmt_vec_info_vec (void)
5205{
5206 gcc_assert (!stmt_vec_info_vec);
5207 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5208}
5209
5210
5211 /* Free the vector holding stmt_vec_info structs.  */
5212
5213void
5214free_stmt_vec_info_vec (void)
5215{
5216 gcc_assert (stmt_vec_info_vec);
5217 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5218}
5219
5220
5221/* Free stmt vectorization related info. */
5222
5223void
5224free_stmt_vec_info (gimple stmt)
5225{
5226 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5227
5228 if (!stmt_info)
5229 return;
5230
5231 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5232 set_vinfo_for_stmt (stmt, NULL);
5233 free (stmt_info);
5234}
5235
5236
bb67d9c7 5237/* Function get_vectype_for_scalar_type_and_size.
ebfd146a 5238
bb67d9c7 5239 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
ebfd146a
IR
5240 by the target. */
5241
bb67d9c7
RG
5242static tree
5243get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
ebfd146a
IR
5244{
5245 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
cc4b5170 5246 enum machine_mode simd_mode;
2f816591 5247 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
ebfd146a
IR
5248 int nunits;
5249 tree vectype;
5250
cc4b5170 5251 if (nbytes == 0)
ebfd146a
IR
5252 return NULL_TREE;
5253
2f816591
RG
5254 /* We can't build a vector type of elements with alignment bigger than
5255 their size. */
5256 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5257 return NULL_TREE;
5258
6d7971b8
RG
5259 /* If we'd build a vector type of elements whose mode precision doesn't
5260 match their type's precision we'll get mismatched types on vector
5261 extracts via BIT_FIELD_REFs. This effectively means we disable
5262 vectorization of bool and/or enum types in some languages. */
5263 if (INTEGRAL_TYPE_P (scalar_type)
5264 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5265 return NULL_TREE;
5266
cc4b5170
RG
5267 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5268 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5269 return NULL_TREE;
5270
bb67d9c7
RG
5271 /* If no size was supplied use the mode the target prefers. Otherwise
5272 lookup a vector mode of the specified size. */
5273 if (size == 0)
5274 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5275 else
5276 simd_mode = mode_for_vector (inner_mode, size / nbytes);
cc4b5170
RG
5277 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5278 if (nunits <= 1)
5279 return NULL_TREE;
ebfd146a
IR
5280
5281 vectype = build_vector_type (scalar_type, nunits);
5282 if (vect_print_dump_info (REPORT_DETAILS))
5283 {
5284 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5285 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5286 }
5287
5288 if (!vectype)
5289 return NULL_TREE;
5290
5291 if (vect_print_dump_info (REPORT_DETAILS))
5292 {
5293 fprintf (vect_dump, "vectype: ");
5294 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5295 }
5296
5297 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5298 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5299 {
5300 if (vect_print_dump_info (REPORT_DETAILS))
5301 fprintf (vect_dump, "mode not supported by target.");
5302 return NULL_TREE;
5303 }
5304
5305 return vectype;
5306}
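/* Example (illustrative, assuming a target with 16-byte vectors such
   as SSE2): for SCALAR_TYPE int (4 bytes) and SIZE 16 this computes
   nunits = 16 / 4 = 4 and returns a vector type equivalent to

     typedef int v4si __attribute__ ((vector_size (16)));

   With SIZE == 0 the target's preferred SIMD mode is used instead.  */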
5307
bb67d9c7
RG
5308unsigned int current_vector_size;
5309
5310/* Function get_vectype_for_scalar_type.
5311
5312 Returns the vector type corresponding to SCALAR_TYPE as supported
5313 by the target. */
5314
5315tree
5316get_vectype_for_scalar_type (tree scalar_type)
5317{
5318 tree vectype;
5319 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5320 current_vector_size);
5321 if (vectype
5322 && current_vector_size == 0)
5323 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5324 return vectype;
5325}
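/* Usage note (a sketch under the assumption that the target prefers
   16-byte vectors): the first successful call latches
   current_vector_size, so later queries yield same-sized vectors:

     tree v4si = get_vectype_for_scalar_type (integer_type_node);
     tree v4sf = get_vectype_for_scalar_type (float_type_node);

   Both calls then return 16-byte vector types.  */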
5326
b690cc0f
RG
5327/* Function get_same_sized_vectype
5328
5329 Returns a vector type corresponding to SCALAR_TYPE of size
5330 VECTOR_TYPE if supported by the target. */
5331
5332tree
bb67d9c7 5333get_same_sized_vectype (tree scalar_type, tree vector_type)
b690cc0f 5334{
bb67d9c7
RG
5335 return get_vectype_for_scalar_type_and_size
5336 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
b690cc0f
RG
5337}
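/* Example (illustrative): if VECTOR_TYPE is a 16-byte vector of ints,
   get_same_sized_vectype (float_type_node, vector_type) looks up a
   16-byte vector of floats, provided the target supports it.  */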
5338
ebfd146a
IR
5339/* Function vect_is_simple_use.
5340
5341 Input:
a70d6342
IR
5342 LOOP_VINFO - the vect info of the loop that is being vectorized.
5343 BB_VINFO - the vect info of the basic block that is being vectorized.
5344 OPERAND - operand of a stmt in the loop or bb.
ebfd146a
IR
5345 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5346
5347 Returns whether a stmt with OPERAND can be vectorized.
b8698a0f 5348 For loops, supportable operands are constants, loop invariants, and operands
ff802fa1 5349 that are defined by the current iteration of the loop. Unsupportable
b8698a0f 5350 operands are those that are defined by a previous iteration of the loop (as
a70d6342
IR
5351 is the case in reduction/induction computations).
5352 For basic blocks, supportable operands are constants and bb invariants.
5353 For now, operands defined outside the basic block are not supported. */
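/* Illustrative sketch (hypothetical operands, not from the sources):
   for a loop statement

     a_2 = b_1 + 5;

   the operand 5 classifies as vect_constant_def, an operand defined
   before the loop as vect_external_def, and b_1 defined by another
   statement of the current iteration as vect_internal_def.  */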
ebfd146a
IR
5354
5355bool
b8698a0f 5356vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
a70d6342 5357 bb_vec_info bb_vinfo, gimple *def_stmt,
ebfd146a 5358 tree *def, enum vect_def_type *dt)
b8698a0f 5359{
ebfd146a
IR
5360 basic_block bb;
5361 stmt_vec_info stmt_vinfo;
a70d6342 5362 struct loop *loop = NULL;
b8698a0f 5363
a70d6342
IR
5364 if (loop_vinfo)
5365 loop = LOOP_VINFO_LOOP (loop_vinfo);
ebfd146a
IR
5366
5367 *def_stmt = NULL;
5368 *def = NULL_TREE;
b8698a0f 5369
ebfd146a
IR
5370 if (vect_print_dump_info (REPORT_DETAILS))
5371 {
5372 fprintf (vect_dump, "vect_is_simple_use: operand ");
5373 print_generic_expr (vect_dump, operand, TDF_SLIM);
5374 }
b8698a0f 5375
ebfd146a
IR
5376 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5377 {
5378 *dt = vect_constant_def;
5379 return true;
5380 }
b8698a0f 5381
ebfd146a
IR
5382 if (is_gimple_min_invariant (operand))
5383 {
5384 *def = operand;
8644a673 5385 *dt = vect_external_def;
ebfd146a
IR
5386 return true;
5387 }
5388
5389 if (TREE_CODE (operand) == PAREN_EXPR)
5390 {
5391 if (vect_print_dump_info (REPORT_DETAILS))
5392 fprintf (vect_dump, "non-associatable copy.");
5393 operand = TREE_OPERAND (operand, 0);
5394 }
b8698a0f 5395
ebfd146a
IR
5396 if (TREE_CODE (operand) != SSA_NAME)
5397 {
5398 if (vect_print_dump_info (REPORT_DETAILS))
5399 fprintf (vect_dump, "not ssa-name.");
5400 return false;
5401 }
b8698a0f 5402
ebfd146a
IR
5403 *def_stmt = SSA_NAME_DEF_STMT (operand);
5404 if (*def_stmt == NULL)
5405 {
5406 if (vect_print_dump_info (REPORT_DETAILS))
5407 fprintf (vect_dump, "no def_stmt.");
5408 return false;
5409 }
5410
5411 if (vect_print_dump_info (REPORT_DETAILS))
5412 {
5413 fprintf (vect_dump, "def_stmt: ");
5414 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5415 }
5416
8644a673 5417 /* An empty stmt is expected only in the case of a function argument
ebfd146a
IR
5418 (otherwise we expect a phi_node or a GIMPLE_ASSIGN). */
5419 if (gimple_nop_p (*def_stmt))
5420 {
5421 *def = operand;
8644a673 5422 *dt = vect_external_def;
ebfd146a
IR
5423 return true;
5424 }
5425
5426 bb = gimple_bb (*def_stmt);
a70d6342
IR
5427
5428 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5429 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
b8698a0f 5430 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
8644a673 5431 *dt = vect_external_def;
ebfd146a
IR
5432 else
5433 {
5434 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5435 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5436 }
5437
5438 if (*dt == vect_unknown_def_type)
5439 {
5440 if (vect_print_dump_info (REPORT_DETAILS))
5441 fprintf (vect_dump, "Unsupported pattern.");
5442 return false;
5443 }
5444
5445 if (vect_print_dump_info (REPORT_DETAILS))
5446 fprintf (vect_dump, "type of def: %d.",*dt);
5447
5448 switch (gimple_code (*def_stmt))
5449 {
5450 case GIMPLE_PHI:
5451 *def = gimple_phi_result (*def_stmt);
5452 break;
5453
5454 case GIMPLE_ASSIGN:
5455 *def = gimple_assign_lhs (*def_stmt);
5456 break;
5457
5458 case GIMPLE_CALL:
5459 *def = gimple_call_lhs (*def_stmt);
5460 if (*def != NULL)
5461 break;
5462 /* FALLTHRU */
5463 default:
5464 if (vect_print_dump_info (REPORT_DETAILS))
5465 fprintf (vect_dump, "unsupported defining stmt: ");
5466 return false;
5467 }
5468
5469 return true;
5470}
5471
b690cc0f
RG
5472/* Function vect_is_simple_use_1.
5473
5474 Same as vect_is_simple_use but also determines the vector operand
5475 type of OPERAND and stores it to *VECTYPE. If the definition of
5476 OPERAND is vect_uninitialized_def, vect_constant_def or
5477 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
5478 is responsible for computing the best suited vector type for the
5479 scalar operand. */
5480
5481bool
5482vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5483 bb_vec_info bb_vinfo, gimple *def_stmt,
5484 tree *def, enum vect_def_type *dt, tree *vectype)
5485{
5486 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5487 return false;
5488
5489 /* Now get a vector type if the def is internal, otherwise supply
5490 NULL_TREE and leave it up to the caller to figure out a proper
5491 type for the use stmt. */
5492 if (*dt == vect_internal_def
5493 || *dt == vect_induction_def
5494 || *dt == vect_reduction_def
5495 || *dt == vect_double_reduction_def
5496 || *dt == vect_nested_cycle)
5497 {
5498 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5499 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5500 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5501 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5502 gcc_assert (*vectype != NULL_TREE);
5503 }
5504 else if (*dt == vect_uninitialized_def
5505 || *dt == vect_constant_def
5506 || *dt == vect_external_def)
5507 *vectype = NULL_TREE;
5508 else
5509 gcc_unreachable ();
5510
5511 return true;
5512}
5513
ebfd146a
IR
5514
5515/* Function supportable_widening_operation
5516
b8698a0f
L
5517 Check whether an operation represented by the code CODE is a
5518 widening operation that is supported by the target platform in
b690cc0f
RG
5519 vector form (i.e., when operating on arguments of type VECTYPE_IN
5520 producing a result of type VECTYPE_OUT).
b8698a0f 5521
ebfd146a
IR
5522 Widening operations we currently support are NOP (CONVERT), FLOAT
5523 and WIDEN_MULT. This function checks if these operations are supported
5524 by the target platform either directly (via vector tree-codes), or via
5525 target builtins.
5526
5527 Output:
b8698a0f
L
5528 - CODE1 and CODE2 are codes of vector operations to be used when
5529 vectorizing the operation, if available.
ebfd146a 5530 - DECL1 and DECL2 are decls of target builtin functions to be used
ff802fa1 5531 when vectorizing the operation, if available. In this case,
b8698a0f 5532 CODE1 and CODE2 are CALL_EXPR.
ebfd146a
IR
5533 - MULTI_STEP_CVT determines the number of required intermediate steps in
5534 case of multi-step conversion (like char->short->int - in that case
5535 MULTI_STEP_CVT will be 1).
b8698a0f
L
5536 - INTERM_TYPES contains the intermediate type required to perform the
5537 widening operation (short in the above example). */
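/* Example (illustrative): widening a vector of chars to a vector of
   ints takes two steps, char->short and short->int, each done with
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR.  On success *MULTI_STEP_CVT
   is 1 and *INTERM_TYPES holds the intermediate short vector type.  */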
ebfd146a
IR
5538
5539bool
b690cc0f
RG
5540supportable_widening_operation (enum tree_code code, gimple stmt,
5541 tree vectype_out, tree vectype_in,
ebfd146a
IR
5542 tree *decl1, tree *decl2,
5543 enum tree_code *code1, enum tree_code *code2,
5544 int *multi_step_cvt,
5545 VEC (tree, heap) **interm_types)
5546{
5547 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5548 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5549 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5550 bool ordered_p;
5551 enum machine_mode vec_mode;
81f40b79 5552 enum insn_code icode1, icode2;
ebfd146a 5553 optab optab1, optab2;
b690cc0f
RG
5554 tree vectype = vectype_in;
5555 tree wide_vectype = vectype_out;
ebfd146a
IR
5556 enum tree_code c1, c2;
5557
5558 /* The result of a vectorized widening operation usually requires two vectors
b8698a0f
L
5559 (because the widened results do not fit in one vector). The generated
5560 vector results would normally be expected to appear in the same
ebfd146a
IR
5561 order as in the original scalar computation, i.e. if 8 results are
5562 generated in each vector iteration, they are to be organized as follows:
b8698a0f 5563 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
ebfd146a 5564
b8698a0f 5565 However, in the special case that the result of the widening operation is
ebfd146a 5566 used in a reduction computation only, the order doesn't matter (because
b8698a0f 5567 when vectorizing a reduction we change the order of the computation).
ebfd146a
IR
5568 Some targets can take advantage of this and generate more efficient code.
5569 For example, targets like Altivec, that support widen_mult using a sequence
5570 of {mult_even,mult_odd} generate the following vectors:
5571 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5572
5573 When vectorizing outer-loops, we execute the inner-loop sequentially
b8698a0f 5574 (each vectorized inner-loop iteration contributes to VF outer-loop
ff802fa1 5575 iterations in parallel). We therefore don't allow changing the order
ebfd146a
IR
5576 of the computation in the inner-loop during outer-loop vectorization. */
5577
5578 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5579 && !nested_in_vect_loop_p (vect_loop, stmt))
5580 ordered_p = false;
5581 else
5582 ordered_p = true;
5583
5584 if (!ordered_p
5585 && code == WIDEN_MULT_EXPR
5586 && targetm.vectorize.builtin_mul_widen_even
5587 && targetm.vectorize.builtin_mul_widen_even (vectype)
5588 && targetm.vectorize.builtin_mul_widen_odd
5589 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5590 {
5591 if (vect_print_dump_info (REPORT_DETAILS))
5592 fprintf (vect_dump, "Unordered widening operation detected.");
5593
5594 *code1 = *code2 = CALL_EXPR;
5595 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5596 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5597 return true;
5598 }
5599
5600 switch (code)
5601 {
5602 case WIDEN_MULT_EXPR:
5603 if (BYTES_BIG_ENDIAN)
5604 {
5605 c1 = VEC_WIDEN_MULT_HI_EXPR;
5606 c2 = VEC_WIDEN_MULT_LO_EXPR;
5607 }
5608 else
5609 {
5610 c2 = VEC_WIDEN_MULT_HI_EXPR;
5611 c1 = VEC_WIDEN_MULT_LO_EXPR;
5612 }
5613 break;
5614
5615 CASE_CONVERT:
5616 if (BYTES_BIG_ENDIAN)
5617 {
5618 c1 = VEC_UNPACK_HI_EXPR;
5619 c2 = VEC_UNPACK_LO_EXPR;
5620 }
5621 else
5622 {
5623 c2 = VEC_UNPACK_HI_EXPR;
5624 c1 = VEC_UNPACK_LO_EXPR;
5625 }
5626 break;
5627
5628 case FLOAT_EXPR:
5629 if (BYTES_BIG_ENDIAN)
5630 {
5631 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5632 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5633 }
5634 else
5635 {
5636 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5637 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5638 }
5639 break;
5640
5641 case FIX_TRUNC_EXPR:
5642 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5643 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5644 computing the operation. */
5645 return false;
5646
5647 default:
5648 gcc_unreachable ();
5649 }
5650
5651 if (code == FIX_TRUNC_EXPR)
5652 {
5653 /* The signedness is determined from output operand. */
b690cc0f
RG
5654 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5655 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
ebfd146a
IR
5656 }
5657 else
5658 {
5659 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5660 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5661 }
5662
5663 if (!optab1 || !optab2)
5664 return false;
5665
5666 vec_mode = TYPE_MODE (vectype);
947131ba
RS
5667 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5668 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
ebfd146a
IR
5669 return false;
5670
b8698a0f 5671 /* Check if it's a multi-step conversion that can be done using intermediate
ebfd146a
IR
5672 types. */
5673 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5674 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5675 {
5676 int i;
5677 tree prev_type = vectype, intermediate_type;
5678 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5679 optab optab3, optab4;
5680
5681 if (!CONVERT_EXPR_CODE_P (code))
5682 return false;
b8698a0f 5683
ebfd146a
IR
5684 *code1 = c1;
5685 *code2 = c2;
b8698a0f 5686
ebfd146a 5687 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
ff802fa1
IR
5688 intermediate steps in the promotion sequence. We try at most
5689 MAX_INTERM_CVT_STEPS intermediate steps to get to WIDE_VECTYPE, and
5690 fail if we cannot. */
ebfd146a
IR
5691 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5692 for (i = 0; i < 3; i++)
5693 {
5694 intermediate_mode = insn_data[icode1].operand[0].mode;
5695 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5696 TYPE_UNSIGNED (prev_type));
5697 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5698 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5699
5700 if (!optab3 || !optab4
947131ba
RS
5701 || ((icode1 = optab_handler (optab1, prev_mode))
5702 == CODE_FOR_nothing)
ebfd146a 5703 || insn_data[icode1].operand[0].mode != intermediate_mode
947131ba
RS
5704 || ((icode2 = optab_handler (optab2, prev_mode))
5705 == CODE_FOR_nothing)
ebfd146a 5706 || insn_data[icode2].operand[0].mode != intermediate_mode
947131ba
RS
5707 || ((icode1 = optab_handler (optab3, intermediate_mode))
5708 == CODE_FOR_nothing)
5709 || ((icode2 = optab_handler (optab4, intermediate_mode))
5710 == CODE_FOR_nothing))
ebfd146a
IR
5711 return false;
5712
5713 VEC_quick_push (tree, *interm_types, intermediate_type);
5714 (*multi_step_cvt)++;
5715
5716 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5717 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5718 return true;
5719
5720 prev_type = intermediate_type;
5721 prev_mode = intermediate_mode;
5722 }
5723
5724 return false;
5725 }
5726
5727 *code1 = c1;
5728 *code2 = c2;
5729 return true;
5730}
5731
5732
5733/* Function supportable_narrowing_operation
5734
b8698a0f
L
5735 Check whether an operation represented by the code CODE is a
5736 narrowing operation that is supported by the target platform in
b690cc0f
RG
5737 vector form (i.e., when operating on arguments of type VECTYPE_IN
5738 and producing a result of type VECTYPE_OUT).
b8698a0f 5739
ebfd146a 5740 Narrowing operations we currently support are NOP (CONVERT) and
ff802fa1 5741 FIX_TRUNC. This function checks if these operations are supported by
ebfd146a
IR
5742 the target platform directly via vector tree-codes.
5743
5744 Output:
b8698a0f
L
5745 - CODE1 is the code of a vector operation to be used when
5746 vectorizing the operation, if available.
ebfd146a
IR
5747 - MULTI_STEP_CVT determines the number of required intermediate steps in
5748 case of multi-step conversion (like int->short->char - in that case
5749 MULTI_STEP_CVT will be 1).
5750 - INTERM_TYPES contains the intermediate type required to perform the
b8698a0f 5751 narrowing operation (short in the above example). */
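/* Example (illustrative): narrowing a vector of ints to a vector of
   chars goes int->short->char using two VEC_PACK_TRUNC_EXPR steps, so
   *MULTI_STEP_CVT is 1 and *INTERM_TYPES holds the intermediate short
   vector type.  */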
ebfd146a
IR
5752
5753bool
5754supportable_narrowing_operation (enum tree_code code,
b690cc0f 5755 tree vectype_out, tree vectype_in,
ebfd146a
IR
5756 enum tree_code *code1, int *multi_step_cvt,
5757 VEC (tree, heap) **interm_types)
5758{
5759 enum machine_mode vec_mode;
5760 enum insn_code icode1;
5761 optab optab1, interm_optab;
b690cc0f
RG
5762 tree vectype = vectype_in;
5763 tree narrow_vectype = vectype_out;
ebfd146a
IR
5764 enum tree_code c1;
5765 tree intermediate_type, prev_type;
5766 int i;
5767
5768 switch (code)
5769 {
5770 CASE_CONVERT:
5771 c1 = VEC_PACK_TRUNC_EXPR;
5772 break;
5773
5774 case FIX_TRUNC_EXPR:
5775 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5776 break;
5777
5778 case FLOAT_EXPR:
5779 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5780 tree code and optabs used for computing the operation. */
5781 return false;
5782
5783 default:
5784 gcc_unreachable ();
5785 }
5786
5787 if (code == FIX_TRUNC_EXPR)
5788 /* The signedness is determined from output operand. */
b690cc0f 5789 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
ebfd146a
IR
5790 else
5791 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5792
5793 if (!optab1)
5794 return false;
5795
5796 vec_mode = TYPE_MODE (vectype);
947131ba 5797 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
ebfd146a
IR
5798 return false;
5799
5800 /* Check if it's a multi-step conversion that can be done using intermediate
5801 types. */
5802 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5803 {
5804 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5805
5806 *code1 = c1;
5807 prev_type = vectype;
5808 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
ff802fa1
IR
5809 intermediate steps in the narrowing sequence. We try at most
5810 MAX_INTERM_CVT_STEPS intermediate steps to get to NARROW_VECTYPE, and
5811 fail if we cannot. */
ebfd146a
IR
5812 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5813 for (i = 0; i < 3; i++)
5814 {
5815 intermediate_mode = insn_data[icode1].operand[0].mode;
5816 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5817 TYPE_UNSIGNED (prev_type));
b8698a0f 5818 interm_optab = optab_for_tree_code (c1, intermediate_type,
ebfd146a 5819 optab_default);
b8698a0f 5820 if (!interm_optab
947131ba
RS
5821 || ((icode1 = optab_handler (optab1, prev_mode))
5822 == CODE_FOR_nothing)
ebfd146a 5823 || insn_data[icode1].operand[0].mode != intermediate_mode
947131ba
RS
5824 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5825 == CODE_FOR_nothing))
ebfd146a
IR
5826 return false;
5827
5828 VEC_quick_push (tree, *interm_types, intermediate_type);
5829 (*multi_step_cvt)++;
5830
5831 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5832 return true;
5833
5834 prev_type = intermediate_type;
5835 prev_mode = intermediate_mode;
5836 }
5837
5838 return false;
5839 }
5840
5841 *code1 = c1;
5842 return true;
5843}