gcc/tree-vect-stmts.c
ebfd146a 1/* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
b8698a0f 4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7This file is part of GCC.
8
9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
11Software Foundation; either version 3, or (at your option) any later
12version.
13
14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17for more details.
18
19You should have received a copy of the GNU General Public License
20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "ggc.h"
28#include "tree.h"
29#include "target.h"
30#include "basic-block.h"
31#include "diagnostic.h"
32#include "tree-flow.h"
33#include "tree-dump.h"
34#include "cfgloop.h"
35#include "cfglayout.h"
36#include "expr.h"
37#include "recog.h"
38#include "optabs.h"
39#include "toplev.h"
40#include "tree-vectorizer.h"
41#include "langhooks.h"
42
43
44/* Utility functions used by vect_mark_stmts_to_be_vectorized. */
45
46/* Function vect_mark_relevant.
47
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
49
50static void
51vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
53{
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
57
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
60
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
62 {
63 gimple pattern_stmt;
64
b8698a0f 65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
69
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
71
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
79 }
80
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
84
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
87 {
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
91 }
92
93 VEC_safe_push (gimple, heap, *worklist, stmt);
94}
95
96
97/* Function vect_stmt_relevant_p.
98
99 Return true if STMT in loop that is represented by LOOP_VINFO is
100 "relevant for vectorization".
101
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
105 - control stmts in the loop (except for the exit condition).
106
107 CHECKME: what other side effects would the vectorizer allow? */
108
109static bool
110vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
112{
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
118
8644a673 119 *relevant = vect_unused_in_scope;
120 *live_p = false;
121
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
8644a673 126 *relevant = vect_used_in_scope;
127
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
5006671f 130 if (gimple_vdef (stmt))
131 {
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
8644a673 134 *relevant = vect_used_in_scope;
135 }
136
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
139 {
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
141 {
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
144 {
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
147
148 if (is_gimple_debug (USE_STMT (use_p)))
149 continue;
150
151 /* We expect all such uses to be in the loop exit phis
152 (because of loop closed form) */
153 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
154 gcc_assert (bb == single_exit (loop)->dest);
155
156 *live_p = true;
157 }
158 }
159 }
160
161 return (*live_p || *relevant);
162}
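/* Illustrative example (added for exposition; not part of the original
   sources): in a loop such as

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;      <-- stores to memory (has a vdef)
         sum = sum + b[i];     <-- sum is read after the loop
       }

   the store is reported with *relevant = vect_used_in_scope because it
   alters memory, and the stmt computing sum is reported with *live_p = true
   because its value is used outside the loop, through the loop-exit phi.  */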
163
164
b8698a0f 165/* Function exist_non_indexing_operands_for_use_p
ebfd146a 166
b8698a0f 167 USE is one of the uses attached to STMT. Check if USE is
168 used in STMT for anything other than indexing an array. */
169
170static bool
171exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
172{
173 tree operand;
174 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
59a05b0c 175
176 /* USE corresponds to some operand in STMT. If there is no data
177 reference in STMT, then any operand that corresponds to USE
178 is not indexing an array. */
179 if (!STMT_VINFO_DATA_REF (stmt_info))
180 return true;
59a05b0c 181
182 /* STMT has a data_ref. FORNOW this means that it's of one of
183 the following forms:
184 -1- ARRAY_REF = var
185 -2- var = ARRAY_REF
186 (This should have been verified in analyze_data_refs).
187
188 'var' in the second case corresponds to a def, not a use,
b8698a0f 189 so USE cannot correspond to any operands that are not used
190 for array indexing.
191
192 Therefore, all we need to check is if STMT falls into the
193 first case, and whether var corresponds to USE. */
194
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
198 return false;
ebfd146a 199 operand = gimple_assign_rhs1 (stmt);
200 if (TREE_CODE (operand) != SSA_NAME)
201 return false;
202
203 if (operand == use)
204 return true;
205
206 return false;
207}
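/* Illustrative example (exposition only): for the store  a[i_7] = x_5;
   the use 'x_5' is the stored value, so this function returns true for it,
   while 'i_7' only feeds the array index and the function returns false.
   For a load  x_5 = a[i_7];  the lhs is an SSA_NAME, so every use of the
   stmt is treated as an indexing use and the function returns false.  */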
208
209
b8698a0f 210/*
211 Function process_use.
212
213 Inputs:
214 - a USE in STMT in a loop represented by LOOP_VINFO
b8698a0f 215 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
216 that defined USE. This is done by calling mark_relevant and passing it
217 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
218
219 Outputs:
220 Generally, LIVE_P and RELEVANT are used to define the liveness and
221 relevance info of the DEF_STMT of this USE:
222 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
223 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
224 Exceptions:
225 - case 1: If USE is used only for address computations (e.g. array indexing),
b8698a0f 226 which does not need to be directly vectorized, then the liveness/relevance
ebfd146a 227 of the respective DEF_STMT is left unchanged.
228 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
229 skip DEF_STMT because it has already been processed.
230 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
231 be modified accordingly.
232
233 Return true if everything is as expected. Return false otherwise. */
234
235static bool
b8698a0f 236process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
237 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
238{
239 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
240 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
241 stmt_vec_info dstmt_vinfo;
242 basic_block bb, def_bb;
243 tree def;
244 gimple def_stmt;
245 enum vect_def_type dt;
246
b8698a0f 247 /* case 1: we are only interested in uses that need to be vectorized. Uses
248 that are used for address computation are not considered relevant. */
249 if (!exist_non_indexing_operands_for_use_p (use, stmt))
250 return true;
251
a70d6342 252 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
b8698a0f 253 {
8644a673 254 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
255 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
256 return false;
257 }
258
259 if (!def_stmt || gimple_nop_p (def_stmt))
260 return true;
261
262 def_bb = gimple_bb (def_stmt);
263 if (!flow_bb_inside_loop_p (loop, def_bb))
264 {
265 if (vect_print_dump_info (REPORT_DETAILS))
266 fprintf (vect_dump, "def_stmt is out of loop.");
267 return true;
268 }
269
270 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
271 DEF_STMT must have already been processed, because this should be the
272 only way that STMT, which is a reduction-phi, was put in the worklist,
273 as there should be no other uses for DEF_STMT in the loop. So we just
274 check that everything is as expected, and we are done. */
275 dstmt_vinfo = vinfo_for_stmt (def_stmt);
276 bb = gimple_bb (stmt);
277 if (gimple_code (stmt) == GIMPLE_PHI
278 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
279 && gimple_code (def_stmt) != GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
281 && bb->loop_father == def_bb->loop_father)
282 {
283 if (vect_print_dump_info (REPORT_DETAILS))
284 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
285 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
286 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
287 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
b8698a0f 288 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
8644a673 289 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
290 return true;
291 }
292
293 /* case 3a: outer-loop stmt defining an inner-loop stmt:
294 outer-loop-header-bb:
295 d = def_stmt
296 inner-loop:
297 stmt # use (d)
298 outer-loop-tail-bb:
299 ... */
300 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
301 {
302 if (vect_print_dump_info (REPORT_DETAILS))
303 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
7c5222ff 304
305 switch (relevant)
306 {
8644a673 307 case vect_unused_in_scope:
308 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
309 vect_used_in_scope : vect_unused_in_scope;
ebfd146a 310 break;
7c5222ff 311
ebfd146a 312 case vect_used_in_outer_by_reduction:
7c5222ff 313 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
314 relevant = vect_used_by_reduction;
315 break;
7c5222ff 316
ebfd146a 317 case vect_used_in_outer:
7c5222ff 318 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
8644a673 319 relevant = vect_used_in_scope;
ebfd146a 320 break;
7c5222ff 321
8644a673 322 case vect_used_in_scope:
323 break;
324
325 default:
326 gcc_unreachable ();
b8698a0f 327 }
328 }
329
330 /* case 3b: inner-loop stmt defining an outer-loop stmt:
331 outer-loop-header-bb:
332 ...
333 inner-loop:
334 d = def_stmt
06066f92 335 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
336 stmt # use (d) */
337 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
338 {
339 if (vect_print_dump_info (REPORT_DETAILS))
340 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
7c5222ff 341
342 switch (relevant)
343 {
8644a673 344 case vect_unused_in_scope:
b8698a0f 345 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
06066f92 346 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
a70d6342 347 vect_used_in_outer_by_reduction : vect_unused_in_scope;
348 break;
349
350 case vect_used_by_reduction:
351 relevant = vect_used_in_outer_by_reduction;
352 break;
353
8644a673 354 case vect_used_in_scope:
355 relevant = vect_used_in_outer;
356 break;
357
358 default:
359 gcc_unreachable ();
360 }
361 }
362
363 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
364 return true;
365}
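/* Worked example (exposition only; the SSA names are invented): suppose the
   worklist pops the relevant stmts

     t_4 = a[i_5];
     x_3 = t_4 * y_2;

   For the load, process_use is called on 'i_5'; that use only feeds the
   address computation, so case 1 applies and the stmt defining i_5 is left
   unmarked.  For the multiplication, the uses 't_4' and 'y_2' are real
   operands, so their defining stmts are handed to vect_mark_relevant with
   the same LIVE_P/RELEVANT values and end up on the worklist themselves.
   Cases 3a/3b only adjust RELEVANT when the def and the use live in
   different loops of a nest.  */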
366
367
368/* Function vect_mark_stmts_to_be_vectorized.
369
370 Not all stmts in the loop need to be vectorized. For example:
371
372 for i...
373 for j...
374 1. T0 = i + j
375 2. T1 = a[T0]
376
377 3. j = j + 1
378
379 Stmts 1 and 3 do not need to be vectorized, because loop control and
380 addressing of vectorized data-refs are handled differently.
381
382 This pass detects such stmts. */
383
384bool
385vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
386{
387 VEC(gimple,heap) *worklist;
388 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
389 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
390 unsigned int nbbs = loop->num_nodes;
391 gimple_stmt_iterator si;
392 gimple stmt;
393 unsigned int i;
394 stmt_vec_info stmt_vinfo;
395 basic_block bb;
396 gimple phi;
397 bool live_p;
398 enum vect_relevant relevant, tmp_relevant;
399 enum vect_def_type def_type;
400
401 if (vect_print_dump_info (REPORT_DETAILS))
402 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
403
404 worklist = VEC_alloc (gimple, heap, 64);
405
406 /* 1. Init worklist. */
407 for (i = 0; i < nbbs; i++)
408 {
409 bb = bbs[i];
410 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
b8698a0f 411 {
412 phi = gsi_stmt (si);
413 if (vect_print_dump_info (REPORT_DETAILS))
414 {
415 fprintf (vect_dump, "init: phi relevant? ");
416 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
417 }
418
419 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
420 vect_mark_relevant (&worklist, phi, relevant, live_p);
421 }
422 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
423 {
424 stmt = gsi_stmt (si);
425 if (vect_print_dump_info (REPORT_DETAILS))
426 {
427 fprintf (vect_dump, "init: stmt relevant? ");
428 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
b8698a0f 429 }
430
431 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
432 vect_mark_relevant (&worklist, stmt, relevant, live_p);
433 }
434 }
435
436 /* 2. Process_worklist */
437 while (VEC_length (gimple, worklist) > 0)
438 {
439 use_operand_p use_p;
440 ssa_op_iter iter;
441
442 stmt = VEC_pop (gimple, worklist);
443 if (vect_print_dump_info (REPORT_DETAILS))
444 {
445 fprintf (vect_dump, "worklist: examine stmt: ");
446 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
447 }
448
449 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
450 (DEF_STMT) as relevant/irrelevant and live/dead according to the
451 liveness and relevance properties of STMT. */
452 stmt_vinfo = vinfo_for_stmt (stmt);
453 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
454 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
455
456 /* Generally, the liveness and relevance properties of STMT are
457 propagated as is to the DEF_STMTs of its USEs:
458 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
459 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
460
461 One exception is when STMT has been identified as defining a reduction
462 variable; in this case we set the liveness/relevance as follows:
463 live_p = false
464 relevant = vect_used_by_reduction
465 This is because we distinguish between two kinds of relevant stmts -
466 those that are used by a reduction computation, and those that are
467 (also) used by a regular computation. This allows us later on to
468 identify stmts that are used solely by a reduction, and therefore the
7c5222ff 469 order of the results that they produce does not have to be kept. */
ebfd146a 470
471 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
472 tmp_relevant = relevant;
473 switch (def_type)
ebfd146a 474 {
475 case vect_reduction_def:
476 switch (tmp_relevant)
477 {
478 case vect_unused_in_scope:
479 relevant = vect_used_by_reduction;
480 break;
481
482 case vect_used_by_reduction:
483 if (gimple_code (stmt) == GIMPLE_PHI)
484 break;
485 /* fall through */
486
487 default:
488 if (vect_print_dump_info (REPORT_DETAILS))
489 fprintf (vect_dump, "unsupported use of reduction.");
490
491 VEC_free (gimple, heap, worklist);
492 return false;
493 }
494
b8698a0f 495 live_p = false;
06066f92 496 break;
b8698a0f 497
498 case vect_nested_cycle:
499 if (tmp_relevant != vect_unused_in_scope
500 && tmp_relevant != vect_used_in_outer_by_reduction
501 && tmp_relevant != vect_used_in_outer)
502 {
503 if (vect_print_dump_info (REPORT_DETAILS))
504 fprintf (vect_dump, "unsupported use of nested cycle.");
7c5222ff 505
506 VEC_free (gimple, heap, worklist);
507 return false;
508 }
7c5222ff 509
510 live_p = false;
511 break;
512
513 case vect_double_reduction_def:
514 if (tmp_relevant != vect_unused_in_scope
515 && tmp_relevant != vect_used_by_reduction)
516 {
7c5222ff 517 if (vect_print_dump_info (REPORT_DETAILS))
06066f92 518 fprintf (vect_dump, "unsupported use of double reduction.");
519
520 VEC_free (gimple, heap, worklist);
521 return false;
522 }
523
524 live_p = false;
b8698a0f 525 break;
7c5222ff 526
527 default:
528 break;
7c5222ff 529 }
b8698a0f 530
531 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
532 {
533 tree op = USE_FROM_PTR (use_p);
534 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
535 {
536 VEC_free (gimple, heap, worklist);
537 return false;
538 }
539 }
540 } /* while worklist */
541
542 VEC_free (gimple, heap, worklist);
543 return true;
544}
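/* Worked example (exposition only; the store below is invented to make the
   propagation visible):  assume the loop body contains

     1. T0 = i + j
     2. a[T0] = x_9

   Phase 1 pushes only stmt 2, which is relevant by itself because it has a
   vdef.  Phase 2 pops it and calls process_use on its operands: the stmt
   defining x_9 is marked vect_used_in_scope and pushed in turn, while the
   address computation in stmt 1 is skipped by case 1 of process_use, which
   is why stmts like T0 = i + j need no vector counterpart.  */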
545
546
547int
548cost_for_stmt (gimple stmt)
549{
550 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
551
552 switch (STMT_VINFO_TYPE (stmt_info))
553 {
554 case load_vec_info_type:
555 return TARG_SCALAR_LOAD_COST;
556 case store_vec_info_type:
557 return TARG_SCALAR_STORE_COST;
558 case op_vec_info_type:
559 case condition_vec_info_type:
560 case assignment_vec_info_type:
561 case reduc_vec_info_type:
562 case induc_vec_info_type:
563 case type_promotion_vec_info_type:
564 case type_demotion_vec_info_type:
565 case type_conversion_vec_info_type:
566 case call_vec_info_type:
567 return TARG_SCALAR_STMT_COST;
568 case undef_vec_info_type:
569 default:
570 gcc_unreachable ();
571 }
572}
573
b8698a0f 574/* Function vect_model_simple_cost.
ebfd146a 575
b8698a0f 576 Models cost for simple operations, i.e. those that only emit ncopies of a
577 single op. Right now, this does not account for multiple insns that could
578 be generated for the single vector op. We will handle that shortly. */
579
580void
b8698a0f 581vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
582 enum vect_def_type *dt, slp_tree slp_node)
583{
584 int i;
585 int inside_cost = 0, outside_cost = 0;
586
587 /* The SLP costs were already calculated during SLP tree build. */
588 if (PURE_SLP_STMT (stmt_info))
589 return;
590
591 inside_cost = ncopies * TARG_VEC_STMT_COST;
592
593 /* FORNOW: Assuming maximum 2 args per stmts. */
594 for (i = 0; i < 2; i++)
595 {
8644a673 596 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
b8698a0f 597 outside_cost += TARG_SCALAR_TO_VEC_COST;
ebfd146a 598 }
b8698a0f 599
600 if (vect_print_dump_info (REPORT_COST))
601 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
602 "outside_cost = %d .", inside_cost, outside_cost);
603
604 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
605 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
606 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
607}
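/* Worked example (exposition only; the TARG_* constants are
   target-configurable cost parameters):  a stmt  x = y + 3  vectorized with
   ncopies == 2, where dt[0] == vect_internal_def and
   dt[1] == vect_constant_def, is charged

     inside_cost  = 2 * TARG_VEC_STMT_COST
     outside_cost = 1 * TARG_SCALAR_TO_VEC_COST

   the outside part accounting for broadcasting the constant into a vector
   once, outside the loop.  */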
608
609
610/* Function vect_cost_strided_group_size
611
612 For strided load or store, return the group_size only if it is the first
613 load or store of a group, else return 1. This ensures that group size is
614 only returned once per group. */
615
616static int
617vect_cost_strided_group_size (stmt_vec_info stmt_info)
618{
619 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
620
621 if (first_stmt == STMT_VINFO_STMT (stmt_info))
622 return DR_GROUP_SIZE (stmt_info);
623
624 return 1;
625}
626
627
628/* Function vect_model_store_cost
629
630 Models cost for stores. In the case of strided accesses, one access
631 has the overhead of the strided access attributed to it. */
632
633void
b8698a0f 634vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
635 enum vect_def_type dt, slp_tree slp_node)
636{
637 int group_size;
638 int inside_cost = 0, outside_cost = 0;
639
640 /* The SLP costs were already calculated during SLP tree build. */
641 if (PURE_SLP_STMT (stmt_info))
642 return;
643
8644a673 644 if (dt == vect_constant_def || dt == vect_external_def)
645 outside_cost = TARG_SCALAR_TO_VEC_COST;
646
647 /* Strided access? */
b8698a0f 648 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
649 group_size = vect_cost_strided_group_size (stmt_info);
650 /* Not a strided access. */
651 else
652 group_size = 1;
653
b8698a0f 654 /* Is this an access in a group of stores, which provide strided access?
ebfd146a 655 If so, add in the cost of the permutes. */
b8698a0f 656 if (group_size > 1)
657 {
658 /* Uses a high and low interleave operation for each needed permute. */
b8698a0f 659 inside_cost = ncopies * exact_log2(group_size) * group_size
660 * TARG_VEC_STMT_COST;
661
662 if (vect_print_dump_info (REPORT_COST))
663 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
664 group_size);
665
666 }
667
668 /* Costs of the stores. */
669 inside_cost += ncopies * TARG_VEC_STORE_COST;
670
671 if (vect_print_dump_info (REPORT_COST))
672 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
673 "outside_cost = %d .", inside_cost, outside_cost);
674
675 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
676 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
677 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
678}
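/* Worked example (exposition only):  for an interleaved group of 4 stores
   (group_size == 4) with ncopies == 1, the first stmt of the group is
   charged

     inside_cost = 1 * exact_log2 (4) * 4 * TARG_VEC_STMT_COST   <-- permutes
                   + 1 * TARG_VEC_STORE_COST                     <-- the store

   i.e. 8 interleave (high/low) operations plus the vector store, while the
   remaining stmts of the group get group_size == 1 from
   vect_cost_strided_group_size and are only charged their own store.  */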
679
680
681/* Function vect_model_load_cost
682
683 Models cost for loads. In the case of strided accesses, the last access
684 has the overhead of the strided access attributed to it. Since unaligned
b8698a0f 685 accesses are supported for loads, we also account for the costs of the
686 access scheme chosen. */
687
688void
689vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
b8698a0f 690
691{
692 int group_size;
693 int alignment_support_scheme;
694 gimple first_stmt;
695 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
696 int inside_cost = 0, outside_cost = 0;
697
698 /* The SLP costs were already calculated during SLP tree build. */
699 if (PURE_SLP_STMT (stmt_info))
700 return;
701
702 /* Strided accesses? */
703 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
704 if (first_stmt && !slp_node)
705 {
706 group_size = vect_cost_strided_group_size (stmt_info);
707 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
708 }
709 /* Not a strided access. */
710 else
711 {
712 group_size = 1;
713 first_dr = dr;
714 }
715
716 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
717
b8698a0f 718 /* Is this an access in a group of loads providing strided access?
ebfd146a 719 If so, add in the cost of the permutes. */
b8698a0f 720 if (group_size > 1)
721 {
722 /* Uses even and odd extract operations for each needed permute. */
723 inside_cost = ncopies * exact_log2(group_size) * group_size
724 * TARG_VEC_STMT_COST;
725
726 if (vect_print_dump_info (REPORT_COST))
727 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
728 group_size);
729
730 }
731
732 /* The loads themselves. */
733 switch (alignment_support_scheme)
734 {
735 case dr_aligned:
736 {
737 inside_cost += ncopies * TARG_VEC_LOAD_COST;
738
739 if (vect_print_dump_info (REPORT_COST))
740 fprintf (vect_dump, "vect_model_load_cost: aligned.");
741
742 break;
743 }
744 case dr_unaligned_supported:
745 {
746 /* Here, we assign an additional cost for the unaligned load. */
747 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
748
749 if (vect_print_dump_info (REPORT_COST))
750 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
751 "hardware.");
752
753 break;
754 }
755 case dr_explicit_realign:
756 {
757 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
758
759 /* FIXME: If the misalignment remains fixed across the iterations of
760 the containing loop, the following cost should be added to the
761 outside costs. */
762 if (targetm.vectorize.builtin_mask_for_load)
763 inside_cost += TARG_VEC_STMT_COST;
764
765 break;
766 }
767 case dr_explicit_realign_optimized:
768 {
769 if (vect_print_dump_info (REPORT_COST))
770 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
771 "pipelined.");
772
773 /* Unaligned software pipeline has a load of an address, an initial
774 load, and possibly a mask operation to "prime" the loop. However,
775 if this is an access in a group of loads, which provide strided
776 access, then the above cost should only be considered for one
777 access in the group. Inside the loop, there is a load op
778 and a realignment op. */
779
780 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
781 {
782 outside_cost = 2*TARG_VEC_STMT_COST;
783 if (targetm.vectorize.builtin_mask_for_load)
784 outside_cost += TARG_VEC_STMT_COST;
785 }
786
787 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
788
789 break;
790 }
791
792 default:
793 gcc_unreachable ();
794 }
b8698a0f 795
796 if (vect_print_dump_info (REPORT_COST))
797 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
798 "outside_cost = %d .", inside_cost, outside_cost);
799
800 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
801 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
802 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
803}
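/* Worked example (exposition only):  a single unaligned load (not part of an
   interleaving group, ncopies == 1) using the dr_explicit_realign_optimized
   scheme is charged

     outside_cost = 2 * TARG_VEC_STMT_COST
                    (+ TARG_VEC_STMT_COST if a mask-for-load builtin exists)
     inside_cost  = 1 * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST)

   i.e. the priming stmts (address and first load) are paid for once outside
   the loop, and each copy inside the loop pays for one load plus one
   realignment operation.  */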
804
805
806/* Function vect_init_vector.
807
808 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
809 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
810 is not NULL. Otherwise, place the initialization at the loop preheader.
b8698a0f 811 Return the DEF of INIT_STMT.
812 It will be used in the vectorization of STMT. */
813
814tree
815vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
816 gimple_stmt_iterator *gsi)
817{
818 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
819 tree new_var;
820 gimple init_stmt;
821 tree vec_oprnd;
822 edge pe;
823 tree new_temp;
824 basic_block new_bb;
b8698a0f 825
ebfd146a 826 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
b8698a0f 827 add_referenced_var (new_var);
828 init_stmt = gimple_build_assign (new_var, vector_var);
829 new_temp = make_ssa_name (new_var, init_stmt);
830 gimple_assign_set_lhs (init_stmt, new_temp);
831
832 if (gsi)
833 vect_finish_stmt_generation (stmt, init_stmt, gsi);
834 else
835 {
836 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
b8698a0f 837
838 if (loop_vinfo)
839 {
840 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
841
842 if (nested_in_vect_loop_p (loop, stmt))
843 loop = loop->inner;
b8698a0f 844
845 pe = loop_preheader_edge (loop);
846 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
847 gcc_assert (!new_bb);
848 }
849 else
850 {
851 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
852 basic_block bb;
853 gimple_stmt_iterator gsi_bb_start;
854
855 gcc_assert (bb_vinfo);
856 bb = BB_VINFO_BB (bb_vinfo);
12aaf609 857 gsi_bb_start = gsi_after_labels (bb);
858 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
859 }
860 }
861
862 if (vect_print_dump_info (REPORT_DETAILS))
863 {
864 fprintf (vect_dump, "created new init_stmt: ");
865 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
866 }
867
868 vec_oprnd = gimple_assign_lhs (init_stmt);
869 return vec_oprnd;
870}
871
a70d6342 872
873/* Function vect_get_vec_def_for_operand.
874
875 OP is an operand in STMT. This function returns a (vector) def that will be
876 used in the vectorized stmt for STMT.
877
878 In the case that OP is an SSA_NAME which is defined in the loop, then
879 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
880
881 In case OP is an invariant or constant, a new stmt that creates a vector def
882 needs to be introduced. */
883
884tree
885vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
886{
887 tree vec_oprnd;
888 gimple vec_stmt;
889 gimple def_stmt;
890 stmt_vec_info def_stmt_info = NULL;
891 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
892 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
893 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
894 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
895 tree vec_inv;
896 tree vec_cst;
897 tree t = NULL_TREE;
898 tree def;
899 int i;
900 enum vect_def_type dt;
901 bool is_simple_use;
902 tree vector_type;
903
904 if (vect_print_dump_info (REPORT_DETAILS))
905 {
906 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
907 print_generic_expr (vect_dump, op, TDF_SLIM);
908 }
909
b8698a0f 910 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
a70d6342 911 &dt);
912 gcc_assert (is_simple_use);
913 if (vect_print_dump_info (REPORT_DETAILS))
914 {
915 if (def)
916 {
917 fprintf (vect_dump, "def = ");
918 print_generic_expr (vect_dump, def, TDF_SLIM);
919 }
920 if (def_stmt)
921 {
922 fprintf (vect_dump, " def_stmt = ");
923 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
924 }
925 }
926
927 switch (dt)
928 {
929 /* Case 1: operand is a constant. */
930 case vect_constant_def:
931 {
932 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
933 gcc_assert (vector_type);
934
b8698a0f 935 if (scalar_def)
936 *scalar_def = op;
937
938 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
939 if (vect_print_dump_info (REPORT_DETAILS))
940 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
941
942 for (i = nunits - 1; i >= 0; --i)
943 {
944 t = tree_cons (NULL_TREE, op, t);
945 }
946 vec_cst = build_vector (vector_type, t);
947 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
948 }
949
950 /* Case 2: operand is defined outside the loop - loop invariant. */
8644a673 951 case vect_external_def:
952 {
953 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
954 gcc_assert (vector_type);
955 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
956
b8698a0f 957 if (scalar_def)
958 *scalar_def = def;
959
960 /* Create 'vec_inv = {inv,inv,..,inv}' */
961 if (vect_print_dump_info (REPORT_DETAILS))
962 fprintf (vect_dump, "Create vector_inv.");
963
964 for (i = nunits - 1; i >= 0; --i)
965 {
966 t = tree_cons (NULL_TREE, def, t);
967 }
968
969 /* FIXME: use build_constructor directly. */
970 vec_inv = build_constructor_from_list (vector_type, t);
971 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
972 }
973
974 /* Case 3: operand is defined inside the loop. */
8644a673 975 case vect_internal_def:
ebfd146a 976 {
b8698a0f 977 if (scalar_def)
978 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
979
980 /* Get the def from the vectorized stmt. */
981 def_stmt_info = vinfo_for_stmt (def_stmt);
982 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
983 gcc_assert (vec_stmt);
984 if (gimple_code (vec_stmt) == GIMPLE_PHI)
985 vec_oprnd = PHI_RESULT (vec_stmt);
986 else if (is_gimple_call (vec_stmt))
987 vec_oprnd = gimple_call_lhs (vec_stmt);
988 else
989 vec_oprnd = gimple_assign_lhs (vec_stmt);
990 return vec_oprnd;
991 }
992
993 /* Case 4: operand is defined by a loop header phi - reduction */
994 case vect_reduction_def:
06066f92 995 case vect_double_reduction_def:
7c5222ff 996 case vect_nested_cycle:
997 {
998 struct loop *loop;
999
1000 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
b8698a0f 1001 loop = (gimple_bb (def_stmt))->loop_father;
1002
1003 /* Get the def before the loop */
1004 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1005 return get_initial_def_for_reduction (stmt, op, scalar_def);
1006 }
1007
1008 /* Case 5: operand is defined by loop-header phi - induction. */
1009 case vect_induction_def:
1010 {
1011 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1012
1013 /* Get the def from the vectorized stmt. */
1014 def_stmt_info = vinfo_for_stmt (def_stmt);
1015 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1016 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1017 vec_oprnd = PHI_RESULT (vec_stmt);
1018 return vec_oprnd;
1019 }
1020
1021 default:
1022 gcc_unreachable ();
1023 }
1024}
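/* Illustrative example (exposition only):  for a scalar stmt  x = y + 3
   with a four-element vector type, asking for the vector def of the
   operand '3' takes case 1 above: a stmt building the vector constant
   {3, 3, 3, 3} is created by vect_init_vector (in the loop preheader, since
   no GSI is passed) and its lhs is returned.  Asking for the vector def of
   'y', when y is defined by a stmt inside the loop, takes case 3 and simply
   returns the lhs of the already-vectorized def stmt recorded in
   STMT_VINFO_VEC_STMT.  */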
1025
1026
1027/* Function vect_get_vec_def_for_stmt_copy
1028
1029 Return a vector-def for an operand. This function is used when the
1030 vectorized stmt to be created (by the caller to this function) is a "copy"
1031 created in case the vectorized result cannot fit in one vector, and several
1032 copies of the vector-stmt are required. In this case the vector-def is
ebfd146a 1033 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
b8698a0f 1034 of the stmt that defines VEC_OPRND.
1035 DT is the type of the vector def VEC_OPRND.
1036
1037 Context:
1038 In case the vectorization factor (VF) is bigger than the number
1039 of elements that can fit in a vectype (nunits), we have to generate
1040 more than one vector stmt to vectorize the scalar stmt. This situation
b8698a0f 1041 arises when there are multiple data-types operated upon in the loop; the
1042 smallest data-type determines the VF, and as a result, when vectorizing
1043 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1044 vector stmt (each computing a vector of 'nunits' results, and together
b8698a0f 1045 computing 'VF' results in each iteration). This function is called when
1046 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1047 which VF=16 and nunits=4, so the number of copies required is 4):
1048
1049 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
b8698a0f 1050
1051 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1052 VS1.1: vx.1 = memref1 VS1.2
1053 VS1.2: vx.2 = memref2 VS1.3
b8698a0f 1054 VS1.3: vx.3 = memref3
1055
1056 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1057 VSnew.1: vz1 = vx.1 + ... VSnew.2
1058 VSnew.2: vz2 = vx.2 + ... VSnew.3
1059 VSnew.3: vz3 = vx.3 + ...
1060
1061 The vectorization of S1 is explained in vectorizable_load.
1062 The vectorization of S2:
1063 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1064 the function 'vect_get_vec_def_for_operand' is called to
1065 get the relevant vector-def for each operand of S2. For operand x it
1066 returns the vector-def 'vx.0'.
1067
1068 To create the remaining copies of the vector-stmt (VSnew.j), this
1069 function is called to get the relevant vector-def for each operand. It is
1070 obtained from the respective VS1.j stmt, which is recorded in the
1071 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1072
1073 For example, to obtain the vector-def 'vx.1' in order to create the
1074 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1075 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1076 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1077 and return its def ('vx.1').
1078 Overall, to create the above sequence this function will be called 3 times:
1079 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1080 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1081 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1082
1083tree
1084vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1085{
1086 gimple vec_stmt_for_operand;
1087 stmt_vec_info def_stmt_info;
1088
1089 /* Do nothing; can reuse same def. */
8644a673 1090 if (dt == vect_external_def || dt == vect_constant_def )
1091 return vec_oprnd;
1092
1093 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1094 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1095 gcc_assert (def_stmt_info);
1096 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1097 gcc_assert (vec_stmt_for_operand);
1098 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1099 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1100 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1101 else
1102 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1103 return vec_oprnd;
1104}
1105
1106
1107/* Get vectorized definitions for the operands to create a copy of an original
1108 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1109
1110static void
1111vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1112 VEC(tree,heap) **vec_oprnds0,
1113 VEC(tree,heap) **vec_oprnds1)
1114{
1115 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1116
1117 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1118 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1119
1120 if (vec_oprnds1 && *vec_oprnds1)
1121 {
1122 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1123 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1124 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1125 }
1126}
1127
1128
1129/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1130
1131static void
1132vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1133 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1134 slp_tree slp_node)
1135{
1136 if (slp_node)
1137 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1138 else
1139 {
1140 tree vec_oprnd;
1141
1142 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1143 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1144 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1145
1146 if (op1)
1147 {
1148 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1149 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1150 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1151 }
1152 }
1153}
1154
1155
1156/* Function vect_finish_stmt_generation.
1157
1158 Insert a new stmt. */
1159
1160void
1161vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1162 gimple_stmt_iterator *gsi)
1163{
1164 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1165 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 1166 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1167
1168 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1169
1170 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1171
b8698a0f 1172 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
a70d6342 1173 bb_vinfo));
1174
1175 if (vect_print_dump_info (REPORT_DETAILS))
1176 {
1177 fprintf (vect_dump, "add new stmt: ");
1178 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1179 }
1180
1181 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1182}
1183
1184/* Checks if CALL can be vectorized in type VECTYPE. Returns
1185 a function declaration if the target has a vectorized version
1186 of the function, or NULL_TREE if the function cannot be vectorized. */
1187
1188tree
1189vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1190{
1191 tree fndecl = gimple_call_fndecl (call);
1192
1193 /* We only handle functions that do not read or clobber memory -- i.e.
1194 const or novops ones. */
1195 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1196 return NULL_TREE;
1197
1198 if (!fndecl
1199 || TREE_CODE (fndecl) != FUNCTION_DECL
1200 || !DECL_BUILT_IN (fndecl))
1201 return NULL_TREE;
1202
62f7fd21 1203 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1204 vectype_in);
1205}
1206
1207/* Function vectorizable_call.
1208
1209 Check if STMT performs a function call that can be vectorized.
1210 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1211 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1212 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1213
1214static bool
1215vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1216{
1217 tree vec_dest;
1218 tree scalar_dest;
1219 tree op, type;
1220 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1221 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1222 tree vectype_out, vectype_in;
1223 int nunits_in;
1224 int nunits_out;
1225 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1226 tree fndecl, new_temp, def, rhs_type, lhs_type;
1227 gimple def_stmt;
1228 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
63827fb8 1229 gimple new_stmt = NULL;
1230 int ncopies, j;
1231 VEC(tree, heap) *vargs = NULL;
1232 enum { NARROW, NONE, WIDEN } modifier;
1233 size_t i, nargs;
1234
1235 /* FORNOW: unsupported in basic block SLP. */
1236 gcc_assert (loop_vinfo);
b8698a0f 1237
1238 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1239 return false;
1240
8644a673 1241 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1242 return false;
1243
1244 /* FORNOW: SLP not supported. */
1245 if (STMT_SLP_TYPE (stmt_info))
1246 return false;
1247
1248 /* Is STMT a vectorizable call? */
1249 if (!is_gimple_call (stmt))
1250 return false;
1251
1252 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1253 return false;
1254
1255 /* Process function arguments. */
1256 rhs_type = NULL_TREE;
1257 nargs = gimple_call_num_args (stmt);
1258
1259 /* Bail out if the function has more than two arguments, we
1260 do not have interesting builtin functions to vectorize with
1261 more than two arguments. No arguments is also not good. */
1262 if (nargs == 0 || nargs > 2)
1263 return false;
1264
1265 for (i = 0; i < nargs; i++)
1266 {
1267 op = gimple_call_arg (stmt, i);
1268
1269 /* We can only handle calls with arguments of the same type. */
1270 if (rhs_type
1271 && rhs_type != TREE_TYPE (op))
1272 {
1273 if (vect_print_dump_info (REPORT_DETAILS))
1274 fprintf (vect_dump, "argument types differ.");
1275 return false;
1276 }
1277 rhs_type = TREE_TYPE (op);
1278
a70d6342 1279 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1280 {
1281 if (vect_print_dump_info (REPORT_DETAILS))
1282 fprintf (vect_dump, "use not simple.");
1283 return false;
1284 }
1285 }
1286
1287 vectype_in = get_vectype_for_scalar_type (rhs_type);
1288 if (!vectype_in)
1289 return false;
1290 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1291
1292 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1293 vectype_out = get_vectype_for_scalar_type (lhs_type);
1294 if (!vectype_out)
1295 return false;
1296 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1297
1298 /* FORNOW */
1299 if (nunits_in == nunits_out / 2)
1300 modifier = NARROW;
1301 else if (nunits_out == nunits_in)
1302 modifier = NONE;
1303 else if (nunits_out == nunits_in / 2)
1304 modifier = WIDEN;
1305 else
1306 return false;
1307
1308 /* For now, we only vectorize functions if a target specific builtin
1309 is available. TODO -- in some cases, it might be profitable to
1310 insert the calls for pieces of the vector, in order to be able
1311 to vectorize other operations in the loop. */
1312 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1313 if (fndecl == NULL_TREE)
1314 {
1315 if (vect_print_dump_info (REPORT_DETAILS))
1316 fprintf (vect_dump, "function is not vectorizable.");
1317
1318 return false;
1319 }
1320
5006671f 1321 gcc_assert (!gimple_vuse (stmt));
1322
1323 if (modifier == NARROW)
1324 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1325 else
1326 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1327
1328 /* Sanity check: make sure that at least one copy of the vectorized stmt
1329 needs to be generated. */
1330 gcc_assert (ncopies >= 1);
1331
1332 if (!vec_stmt) /* transformation not required. */
1333 {
1334 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1335 if (vect_print_dump_info (REPORT_DETAILS))
1336 fprintf (vect_dump, "=== vectorizable_call ===");
1337 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1338 return true;
1339 }
1340
1341 /** Transform. **/
1342
1343 if (vect_print_dump_info (REPORT_DETAILS))
1344 fprintf (vect_dump, "transform operation.");
1345
1346 /* Handle def. */
1347 scalar_dest = gimple_call_lhs (stmt);
1348 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1349
1350 prev_stmt_info = NULL;
1351 switch (modifier)
1352 {
1353 case NONE:
1354 for (j = 0; j < ncopies; ++j)
1355 {
1356 /* Build argument list for the vectorized call. */
1357 if (j == 0)
1358 vargs = VEC_alloc (tree, heap, nargs);
1359 else
1360 VEC_truncate (tree, vargs, 0);
1361
1362 for (i = 0; i < nargs; i++)
1363 {
1364 op = gimple_call_arg (stmt, i);
1365 if (j == 0)
1366 vec_oprnd0
1367 = vect_get_vec_def_for_operand (op, stmt, NULL);
1368 else
1369 {
1370 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1371 vec_oprnd0
1372 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1373 }
1374
1375 VEC_quick_push (tree, vargs, vec_oprnd0);
1376 }
1377
1378 new_stmt = gimple_build_call_vec (fndecl, vargs);
1379 new_temp = make_ssa_name (vec_dest, new_stmt);
1380 gimple_call_set_lhs (new_stmt, new_temp);
1381
1382 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7411b8f0 1383 mark_symbols_for_renaming (new_stmt);
1384
1385 if (j == 0)
1386 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1387 else
1388 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1389
1390 prev_stmt_info = vinfo_for_stmt (new_stmt);
1391 }
1392
1393 break;
1394
1395 case NARROW:
1396 for (j = 0; j < ncopies; ++j)
1397 {
1398 /* Build argument list for the vectorized call. */
1399 if (j == 0)
1400 vargs = VEC_alloc (tree, heap, nargs * 2);
1401 else
1402 VEC_truncate (tree, vargs, 0);
1403
1404 for (i = 0; i < nargs; i++)
1405 {
1406 op = gimple_call_arg (stmt, i);
1407 if (j == 0)
1408 {
1409 vec_oprnd0
1410 = vect_get_vec_def_for_operand (op, stmt, NULL);
1411 vec_oprnd1
63827fb8 1412 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1413 }
1414 else
1415 {
63827fb8 1416 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
ebfd146a 1417 vec_oprnd0
63827fb8 1418 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
ebfd146a 1419 vec_oprnd1
63827fb8 1420 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1421 }
1422
1423 VEC_quick_push (tree, vargs, vec_oprnd0);
1424 VEC_quick_push (tree, vargs, vec_oprnd1);
1425 }
1426
1427 new_stmt = gimple_build_call_vec (fndecl, vargs);
1428 new_temp = make_ssa_name (vec_dest, new_stmt);
1429 gimple_call_set_lhs (new_stmt, new_temp);
1430
1431 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7411b8f0 1432 mark_symbols_for_renaming (new_stmt);
1433
1434 if (j == 0)
1435 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1436 else
1437 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1438
1439 prev_stmt_info = vinfo_for_stmt (new_stmt);
1440 }
1441
1442 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1443
1444 break;
1445
1446 case WIDEN:
1447 /* No current target implements this case. */
1448 return false;
1449 }
1450
1451 VEC_free (tree, heap, vargs);
1452
1453 /* Update the exception handling table with the vector stmt if necessary. */
1454 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1455 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1456
1457 /* The call in STMT might prevent it from being removed in dce.
1458 We however cannot remove it here, due to the way the ssa name
1459 it defines is mapped to the new definition. So just replace
1460 rhs of the statement with something harmless. */
1461
1462 type = TREE_TYPE (scalar_dest);
1463 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1464 fold_convert (type, integer_zero_node));
1465 set_vinfo_for_stmt (new_stmt, stmt_info);
1466 set_vinfo_for_stmt (stmt, NULL);
1467 STMT_VINFO_STMT (stmt_info) = new_stmt;
1468 gsi_replace (gsi, new_stmt, false);
1469 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1470
1471 return true;
1472}
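/* Illustrative example (exposition only, assuming the target provides the
   needed builtin):  with a vectorization factor of 4 and a call
   y = sinf (x)  where both x and y are 'float', nunits_in == nunits_out,
   so modifier == NONE and ncopies == VF / nunits_in; the scalar call is
   replaced by ncopies calls to the builtin returned by
   vectorizable_function.  If instead the result elements are narrower than
   the argument elements (nunits_in == nunits_out / 2), the NARROW path is
   taken and every vectorized call consumes two argument vectors per result
   vector.  */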
1473
1474
1475/* Function vect_gen_widened_results_half
1476
1477 Create a vector stmt whose code, type, number of arguments, and result
b8698a0f 1478 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1479 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1480 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1481 needs to be created (DECL is a function-decl of a target-builtin).
1482 STMT is the original scalar stmt that we are vectorizing. */
1483
1484static gimple
1485vect_gen_widened_results_half (enum tree_code code,
1486 tree decl,
1487 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1488 tree vec_dest, gimple_stmt_iterator *gsi,
1489 gimple stmt)
b8698a0f 1490{
ebfd146a 1491 gimple new_stmt;
1492 tree new_temp;
1493
1494 /* Generate half of the widened result: */
1495 if (code == CALL_EXPR)
1496 {
1497 /* Target specific support */
1498 if (op_type == binary_op)
1499 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1500 else
1501 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1502 new_temp = make_ssa_name (vec_dest, new_stmt);
1503 gimple_call_set_lhs (new_stmt, new_temp);
1504 }
1505 else
ebfd146a 1506 {
1507 /* Generic support */
1508 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1509 if (op_type != binary_op)
1510 vec_oprnd1 = NULL;
1511 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1512 vec_oprnd1);
1513 new_temp = make_ssa_name (vec_dest, new_stmt);
1514 gimple_assign_set_lhs (new_stmt, new_temp);
b8698a0f 1515 }
1516 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1517
1518 return new_stmt;
1519}
1520
1521
1522/* Check if STMT performs a conversion operation that can be vectorized.
1523 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1524 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1525 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1526
1527static bool
1528vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1529 gimple *vec_stmt, slp_tree slp_node)
1530{
1531 tree vec_dest;
1532 tree scalar_dest;
1533 tree op0;
1534 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1535 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1536 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1537 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1538 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1539 tree new_temp;
1540 tree def;
1541 gimple def_stmt;
1542 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1543 gimple new_stmt = NULL;
1544 stmt_vec_info prev_stmt_info;
1545 int nunits_in;
1546 int nunits_out;
1547 tree vectype_out, vectype_in;
1548 int ncopies, j;
1549 tree rhs_type, lhs_type;
1550 tree builtin_decl;
1551 enum { NARROW, NONE, WIDEN } modifier;
1552 int i;
1553 VEC(tree,heap) *vec_oprnds0 = NULL;
1554 tree vop0;
1555 tree integral_type;
1556 VEC(tree,heap) *dummy = NULL;
1557 int dummy_int;
1558
1559 /* Is STMT a vectorizable conversion? */
1560
1561 /* FORNOW: unsupported in basic block SLP. */
1562 gcc_assert (loop_vinfo);
b8698a0f 1563
1564 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1565 return false;
1566
8644a673 1567 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1568 return false;
1569
1570 if (!is_gimple_assign (stmt))
1571 return false;
1572
1573 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1574 return false;
1575
1576 code = gimple_assign_rhs_code (stmt);
1577 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1578 return false;
1579
1580 /* Check types of lhs and rhs. */
1581 op0 = gimple_assign_rhs1 (stmt);
1582 rhs_type = TREE_TYPE (op0);
1583 vectype_in = get_vectype_for_scalar_type (rhs_type);
1584 if (!vectype_in)
1585 return false;
1586 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1587
1588 scalar_dest = gimple_assign_lhs (stmt);
1589 lhs_type = TREE_TYPE (scalar_dest);
1590 vectype_out = get_vectype_for_scalar_type (lhs_type);
1591 if (!vectype_out)
1592 return false;
1593 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1594
1595 /* FORNOW */
1596 if (nunits_in == nunits_out / 2)
1597 modifier = NARROW;
1598 else if (nunits_out == nunits_in)
1599 modifier = NONE;
1600 else if (nunits_out == nunits_in / 2)
1601 modifier = WIDEN;
1602 else
1603 return false;
1604
1605 if (modifier == NONE)
1606 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1607
1608 /* Bail out if the types are both integral or non-integral. */
1609 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1610 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1611 return false;
1612
1613 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1614
1615 if (modifier == NARROW)
1616 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1617 else
1618 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1619
1620 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1621 this, so we can safely override NCOPIES with 1 here. */
1622 if (slp_node)
1623 ncopies = 1;
b8698a0f 1624
1625 /* Sanity check: make sure that at least one copy of the vectorized stmt
1626 needs to be generated. */
1627 gcc_assert (ncopies >= 1);
1628
1629 /* Check the operands of the operation. */
a70d6342 1630 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1631 {
1632 if (vect_print_dump_info (REPORT_DETAILS))
1633 fprintf (vect_dump, "use not simple.");
1634 return false;
1635 }
1636
1637 /* Supportable by target? */
1638 if ((modifier == NONE
1639 && !targetm.vectorize.builtin_conversion (code, integral_type))
1640 || (modifier == WIDEN
1641 && !supportable_widening_operation (code, stmt, vectype_in,
1642 &decl1, &decl2,
1643 &code1, &code2,
1644 &dummy_int, &dummy))
1645 || (modifier == NARROW
1646 && !supportable_narrowing_operation (code, stmt, vectype_in,
1647 &code1, &dummy_int, &dummy)))
1648 {
1649 if (vect_print_dump_info (REPORT_DETAILS))
1650 fprintf (vect_dump, "conversion not supported by target.");
1651 return false;
1652 }
1653
1654 if (modifier != NONE)
1655 {
1656 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1657 /* FORNOW: SLP not supported. */
1658 if (STMT_SLP_TYPE (stmt_info))
b8698a0f 1659 return false;
1660 }
1661
1662 if (!vec_stmt) /* transformation not required. */
1663 {
1664 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1665 return true;
1666 }
1667
1668 /** Transform. **/
1669 if (vect_print_dump_info (REPORT_DETAILS))
1670 fprintf (vect_dump, "transform conversion.");
1671
1672 /* Handle def. */
1673 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1674
1675 if (modifier == NONE && !slp_node)
1676 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1677
1678 prev_stmt_info = NULL;
1679 switch (modifier)
1680 {
1681 case NONE:
1682 for (j = 0; j < ncopies; j++)
1683 {
ebfd146a 1684 if (j == 0)
b8698a0f 1685 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1686 else
1687 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1688
1689 builtin_decl =
1690 targetm.vectorize.builtin_conversion (code, integral_type);
1691 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
b8698a0f 1692 {
1693 /* Arguments are ready. Create the new vector stmt. */
1694 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1695 new_temp = make_ssa_name (vec_dest, new_stmt);
1696 gimple_call_set_lhs (new_stmt, new_temp);
1697 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1698 if (slp_node)
1699 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1700 }
1701
1702 if (j == 0)
1703 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1704 else
1705 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1706 prev_stmt_info = vinfo_for_stmt (new_stmt);
1707 }
1708 break;
1709
1710 case WIDEN:
1711 /* In case the vectorization factor (VF) is bigger than the number
1712 of elements that we can fit in a vectype (nunits), we have to
1713 generate more than one vector stmt - i.e - we need to "unroll"
1714 the vector stmt by a factor VF/nunits. */
1715 for (j = 0; j < ncopies; j++)
1716 {
1717 if (j == 0)
1718 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1719 else
1720 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1721
1722 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1723
1724 /* Generate first half of the widened result: */
1725 new_stmt
b8698a0f 1726 = vect_gen_widened_results_half (code1, decl1,
1727 vec_oprnd0, vec_oprnd1,
1728 unary_op, vec_dest, gsi, stmt);
1729 if (j == 0)
1730 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1731 else
1732 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1733 prev_stmt_info = vinfo_for_stmt (new_stmt);
1734
1735 /* Generate second half of the widened result: */
1736 new_stmt
1737 = vect_gen_widened_results_half (code2, decl2,
1738 vec_oprnd0, vec_oprnd1,
1739 unary_op, vec_dest, gsi, stmt);
1740 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1741 prev_stmt_info = vinfo_for_stmt (new_stmt);
1742 }
1743 break;
1744
1745 case NARROW:
1746 /* In case the vectorization factor (VF) is bigger than the number
1747 of elements that we can fit in a vectype (nunits), we have to
1748 generate more than one vector stmt - i.e - we need to "unroll"
1749 the vector stmt by a factor VF/nunits. */
1750 for (j = 0; j < ncopies; j++)
1751 {
1752 /* Handle uses. */
1753 if (j == 0)
1754 {
1755 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1756 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1757 }
1758 else
1759 {
1760 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1761 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1762 }
1763
1764 /* Arguments are ready. Create the new vector stmt. */
1765 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1766 vec_oprnd1);
1767 new_temp = make_ssa_name (vec_dest, new_stmt);
1768 gimple_assign_set_lhs (new_stmt, new_temp);
1769 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1770
1771 if (j == 0)
1772 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1773 else
1774 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1775
1776 prev_stmt_info = vinfo_for_stmt (new_stmt);
1777 }
1778
1779 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1780 }
1781
1782 if (vec_oprnds0)
b8698a0f 1783 VEC_free (tree, heap, vec_oprnds0);
1784
1785 return true;
1786}
1787/* Function vectorizable_assignment.
1788
1789 Check if STMT performs an assignment (copy) that can be vectorized.
1790 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1791 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1792 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1793
1794static bool
1795vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1796 gimple *vec_stmt, slp_tree slp_node)
1797{
1798 tree vec_dest;
1799 tree scalar_dest;
1800 tree op;
1801 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1802 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1803 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1804 tree new_temp;
1805 tree def;
1806 gimple def_stmt;
1807 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1808 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1809 int ncopies;
f18b55bd 1810 int i, j;
1811 VEC(tree,heap) *vec_oprnds = NULL;
1812 tree vop;
a70d6342 1813 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1814 gimple new_stmt = NULL;
1815 stmt_vec_info prev_stmt_info = NULL;
1816
1817 /* Multiple types in SLP are handled by creating the appropriate number of
1818 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1819 case of SLP. */
1820 if (slp_node)
1821 ncopies = 1;
1822 else
1823 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1824
1825 gcc_assert (ncopies >= 1);
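   /* For example, with a vectorization factor of 8 and a V4SI vectype
      (nunits == 4), two copies of the vector assignment are generated for
      each scalar assignment.  */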
ebfd146a 1826
a70d6342 1827 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1828 return false;
1829
8644a673 1830 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1831 return false;
1832
1833 /* Is vectorizable assignment? */
1834 if (!is_gimple_assign (stmt))
1835 return false;
1836
1837 scalar_dest = gimple_assign_lhs (stmt);
1838 if (TREE_CODE (scalar_dest) != SSA_NAME)
1839 return false;
1840
1841 if (gimple_assign_single_p (stmt)
1842 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1843 op = gimple_assign_rhs1 (stmt);
1844 else
1845 return false;
1846
a70d6342 1847 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1848 {
1849 if (vect_print_dump_info (REPORT_DETAILS))
1850 fprintf (vect_dump, "use not simple.");
1851 return false;
1852 }
1853
1854 if (!vec_stmt) /* transformation not required. */
1855 {
1856 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1857 if (vect_print_dump_info (REPORT_DETAILS))
1858 fprintf (vect_dump, "=== vectorizable_assignment ===");
1859 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1860 return true;
1861 }
1862
1863 /** Transform. **/
1864 if (vect_print_dump_info (REPORT_DETAILS))
1865 fprintf (vect_dump, "transform assignment.");
1866
1867 /* Handle def. */
1868 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1869
1870 /* Handle use. */
f18b55bd 1871 for (j = 0; j < ncopies; j++)
ebfd146a 1872 {
1873 /* Handle uses. */
1874 if (j == 0)
1875 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1876 else
1877 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1878
 1879 /* Arguments are ready. Create the new vector stmt. */
1880 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1881 {
1882 new_stmt = gimple_build_assign (vec_dest, vop);
1883 new_temp = make_ssa_name (vec_dest, new_stmt);
1884 gimple_assign_set_lhs (new_stmt, new_temp);
1885 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1886 if (slp_node)
1887 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1888 }
1889
1890 if (slp_node)
1891 continue;
1892
1893 if (j == 0)
1894 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1895 else
1896 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1897
1898 prev_stmt_info = vinfo_for_stmt (new_stmt);
1899 }
1900
1901 VEC_free (tree, heap, vec_oprnds);
1902 return true;
1903}
1904
1905/* Function vectorizable_operation.
1906
1907 Check if STMT performs a binary or unary operation that can be vectorized.
1908 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1909 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1910 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1911
1912static bool
1913vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1914 gimple *vec_stmt, slp_tree slp_node)
1915{
1916 tree vec_dest;
1917 tree scalar_dest;
1918 tree op0, op1 = NULL;
1919 tree vec_oprnd1 = NULL_TREE;
1920 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1921 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1922 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1923 enum tree_code code;
1924 enum machine_mode vec_mode;
1925 tree new_temp;
1926 int op_type;
1927 optab optab;
1928 int icode;
1929 enum machine_mode optab_op2_mode;
1930 tree def;
1931 gimple def_stmt;
1932 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1933 gimple new_stmt = NULL;
1934 stmt_vec_info prev_stmt_info;
1935 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1936 int nunits_out;
1937 tree vectype_out;
1938 int ncopies;
1939 int j, i;
1940 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1941 tree vop0, vop1;
1942 unsigned int k;
ebfd146a 1943 bool scalar_shift_arg = false;
1944 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1945 int vf;
1946
1947 if (loop_vinfo)
1948 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1949 else
3533e503 1950 vf = 1;
1951
1952 /* Multiple types in SLP are handled by creating the appropriate number of
1953 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1954 case of SLP. */
1955 if (slp_node)
1956 ncopies = 1;
1957 else
1958 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1959
1960 gcc_assert (ncopies >= 1);
1961
a70d6342 1962 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1963 return false;
1964
8644a673 1965 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1966 return false;
1967
1968 /* Is STMT a vectorizable binary/unary operation? */
1969 if (!is_gimple_assign (stmt))
1970 return false;
1971
1972 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1973 return false;
1974
1975 scalar_dest = gimple_assign_lhs (stmt);
1976 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1977 if (!vectype_out)
1978 return false;
1979 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1980 if (nunits_out != nunits_in)
1981 return false;
1982
1983 code = gimple_assign_rhs_code (stmt);
1984
1985 /* For pointer addition, we should use the normal plus for
1986 the vector addition. */
1987 if (code == POINTER_PLUS_EXPR)
1988 code = PLUS_EXPR;
1989
1990 /* Support only unary or binary operations. */
1991 op_type = TREE_CODE_LENGTH (code);
1992 if (op_type != unary_op && op_type != binary_op)
1993 {
1994 if (vect_print_dump_info (REPORT_DETAILS))
1995 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1996 return false;
1997 }
1998
1999 op0 = gimple_assign_rhs1 (stmt);
a70d6342 2000 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
2001 {
2002 if (vect_print_dump_info (REPORT_DETAILS))
2003 fprintf (vect_dump, "use not simple.");
2004 return false;
2005 }
2006
2007 if (op_type == binary_op)
2008 {
2009 op1 = gimple_assign_rhs2 (stmt);
b8698a0f 2010 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
a70d6342 2011 &dt[1]))
2012 {
2013 if (vect_print_dump_info (REPORT_DETAILS))
2014 fprintf (vect_dump, "use not simple.");
2015 return false;
2016 }
2017 }
2018
2019 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2020 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2021 shift optabs. */
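   /* For example, "x[i] << 3" or "x[i] << n" with a loop-invariant N can use
      a vector shifted by scalar insn, while "x[i] << y[i]" needs the
      vector/vector variant because every element has its own shift
      amount.  */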
2022 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2023 || code == RROTATE_EXPR)
2024 {
ebfd146a 2025 /* vector shifted by vector */
8644a673 2026 if (dt[1] == vect_internal_def)
2027 {
2028 optab = optab_for_tree_code (code, vectype, optab_vector);
2029 if (vect_print_dump_info (REPORT_DETAILS))
2030 fprintf (vect_dump, "vector/vector shift/rotate found.");
2031 }
2032
2033 /* See if the machine has a vector shifted by scalar insn and if not
 2034 then see if it has a vector shifted by vector insn. */
8644a673 2035 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2036 {
2037 optab = optab_for_tree_code (code, vectype, optab_scalar);
2038 if (optab
2039 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2040 != CODE_FOR_nothing))
2041 {
2042 scalar_shift_arg = true;
2043 if (vect_print_dump_info (REPORT_DETAILS))
2044 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2045 }
2046 else
2047 {
2048 optab = optab_for_tree_code (code, vectype, optab_vector);
ad6c0864 2049 if (optab
2050 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2051 != CODE_FOR_nothing))
2052 {
2053 if (vect_print_dump_info (REPORT_DETAILS))
2054 fprintf (vect_dump, "vector/vector shift/rotate found.");
2055
2056 /* Unlike the other binary operators, shifts/rotates have
2057 the rhs being int, instead of the same type as the lhs,
2058 so make sure the scalar is the right type if we are
2059 dealing with vectors of short/char. */
2060 if (dt[1] == vect_constant_def)
2061 op1 = fold_convert (TREE_TYPE (vectype), op1);
2062 }
2063 }
2064 }
2065
2066 else
2067 {
2068 if (vect_print_dump_info (REPORT_DETAILS))
2069 fprintf (vect_dump, "operand mode requires invariant argument.");
2070 return false;
2071 }
2072 }
2073 else
2074 optab = optab_for_tree_code (code, vectype, optab_default);
2075
2076 /* Supportable by target? */
2077 if (!optab)
2078 {
2079 if (vect_print_dump_info (REPORT_DETAILS))
2080 fprintf (vect_dump, "no optab.");
2081 return false;
2082 }
2083 vec_mode = TYPE_MODE (vectype);
2084 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2085 if (icode == CODE_FOR_nothing)
2086 {
2087 if (vect_print_dump_info (REPORT_DETAILS))
2088 fprintf (vect_dump, "op not supported by target.");
2089 /* Check only during analysis. */
2090 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
a70d6342 2091 || (vf < vect_min_worthwhile_factor (code)
2092 && !vec_stmt))
2093 return false;
2094 if (vect_print_dump_info (REPORT_DETAILS))
2095 fprintf (vect_dump, "proceeding using word mode.");
2096 }
2097
2098 /* Worthwhile without SIMD support? Check only during analysis. */
2099 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
a70d6342 2100 && vf < vect_min_worthwhile_factor (code)
2101 && !vec_stmt)
2102 {
2103 if (vect_print_dump_info (REPORT_DETAILS))
2104 fprintf (vect_dump, "not worthwhile without SIMD support.");
2105 return false;
2106 }
2107
2108 if (!vec_stmt) /* transformation not required. */
2109 {
2110 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 fprintf (vect_dump, "=== vectorizable_operation ===");
2113 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2114 return true;
2115 }
2116
2117 /** Transform. **/
2118
2119 if (vect_print_dump_info (REPORT_DETAILS))
2120 fprintf (vect_dump, "transform binary/unary operation.");
2121
2122 /* Handle def. */
2123 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2124
b8698a0f 2125 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2126 created in the previous stages of the recursion, so no allocation is
2127 needed, except for the case of shift with scalar shift argument. In that
2128 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2129 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2130 In case of loop-based vectorization we allocate VECs of size 1. We
2131 allocate VEC_OPRNDS1 only in case of binary operation. */
2132 if (!slp_node)
2133 {
2134 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2135 if (op_type == binary_op)
2136 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2137 }
2138 else if (scalar_shift_arg)
b8698a0f 2139 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2140
2141 /* In case the vectorization factor (VF) is bigger than the number
2142 of elements that we can fit in a vectype (nunits), we have to generate
2143 more than one vector stmt - i.e - we need to "unroll" the
2144 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2145 from one copy of the vector stmt to the next, in the field
2146 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2147 stages to find the correct vector defs to be used when vectorizing
2148 stmts that use the defs of the current stmt. The example below illustrates
2149 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2150 4 vectorized stmts):
2151
2152 before vectorization:
2153 RELATED_STMT VEC_STMT
2154 S1: x = memref - -
2155 S2: z = x + 1 - -
2156
2157 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2158 there):
2159 RELATED_STMT VEC_STMT
2160 VS1_0: vx0 = memref0 VS1_1 -
2161 VS1_1: vx1 = memref1 VS1_2 -
2162 VS1_2: vx2 = memref2 VS1_3 -
2163 VS1_3: vx3 = memref3 - -
2164 S1: x = load - VS1_0
2165 S2: z = x + 1 - -
2166
2167 step2: vectorize stmt S2 (done here):
2168 To vectorize stmt S2 we first need to find the relevant vector
2169 def for the first operand 'x'. This is, as usual, obtained from
2170 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2171 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2172 relevant vector def 'vx0'. Having found 'vx0' we can generate
2173 the vector stmt VS2_0, and as usual, record it in the
2174 STMT_VINFO_VEC_STMT of stmt S2.
2175 When creating the second copy (VS2_1), we obtain the relevant vector
2176 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2177 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2178 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2179 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2180 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2181 chain of stmts and pointers:
2182 RELATED_STMT VEC_STMT
2183 VS1_0: vx0 = memref0 VS1_1 -
2184 VS1_1: vx1 = memref1 VS1_2 -
2185 VS1_2: vx2 = memref2 VS1_3 -
2186 VS1_3: vx3 = memref3 - -
2187 S1: x = load - VS1_0
2188 VS2_0: vz0 = vx0 + v1 VS2_1 -
2189 VS2_1: vz1 = vx1 + v1 VS2_2 -
2190 VS2_2: vz2 = vx2 + v1 VS2_3 -
2191 VS2_3: vz3 = vx3 + v1 - -
2192 S2: z = x + 1 - VS2_0 */
2193
2194 prev_stmt_info = NULL;
2195 for (j = 0; j < ncopies; j++)
2196 {
2197 /* Handle uses. */
2198 if (j == 0)
2199 {
2200 if (op_type == binary_op && scalar_shift_arg)
2201 {
2202 /* Vector shl and shr insn patterns can be defined with scalar
2203 operand 2 (shift operand). In this case, use constant or loop
2204 invariant op1 directly, without extending it to vector mode
2205 first. */
2206 optab_op2_mode = insn_data[icode].operand[2].mode;
2207 if (!VECTOR_MODE_P (optab_op2_mode))
2208 {
2209 if (vect_print_dump_info (REPORT_DETAILS))
2210 fprintf (vect_dump, "operand 1 using scalar mode.");
2211 vec_oprnd1 = op1;
2212 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2213 if (slp_node)
2214 {
2215 /* Store vec_oprnd1 for every vector stmt to be created
2216 for SLP_NODE. We check during the analysis that all the
2217 shift arguments are the same.
2218 TODO: Allow different constants for different vector
2219 stmts generated for an SLP instance. */
2220 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2221 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2222 }
2223 }
2224 }
2225
2226 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
 2227 (a special case for certain kinds of vector shifts); otherwise,
2228 operand 1 should be of a vector type (the usual case). */
2229 if (op_type == binary_op && !vec_oprnd1)
b8698a0f 2230 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2231 slp_node);
2232 else
b8698a0f 2233 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2234 slp_node);
2235 }
2236 else
2237 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2238
2239 /* Arguments are ready. Create the new vector stmt. */
2240 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2241 {
2242 vop1 = ((op_type == binary_op)
2243 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2244 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2245 new_temp = make_ssa_name (vec_dest, new_stmt);
2246 gimple_assign_set_lhs (new_stmt, new_temp);
2247 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2248 if (slp_node)
2249 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2250 }
2251
2252 if (slp_node)
2253 continue;
2254
2255 if (j == 0)
2256 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2257 else
2258 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2259 prev_stmt_info = vinfo_for_stmt (new_stmt);
2260 }
2261
2262 VEC_free (tree, heap, vec_oprnds0);
2263 if (vec_oprnds1)
2264 VEC_free (tree, heap, vec_oprnds1);
2265
2266 return true;
2267}
2268
2269
2270/* Get vectorized definitions for loop-based vectorization. For the first
2271 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2272 scalar operand), and for the rest we get a copy with
2273 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2274 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2275 The vectors are collected into VEC_OPRNDS. */
2276
2277static void
b8698a0f 2278vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2279 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2280{
2281 tree vec_oprnd;
2282
2283 /* Get first vector operand. */
2284 /* All the vector operands except the very first one (that is scalar oprnd)
2285 are stmt copies. */
b8698a0f 2286 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2287 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2288 else
2289 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2290
2291 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2292
2293 /* Get second vector operand. */
2294 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2295 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
b8698a0f 2296
2297 *oprnd = vec_oprnd;
2298
b8698a0f 2299 /* For conversion in multiple steps, continue to get operands
2300 recursively. */
2301 if (multi_step_cvt)
b8698a0f 2302 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2303}
2304
2305
2306/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
b8698a0f 2307 For multi-step conversions store the resulting vectors and call the function
2308 recursively. */
2309
2310static void
2311vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2312 int multi_step_cvt, gimple stmt,
2313 VEC (tree, heap) *vec_dsts,
2314 gimple_stmt_iterator *gsi,
2315 slp_tree slp_node, enum tree_code code,
2316 stmt_vec_info *prev_stmt_info)
2317{
2318 unsigned int i;
2319 tree vop0, vop1, new_tmp, vec_dest;
2320 gimple new_stmt;
2321 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2322
b8698a0f 2323 vec_dest = VEC_pop (tree, vec_dsts);
2324
2325 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2326 {
2327 /* Create demotion operation. */
2328 vop0 = VEC_index (tree, *vec_oprnds, i);
2329 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2330 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2331 new_tmp = make_ssa_name (vec_dest, new_stmt);
2332 gimple_assign_set_lhs (new_stmt, new_tmp);
2333 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2334
2335 if (multi_step_cvt)
2336 /* Store the resulting vector for next recursive call. */
b8698a0f 2337 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2338 else
2339 {
b8698a0f 2340 /* This is the last step of the conversion sequence. Store the
2341 vectors in SLP_NODE or in vector info of the scalar statement
2342 (or in STMT_VINFO_RELATED_STMT chain). */
2343 if (slp_node)
2344 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2345 else
2346 {
2347 if (!*prev_stmt_info)
2348 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2349 else
2350 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2351
2352 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2353 }
2354 }
2355 }
2356
2357 /* For multi-step demotion operations we first generate demotion operations
b8698a0f 2358 from the source type to the intermediate types, and then combine the
2359 results (stored in VEC_OPRNDS) in demotion operation to the destination
2360 type. */
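      /* For example, a two-step int -> char demotion first packs the int
         vectors into short vectors (the intermediate type); the recursive
         call then packs those short vectors into char vectors.  */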
2361 if (multi_step_cvt)
2362 {
 2363 /* At each level of recursion we have half of the operands we had at the
2364 previous level. */
2365 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
b8698a0f 2366 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2367 stmt, vec_dsts, gsi, slp_node,
2368 code, prev_stmt_info);
2369 }
2370}
2371
2372
2373/* Function vectorizable_type_demotion
2374
2375 Check if STMT performs a binary or unary operation that involves
2376 type demotion, and if it can be vectorized.
2377 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2378 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2379 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2380
2381static bool
2382vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2383 gimple *vec_stmt, slp_tree slp_node)
2384{
2385 tree vec_dest;
2386 tree scalar_dest;
2387 tree op0;
2388 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2389 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2390 enum tree_code code, code1 = ERROR_MARK;
2391 tree def;
2392 gimple def_stmt;
2393 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2394 stmt_vec_info prev_stmt_info;
2395 int nunits_in;
2396 int nunits_out;
2397 tree vectype_out;
2398 int ncopies;
2399 int j, i;
2400 tree vectype_in;
2401 int multi_step_cvt = 0;
2402 VEC (tree, heap) *vec_oprnds0 = NULL;
2403 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2404 tree last_oprnd, intermediate_type;
2405
2406 /* FORNOW: not supported by basic block SLP vectorization. */
2407 gcc_assert (loop_vinfo);
2408
2409 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2410 return false;
2411
8644a673 2412 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2413 return false;
2414
2415 /* Is STMT a vectorizable type-demotion operation? */
2416 if (!is_gimple_assign (stmt))
2417 return false;
2418
2419 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2420 return false;
2421
2422 code = gimple_assign_rhs_code (stmt);
2423 if (!CONVERT_EXPR_CODE_P (code))
2424 return false;
2425
2426 op0 = gimple_assign_rhs1 (stmt);
2427 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2428 if (!vectype_in)
2429 return false;
2430 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2431
2432 scalar_dest = gimple_assign_lhs (stmt);
2433 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2434 if (!vectype_out)
2435 return false;
2436 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
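   /* A demotion narrows each element, so more elements fit in the output
      vector: e.g. int -> short on a 128-bit target has V4SI operands
      (nunits_in == 4) and a V8HI result (nunits_out == 8).  */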
2437 if (nunits_in >= nunits_out)
2438 return false;
2439
2440 /* Multiple types in SLP are handled by creating the appropriate number of
2441 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2442 case of SLP. */
2443 if (slp_node)
2444 ncopies = 1;
2445 else
2446 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2447 gcc_assert (ncopies >= 1);
2448
2449 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2450 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2451 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2452 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2453 && CONVERT_EXPR_CODE_P (code))))
2454 return false;
2455
2456 /* Check the operands of the operation. */
a70d6342 2457 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2458 {
2459 if (vect_print_dump_info (REPORT_DETAILS))
2460 fprintf (vect_dump, "use not simple.");
2461 return false;
2462 }
2463
2464 /* Supportable by target? */
2465 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2466 &multi_step_cvt, &interm_types))
2467 return false;
2468
2469 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2470
2471 if (!vec_stmt) /* transformation not required. */
2472 {
2473 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2474 if (vect_print_dump_info (REPORT_DETAILS))
2475 fprintf (vect_dump, "=== vectorizable_demotion ===");
2476 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2477 return true;
2478 }
2479
2480 /** Transform. **/
2481 if (vect_print_dump_info (REPORT_DETAILS))
2482 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2483 ncopies);
2484
2485 /* In case of multi-step demotion, we first generate demotion operations to
 2486 the intermediate types, and then from those types to the final one.
ebfd146a 2487 We create vector destinations for the intermediate type (TYPES) received
b8698a0f 2488 from supportable_narrowing_operation, and store them in the correct order
2489 for future use in vect_create_vectorized_demotion_stmts(). */
2490 if (multi_step_cvt)
2491 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2492 else
2493 vec_dsts = VEC_alloc (tree, heap, 1);
b8698a0f 2494
2495 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2496 VEC_quick_push (tree, vec_dsts, vec_dest);
2497
2498 if (multi_step_cvt)
2499 {
b8698a0f 2500 for (i = VEC_length (tree, interm_types) - 1;
2501 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2502 {
b8698a0f 2503 vec_dest = vect_create_destination_var (scalar_dest,
2504 intermediate_type);
2505 VEC_quick_push (tree, vec_dsts, vec_dest);
2506 }
2507 }
2508
2509 /* In case the vectorization factor (VF) is bigger than the number
2510 of elements that we can fit in a vectype (nunits), we have to generate
2511 more than one vector stmt - i.e - we need to "unroll" the
2512 vector stmt by a factor VF/nunits. */
2513 last_oprnd = op0;
2514 prev_stmt_info = NULL;
2515 for (j = 0; j < ncopies; j++)
2516 {
2517 /* Handle uses. */
2518 if (slp_node)
b8698a0f 2519 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2520 else
2521 {
2522 VEC_free (tree, heap, vec_oprnds0);
2523 vec_oprnds0 = VEC_alloc (tree, heap,
2524 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
b8698a0f 2525 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2526 vect_pow2 (multi_step_cvt) - 1);
2527 }
2528
2529 /* Arguments are ready. Create the new vector stmts. */
2530 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
b8698a0f 2531 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
ebfd146a 2532 multi_step_cvt, stmt, tmp_vec_dsts,
b8698a0f 2533 gsi, slp_node, code1,
2534 &prev_stmt_info);
2535 }
2536
2537 VEC_free (tree, heap, vec_oprnds0);
2538 VEC_free (tree, heap, vec_dsts);
2539 VEC_free (tree, heap, tmp_vec_dsts);
2540 VEC_free (tree, heap, interm_types);
2541
2542 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2543 return true;
2544}
2545
2546
2547/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
b8698a0f 2548 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2549 the resulting vectors and call the function recursively. */
2550
2551static void
2552vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2553 VEC (tree, heap) **vec_oprnds1,
2554 int multi_step_cvt, gimple stmt,
2555 VEC (tree, heap) *vec_dsts,
2556 gimple_stmt_iterator *gsi,
2557 slp_tree slp_node, enum tree_code code1,
b8698a0f 2558 enum tree_code code2, tree decl1,
2559 tree decl2, int op_type,
2560 stmt_vec_info *prev_stmt_info)
2561{
2562 int i;
2563 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2564 gimple new_stmt1, new_stmt2;
2565 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2566 VEC (tree, heap) *vec_tmp;
2567
2568 vec_dest = VEC_pop (tree, vec_dsts);
2569 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2570
2571 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2572 {
2573 if (op_type == binary_op)
2574 vop1 = VEC_index (tree, *vec_oprnds1, i);
2575 else
2576 vop1 = NULL_TREE;
2577
2578 /* Generate the two halves of promotion operation. */
b8698a0f 2579 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2580 op_type, vec_dest, gsi, stmt);
2581 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2582 op_type, vec_dest, gsi, stmt);
2583 if (is_gimple_call (new_stmt1))
2584 {
2585 new_tmp1 = gimple_call_lhs (new_stmt1);
2586 new_tmp2 = gimple_call_lhs (new_stmt2);
2587 }
2588 else
2589 {
2590 new_tmp1 = gimple_assign_lhs (new_stmt1);
2591 new_tmp2 = gimple_assign_lhs (new_stmt2);
2592 }
2593
2594 if (multi_step_cvt)
2595 {
2596 /* Store the results for the recursive call. */
2597 VEC_quick_push (tree, vec_tmp, new_tmp1);
2598 VEC_quick_push (tree, vec_tmp, new_tmp2);
2599 }
2600 else
2601 {
 2602 /* Last step of promotion sequence - store the results. */
2603 if (slp_node)
2604 {
2605 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2606 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2607 }
2608 else
2609 {
2610 if (!*prev_stmt_info)
2611 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2612 else
2613 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2614
2615 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2616 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2617 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2618 }
2619 }
2620 }
2621
2622 if (multi_step_cvt)
2623 {
b8698a0f 2624 /* For a multi-step promotion operation we call the function
 2625 recursively for every stage. We start from the input type,
 2626 create promotion operations to the intermediate types, and then
 2627 create promotions to the output type. */
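      /* For example, a two-step char -> int promotion first widens the char
         vectors to short vectors (the intermediate type); the recursive call
         then widens those short vectors to int vectors.  */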
2628 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2629 VEC_free (tree, heap, vec_tmp);
2630 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2631 multi_step_cvt - 1, stmt,
2632 vec_dsts, gsi, slp_node, code1,
2633 code2, decl2, decl2, op_type,
2634 prev_stmt_info);
2635 }
2636}
b8698a0f 2637
2638
2639/* Function vectorizable_type_promotion
2640
2641 Check if STMT performs a binary or unary operation that involves
2642 type promotion, and if it can be vectorized.
2643 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2644 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2645 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2646
2647static bool
2648vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2649 gimple *vec_stmt, slp_tree slp_node)
2650{
2651 tree vec_dest;
2652 tree scalar_dest;
2653 tree op0, op1 = NULL;
2654 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2655 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2656 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2657 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2658 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
b8698a0f 2659 int op_type;
2660 tree def;
2661 gimple def_stmt;
2662 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2663 stmt_vec_info prev_stmt_info;
2664 int nunits_in;
2665 int nunits_out;
2666 tree vectype_out;
2667 int ncopies;
2668 int j, i;
2669 tree vectype_in;
2670 tree intermediate_type = NULL_TREE;
2671 int multi_step_cvt = 0;
2672 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2673 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
b8698a0f 2674
2675 /* FORNOW: not supported by basic block SLP vectorization. */
2676 gcc_assert (loop_vinfo);
b8698a0f 2677
2678 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2679 return false;
2680
8644a673 2681 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2682 return false;
2683
2684 /* Is STMT a vectorizable type-promotion operation? */
2685 if (!is_gimple_assign (stmt))
2686 return false;
2687
2688 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2689 return false;
2690
2691 code = gimple_assign_rhs_code (stmt);
2692 if (!CONVERT_EXPR_CODE_P (code)
2693 && code != WIDEN_MULT_EXPR)
2694 return false;
2695
2696 op0 = gimple_assign_rhs1 (stmt);
2697 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2698 if (!vectype_in)
2699 return false;
2700 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2701
2702 scalar_dest = gimple_assign_lhs (stmt);
2703 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2704 if (!vectype_out)
2705 return false;
2706 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
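   /* A promotion widens each element, so fewer elements fit in the output
      vector: e.g. short -> int on a 128-bit target has a V8HI operand
      (nunits_in == 8) and V4SI results (nunits_out == 4), produced as a high
      and a low half per input vector.  */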
2707 if (nunits_in <= nunits_out)
2708 return false;
2709
2710 /* Multiple types in SLP are handled by creating the appropriate number of
2711 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2712 case of SLP. */
2713 if (slp_node)
2714 ncopies = 1;
2715 else
2716 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2717
2718 gcc_assert (ncopies >= 1);
2719
2720 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2721 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2722 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2723 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2724 && CONVERT_EXPR_CODE_P (code))))
2725 return false;
2726
2727 /* Check the operands of the operation. */
a70d6342 2728 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2729 {
2730 if (vect_print_dump_info (REPORT_DETAILS))
2731 fprintf (vect_dump, "use not simple.");
2732 return false;
2733 }
2734
2735 op_type = TREE_CODE_LENGTH (code);
2736 if (op_type == binary_op)
2737 {
2738 op1 = gimple_assign_rhs2 (stmt);
a70d6342 2739 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2740 {
2741 if (vect_print_dump_info (REPORT_DETAILS))
2742 fprintf (vect_dump, "use not simple.");
2743 return false;
2744 }
2745 }
2746
2747 /* Supportable by target? */
2748 if (!supportable_widening_operation (code, stmt, vectype_in,
2749 &decl1, &decl2, &code1, &code2,
2750 &multi_step_cvt, &interm_types))
2751 return false;
2752
2753 /* Binary widening operation can only be supported directly by the
2754 architecture. */
2755 gcc_assert (!(multi_step_cvt && op_type == binary_op));
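   /* WIDEN_MULT_EXPR is the binary case here, e.g. a short * short product
      stored into an int; such a widening multiply has to be supported by the
      target in a single step, hence the assertion above.  */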
2756
2757 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2758
2759 if (!vec_stmt) /* transformation not required. */
2760 {
2761 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2762 if (vect_print_dump_info (REPORT_DETAILS))
2763 fprintf (vect_dump, "=== vectorizable_promotion ===");
2764 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2765 return true;
2766 }
2767
2768 /** Transform. **/
2769
2770 if (vect_print_dump_info (REPORT_DETAILS))
2771 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2772 ncopies);
2773
2774 /* Handle def. */
b8698a0f 2775 /* In case of multi-step promotion, we first generate promotion operations
ebfd146a 2776 to the intermediate types, and then from those types to the final one.
 2777 We store vector destinations in VEC_DSTS in the correct order for
2778 recursive creation of promotion operations in
2779 vect_create_vectorized_promotion_stmts(). Vector destinations are created
 2780 according to TYPES received from supportable_widening_operation(). */
2781 if (multi_step_cvt)
2782 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2783 else
2784 vec_dsts = VEC_alloc (tree, heap, 1);
2785
2786 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2787 VEC_quick_push (tree, vec_dsts, vec_dest);
2788
2789 if (multi_step_cvt)
2790 {
2791 for (i = VEC_length (tree, interm_types) - 1;
2792 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2793 {
2794 vec_dest = vect_create_destination_var (scalar_dest,
2795 intermediate_type);
2796 VEC_quick_push (tree, vec_dsts, vec_dest);
2797 }
2798 }
b8698a0f 2799
2800 if (!slp_node)
2801 {
b8698a0f 2802 vec_oprnds0 = VEC_alloc (tree, heap,
2803 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2804 if (op_type == binary_op)
2805 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2806 }
2807
2808 /* In case the vectorization factor (VF) is bigger than the number
2809 of elements that we can fit in a vectype (nunits), we have to generate
2810 more than one vector stmt - i.e - we need to "unroll" the
2811 vector stmt by a factor VF/nunits. */
2812
2813 prev_stmt_info = NULL;
2814 for (j = 0; j < ncopies; j++)
2815 {
2816 /* Handle uses. */
2817 if (j == 0)
2818 {
2819 if (slp_node)
2820 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2821 else
2822 {
2823 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2824 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2825 if (op_type == binary_op)
2826 {
2827 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2828 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2829 }
2830 }
2831 }
2832 else
2833 {
2834 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2835 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2836 if (op_type == binary_op)
2837 {
2838 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2839 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2840 }
2841 }
2842
2843 /* Arguments are ready. Create the new vector stmts. */
2844 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2845 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
b8698a0f 2846 multi_step_cvt, stmt,
2847 tmp_vec_dsts,
2848 gsi, slp_node, code1, code2,
2849 decl1, decl2, op_type,
2850 &prev_stmt_info);
2851 }
2852
2853 VEC_free (tree, heap, vec_dsts);
2854 VEC_free (tree, heap, tmp_vec_dsts);
2855 VEC_free (tree, heap, interm_types);
2856 VEC_free (tree, heap, vec_oprnds0);
2857 VEC_free (tree, heap, vec_oprnds1);
2858
2859 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2860 return true;
2861}
2862
2863
2864/* Function vectorizable_store.
2865
2866 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2867 can be vectorized.
2868 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2869 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2870 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2871
2872static bool
2873vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2874 slp_tree slp_node)
2875{
2876 tree scalar_dest;
2877 tree data_ref;
2878 tree op;
2879 tree vec_oprnd = NULL_TREE;
2880 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2881 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2882 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2883 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 2884 struct loop *loop = NULL;
2885 enum machine_mode vec_mode;
2886 tree dummy;
2887 enum dr_alignment_support alignment_support_scheme;
2888 tree def;
2889 gimple def_stmt;
2890 enum vect_def_type dt;
2891 stmt_vec_info prev_stmt_info = NULL;
2892 tree dataref_ptr = NULL_TREE;
2893 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2894 int ncopies;
2895 int j;
2896 gimple next_stmt, first_stmt = NULL;
2897 bool strided_store = false;
2898 unsigned int group_size, i;
2899 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2900 bool inv_p;
2901 VEC(tree,heap) *vec_oprnds = NULL;
2902 bool slp = (slp_node != NULL);
ebfd146a 2903 unsigned int vec_num;
2904 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2905
2906 if (loop_vinfo)
2907 loop = LOOP_VINFO_LOOP (loop_vinfo);
2908
2909 /* Multiple types in SLP are handled by creating the appropriate number of
2910 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2911 case of SLP. */
2912 if (slp)
2913 ncopies = 1;
2914 else
2915 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2916
2917 gcc_assert (ncopies >= 1);
2918
2919 /* FORNOW. This restriction should be relaxed. */
a70d6342 2920 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2921 {
2922 if (vect_print_dump_info (REPORT_DETAILS))
2923 fprintf (vect_dump, "multiple types in nested loop.");
2924 return false;
2925 }
2926
a70d6342 2927 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2928 return false;
2929
8644a673 2930 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2931 return false;
2932
2933 /* Is vectorizable store? */
2934
2935 if (!is_gimple_assign (stmt))
2936 return false;
2937
2938 scalar_dest = gimple_assign_lhs (stmt);
2939 if (TREE_CODE (scalar_dest) != ARRAY_REF
2940 && TREE_CODE (scalar_dest) != INDIRECT_REF
2941 && TREE_CODE (scalar_dest) != COMPONENT_REF
2942 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2943 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2944 return false;
2945
2946 gcc_assert (gimple_assign_single_p (stmt));
2947 op = gimple_assign_rhs1 (stmt);
a70d6342 2948 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2949 {
2950 if (vect_print_dump_info (REPORT_DETAILS))
2951 fprintf (vect_dump, "use not simple.");
2952 return false;
2953 }
2954
2955 /* The scalar rhs type needs to be trivially convertible to the vector
2956 component type. This should always be the case. */
2957 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
b8698a0f 2958 {
2959 if (vect_print_dump_info (REPORT_DETAILS))
2960 fprintf (vect_dump, "??? operands of different types");
2961 return false;
2962 }
2963
2964 vec_mode = TYPE_MODE (vectype);
2965 /* FORNOW. In some cases can vectorize even if data-type not supported
2966 (e.g. - array initialization with 0). */
2967 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2968 return false;
2969
2970 if (!STMT_VINFO_DATA_REF (stmt_info))
2971 return false;
2972
2973 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2974 {
2975 strided_store = true;
2976 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2977 if (!vect_strided_store_supported (vectype)
2978 && !PURE_SLP_STMT (stmt_info) && !slp)
2979 return false;
b8698a0f 2980
2981 if (first_stmt == stmt)
2982 {
2983 /* STMT is the leader of the group. Check the operands of all the
2984 stmts of the group. */
2985 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2986 while (next_stmt)
2987 {
2988 gcc_assert (gimple_assign_single_p (next_stmt));
2989 op = gimple_assign_rhs1 (next_stmt);
b8698a0f 2990 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
a70d6342 2991 &def, &dt))
2992 {
2993 if (vect_print_dump_info (REPORT_DETAILS))
2994 fprintf (vect_dump, "use not simple.");
2995 return false;
2996 }
2997 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
2998 }
2999 }
3000 }
3001
3002 if (!vec_stmt) /* transformation not required. */
3003 {
3004 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3005 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3006 return true;
3007 }
3008
3009 /** Transform. **/
3010
3011 if (strided_store)
3012 {
3013 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3014 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3015
3016 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3017
3018 /* FORNOW */
a70d6342 3019 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3020
3021 /* We vectorize all the stmts of the interleaving group when we
3022 reach the last stmt in the group. */
b8698a0f 3023 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3024 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3025 && !slp)
3026 {
3027 *vec_stmt = NULL;
3028 return true;
3029 }
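      /* For example, with an interleaving group of 4 stores, the transform
         calls for the first three scalar stores only set *vec_stmt to NULL
         and return; the call for the last store in the group emits the
         vector stores (and the permutations) for the whole group.  */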
3030
3031 if (slp)
3032 strided_store = false;
3033
3034 /* VEC_NUM is the number of vect stmts to be created for this group. */
3035 if (slp)
3036 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3037 else
3038 vec_num = group_size;
3039 }
b8698a0f 3040 else
3041 {
3042 first_stmt = stmt;
3043 first_dr = dr;
3044 group_size = vec_num = 1;
ebfd146a 3045 }
b8698a0f 3046
3047 if (vect_print_dump_info (REPORT_DETAILS))
3048 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3049
3050 dr_chain = VEC_alloc (tree, heap, group_size);
3051 oprnds = VEC_alloc (tree, heap, group_size);
3052
3053 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3054 gcc_assert (alignment_support_scheme);
3055
3056 /* In case the vectorization factor (VF) is bigger than the number
3057 of elements that we can fit in a vectype (nunits), we have to generate
3058 more than one vector stmt - i.e - we need to "unroll" the
b8698a0f 3059 vector stmt by a factor VF/nunits. For more details see documentation in
 3060 vect_get_vec_def_for_stmt_copy. */
3061
3062 /* In case of interleaving (non-unit strided access):
3063
3064 S1: &base + 2 = x2
3065 S2: &base = x0
3066 S3: &base + 1 = x1
3067 S4: &base + 3 = x3
3068
3069 We create vectorized stores starting from base address (the access of the
3070 first stmt in the chain (S2 in the above example), when the last store stmt
3071 of the chain (S4) is reached:
3072
3073 VS1: &base = vx2
3074 VS2: &base + vec_size*1 = vx0
3075 VS3: &base + vec_size*2 = vx1
3076 VS4: &base + vec_size*3 = vx3
3077
3078 Then permutation statements are generated:
3079
3080 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3081 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3082 ...
b8698a0f 3083
3084 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3085 (the order of the data-refs in the output of vect_permute_store_chain
3086 corresponds to the order of scalar stmts in the interleaving chain - see
3087 the documentation of vect_permute_store_chain()).
3088
3089 In case of both multiple types and interleaving, above vector stores and
3090 permutation stmts are created for every copy. The result vector stmts are
3091 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
b8698a0f 3092 STMT_VINFO_RELATED_STMT for the next copies.
3093 */
3094
3095 prev_stmt_info = NULL;
3096 for (j = 0; j < ncopies; j++)
3097 {
3098 gimple new_stmt;
3099 gimple ptr_incr;
3100
3101 if (j == 0)
3102 {
3103 if (slp)
3104 {
3105 /* Get vectorized arguments for SLP_NODE. */
3106 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3107
3108 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3109 }
3110 else
3111 {
3112 /* For interleaved stores we collect vectorized defs for all the
3113 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3114 used as an input to vect_permute_store_chain(), and OPRNDS as
3115 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3116
3117 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3118 OPRNDS are of size 1. */
b8698a0f 3119 next_stmt = first_stmt;
3120 for (i = 0; i < group_size; i++)
3121 {
3122 /* Since gaps are not supported for interleaved stores,
3123 GROUP_SIZE is the exact number of stmts in the chain.
3124 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3125 there is no interleaving, GROUP_SIZE is 1, and only one
3126 iteration of the loop will be executed. */
3127 gcc_assert (next_stmt
3128 && gimple_assign_single_p (next_stmt));
3129 op = gimple_assign_rhs1 (next_stmt);
3130
b8698a0f 3131 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
ebfd146a 3132 NULL);
3133 VEC_quick_push(tree, dr_chain, vec_oprnd);
3134 VEC_quick_push(tree, oprnds, vec_oprnd);
3135 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3136 }
3137 }
3138
 3139 /* We should have caught mismatched types earlier. */
3140 gcc_assert (useless_type_conversion_p (vectype,
3141 TREE_TYPE (vec_oprnd)));
3142 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3143 &dummy, &ptr_incr, false,
5006671f 3144 &inv_p);
a70d6342 3145 gcc_assert (bb_vinfo || !inv_p);
ebfd146a 3146 }
b8698a0f 3147 else
ebfd146a 3148 {
3149 /* For interleaved stores we created vectorized defs for all the
3150 defs stored in OPRNDS in the previous iteration (previous copy).
3151 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3152 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3153 next copy.
3154 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3155 OPRNDS are of size 1. */
3156 for (i = 0; i < group_size; i++)
3157 {
3158 op = VEC_index (tree, oprnds, i);
b8698a0f 3159 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
a70d6342 3160 &dt);
b8698a0f 3161 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3162 VEC_replace(tree, dr_chain, i, vec_oprnd);
3163 VEC_replace(tree, oprnds, i, vec_oprnd);
3164 }
b8698a0f 3165 dataref_ptr =
3166 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3167 }
3168
3169 if (strided_store)
3170 {
b8698a0f 3171 result_chain = VEC_alloc (tree, heap, group_size);
3172 /* Permute. */
3173 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3174 &result_chain))
3175 return false;
3176 }
3177
3178 next_stmt = first_stmt;
3179 for (i = 0; i < vec_num; i++)
3180 {
3181 if (i > 0)
3182 /* Bump the vector pointer. */
3183 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3184 NULL_TREE);
3185
3186 if (slp)
3187 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3188 else if (strided_store)
b8698a0f 3189 /* For strided stores vectorized defs are interleaved in
3190 vect_permute_store_chain(). */
3191 vec_oprnd = VEC_index (tree, result_chain, i);
3192
3193 if (aligned_access_p (first_dr))
3194 data_ref = build_fold_indirect_ref (dataref_ptr);
3195 else
3196 {
3197 int mis = DR_MISALIGNMENT (first_dr);
3198 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3199 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3200 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3201 }
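      /* DR_MISALIGNMENT is recorded in bytes; the MULT_EXPR by BITS_PER_UNIT
         above expresses the known misalignment in bits (size_zero_node is
         used when the misalignment is unknown, i.e. mis == -1).  */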
3202
3203 /* If accesses through a pointer to vectype do not alias the original
3204 memory reference we have a problem. This should never happen. */
3205 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3206 get_alias_set (gimple_assign_lhs (stmt))));
3207
3208 /* Arguments are ready. Create the new vector stmt. */
3209 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3210 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3211 mark_symbols_for_renaming (new_stmt);
3212
3213 if (slp)
3214 continue;
b8698a0f 3215
3216 if (j == 0)
3217 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3218 else
3219 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3220
3221 prev_stmt_info = vinfo_for_stmt (new_stmt);
3222 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3223 if (!next_stmt)
3224 break;
3225 }
3226 }
3227
3228 VEC_free (tree, heap, dr_chain);
3229 VEC_free (tree, heap, oprnds);
ebfd146a 3230 if (result_chain)
b8698a0f 3231 VEC_free (tree, heap, result_chain);
3232
3233 return true;
3234}
3235
3236/* vectorizable_load.
3237
3238 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3239 can be vectorized.
3240 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3241 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3242 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3243
3244static bool
3245vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3246 slp_tree slp_node, slp_instance slp_node_instance)
3247{
3248 tree scalar_dest;
3249 tree vec_dest = NULL;
3250 tree data_ref = NULL;
3251 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b8698a0f 3252 stmt_vec_info prev_stmt_info;
ebfd146a 3253 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 3254 struct loop *loop = NULL;
ebfd146a 3255 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
a70d6342 3256 bool nested_in_vect_loop = false;
3257 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3258 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3259 tree new_temp;
3260 int mode;
3261 gimple new_stmt = NULL;
3262 tree dummy;
3263 enum dr_alignment_support alignment_support_scheme;
3264 tree dataref_ptr = NULL_TREE;
3265 gimple ptr_incr;
3266 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3267 int ncopies;
3268 int i, j, group_size;
3269 tree msq = NULL_TREE, lsq;
3270 tree offset = NULL_TREE;
3271 tree realignment_token = NULL_TREE;
3272 gimple phi = NULL;
3273 VEC(tree,heap) *dr_chain = NULL;
3274 bool strided_load = false;
3275 gimple first_stmt;
3276 tree scalar_type;
3277 bool inv_p;
3278 bool compute_in_loop = false;
3279 struct loop *at_loop;
3280 int vec_num;
3281 bool slp = (slp_node != NULL);
3282 bool slp_perm = false;
3283 enum tree_code code;
3284 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3285 int vf;
3286
3287 if (loop_vinfo)
3288 {
3289 loop = LOOP_VINFO_LOOP (loop_vinfo);
3290 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3291 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3292 }
3293 else
3533e503 3294 vf = 1;
ebfd146a
IR
3295
3296 /* Multiple types in SLP are handled by creating the appropriate number of
3297 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3298 case of SLP. */
3299 if (slp)
3300 ncopies = 1;
3301 else
3302 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3303
3304 gcc_assert (ncopies >= 1);
3305
3306 /* FORNOW. This restriction should be relaxed. */
3307 if (nested_in_vect_loop && ncopies > 1)
3308 {
3309 if (vect_print_dump_info (REPORT_DETAILS))
3310 fprintf (vect_dump, "multiple types in nested loop.");
3311 return false;
3312 }
3313
a70d6342 3314 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
ebfd146a
IR
3315 return false;
3316
8644a673 3317 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
ebfd146a
IR
3318 return false;
3319
3320 /* Is vectorizable load? */
3321 if (!is_gimple_assign (stmt))
3322 return false;
3323
3324 scalar_dest = gimple_assign_lhs (stmt);
3325 if (TREE_CODE (scalar_dest) != SSA_NAME)
3326 return false;
3327
3328 code = gimple_assign_rhs_code (stmt);
3329 if (code != ARRAY_REF
3330 && code != INDIRECT_REF
e9dbe7bb
IR
3331 && code != COMPONENT_REF
3332 && code != IMAGPART_EXPR
3333 && code != REALPART_EXPR)
ebfd146a
IR
3334 return false;
3335
3336 if (!STMT_VINFO_DATA_REF (stmt_info))
3337 return false;
3338
3339 scalar_type = TREE_TYPE (DR_REF (dr));
3340 mode = (int) TYPE_MODE (vectype);
3341
 3342   /* FORNOW. In some cases we can vectorize even if the data-type is not supported
3343 (e.g. - data copies). */
3344 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3345 {
3346 if (vect_print_dump_info (REPORT_DETAILS))
3347 fprintf (vect_dump, "Aligned load, but unsupported type.");
3348 return false;
3349 }
3350
3351 /* The vector component type needs to be trivially convertible to the
3352 scalar lhs. This should always be the case. */
3353 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
b8698a0f 3354 {
ebfd146a
IR
3355 if (vect_print_dump_info (REPORT_DETAILS))
3356 fprintf (vect_dump, "??? operands of different types");
3357 return false;
3358 }
3359
3360 /* Check if the load is a part of an interleaving chain. */
3361 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3362 {
3363 strided_load = true;
3364 /* FORNOW */
3365 gcc_assert (! nested_in_vect_loop);
3366
3367 /* Check if interleaving is supported. */
3368 if (!vect_strided_load_supported (vectype)
3369 && !PURE_SLP_STMT (stmt_info) && !slp)
3370 return false;
3371 }
3372
3373 if (!vec_stmt) /* transformation not required. */
3374 {
3375 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3376 vect_model_load_cost (stmt_info, ncopies, NULL);
3377 return true;
3378 }
3379
3380 if (vect_print_dump_info (REPORT_DETAILS))
3381 fprintf (vect_dump, "transform load.");
3382
3383 /** Transform. **/
3384
3385 if (strided_load)
3386 {
3387 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3388 /* Check if the chain of loads is already vectorized. */
3389 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3390 {
3391 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3392 return true;
3393 }
3394 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3395 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3396
3397 /* VEC_NUM is the number of vect stmts to be created for this group. */
3398 if (slp)
3399 {
3400 strided_load = false;
3401 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
a70d6342
IR
3402 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3403 slp_perm = true;
3404 }
ebfd146a
IR
3405 else
3406 vec_num = group_size;
3407
3408 dr_chain = VEC_alloc (tree, heap, vec_num);
3409 }
3410 else
3411 {
3412 first_stmt = stmt;
3413 first_dr = dr;
3414 group_size = vec_num = 1;
3415 }
3416
3417 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3418 gcc_assert (alignment_support_scheme);
3419
3420 /* In case the vectorization factor (VF) is bigger than the number
3421 of elements that we can fit in a vectype (nunits), we have to generate
3422 more than one vector stmt - i.e - we need to "unroll" the
3423 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3424 from one copy of the vector stmt to the next, in the field
3425 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3426 stages to find the correct vector defs to be used when vectorizing
3427 stmts that use the defs of the current stmt. The example below illustrates
3428 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3429 4 vectorized stmts):
3430
3431 before vectorization:
3432 RELATED_STMT VEC_STMT
3433 S1: x = memref - -
3434 S2: z = x + 1 - -
3435
3436 step 1: vectorize stmt S1:
3437 We first create the vector stmt VS1_0, and, as usual, record a
3438 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3439 Next, we create the vector stmt VS1_1, and record a pointer to
3440 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3441 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3442 stmts and pointers:
3443 RELATED_STMT VEC_STMT
3444 VS1_0: vx0 = memref0 VS1_1 -
3445 VS1_1: vx1 = memref1 VS1_2 -
3446 VS1_2: vx2 = memref2 VS1_3 -
3447 VS1_3: vx3 = memref3 - -
3448 S1: x = load - VS1_0
3449 S2: z = x + 1 - -
3450
b8698a0f
L
 3451         See the documentation of vect_get_vec_def_for_stmt_copy for how the
3452 information we recorded in RELATED_STMT field is used to vectorize
ebfd146a
IR
3453 stmt S2. */
3454
3455 /* In case of interleaving (non-unit strided access):
3456
3457 S1: x2 = &base + 2
3458 S2: x0 = &base
3459 S3: x1 = &base + 1
3460 S4: x3 = &base + 3
3461
b8698a0f 3462 Vectorized loads are created in the order of memory accesses
ebfd146a
IR
3463 starting from the access of the first stmt of the chain:
3464
3465 VS1: vx0 = &base
3466 VS2: vx1 = &base + vec_size*1
3467 VS3: vx3 = &base + vec_size*2
3468 VS4: vx4 = &base + vec_size*3
3469
3470 Then permutation statements are generated:
3471
3472 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3473 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3474 ...
3475
3476 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3477 (the order of the data-refs in the output of vect_permute_load_chain
3478 corresponds to the order of scalar stmts in the interleaving chain - see
3479 the documentation of vect_permute_load_chain()).
3480 The generation of permutation stmts and recording them in
3481 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3482
b8698a0f 3483 In case of both multiple types and interleaving, the vector loads and
ebfd146a
IR
3484 permutation stmts above are created for every copy. The result vector stmts
3485 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3486 STMT_VINFO_RELATED_STMT for the next copies. */
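   /* A small illustrative sketch (hypothetical loop and names, not taken
      from the text above): with V4SI vectors, the group_size-2 interleaved
      access

        for (i = 0; i < n; i++)
          a[i] = b[2*i] + b[2*i+1];

      loads two consecutive vectors per copy,

        vx0 = { b[8i],   b[8i+1], b[8i+2], b[8i+3] }
        vx1 = { b[8i+4], b[8i+5], b[8i+6], b[8i+7] }

      and then VEC_EXTRACT_EVEN_EXPR <vx0, vx1> yields the b[2*i] elements
      while VEC_EXTRACT_ODD_EXPR <vx0, vx1> yields the b[2*i+1] elements,
      ready for the vector addition.  */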
3487
3488 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3489 on a target that supports unaligned accesses (dr_unaligned_supported)
3490 we generate the following code:
3491 p = initial_addr;
3492 indx = 0;
3493 loop {
3494 p = p + indx * vectype_size;
3495 vec_dest = *(p);
3496 indx = indx + 1;
3497 }
3498
3499 Otherwise, the data reference is potentially unaligned on a target that
b8698a0f 3500 does not support unaligned accesses (dr_explicit_realign_optimized) -
ebfd146a
IR
3501 then generate the following code, in which the data in each iteration is
3502 obtained by two vector loads, one from the previous iteration, and one
3503 from the current iteration:
3504 p1 = initial_addr;
3505 msq_init = *(floor(p1))
3506 p2 = initial_addr + VS - 1;
3507 realignment_token = call target_builtin;
3508 indx = 0;
3509 loop {
3510 p2 = p2 + indx * vectype_size
3511 lsq = *(floor(p2))
3512 vec_dest = realign_load (msq, lsq, realignment_token)
3513 indx = indx + 1;
3514 msq = lsq;
3515 } */
3516
3517 /* If the misalignment remains the same throughout the execution of the
3518 loop, we can create the init_addr and permutation mask at the loop
3519 preheader. Otherwise, it needs to be created inside the loop.
3520 This can only occur when vectorizing memory accesses in the inner-loop
3521 nested within an outer-loop that is being vectorized. */
3522
a70d6342 3523 if (loop && nested_in_vect_loop_p (loop, stmt)
ebfd146a
IR
3524 && (TREE_INT_CST_LOW (DR_STEP (dr))
3525 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3526 {
3527 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3528 compute_in_loop = true;
3529 }
3530
3531 if ((alignment_support_scheme == dr_explicit_realign_optimized
3532 || alignment_support_scheme == dr_explicit_realign)
3533 && !compute_in_loop)
3534 {
3535 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3536 alignment_support_scheme, NULL_TREE,
3537 &at_loop);
3538 if (alignment_support_scheme == dr_explicit_realign_optimized)
3539 {
3540 phi = SSA_NAME_DEF_STMT (msq);
3541 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3542 }
3543 }
3544 else
3545 at_loop = loop;
3546
3547 prev_stmt_info = NULL;
3548 for (j = 0; j < ncopies; j++)
b8698a0f 3549 {
ebfd146a
IR
3550 /* 1. Create the vector pointer update chain. */
3551 if (j == 0)
3552 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
b8698a0f
L
3553 at_loop, offset,
3554 &dummy, &ptr_incr, false,
5006671f 3555 &inv_p);
ebfd146a 3556 else
b8698a0f 3557 dataref_ptr =
ebfd146a
IR
3558 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3559
3560 for (i = 0; i < vec_num; i++)
3561 {
3562 if (i > 0)
3563 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3564 NULL_TREE);
3565
3566 /* 2. Create the vector-load in the loop. */
3567 switch (alignment_support_scheme)
3568 {
3569 case dr_aligned:
3570 gcc_assert (aligned_access_p (first_dr));
3571 data_ref = build_fold_indirect_ref (dataref_ptr);
3572 break;
3573 case dr_unaligned_supported:
3574 {
3575 int mis = DR_MISALIGNMENT (first_dr);
3576 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3577
3578 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3579 data_ref =
3580 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3581 break;
3582 }
3583 case dr_explicit_realign:
3584 {
3585 tree ptr, bump;
3586 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3587
3588 if (compute_in_loop)
3589 msq = vect_setup_realignment (first_stmt, gsi,
3590 &realignment_token,
b8698a0f 3591 dr_explicit_realign,
ebfd146a
IR
3592 dataref_ptr, NULL);
3593
3594 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3595 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3596 new_stmt = gimple_build_assign (vec_dest, data_ref);
3597 new_temp = make_ssa_name (vec_dest, new_stmt);
3598 gimple_assign_set_lhs (new_stmt, new_temp);
5006671f
RG
3599 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3600 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
ebfd146a 3601 vect_finish_stmt_generation (stmt, new_stmt, gsi);
ebfd146a
IR
3602 msq = new_temp;
3603
3604 bump = size_binop (MULT_EXPR, vs_minus_1,
3605 TYPE_SIZE_UNIT (scalar_type));
3606 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3607 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3608 break;
3609 }
3610 case dr_explicit_realign_optimized:
3611 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3612 break;
3613 default:
3614 gcc_unreachable ();
3615 }
5006671f
RG
3616 /* If accesses through a pointer to vectype do not alias the original
3617 memory reference we have a problem. This should never happen. */
3618 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3619 get_alias_set (gimple_assign_rhs1 (stmt))));
ebfd146a
IR
3620 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3621 new_stmt = gimple_build_assign (vec_dest, data_ref);
3622 new_temp = make_ssa_name (vec_dest, new_stmt);
3623 gimple_assign_set_lhs (new_stmt, new_temp);
3624 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3625 mark_symbols_for_renaming (new_stmt);
3626
3627 /* 3. Handle explicit realignment if necessary/supported. Create in
3628 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3629 if (alignment_support_scheme == dr_explicit_realign_optimized
3630 || alignment_support_scheme == dr_explicit_realign)
3631 {
3632 tree tmp;
3633
3634 lsq = gimple_assign_lhs (new_stmt);
3635 if (!realignment_token)
3636 realignment_token = dataref_ptr;
3637 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3638 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3639 realignment_token);
3640 new_stmt = gimple_build_assign (vec_dest, tmp);
3641 new_temp = make_ssa_name (vec_dest, new_stmt);
3642 gimple_assign_set_lhs (new_stmt, new_temp);
3643 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3644
3645 if (alignment_support_scheme == dr_explicit_realign_optimized)
3646 {
3647 gcc_assert (phi);
3648 if (i == vec_num - 1 && j == ncopies - 1)
f5045c96
AM
3649 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3650 UNKNOWN_LOCATION);
ebfd146a
IR
3651 msq = lsq;
3652 }
3653 }
3654
3655 /* 4. Handle invariant-load. */
a70d6342 3656 if (inv_p && !bb_vinfo)
ebfd146a
IR
3657 {
3658 gcc_assert (!strided_load);
3659 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3660 if (j == 0)
3661 {
3662 int k;
3663 tree t = NULL_TREE;
3664 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3665
 3666 		  /* CHECKME: bitpos depends on endianness?  */
3667 bitpos = bitsize_zero_node;
b8698a0f 3668 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
ebfd146a 3669 bitsize, bitpos);
b8698a0f 3670 vec_dest =
ebfd146a
IR
3671 vect_create_destination_var (scalar_dest, NULL_TREE);
3672 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3673 new_temp = make_ssa_name (vec_dest, new_stmt);
3674 gimple_assign_set_lhs (new_stmt, new_temp);
3675 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3676
3677 for (k = nunits - 1; k >= 0; --k)
3678 t = tree_cons (NULL_TREE, new_temp, t);
3679 /* FIXME: use build_constructor directly. */
3680 vec_inv = build_constructor_from_list (vectype, t);
3681 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3682 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3683 }
3684 else
3685 gcc_unreachable (); /* FORNOW. */
3686 }
3687
3688 /* Collect vector loads and later create their permutation in
3689 vect_transform_strided_load (). */
3690 if (strided_load || slp_perm)
3691 VEC_quick_push (tree, dr_chain, new_temp);
3692
3693 /* Store vector loads in the corresponding SLP_NODE. */
3694 if (slp && !slp_perm)
3695 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3696 }
3697
3698 if (slp && !slp_perm)
3699 continue;
3700
3701 if (slp_perm)
3702 {
a70d6342 3703 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
ebfd146a
IR
3704 slp_node_instance, false))
3705 {
3706 VEC_free (tree, heap, dr_chain);
3707 return false;
3708 }
3709 }
3710 else
3711 {
3712 if (strided_load)
3713 {
3714 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
b8698a0f 3715 return false;
ebfd146a
IR
3716
3717 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3718 VEC_free (tree, heap, dr_chain);
3719 dr_chain = VEC_alloc (tree, heap, group_size);
3720 }
3721 else
3722 {
3723 if (j == 0)
3724 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3725 else
3726 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3727 prev_stmt_info = vinfo_for_stmt (new_stmt);
3728 }
3729 }
3730 }
3731
3732 if (dr_chain)
3733 VEC_free (tree, heap, dr_chain);
3734
3735 return true;
3736}
3737
3738/* Function vect_is_simple_cond.
b8698a0f 3739
ebfd146a
IR
3740 Input:
3741 LOOP - the loop that is being vectorized.
3742 COND - Condition that is checked for simple use.
3743
3744 Returns whether a COND can be vectorized. Checks whether
 3745    condition operands are supportable using vect_is_simple_use. */
3746
3747static bool
3748vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3749{
3750 tree lhs, rhs;
3751 tree def;
3752 enum vect_def_type dt;
3753
3754 if (!COMPARISON_CLASS_P (cond))
3755 return false;
3756
3757 lhs = TREE_OPERAND (cond, 0);
3758 rhs = TREE_OPERAND (cond, 1);
3759
3760 if (TREE_CODE (lhs) == SSA_NAME)
3761 {
3762 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
b8698a0f 3763 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
a70d6342 3764 &dt))
ebfd146a
IR
3765 return false;
3766 }
3767 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3768 && TREE_CODE (lhs) != FIXED_CST)
3769 return false;
3770
3771 if (TREE_CODE (rhs) == SSA_NAME)
3772 {
3773 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
b8698a0f 3774 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
a70d6342 3775 &dt))
ebfd146a
IR
3776 return false;
3777 }
3778 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3779 && TREE_CODE (rhs) != FIXED_CST)
3780 return false;
3781
3782 return true;
3783}
3784
3785/* vectorizable_condition.
3786
b8698a0f
L
 3787    Check if STMT is a conditional modify expression that can be vectorized.
3788 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3789 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4bbe8262
IR
3790 at GSI.
3791
3792 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
3793 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
 3794    else clause if it is 2).
ebfd146a
IR
3795
3796 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
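/* A minimal sketch (hypothetical names, not from this file): the scalar
   statement

     x = a < b ? c : d;

   is vectorized here into

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   where va, vb, vc and vd are the vector defs obtained for a, b, c and d
   via vect_get_vec_def_for_operand (or REDUC_DEF when REDUC_INDEX selects
   the then or else clause).  */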
3797
4bbe8262 3798bool
ebfd146a 3799vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4bbe8262 3800 gimple *vec_stmt, tree reduc_def, int reduc_index)
ebfd146a
IR
3801{
3802 tree scalar_dest = NULL_TREE;
3803 tree vec_dest = NULL_TREE;
3804 tree op = NULL_TREE;
3805 tree cond_expr, then_clause, else_clause;
3806 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3807 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3808 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3809 tree vec_compare, vec_cond_expr;
3810 tree new_temp;
3811 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3812 enum machine_mode vec_mode;
3813 tree def;
3814 enum vect_def_type dt;
3815 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3816 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3817 enum tree_code code;
3818
a70d6342
IR
3819 /* FORNOW: unsupported in basic block SLP. */
3820 gcc_assert (loop_vinfo);
b8698a0f 3821
ebfd146a
IR
3822 gcc_assert (ncopies >= 1);
3823 if (ncopies > 1)
3824 return false; /* FORNOW */
3825
3826 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3827 return false;
3828
4bbe8262
IR
3829 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3830 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3831 && reduc_def))
ebfd146a
IR
3832 return false;
3833
3834 /* FORNOW: SLP not supported. */
3835 if (STMT_SLP_TYPE (stmt_info))
3836 return false;
3837
3838 /* FORNOW: not yet supported. */
b8698a0f 3839 if (STMT_VINFO_LIVE_P (stmt_info))
ebfd146a
IR
3840 {
3841 if (vect_print_dump_info (REPORT_DETAILS))
3842 fprintf (vect_dump, "value used after loop.");
3843 return false;
3844 }
3845
3846 /* Is vectorizable conditional operation? */
3847 if (!is_gimple_assign (stmt))
3848 return false;
3849
3850 code = gimple_assign_rhs_code (stmt);
3851
3852 if (code != COND_EXPR)
3853 return false;
3854
3855 gcc_assert (gimple_assign_single_p (stmt));
3856 op = gimple_assign_rhs1 (stmt);
3857 cond_expr = TREE_OPERAND (op, 0);
3858 then_clause = TREE_OPERAND (op, 1);
3859 else_clause = TREE_OPERAND (op, 2);
3860
3861 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3862 return false;
3863
3864 /* We do not handle two different vector types for the condition
3865 and the values. */
3866 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3867 return false;
3868
3869 if (TREE_CODE (then_clause) == SSA_NAME)
3870 {
3871 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
b8698a0f 3872 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
ebfd146a
IR
3873 &then_def_stmt, &def, &dt))
3874 return false;
3875 }
b8698a0f 3876 else if (TREE_CODE (then_clause) != INTEGER_CST
ebfd146a
IR
3877 && TREE_CODE (then_clause) != REAL_CST
3878 && TREE_CODE (then_clause) != FIXED_CST)
3879 return false;
3880
3881 if (TREE_CODE (else_clause) == SSA_NAME)
3882 {
3883 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
a70d6342 3884 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
ebfd146a
IR
3885 &else_def_stmt, &def, &dt))
3886 return false;
3887 }
b8698a0f 3888 else if (TREE_CODE (else_clause) != INTEGER_CST
ebfd146a
IR
3889 && TREE_CODE (else_clause) != REAL_CST
3890 && TREE_CODE (else_clause) != FIXED_CST)
3891 return false;
3892
3893
3894 vec_mode = TYPE_MODE (vectype);
3895
b8698a0f 3896 if (!vec_stmt)
ebfd146a
IR
3897 {
3898 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8e7aa1f9 3899 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
ebfd146a
IR
3900 }
3901
3902 /* Transform */
3903
3904 /* Handle def. */
3905 scalar_dest = gimple_assign_lhs (stmt);
3906 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3907
3908 /* Handle cond expr. */
b8698a0f 3909 vec_cond_lhs =
ebfd146a 3910 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
b8698a0f 3911 vec_cond_rhs =
ebfd146a 3912 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
4bbe8262
IR
3913 if (reduc_index == 1)
3914 vec_then_clause = reduc_def;
3915 else
3916 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3917 if (reduc_index == 2)
3918 vec_else_clause = reduc_def;
3919 else
3920 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
ebfd146a
IR
3921
3922 /* Arguments are ready. Create the new vector stmt. */
b8698a0f 3923 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
ebfd146a 3924 vec_cond_lhs, vec_cond_rhs);
b8698a0f 3925 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
ebfd146a
IR
3926 vec_compare, vec_then_clause, vec_else_clause);
3927
3928 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3929 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3930 gimple_assign_set_lhs (*vec_stmt, new_temp);
3931 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
b8698a0f 3932
ebfd146a
IR
3933 return true;
3934}
3935
3936
8644a673 3937/* Make sure the statement is vectorizable. */
ebfd146a
IR
3938
3939bool
a70d6342 3940vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
ebfd146a 3941{
8644a673 3942 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
a70d6342 3943 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 3944 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
ebfd146a 3945 bool ok;
a70d6342
IR
3946 HOST_WIDE_INT dummy;
3947 tree scalar_type, vectype;
ebfd146a
IR
3948
3949 if (vect_print_dump_info (REPORT_DETAILS))
ebfd146a 3950 {
8644a673
IR
3951 fprintf (vect_dump, "==> examining statement: ");
3952 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3953 }
ebfd146a 3954
1825a1f3 3955 if (gimple_has_volatile_ops (stmt))
b8698a0f 3956 {
1825a1f3
IR
3957 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3958 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3959
3960 return false;
3961 }
b8698a0f
L
3962
3963 /* Skip stmts that do not need to be vectorized. In loops this is expected
8644a673
IR
3964 to include:
3965 - the COND_EXPR which is the loop exit condition
3966 - any LABEL_EXPRs in the loop
b8698a0f 3967 - computations that are used only for array indexing or loop control.
8644a673
IR
3968 In basic blocks we only analyze statements that are a part of some SLP
 3969    instance; therefore, all the statements are relevant. */
ebfd146a 3970
b8698a0f 3971 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 3972 && !STMT_VINFO_LIVE_P (stmt_info))
ebfd146a
IR
3973 {
3974 if (vect_print_dump_info (REPORT_DETAILS))
8644a673 3975 fprintf (vect_dump, "irrelevant.");
ebfd146a 3976
8644a673
IR
3977 return true;
3978 }
ebfd146a 3979
8644a673
IR
3980 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3981 {
3982 case vect_internal_def:
3983 break;
ebfd146a 3984
8644a673 3985 case vect_reduction_def:
7c5222ff 3986 case vect_nested_cycle:
a70d6342 3987 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
8644a673 3988 || relevance == vect_used_in_outer_by_reduction
a70d6342 3989 || relevance == vect_unused_in_scope));
8644a673
IR
3990 break;
3991
3992 case vect_induction_def:
3993 case vect_constant_def:
3994 case vect_external_def:
3995 case vect_unknown_def_type:
3996 default:
3997 gcc_unreachable ();
3998 }
ebfd146a 3999
a70d6342
IR
4000 if (bb_vinfo)
4001 {
4002 gcc_assert (PURE_SLP_STMT (stmt_info));
4003
4004 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
4005 if (vect_print_dump_info (REPORT_DETAILS))
4006 {
4007 fprintf (vect_dump, "get vectype for scalar type: ");
4008 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4009 }
4010
4011 vectype = get_vectype_for_scalar_type (scalar_type);
4012 if (!vectype)
4013 {
4014 if (vect_print_dump_info (REPORT_DETAILS))
4015 {
4016 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4017 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4018 }
4019 return false;
4020 }
4021
4022 if (vect_print_dump_info (REPORT_DETAILS))
4023 {
4024 fprintf (vect_dump, "vectype: ");
4025 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4026 }
4027
4028 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4029 }
4030
8644a673 4031 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 4032 {
8644a673
IR
4033 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4034 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4035 *need_to_vectorize = true;
ebfd146a
IR
4036 }
4037
8644a673 4038 ok = true;
b8698a0f 4039 if (!bb_vinfo
a70d6342
IR
4040 && (STMT_VINFO_RELEVANT_P (stmt_info)
4041 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8644a673
IR
4042 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4043 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4044 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4045 || vectorizable_operation (stmt, NULL, NULL, NULL)
4046 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4047 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4048 || vectorizable_call (stmt, NULL, NULL)
4049 || vectorizable_store (stmt, NULL, NULL, NULL)
4bbe8262
IR
4050 || vectorizable_reduction (stmt, NULL, NULL)
4051 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
a70d6342
IR
4052 else
4053 {
4054 if (bb_vinfo)
4055 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4056 || vectorizable_assignment (stmt, NULL, NULL, node)
4057 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4058 || vectorizable_store (stmt, NULL, NULL, node));
b8698a0f 4059 }
8644a673
IR
4060
4061 if (!ok)
ebfd146a 4062 {
8644a673
IR
4063 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4064 {
4065 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4066 fprintf (vect_dump, "supported: ");
4067 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4068 }
b8698a0f 4069
ebfd146a
IR
4070 return false;
4071 }
4072
a70d6342
IR
4073 if (bb_vinfo)
4074 return true;
4075
8644a673
IR
 4076  /* Stmts that are (also) "live" (i.e. - that are used outside the loop)
4077 need extra handling, except for vectorizable reductions. */
4078 if (STMT_VINFO_LIVE_P (stmt_info)
4079 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4080 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 4081
8644a673 4082 if (!ok)
ebfd146a 4083 {
8644a673
IR
4084 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4085 {
4086 fprintf (vect_dump, "not vectorized: live stmt not ");
4087 fprintf (vect_dump, "supported: ");
4088 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4089 }
b8698a0f 4090
8644a673 4091 return false;
ebfd146a
IR
4092 }
4093
8644a673 4094 if (!PURE_SLP_STMT (stmt_info))
ebfd146a 4095 {
b8698a0f
L
4096 /* Groups of strided accesses whose size is not a power of 2 are not
4097 vectorizable yet using loop-vectorization. Therefore, if this stmt
4098 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
a70d6342 4099 loop-based vectorized), the loop cannot be vectorized. */
8644a673
IR
4100 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4101 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4102 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
ebfd146a 4103 {
8644a673
IR
4104 if (vect_print_dump_info (REPORT_DETAILS))
4105 {
4106 fprintf (vect_dump, "not vectorized: the size of group "
4107 "of strided accesses is not a power of 2");
4108 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4109 }
4110
ebfd146a
IR
4111 return false;
4112 }
4113 }
b8698a0f 4114
ebfd146a
IR
4115 return true;
4116}
4117
4118
4119/* Function vect_transform_stmt.
4120
4121 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4122
4123bool
4124vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
b8698a0f 4125 bool *strided_store, slp_tree slp_node,
ebfd146a
IR
4126 slp_instance slp_node_instance)
4127{
4128 bool is_store = false;
4129 gimple vec_stmt = NULL;
4130 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4131 gimple orig_stmt_in_pattern;
4132 bool done;
ebfd146a
IR
4133
4134 switch (STMT_VINFO_TYPE (stmt_info))
4135 {
4136 case type_demotion_vec_info_type:
4137 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4138 gcc_assert (done);
4139 break;
4140
4141 case type_promotion_vec_info_type:
4142 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4143 gcc_assert (done);
4144 break;
4145
4146 case type_conversion_vec_info_type:
4147 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4148 gcc_assert (done);
4149 break;
4150
4151 case induc_vec_info_type:
4152 gcc_assert (!slp_node);
4153 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4154 gcc_assert (done);
4155 break;
4156
4157 case op_vec_info_type:
4158 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4159 gcc_assert (done);
4160 break;
4161
4162 case assignment_vec_info_type:
4163 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4164 gcc_assert (done);
4165 break;
4166
4167 case load_vec_info_type:
b8698a0f 4168 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
ebfd146a
IR
4169 slp_node_instance);
4170 gcc_assert (done);
4171 break;
4172
4173 case store_vec_info_type:
4174 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4175 gcc_assert (done);
4176 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4177 {
4178 /* In case of interleaving, the whole chain is vectorized when the
4179 last store in the chain is reached. Store stmts before the last
 4180         one are skipped, and their vec_stmt_info shouldn't be freed
4181 meanwhile. */
4182 *strided_store = true;
4183 if (STMT_VINFO_VEC_STMT (stmt_info))
4184 is_store = true;
4185 }
4186 else
4187 is_store = true;
4188 break;
4189
4190 case condition_vec_info_type:
4191 gcc_assert (!slp_node);
4bbe8262 4192 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
ebfd146a
IR
4193 gcc_assert (done);
4194 break;
4195
4196 case call_vec_info_type:
4197 gcc_assert (!slp_node);
4198 done = vectorizable_call (stmt, gsi, &vec_stmt);
4199 break;
4200
4201 case reduc_vec_info_type:
4202 gcc_assert (!slp_node);
4203 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4204 gcc_assert (done);
4205 break;
4206
4207 default:
4208 if (!STMT_VINFO_LIVE_P (stmt_info))
4209 {
4210 if (vect_print_dump_info (REPORT_DETAILS))
4211 fprintf (vect_dump, "stmt not supported.");
4212 gcc_unreachable ();
4213 }
4214 }
4215
4216 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4217 is being vectorized, but outside the immediately enclosing loop. */
4218 if (vec_stmt
a70d6342
IR
4219 && STMT_VINFO_LOOP_VINFO (stmt_info)
4220 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4221 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
ebfd146a
IR
4222 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4223 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
b8698a0f 4224 || STMT_VINFO_RELEVANT (stmt_info) ==
a70d6342 4225 vect_used_in_outer_by_reduction))
ebfd146a 4226 {
a70d6342
IR
4227 struct loop *innerloop = LOOP_VINFO_LOOP (
4228 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
ebfd146a
IR
4229 imm_use_iterator imm_iter;
4230 use_operand_p use_p;
4231 tree scalar_dest;
4232 gimple exit_phi;
4233
4234 if (vect_print_dump_info (REPORT_DETAILS))
a70d6342 4235 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
ebfd146a
IR
4236
 4237      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4238 (to be used when vectorizing outer-loop stmts that use the DEF of
4239 STMT). */
4240 if (gimple_code (stmt) == GIMPLE_PHI)
4241 scalar_dest = PHI_RESULT (stmt);
4242 else
4243 scalar_dest = gimple_assign_lhs (stmt);
4244
4245 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4246 {
4247 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4248 {
4249 exit_phi = USE_STMT (use_p);
4250 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4251 }
4252 }
4253 }
4254
4255 /* Handle stmts whose DEF is used outside the loop-nest that is
4256 being vectorized. */
4257 if (STMT_VINFO_LIVE_P (stmt_info)
4258 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4259 {
4260 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4261 gcc_assert (done);
4262 }
4263
4264 if (vec_stmt)
4265 {
4266 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4267 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4268 if (orig_stmt_in_pattern)
4269 {
4270 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4271 /* STMT was inserted by the vectorizer to replace a computation idiom.
b8698a0f
L
4272 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4273 computed this idiom. We need to record a pointer to VEC_STMT in
4274 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
ebfd146a
IR
4275 documentation of vect_pattern_recog. */
4276 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4277 {
4278 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4279 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4280 }
4281 }
4282 }
4283
b8698a0f 4284 return is_store;
ebfd146a
IR
4285}
4286
4287
b8698a0f 4288/* Remove a group of stores (for SLP or interleaving), free their
ebfd146a
IR
4289 stmt_vec_info. */
4290
4291void
4292vect_remove_stores (gimple first_stmt)
4293{
4294 gimple next = first_stmt;
4295 gimple tmp;
4296 gimple_stmt_iterator next_si;
4297
4298 while (next)
4299 {
4300 /* Free the attached stmt_vec_info and remove the stmt. */
4301 next_si = gsi_for_stmt (next);
4302 gsi_remove (&next_si, true);
4303 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4304 free_stmt_vec_info (next);
4305 next = tmp;
4306 }
4307}
4308
4309
4310/* Function new_stmt_vec_info.
4311
4312 Create and initialize a new stmt_vec_info struct for STMT. */
4313
4314stmt_vec_info
b8698a0f 4315new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
a70d6342 4316 bb_vec_info bb_vinfo)
ebfd146a
IR
4317{
4318 stmt_vec_info res;
4319 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4320
4321 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4322 STMT_VINFO_STMT (res) = stmt;
4323 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
a70d6342 4324 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
8644a673 4325 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
ebfd146a
IR
4326 STMT_VINFO_LIVE_P (res) = false;
4327 STMT_VINFO_VECTYPE (res) = NULL;
4328 STMT_VINFO_VEC_STMT (res) = NULL;
4329 STMT_VINFO_IN_PATTERN_P (res) = false;
4330 STMT_VINFO_RELATED_STMT (res) = NULL;
4331 STMT_VINFO_DATA_REF (res) = NULL;
4332
4333 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4334 STMT_VINFO_DR_OFFSET (res) = NULL;
4335 STMT_VINFO_DR_INIT (res) = NULL;
4336 STMT_VINFO_DR_STEP (res) = NULL;
4337 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4338
4339 if (gimple_code (stmt) == GIMPLE_PHI
4340 && is_loop_header_bb_p (gimple_bb (stmt)))
4341 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4342 else
8644a673
IR
4343 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4344
ebfd146a
IR
4345 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4346 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4347 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
32e8bb8e 4348 STMT_SLP_TYPE (res) = loop_vect;
ebfd146a
IR
4349 DR_GROUP_FIRST_DR (res) = NULL;
4350 DR_GROUP_NEXT_DR (res) = NULL;
4351 DR_GROUP_SIZE (res) = 0;
4352 DR_GROUP_STORE_COUNT (res) = 0;
4353 DR_GROUP_GAP (res) = 0;
4354 DR_GROUP_SAME_DR_STMT (res) = NULL;
4355 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4356
4357 return res;
4358}
4359
4360
 4361/* Create a vector for stmt_vec_info structs.  */
4362
4363void
4364init_stmt_vec_info_vec (void)
4365{
4366 gcc_assert (!stmt_vec_info_vec);
4367 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4368}
4369
4370
 4371/* Free the vector of stmt_vec_info structs.  */
4372
4373void
4374free_stmt_vec_info_vec (void)
4375{
4376 gcc_assert (stmt_vec_info_vec);
4377 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4378}
4379
4380
4381/* Free stmt vectorization related info. */
4382
4383void
4384free_stmt_vec_info (gimple stmt)
4385{
4386 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4387
4388 if (!stmt_info)
4389 return;
4390
4391 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4392 set_vinfo_for_stmt (stmt, NULL);
4393 free (stmt_info);
4394}
4395
4396
4397/* Function get_vectype_for_scalar_type.
4398
4399 Returns the vector type corresponding to SCALAR_TYPE as supported
4400 by the target. */
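/* For example (an indicative sketch, assuming a target whose
   UNITS_PER_SIMD_WORD is 16 bytes): a 4-byte 'int' gives
   nunits = 16 / 4 = 4 and the vector type V4SI, an 8-byte 'double' gives
   V2DF, and a scalar of 16 bytes or more makes the function return
   NULL_TREE.  */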
4401
4402tree
4403get_vectype_for_scalar_type (tree scalar_type)
4404{
4405 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
2f816591 4406 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
ebfd146a
IR
4407 int nunits;
4408 tree vectype;
4409
4410 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4411 return NULL_TREE;
4412
2f816591
RG
4413 /* We can't build a vector type of elements with alignment bigger than
4414 their size. */
4415 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4416 return NULL_TREE;
4417
6d7971b8
RG
4418 /* If we'd build a vector type of elements whose mode precision doesn't
 4419    match their type's precision we'll get mismatched types on vector
4420 extracts via BIT_FIELD_REFs. This effectively means we disable
4421 vectorization of bool and/or enum types in some languages. */
4422 if (INTEGRAL_TYPE_P (scalar_type)
4423 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4424 return NULL_TREE;
4425
ebfd146a
IR
4426 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4427 is expected. */
4428 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4429
4430 vectype = build_vector_type (scalar_type, nunits);
4431 if (vect_print_dump_info (REPORT_DETAILS))
4432 {
4433 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4434 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4435 }
4436
4437 if (!vectype)
4438 return NULL_TREE;
4439
4440 if (vect_print_dump_info (REPORT_DETAILS))
4441 {
4442 fprintf (vect_dump, "vectype: ");
4443 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4444 }
4445
4446 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4447 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4448 {
4449 if (vect_print_dump_info (REPORT_DETAILS))
4450 fprintf (vect_dump, "mode not supported by target.");
4451 return NULL_TREE;
4452 }
4453
4454 return vectype;
4455}
4456
4457/* Function vect_is_simple_use.
4458
4459 Input:
a70d6342
IR
4460 LOOP_VINFO - the vect info of the loop that is being vectorized.
4461 BB_VINFO - the vect info of the basic block that is being vectorized.
4462 OPERAND - operand of a stmt in the loop or bb.
ebfd146a
IR
4463 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4464
4465 Returns whether a stmt with OPERAND can be vectorized.
b8698a0f
L
4466 For loops, supportable operands are constants, loop invariants, and operands
4467 that are defined by the current iteration of the loop. Unsupportable
4468 operands are those that are defined by a previous iteration of the loop (as
a70d6342
IR
4469 is the case in reduction/induction computations).
4470 For basic blocks, supportable operands are constants and bb invariants.
4471 For now, operands defined outside the basic block are not supported. */
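/* For example (an illustrative sketch): when vectorizing a loop, the
   constant 5 yields *DT == vect_constant_def, an SSA name defined before
   the loop yields vect_external_def, and an SSA name defined by a
   GIMPLE_ASSIGN inside the loop yields the def type recorded in its
   stmt_vec_info, typically vect_internal_def.  */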
ebfd146a
IR
4472
4473bool
b8698a0f 4474vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
a70d6342 4475 bb_vec_info bb_vinfo, gimple *def_stmt,
ebfd146a 4476 tree *def, enum vect_def_type *dt)
b8698a0f 4477{
ebfd146a
IR
4478 basic_block bb;
4479 stmt_vec_info stmt_vinfo;
a70d6342 4480 struct loop *loop = NULL;
b8698a0f 4481
a70d6342
IR
4482 if (loop_vinfo)
4483 loop = LOOP_VINFO_LOOP (loop_vinfo);
ebfd146a
IR
4484
4485 *def_stmt = NULL;
4486 *def = NULL_TREE;
b8698a0f 4487
ebfd146a
IR
4488 if (vect_print_dump_info (REPORT_DETAILS))
4489 {
4490 fprintf (vect_dump, "vect_is_simple_use: operand ");
4491 print_generic_expr (vect_dump, operand, TDF_SLIM);
4492 }
b8698a0f 4493
ebfd146a
IR
4494 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4495 {
4496 *dt = vect_constant_def;
4497 return true;
4498 }
b8698a0f 4499
ebfd146a
IR
4500 if (is_gimple_min_invariant (operand))
4501 {
4502 *def = operand;
8644a673 4503 *dt = vect_external_def;
ebfd146a
IR
4504 return true;
4505 }
4506
4507 if (TREE_CODE (operand) == PAREN_EXPR)
4508 {
4509 if (vect_print_dump_info (REPORT_DETAILS))
4510 fprintf (vect_dump, "non-associatable copy.");
4511 operand = TREE_OPERAND (operand, 0);
4512 }
b8698a0f 4513
ebfd146a
IR
4514 if (TREE_CODE (operand) != SSA_NAME)
4515 {
4516 if (vect_print_dump_info (REPORT_DETAILS))
4517 fprintf (vect_dump, "not ssa-name.");
4518 return false;
4519 }
b8698a0f 4520
ebfd146a
IR
4521 *def_stmt = SSA_NAME_DEF_STMT (operand);
4522 if (*def_stmt == NULL)
4523 {
4524 if (vect_print_dump_info (REPORT_DETAILS))
4525 fprintf (vect_dump, "no def_stmt.");
4526 return false;
4527 }
4528
4529 if (vect_print_dump_info (REPORT_DETAILS))
4530 {
4531 fprintf (vect_dump, "def_stmt: ");
4532 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4533 }
4534
8644a673 4535 /* Empty stmt is expected only in case of a function argument.
ebfd146a
IR
4536 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4537 if (gimple_nop_p (*def_stmt))
4538 {
4539 *def = operand;
8644a673 4540 *dt = vect_external_def;
ebfd146a
IR
4541 return true;
4542 }
4543
4544 bb = gimple_bb (*def_stmt);
a70d6342
IR
4545
4546 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4547 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
b8698a0f 4548 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
8644a673 4549 *dt = vect_external_def;
ebfd146a
IR
4550 else
4551 {
4552 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4553 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4554 }
4555
4556 if (*dt == vect_unknown_def_type)
4557 {
4558 if (vect_print_dump_info (REPORT_DETAILS))
4559 fprintf (vect_dump, "Unsupported pattern.");
4560 return false;
4561 }
4562
4563 if (vect_print_dump_info (REPORT_DETAILS))
4564 fprintf (vect_dump, "type of def: %d.",*dt);
4565
4566 switch (gimple_code (*def_stmt))
4567 {
4568 case GIMPLE_PHI:
4569 *def = gimple_phi_result (*def_stmt);
4570 break;
4571
4572 case GIMPLE_ASSIGN:
4573 *def = gimple_assign_lhs (*def_stmt);
4574 break;
4575
4576 case GIMPLE_CALL:
4577 *def = gimple_call_lhs (*def_stmt);
4578 if (*def != NULL)
4579 break;
4580 /* FALLTHRU */
4581 default:
4582 if (vect_print_dump_info (REPORT_DETAILS))
4583 fprintf (vect_dump, "unsupported defining stmt: ");
4584 return false;
4585 }
4586
4587 return true;
4588}
4589
4590
4591/* Function supportable_widening_operation
4592
b8698a0f
L
4593 Check whether an operation represented by the code CODE is a
4594 widening operation that is supported by the target platform in
ebfd146a 4595 vector form (i.e., when operating on arguments of type VECTYPE).
b8698a0f 4596
ebfd146a
IR
4597 Widening operations we currently support are NOP (CONVERT), FLOAT
4598 and WIDEN_MULT. This function checks if these operations are supported
4599 by the target platform either directly (via vector tree-codes), or via
4600 target builtins.
4601
4602 Output:
b8698a0f
L
4603 - CODE1 and CODE2 are codes of vector operations to be used when
4604 vectorizing the operation, if available.
ebfd146a
IR
4605 - DECL1 and DECL2 are decls of target builtin functions to be used
4606 when vectorizing the operation, if available. In this case,
b8698a0f 4607 CODE1 and CODE2 are CALL_EXPR.
ebfd146a
IR
4608 - MULTI_STEP_CVT determines the number of required intermediate steps in
4609 case of multi-step conversion (like char->short->int - in that case
4610 MULTI_STEP_CVT will be 1).
b8698a0f
L
4611 - INTERM_TYPES contains the intermediate type required to perform the
4612 widening operation (short in the above example). */
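/* An indicative, target-dependent sketch: a WIDEN_MULT_EXPR on V8HI
   operands producing V4SI results on a little-endian target yields
   CODE1 == VEC_WIDEN_MULT_LO_EXPR and CODE2 == VEC_WIDEN_MULT_HI_EXPR in a
   single step, whereas a char -> int conversion takes the multi-step path
   below, recording the intermediate short vector type in INTERM_TYPES and
   incrementing MULTI_STEP_CVT once.  */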
ebfd146a
IR
4613
4614bool
4615supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4616 tree *decl1, tree *decl2,
4617 enum tree_code *code1, enum tree_code *code2,
4618 int *multi_step_cvt,
4619 VEC (tree, heap) **interm_types)
4620{
4621 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4622 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4623 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4624 bool ordered_p;
4625 enum machine_mode vec_mode;
81f40b79 4626 enum insn_code icode1, icode2;
ebfd146a
IR
4627 optab optab1, optab2;
4628 tree type = gimple_expr_type (stmt);
4629 tree wide_vectype = get_vectype_for_scalar_type (type);
4630 enum tree_code c1, c2;
4631
4632 /* The result of a vectorized widening operation usually requires two vectors
b8698a0f
L
 4633     (because the widened results do not fit in one vector).  The
 4634     vector results would normally be expected to be generated in the same
ebfd146a
IR
4635 order as in the original scalar computation, i.e. if 8 results are
4636 generated in each vector iteration, they are to be organized as follows:
b8698a0f 4637 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
ebfd146a 4638
b8698a0f 4639 However, in the special case that the result of the widening operation is
ebfd146a 4640 used in a reduction computation only, the order doesn't matter (because
b8698a0f 4641 when vectorizing a reduction we change the order of the computation).
ebfd146a
IR
4642 Some targets can take advantage of this and generate more efficient code.
4643 For example, targets like Altivec, that support widen_mult using a sequence
4644 of {mult_even,mult_odd} generate the following vectors:
4645 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4646
4647 When vectorizing outer-loops, we execute the inner-loop sequentially
b8698a0f
L
4648 (each vectorized inner-loop iteration contributes to VF outer-loop
 4649     iterations in parallel).  We therefore don't allow changing the order
ebfd146a
IR
4650 of the computation in the inner-loop during outer-loop vectorization. */
4651
4652 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4653 && !nested_in_vect_loop_p (vect_loop, stmt))
4654 ordered_p = false;
4655 else
4656 ordered_p = true;
4657
4658 if (!ordered_p
4659 && code == WIDEN_MULT_EXPR
4660 && targetm.vectorize.builtin_mul_widen_even
4661 && targetm.vectorize.builtin_mul_widen_even (vectype)
4662 && targetm.vectorize.builtin_mul_widen_odd
4663 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4664 {
4665 if (vect_print_dump_info (REPORT_DETAILS))
4666 fprintf (vect_dump, "Unordered widening operation detected.");
4667
4668 *code1 = *code2 = CALL_EXPR;
4669 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4670 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4671 return true;
4672 }
4673
4674 switch (code)
4675 {
4676 case WIDEN_MULT_EXPR:
4677 if (BYTES_BIG_ENDIAN)
4678 {
4679 c1 = VEC_WIDEN_MULT_HI_EXPR;
4680 c2 = VEC_WIDEN_MULT_LO_EXPR;
4681 }
4682 else
4683 {
4684 c2 = VEC_WIDEN_MULT_HI_EXPR;
4685 c1 = VEC_WIDEN_MULT_LO_EXPR;
4686 }
4687 break;
4688
4689 CASE_CONVERT:
4690 if (BYTES_BIG_ENDIAN)
4691 {
4692 c1 = VEC_UNPACK_HI_EXPR;
4693 c2 = VEC_UNPACK_LO_EXPR;
4694 }
4695 else
4696 {
4697 c2 = VEC_UNPACK_HI_EXPR;
4698 c1 = VEC_UNPACK_LO_EXPR;
4699 }
4700 break;
4701
4702 case FLOAT_EXPR:
4703 if (BYTES_BIG_ENDIAN)
4704 {
4705 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4706 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4707 }
4708 else
4709 {
4710 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4711 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4712 }
4713 break;
4714
4715 case FIX_TRUNC_EXPR:
4716 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4717 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4718 computing the operation. */
4719 return false;
4720
4721 default:
4722 gcc_unreachable ();
4723 }
4724
4725 if (code == FIX_TRUNC_EXPR)
4726 {
4727 /* The signedness is determined from output operand. */
4728 optab1 = optab_for_tree_code (c1, type, optab_default);
4729 optab2 = optab_for_tree_code (c2, type, optab_default);
4730 }
4731 else
4732 {
4733 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4734 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4735 }
4736
4737 if (!optab1 || !optab2)
4738 return false;
4739
4740 vec_mode = TYPE_MODE (vectype);
4741 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4742 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4743 == CODE_FOR_nothing)
4744 return false;
4745
b8698a0f 4746 /* Check if it's a multi-step conversion that can be done using intermediate
ebfd146a
IR
4747 types. */
4748 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4749 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4750 {
4751 int i;
4752 tree prev_type = vectype, intermediate_type;
4753 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4754 optab optab3, optab4;
4755
4756 if (!CONVERT_EXPR_CODE_P (code))
4757 return false;
b8698a0f 4758
ebfd146a
IR
4759 *code1 = c1;
4760 *code2 = c2;
b8698a0f 4761
ebfd146a
IR
4762 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
 4763         intermediate steps in the promotion sequence.  We try MAX_INTERM_CVT_STEPS
 4764         to get to WIDE_VECTYPE, and fail if we do not.  */
4765 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4766 for (i = 0; i < 3; i++)
4767 {
4768 intermediate_mode = insn_data[icode1].operand[0].mode;
4769 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4770 TYPE_UNSIGNED (prev_type));
4771 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4772 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4773
4774 if (!optab3 || !optab4
4775 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4776 == CODE_FOR_nothing
4777 || insn_data[icode1].operand[0].mode != intermediate_mode
4778 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4779 == CODE_FOR_nothing
4780 || insn_data[icode2].operand[0].mode != intermediate_mode
b8698a0f 4781 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
ebfd146a
IR
4782 == CODE_FOR_nothing
4783 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4784 == CODE_FOR_nothing)
4785 return false;
4786
4787 VEC_quick_push (tree, *interm_types, intermediate_type);
4788 (*multi_step_cvt)++;
4789
4790 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4791 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4792 return true;
4793
4794 prev_type = intermediate_type;
4795 prev_mode = intermediate_mode;
4796 }
4797
4798 return false;
4799 }
4800
4801 *code1 = c1;
4802 *code2 = c2;
4803 return true;
4804}
4805
4806
4807/* Function supportable_narrowing_operation
4808
b8698a0f
L
4809 Check whether an operation represented by the code CODE is a
4810 narrowing operation that is supported by the target platform in
ebfd146a 4811 vector form (i.e., when operating on arguments of type VECTYPE).
b8698a0f 4812
ebfd146a
IR
4813 Narrowing operations we currently support are NOP (CONVERT) and
4814 FIX_TRUNC. This function checks if these operations are supported by
4815 the target platform directly via vector tree-codes.
4816
4817 Output:
b8698a0f
L
4818 - CODE1 is the code of a vector operation to be used when
4819 vectorizing the operation, if available.
ebfd146a
IR
4820 - MULTI_STEP_CVT determines the number of required intermediate steps in
4821 case of multi-step conversion (like int->short->char - in that case
4822 MULTI_STEP_CVT will be 1).
4823 - INTERM_TYPES contains the intermediate type required to perform the
b8698a0f 4824 narrowing operation (short in the above example). */
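/* An indicative sketch, mirroring the widening case above: an int -> char
   conversion with V4SI operands uses CODE1 == VEC_PACK_TRUNC_EXPR; if the
   target can only pack one step at a time, the loop below records the
   intermediate short vector type in INTERM_TYPES and increments
   MULTI_STEP_CVT once.  */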
ebfd146a
IR
4825
4826bool
4827supportable_narrowing_operation (enum tree_code code,
4828 const_gimple stmt, tree vectype,
4829 enum tree_code *code1, int *multi_step_cvt,
4830 VEC (tree, heap) **interm_types)
4831{
4832 enum machine_mode vec_mode;
4833 enum insn_code icode1;
4834 optab optab1, interm_optab;
4835 tree type = gimple_expr_type (stmt);
4836 tree narrow_vectype = get_vectype_for_scalar_type (type);
4837 enum tree_code c1;
4838 tree intermediate_type, prev_type;
4839 int i;
4840
4841 switch (code)
4842 {
4843 CASE_CONVERT:
4844 c1 = VEC_PACK_TRUNC_EXPR;
4845 break;
4846
4847 case FIX_TRUNC_EXPR:
4848 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4849 break;
4850
4851 case FLOAT_EXPR:
4852 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4853 tree code and optabs used for computing the operation. */
4854 return false;
4855
4856 default:
4857 gcc_unreachable ();
4858 }
4859
4860 if (code == FIX_TRUNC_EXPR)
4861 /* The signedness is determined from output operand. */
4862 optab1 = optab_for_tree_code (c1, type, optab_default);
4863 else
4864 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4865
4866 if (!optab1)
4867 return false;
4868
4869 vec_mode = TYPE_MODE (vectype);
b8698a0f 4870 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
ebfd146a
IR
4871 == CODE_FOR_nothing)
4872 return false;
4873
4874 /* Check if it's a multi-step conversion that can be done using intermediate
4875 types. */
4876 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4877 {
4878 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4879
4880 *code1 = c1;
4881 prev_type = vectype;
4882 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
 4883         intermediate steps in the narrowing sequence.  We try MAX_INTERM_CVT_STEPS
4884 to get to NARROW_VECTYPE, and fail if we do not. */
4885 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4886 for (i = 0; i < 3; i++)
4887 {
4888 intermediate_mode = insn_data[icode1].operand[0].mode;
4889 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4890 TYPE_UNSIGNED (prev_type));
b8698a0f 4891 interm_optab = optab_for_tree_code (c1, intermediate_type,
ebfd146a 4892 optab_default);
b8698a0f 4893 if (!interm_optab
ebfd146a
IR
4894 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4895 == CODE_FOR_nothing
4896 || insn_data[icode1].operand[0].mode != intermediate_mode
b8698a0f 4897 || (icode1
ebfd146a
IR
4898 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4899 == CODE_FOR_nothing)
4900 return false;
4901
4902 VEC_quick_push (tree, *interm_types, intermediate_type);
4903 (*multi_step_cvt)++;
4904
4905 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4906 return true;
4907
4908 prev_type = intermediate_type;
4909 prev_mode = intermediate_mode;
4910 }
4911
4912 return false;
4913 }
4914
4915 *code1 = c1;
4916 return true;
4917}