1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "toplev.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
43
44
45 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46
47 /* Function vect_mark_relevant.
48
49 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50
51 static void
52 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
53 enum vect_relevant relevant, bool live_p)
54 {
55 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
56 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
57 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58
59 if (vect_print_dump_info (REPORT_DETAILS))
60 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61
62 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 {
64 gimple pattern_stmt;
65
66 /* This is the last stmt in a sequence that was detected as a
67 pattern that can potentially be vectorized. Don't mark the stmt
68 as relevant/live because it's not going to be vectorized.
69 Instead mark the pattern-stmt that replaces it. */
70
71 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72
73 if (vect_print_dump_info (REPORT_DETAILS))
74 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
75 stmt_info = vinfo_for_stmt (pattern_stmt);
76 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
77 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
78 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
79 stmt = pattern_stmt;
80 }
81
82 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
83 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
84 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85
86 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
87 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 {
89 if (vect_print_dump_info (REPORT_DETAILS))
90 fprintf (vect_dump, "already marked relevant/live.");
91 return;
92 }
93
94 VEC_safe_push (gimple, heap, *worklist, stmt);
95 }
96
97
98 /* Function vect_stmt_relevant_p.
99
100     Return true if STMT, in the loop represented by LOOP_VINFO, is
101 "relevant for vectorization".
102
103 A stmt is considered "relevant for vectorization" if:
104 - it has uses outside the loop.
105 - it has vdefs (it alters memory).
106     - it is a control stmt in the loop (other than the loop exit condition).
107
108 CHECKME: what other side effects would the vectorizer allow? */
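/* Editorial illustration (not part of the original source), assuming a
   loop such as:

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;      <-- has a vdef (alters memory): relevant
         s_1 = s_0 + b[i];     <-- s_1 is used after the loop: live
       }
     ... = s_1;

   The store is relevant because it changes memory; the summation is
   live because its result is used outside the loop, through the loop
   exit phi in loop-closed SSA form.  */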
109
110 static bool
111 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
112 enum vect_relevant *relevant, bool *live_p)
113 {
114 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
115 ssa_op_iter op_iter;
116 imm_use_iterator imm_iter;
117 use_operand_p use_p;
118 def_operand_p def_p;
119
120 *relevant = vect_unused_in_scope;
121 *live_p = false;
122
123 /* cond stmt other than loop exit cond. */
124 if (is_ctrl_stmt (stmt)
125 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
126 != loop_exit_ctrl_vec_info_type)
127 *relevant = vect_used_in_scope;
128
129 /* changing memory. */
130 if (gimple_code (stmt) != GIMPLE_PHI)
131 if (gimple_vdef (stmt))
132 {
133 if (vect_print_dump_info (REPORT_DETAILS))
134 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
135 *relevant = vect_used_in_scope;
136 }
137
138 /* uses outside the loop. */
139 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 {
141 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 {
143 basic_block bb = gimple_bb (USE_STMT (use_p));
144 if (!flow_bb_inside_loop_p (loop, bb))
145 {
146 if (vect_print_dump_info (REPORT_DETAILS))
147 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148
149 if (is_gimple_debug (USE_STMT (use_p)))
150 continue;
151
152 /* We expect all such uses to be in the loop exit phis
153              (because of loop-closed SSA form).  */
154 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
155 gcc_assert (bb == single_exit (loop)->dest);
156
157 *live_p = true;
158 }
159 }
160 }
161
162 return (*live_p || *relevant);
163 }
164
165
166 /* Function exist_non_indexing_operands_for_use_p
167
168 USE is one of the uses attached to STMT. Check if USE is
169 used in STMT for anything other than indexing an array. */
170
171 static bool
172 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
173 {
174 tree operand;
175 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
176
177 /* USE corresponds to some operand in STMT. If there is no data
178 reference in STMT, then any operand that corresponds to USE
179 is not indexing an array. */
180 if (!STMT_VINFO_DATA_REF (stmt_info))
181 return true;
182
183   /* STMT has a data_ref.  FORNOW this means that it is of one of
184 the following forms:
185 -1- ARRAY_REF = var
186 -2- var = ARRAY_REF
187 (This should have been verified in analyze_data_refs).
188
189 'var' in the second case corresponds to a def, not a use,
190 so USE cannot correspond to any operands that are not used
191 for array indexing.
192
193 Therefore, all we need to check is if STMT falls into the
194 first case, and whether var corresponds to USE. */
195
196 if (!gimple_assign_copy_p (stmt))
197 return false;
198 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
199 return false;
200 operand = gimple_assign_rhs1 (stmt);
201 if (TREE_CODE (operand) != SSA_NAME)
202 return false;
203
204 if (operand == use)
205 return true;
206
207 return false;
208 }
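/* Editorial illustration (not part of the original source): for a store
   of the form

     a[i_1] = x_2;

   a use of x_2 is the copied value (the rhs), so the function above
   returns true for it, whereas a use of i_1 only serves to compute the
   address of the ARRAY_REF and the function returns false.  */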
209
210
211 /*
212 Function process_use.
213
214 Inputs:
215 - a USE in STMT in a loop represented by LOOP_VINFO
216 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
217 that defined USE. This is done by calling mark_relevant and passing it
218 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
219
220 Outputs:
221 Generally, LIVE_P and RELEVANT are used to define the liveness and
222 relevance info of the DEF_STMT of this USE:
223 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
224 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
225 Exceptions:
226 - case 1: If USE is used only for address computations (e.g. array indexing),
227 which does not need to be directly vectorized, then the liveness/relevance
228 of the respective DEF_STMT is left unchanged.
229 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
230      skip DEF_STMT because it has already been processed.
231 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
232 be modified accordingly.
233
234 Return true if everything is as expected. Return false otherwise. */
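/* Editorial illustration (not part of the original source) of case 1
   above: in

     x_3 = a[i_2];

   the use of i_2 only feeds the address computation of the data
   reference, so the def-stmt of i_2 is not marked relevant/live here;
   the vectorizer generates its own address computation when the load is
   vectorized.  */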
235
236 static bool
237 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
238 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
239 {
240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
241 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
242 stmt_vec_info dstmt_vinfo;
243 basic_block bb, def_bb;
244 tree def;
245 gimple def_stmt;
246 enum vect_def_type dt;
247
248 /* case 1: we are only interested in uses that need to be vectorized. Uses
249 that are used for address computation are not considered relevant. */
250 if (!exist_non_indexing_operands_for_use_p (use, stmt))
251 return true;
252
253 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
254 {
255 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
256 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
257 return false;
258 }
259
260 if (!def_stmt || gimple_nop_p (def_stmt))
261 return true;
262
263 def_bb = gimple_bb (def_stmt);
264 if (!flow_bb_inside_loop_p (loop, def_bb))
265 {
266 if (vect_print_dump_info (REPORT_DETAILS))
267 fprintf (vect_dump, "def_stmt is out of loop.");
268 return true;
269 }
270
271 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
272 DEF_STMT must have already been processed, because this should be the
273 only way that STMT, which is a reduction-phi, was put in the worklist,
274 as there should be no other uses for DEF_STMT in the loop. So we just
275 check that everything is as expected, and we are done. */
276 dstmt_vinfo = vinfo_for_stmt (def_stmt);
277 bb = gimple_bb (stmt);
278 if (gimple_code (stmt) == GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
280 && gimple_code (def_stmt) != GIMPLE_PHI
281 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
282 && bb->loop_father == def_bb->loop_father)
283 {
284 if (vect_print_dump_info (REPORT_DETAILS))
285 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
286 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
287 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
288 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
289 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
290 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
291 return true;
292 }
293
294 /* case 3a: outer-loop stmt defining an inner-loop stmt:
295 outer-loop-header-bb:
296 d = def_stmt
297 inner-loop:
298 stmt # use (d)
299 outer-loop-tail-bb:
300 ... */
301 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
302 {
303 if (vect_print_dump_info (REPORT_DETAILS))
304 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
305
306 switch (relevant)
307 {
308 case vect_unused_in_scope:
309 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
310 vect_used_in_scope : vect_unused_in_scope;
311 break;
312
313 case vect_used_in_outer_by_reduction:
314 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
315 relevant = vect_used_by_reduction;
316 break;
317
318 case vect_used_in_outer:
319 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
320 relevant = vect_used_in_scope;
321 break;
322
323 case vect_used_in_scope:
324 break;
325
326 default:
327 gcc_unreachable ();
328 }
329 }
330
331 /* case 3b: inner-loop stmt defining an outer-loop stmt:
332 outer-loop-header-bb:
333 ...
334 inner-loop:
335 d = def_stmt
336 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
337 stmt # use (d) */
338 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
339 {
340 if (vect_print_dump_info (REPORT_DETAILS))
341 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
342
343 switch (relevant)
344 {
345 case vect_unused_in_scope:
346 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
347 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
348 vect_used_in_outer_by_reduction : vect_unused_in_scope;
349 break;
350
351 case vect_used_by_reduction:
352 relevant = vect_used_in_outer_by_reduction;
353 break;
354
355 case vect_used_in_scope:
356 relevant = vect_used_in_outer;
357 break;
358
359 default:
360 gcc_unreachable ();
361 }
362 }
363
364 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
365 return true;
366 }
367
368
369 /* Function vect_mark_stmts_to_be_vectorized.
370
371 Not all stmts in the loop need to be vectorized. For example:
372
373 for i...
374 for j...
375 1. T0 = i + j
376 2. T1 = a[T0]
377
378 3. j = j + 1
379
380    Stmts 1 and 3 do not need to be vectorized, because loop control and
381 addressing of vectorized data-refs are handled differently.
382
383 This pass detects such stmts. */
384
385 bool
386 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
387 {
388 VEC(gimple,heap) *worklist;
389 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
390 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
391 unsigned int nbbs = loop->num_nodes;
392 gimple_stmt_iterator si;
393 gimple stmt;
394 unsigned int i;
395 stmt_vec_info stmt_vinfo;
396 basic_block bb;
397 gimple phi;
398 bool live_p;
399 enum vect_relevant relevant, tmp_relevant;
400 enum vect_def_type def_type;
401
402 if (vect_print_dump_info (REPORT_DETAILS))
403 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
404
405 worklist = VEC_alloc (gimple, heap, 64);
406
407 /* 1. Init worklist. */
408 for (i = 0; i < nbbs; i++)
409 {
410 bb = bbs[i];
411 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
412 {
413 phi = gsi_stmt (si);
414 if (vect_print_dump_info (REPORT_DETAILS))
415 {
416 fprintf (vect_dump, "init: phi relevant? ");
417 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
418 }
419
420 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
421 vect_mark_relevant (&worklist, phi, relevant, live_p);
422 }
423 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
424 {
425 stmt = gsi_stmt (si);
426 if (vect_print_dump_info (REPORT_DETAILS))
427 {
428 fprintf (vect_dump, "init: stmt relevant? ");
429 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
430 }
431
432 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
433 vect_mark_relevant (&worklist, stmt, relevant, live_p);
434 }
435 }
436
437 /* 2. Process_worklist */
438 while (VEC_length (gimple, worklist) > 0)
439 {
440 use_operand_p use_p;
441 ssa_op_iter iter;
442
443 stmt = VEC_pop (gimple, worklist);
444 if (vect_print_dump_info (REPORT_DETAILS))
445 {
446 fprintf (vect_dump, "worklist: examine stmt: ");
447 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
448 }
449
450 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
451 (DEF_STMT) as relevant/irrelevant and live/dead according to the
452 liveness and relevance properties of STMT. */
453 stmt_vinfo = vinfo_for_stmt (stmt);
454 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
455 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
456
457 /* Generally, the liveness and relevance properties of STMT are
458 propagated as is to the DEF_STMTs of its USEs:
459 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
460 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
461
462 One exception is when STMT has been identified as defining a reduction
463 variable; in this case we set the liveness/relevance as follows:
464 live_p = false
465 relevant = vect_used_by_reduction
466 This is because we distinguish between two kinds of relevant stmts -
467 those that are used by a reduction computation, and those that are
468 (also) used by a regular computation. This allows us later on to
469 identify stmts that are used solely by a reduction, and therefore the
470 order of the results that they produce does not have to be kept. */
471
472 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
473 tmp_relevant = relevant;
474 switch (def_type)
475 {
476 case vect_reduction_def:
477 switch (tmp_relevant)
478 {
479 case vect_unused_in_scope:
480 relevant = vect_used_by_reduction;
481 break;
482
483 case vect_used_by_reduction:
484 if (gimple_code (stmt) == GIMPLE_PHI)
485 break;
486 /* fall through */
487
488 default:
489 if (vect_print_dump_info (REPORT_DETAILS))
490 fprintf (vect_dump, "unsupported use of reduction.");
491
492 VEC_free (gimple, heap, worklist);
493 return false;
494 }
495
496 live_p = false;
497 break;
498
499 case vect_nested_cycle:
500 if (tmp_relevant != vect_unused_in_scope
501 && tmp_relevant != vect_used_in_outer_by_reduction
502 && tmp_relevant != vect_used_in_outer)
503 {
504 if (vect_print_dump_info (REPORT_DETAILS))
505 fprintf (vect_dump, "unsupported use of nested cycle.");
506
507 VEC_free (gimple, heap, worklist);
508 return false;
509 }
510
511 live_p = false;
512 break;
513
514 case vect_double_reduction_def:
515 if (tmp_relevant != vect_unused_in_scope
516 && tmp_relevant != vect_used_by_reduction)
517 {
518 if (vect_print_dump_info (REPORT_DETAILS))
519 fprintf (vect_dump, "unsupported use of double reduction.");
520
521 VEC_free (gimple, heap, worklist);
522 return false;
523 }
524
525 live_p = false;
526 break;
527
528 default:
529 break;
530 }
531
532 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
533 {
534 tree op = USE_FROM_PTR (use_p);
535 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
536 {
537 VEC_free (gimple, heap, worklist);
538 return false;
539 }
540 }
541 } /* while worklist */
542
543 VEC_free (gimple, heap, worklist);
544 return true;
545 }
546
547
548 int
549 cost_for_stmt (gimple stmt)
550 {
551 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
552
553 switch (STMT_VINFO_TYPE (stmt_info))
554 {
555 case load_vec_info_type:
556 return targetm.vectorize.builtin_vectorization_cost (scalar_load);
557 case store_vec_info_type:
558 return targetm.vectorize.builtin_vectorization_cost (scalar_store);
559 case op_vec_info_type:
560 case condition_vec_info_type:
561 case assignment_vec_info_type:
562 case reduc_vec_info_type:
563 case induc_vec_info_type:
564 case type_promotion_vec_info_type:
565 case type_demotion_vec_info_type:
566 case type_conversion_vec_info_type:
567 case call_vec_info_type:
568 return targetm.vectorize.builtin_vectorization_cost (scalar_stmt);
569 case undef_vec_info_type:
570 default:
571 gcc_unreachable ();
572 }
573 }
574
575 /* Function vect_model_simple_cost.
576
577 Models cost for simple operations, i.e. those that only emit ncopies of a
578 single op. Right now, this does not account for multiple insns that could
579 be generated for the single vector op. We will handle that shortly. */
580
581 void
582 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
583 enum vect_def_type *dt, slp_tree slp_node)
584 {
585 int i;
586 int inside_cost = 0, outside_cost = 0;
587
588 /* The SLP costs were already calculated during SLP tree build. */
589 if (PURE_SLP_STMT (stmt_info))
590 return;
591
592 inside_cost = ncopies
593 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
594
595   /* FORNOW: Assuming maximum 2 args per stmt.  */
596 for (i = 0; i < 2; i++)
597 {
598 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
599 outside_cost
600 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
601 }
602
603 if (vect_print_dump_info (REPORT_COST))
604 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
605 "outside_cost = %d .", inside_cost, outside_cost);
606
607 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
608 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
609 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
610 }
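/* Editorial note (not part of the original source): as a worked example,
   assuming the target's builtin_vectorization_cost hook returns 1 for
   vector_stmt, a simple binary operation with ncopies = 2 and one
   constant operand gets inside_cost = 2 * 1 = 2 and outside_cost = 1
   (the constant operand is materialized as a vector once, outside the
   loop).  */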
611
612
613 /* Function vect_cost_strided_group_size
614
615 For strided load or store, return the group_size only if it is the first
616 load or store of a group, else return 1. This ensures that group size is
617 only returned once per group. */
618
619 static int
620 vect_cost_strided_group_size (stmt_vec_info stmt_info)
621 {
622 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
623
624 if (first_stmt == STMT_VINFO_STMT (stmt_info))
625 return DR_GROUP_SIZE (stmt_info);
626
627 return 1;
628 }
629
630
631 /* Function vect_model_store_cost
632
633 Models cost for stores. In the case of strided accesses, one access
634 has the overhead of the strided access attributed to it. */
635
636 void
637 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
638 enum vect_def_type dt, slp_tree slp_node)
639 {
640 int group_size;
641 int inside_cost = 0, outside_cost = 0;
642
643 /* The SLP costs were already calculated during SLP tree build. */
644 if (PURE_SLP_STMT (stmt_info))
645 return;
646
647 if (dt == vect_constant_def || dt == vect_external_def)
648 outside_cost
649 = targetm.vectorize.builtin_vectorization_cost (scalar_to_vec);
650
651 /* Strided access? */
652 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
653 group_size = vect_cost_strided_group_size (stmt_info);
654 /* Not a strided access. */
655 else
656 group_size = 1;
657
658 /* Is this an access in a group of stores, which provide strided access?
659 If so, add in the cost of the permutes. */
660 if (group_size > 1)
661 {
662 /* Uses a high and low interleave operation for each needed permute. */
663 inside_cost = ncopies * exact_log2(group_size) * group_size
664 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
665
666 if (vect_print_dump_info (REPORT_COST))
667 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
668 group_size);
669
670 }
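      /* Editorial note (not part of the original source): as a worked
         example, with group_size = 4, ncopies = 1 and a vector_stmt cost
         of 1, the formula above charges 1 * log2(4) * 4 = 8 interleave
         operations for permuting the group.  */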
671
672 /* Costs of the stores. */
673 inside_cost += ncopies
674 * targetm.vectorize.builtin_vectorization_cost (vector_store);
675
676 if (vect_print_dump_info (REPORT_COST))
677 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
678 "outside_cost = %d .", inside_cost, outside_cost);
679
680 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
681 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
682 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
683 }
684
685
686 /* Function vect_model_load_cost
687
688 Models cost for loads. In the case of strided accesses, the last access
689 has the overhead of the strided access attributed to it. Since unaligned
690 accesses are supported for loads, we also account for the costs of the
691 access scheme chosen. */
692
693 void
694 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
695
696 {
697 int group_size;
698   int alignment_support_scheme;
699 gimple first_stmt;
700 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
701 int inside_cost = 0, outside_cost = 0;
702
703 /* The SLP costs were already calculated during SLP tree build. */
704 if (PURE_SLP_STMT (stmt_info))
705 return;
706
707 /* Strided accesses? */
708 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
709 if (first_stmt && !slp_node)
710 {
711 group_size = vect_cost_strided_group_size (stmt_info);
712 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
713 }
714 /* Not a strided access. */
715 else
716 {
717 group_size = 1;
718 first_dr = dr;
719 }
720
721   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
722
723 /* Is this an access in a group of loads providing strided access?
724 If so, add in the cost of the permutes. */
725 if (group_size > 1)
726 {
727       /* Uses even and odd extract operations for each needed permute.  */
728 inside_cost = ncopies * exact_log2(group_size) * group_size
729 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
730
731 if (vect_print_dump_info (REPORT_COST))
732 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
733 group_size);
734
735 }
736
737 /* The loads themselves. */
738   switch (alignment_support_scheme)
739 {
740 case dr_aligned:
741 {
742 inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (vector_load);
744
745 if (vect_print_dump_info (REPORT_COST))
746 fprintf (vect_dump, "vect_model_load_cost: aligned.");
747
748 break;
749 }
750 case dr_unaligned_supported:
751 {
752 /* Here, we assign an additional cost for the unaligned load. */
753 inside_cost += ncopies
754 * targetm.vectorize.builtin_vectorization_cost (unaligned_load);
755
756 if (vect_print_dump_info (REPORT_COST))
757 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
758 "hardware.");
759
760 break;
761 }
762 case dr_explicit_realign:
763 {
764 inside_cost += ncopies * (2
765 * targetm.vectorize.builtin_vectorization_cost (vector_load)
766 + targetm.vectorize.builtin_vectorization_cost (vector_stmt));
767
768 /* FIXME: If the misalignment remains fixed across the iterations of
769 the containing loop, the following cost should be added to the
770 outside costs. */
771 if (targetm.vectorize.builtin_mask_for_load)
772 inside_cost
773 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
774
775 break;
776 }
777 case dr_explicit_realign_optimized:
778 {
779 if (vect_print_dump_info (REPORT_COST))
780 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
781 "pipelined.");
782
783 /* Unaligned software pipeline has a load of an address, an initial
784 load, and possibly a mask operation to "prime" the loop. However,
785 if this is an access in a group of loads, which provide strided
786 access, then the above cost should only be considered for one
787 access in the group. Inside the loop, there is a load op
788 and a realignment op. */
789
790 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
791 {
792 outside_cost = 2
793 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
794 if (targetm.vectorize.builtin_mask_for_load)
795 outside_cost
796 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
797 }
798
799 inside_cost += ncopies
800 * (targetm.vectorize.builtin_vectorization_cost (vector_load)
801 + targetm.vectorize.builtin_vectorization_cost (vector_stmt));
802 break;
803 }
804
805 default:
806 gcc_unreachable ();
807 }
808
809 if (vect_print_dump_info (REPORT_COST))
810 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
811 "outside_cost = %d .", inside_cost, outside_cost);
812
813 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
814 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
815 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
816 }
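/* Editorial note (not part of the original source): for example, a
   strided group of 4 loads (group_size = 4, ncopies = 1) whose accesses
   are misaligned but supported in hardware would, with all cost hook
   entries equal to 1, be charged log2(4) * 4 = 8 for the permutes plus
   1 per copy for the unaligned load, all as inside_cost.  */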
817
818
819 /* Function vect_init_vector.
820
821 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
822 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
823    the vector elements of VECTOR_VAR.  Place the initialization at GSI if it
824 Return the DEF of INIT_STMT.
825 It will be used in the vectorization of STMT. */
826
827 tree
828 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
829 gimple_stmt_iterator *gsi)
830 {
831 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
832 tree new_var;
833 gimple init_stmt;
834 tree vec_oprnd;
835 edge pe;
836 tree new_temp;
837 basic_block new_bb;
838
839 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
840 add_referenced_var (new_var);
841 init_stmt = gimple_build_assign (new_var, vector_var);
842 new_temp = make_ssa_name (new_var, init_stmt);
843 gimple_assign_set_lhs (init_stmt, new_temp);
844
845 if (gsi)
846 vect_finish_stmt_generation (stmt, init_stmt, gsi);
847 else
848 {
849 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
850
851 if (loop_vinfo)
852 {
853 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
854
855 if (nested_in_vect_loop_p (loop, stmt))
856 loop = loop->inner;
857
858 pe = loop_preheader_edge (loop);
859 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
860 gcc_assert (!new_bb);
861 }
862 else
863 {
864 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
865 basic_block bb;
866 gimple_stmt_iterator gsi_bb_start;
867
868 gcc_assert (bb_vinfo);
869 bb = BB_VINFO_BB (bb_vinfo);
870 gsi_bb_start = gsi_after_labels (bb);
871 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
872 }
873 }
874
875 if (vect_print_dump_info (REPORT_DETAILS))
876 {
877 fprintf (vect_dump, "created new init_stmt: ");
878 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
879 }
880
881 vec_oprnd = gimple_assign_lhs (init_stmt);
882 return vec_oprnd;
883 }
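/* Editorial illustration (not part of the original source): for a V4SI
   vector type and VECTOR_VAR = {3, 3, 3, 3}, the function above emits
   something along the lines of

     cst_.5_7 = { 3, 3, 3, 3 };

   on the loop preheader edge (or at GSI when one is given) and returns
   the new SSA name for use in the vectorized stmt.  */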
884
885
886 /* Function vect_get_vec_def_for_operand.
887
888 OP is an operand in STMT. This function returns a (vector) def that will be
889 used in the vectorized stmt for STMT.
890
891 In the case that OP is an SSA_NAME which is defined in the loop, then
892 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
893
894 In case OP is an invariant or constant, a new stmt that creates a vector def
895 needs to be introduced. */
896
897 tree
898 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
899 {
900 tree vec_oprnd;
901 gimple vec_stmt;
902 gimple def_stmt;
903 stmt_vec_info def_stmt_info = NULL;
904 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
905 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
906 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
907 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
908 tree vec_inv;
909 tree vec_cst;
910 tree t = NULL_TREE;
911 tree def;
912 int i;
913 enum vect_def_type dt;
914 bool is_simple_use;
915 tree vector_type;
916
917 if (vect_print_dump_info (REPORT_DETAILS))
918 {
919 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
920 print_generic_expr (vect_dump, op, TDF_SLIM);
921 }
922
923 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
924 &dt);
925 gcc_assert (is_simple_use);
926 if (vect_print_dump_info (REPORT_DETAILS))
927 {
928 if (def)
929 {
930 fprintf (vect_dump, "def = ");
931 print_generic_expr (vect_dump, def, TDF_SLIM);
932 }
933 if (def_stmt)
934 {
935 fprintf (vect_dump, " def_stmt = ");
936 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
937 }
938 }
939
940 switch (dt)
941 {
942 /* Case 1: operand is a constant. */
943 case vect_constant_def:
944 {
945 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
946 gcc_assert (vector_type);
947
948 if (scalar_def)
949 *scalar_def = op;
950
951 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
952 if (vect_print_dump_info (REPORT_DETAILS))
953 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
954
955 for (i = nunits - 1; i >= 0; --i)
956 {
957 t = tree_cons (NULL_TREE, op, t);
958 }
959 vec_cst = build_vector (vector_type, t);
960 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
961 }
962
963 /* Case 2: operand is defined outside the loop - loop invariant. */
964 case vect_external_def:
965 {
966 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
967 gcc_assert (vector_type);
968 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
969
970 if (scalar_def)
971 *scalar_def = def;
972
973 /* Create 'vec_inv = {inv,inv,..,inv}' */
974 if (vect_print_dump_info (REPORT_DETAILS))
975 fprintf (vect_dump, "Create vector_inv.");
976
977 for (i = nunits - 1; i >= 0; --i)
978 {
979 t = tree_cons (NULL_TREE, def, t);
980 }
981
982 /* FIXME: use build_constructor directly. */
983 vec_inv = build_constructor_from_list (vector_type, t);
984 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
985 }
986
987 /* Case 3: operand is defined inside the loop. */
988 case vect_internal_def:
989 {
990 if (scalar_def)
991 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
992
993 /* Get the def from the vectorized stmt. */
994 def_stmt_info = vinfo_for_stmt (def_stmt);
995 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
996 gcc_assert (vec_stmt);
997 if (gimple_code (vec_stmt) == GIMPLE_PHI)
998 vec_oprnd = PHI_RESULT (vec_stmt);
999 else if (is_gimple_call (vec_stmt))
1000 vec_oprnd = gimple_call_lhs (vec_stmt);
1001 else
1002 vec_oprnd = gimple_assign_lhs (vec_stmt);
1003 return vec_oprnd;
1004 }
1005
1006 /* Case 4: operand is defined by a loop header phi - reduction */
1007 case vect_reduction_def:
1008 case vect_double_reduction_def:
1009 case vect_nested_cycle:
1010 {
1011 struct loop *loop;
1012
1013 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1014 loop = (gimple_bb (def_stmt))->loop_father;
1015
1016 /* Get the def before the loop */
1017 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1018 return get_initial_def_for_reduction (stmt, op, scalar_def);
1019 }
1020
1021 /* Case 5: operand is defined by loop-header phi - induction. */
1022 case vect_induction_def:
1023 {
1024 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1025
1026 /* Get the def from the vectorized stmt. */
1027 def_stmt_info = vinfo_for_stmt (def_stmt);
1028 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1029 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1030 vec_oprnd = PHI_RESULT (vec_stmt);
1031 return vec_oprnd;
1032 }
1033
1034 default:
1035 gcc_unreachable ();
1036 }
1037 }
1038
1039
1040 /* Function vect_get_vec_def_for_stmt_copy
1041
1042 Return a vector-def for an operand. This function is used when the
1043 vectorized stmt to be created (by the caller to this function) is a "copy"
1044 created in case the vectorized result cannot fit in one vector, and several
1045 copies of the vector-stmt are required. In this case the vector-def is
1046 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1047 of the stmt that defines VEC_OPRND.
1048 DT is the type of the vector def VEC_OPRND.
1049
1050 Context:
1051 In case the vectorization factor (VF) is bigger than the number
1052 of elements that can fit in a vectype (nunits), we have to generate
1053 more than one vector stmt to vectorize the scalar stmt. This situation
1054 arises when there are multiple data-types operated upon in the loop; the
1055 smallest data-type determines the VF, and as a result, when vectorizing
1056 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1057 vector stmt (each computing a vector of 'nunits' results, and together
1058 computing 'VF' results in each iteration). This function is called when
1059 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1060 which VF=16 and nunits=4, so the number of copies required is 4):
1061
1062 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1063
1064 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1065 VS1.1: vx.1 = memref1 VS1.2
1066 VS1.2: vx.2 = memref2 VS1.3
1067 VS1.3: vx.3 = memref3
1068
1069 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1070 VSnew.1: vz1 = vx.1 + ... VSnew.2
1071 VSnew.2: vz2 = vx.2 + ... VSnew.3
1072 VSnew.3: vz3 = vx.3 + ...
1073
1074 The vectorization of S1 is explained in vectorizable_load.
1075 The vectorization of S2:
1076 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1077 the function 'vect_get_vec_def_for_operand' is called to
1078 get the relevant vector-def for each operand of S2. For operand x it
1079 returns the vector-def 'vx.0'.
1080
1081 To create the remaining copies of the vector-stmt (VSnew.j), this
1082 function is called to get the relevant vector-def for each operand. It is
1083 obtained from the respective VS1.j stmt, which is recorded in the
1084 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1085
1086 For example, to obtain the vector-def 'vx.1' in order to create the
1087 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1088 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1089 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1090 and return its def ('vx.1').
1091 Overall, to create the above sequence this function will be called 3 times:
1092 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1093 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1094 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1095
1096 tree
1097 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1098 {
1099 gimple vec_stmt_for_operand;
1100 stmt_vec_info def_stmt_info;
1101
1102 /* Do nothing; can reuse same def. */
1103 if (dt == vect_external_def || dt == vect_constant_def )
1104 return vec_oprnd;
1105
1106 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1107 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1108 gcc_assert (def_stmt_info);
1109 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1110 gcc_assert (vec_stmt_for_operand);
1111 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1112 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1113 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1114 else
1115 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1116 return vec_oprnd;
1117 }
1118
1119
1120 /* Get vectorized definitions for the operands to create a copy of an original
1121 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1122
1123 static void
1124 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1125 VEC(tree,heap) **vec_oprnds0,
1126 VEC(tree,heap) **vec_oprnds1)
1127 {
1128 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1129
1130 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1131 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1132
1133 if (vec_oprnds1 && *vec_oprnds1)
1134 {
1135 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1136 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1137 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1138 }
1139 }
1140
1141
1142 /* Get vectorized definitions for OP0 and OP1, or take them from SLP_NODE if it is not NULL.  */
1143
1144 static void
1145 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1146 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1147 slp_tree slp_node)
1148 {
1149 if (slp_node)
1150 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
1151 else
1152 {
1153 tree vec_oprnd;
1154
1155 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1156 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1157 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1158
1159 if (op1)
1160 {
1161 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1162 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1163 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1164 }
1165 }
1166 }
1167
1168
1169 /* Function vect_finish_stmt_generation.
1170
1171 Insert a new stmt. */
1172
1173 void
1174 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1175 gimple_stmt_iterator *gsi)
1176 {
1177 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1178 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1179 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1180
1181 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1182
1183 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1184
1185 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1186 bb_vinfo));
1187
1188 if (vect_print_dump_info (REPORT_DETAILS))
1189 {
1190 fprintf (vect_dump, "add new stmt: ");
1191 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1192 }
1193
1194 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1195 }
1196
1197 /* Checks if CALL can be vectorized in vector type VECTYPE_OUT with its
1198    arguments in vector type VECTYPE_IN.  Returns a function declaration
1199    if the target has a vectorized version of the function, or NULL_TREE otherwise.  */
1200
1201 tree
1202 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1203 {
1204 tree fndecl = gimple_call_fndecl (call);
1205
1206 /* We only handle functions that do not read or clobber memory -- i.e.
1207 const or novops ones. */
1208 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1209 return NULL_TREE;
1210
1211 if (!fndecl
1212 || TREE_CODE (fndecl) != FUNCTION_DECL
1213 || !DECL_BUILT_IN (fndecl))
1214 return NULL_TREE;
1215
1216 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1217 vectype_in);
1218 }
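/* Editorial note (not part of the original source): only calls that
   neither read nor clobber memory (ECF_CONST or ECF_NOVOPS) and that
   resolve to a built-in decl are considered; the target hook then
   decides, e.g. it may map a scalar math built-in to a target-specific
   vector routine for the given VECTYPE_OUT/VECTYPE_IN pair, or return
   NULL_TREE.  */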
1219
1220 /* Function vectorizable_call.
1221
1222 Check if STMT performs a function call that can be vectorized.
1223 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1224 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1225 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1226
1227 static bool
1228 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1229 {
1230 tree vec_dest;
1231 tree scalar_dest;
1232 tree op, type;
1233 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1234 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1235 tree vectype_out, vectype_in;
1236 int nunits_in;
1237 int nunits_out;
1238 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1239 tree fndecl, new_temp, def, rhs_type;
1240 gimple def_stmt;
1241 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1242 gimple new_stmt = NULL;
1243 int ncopies, j;
1244 VEC(tree, heap) *vargs = NULL;
1245 enum { NARROW, NONE, WIDEN } modifier;
1246 size_t i, nargs;
1247
1248 /* FORNOW: unsupported in basic block SLP. */
1249 gcc_assert (loop_vinfo);
1250
1251 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1252 return false;
1253
1254 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1255 return false;
1256
1257 /* FORNOW: SLP not supported. */
1258 if (STMT_SLP_TYPE (stmt_info))
1259 return false;
1260
1261 /* Is STMT a vectorizable call? */
1262 if (!is_gimple_call (stmt))
1263 return false;
1264
1265 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1266 return false;
1267
1268 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1269
1270 /* Process function arguments. */
1271 rhs_type = NULL_TREE;
1272 vectype_in = NULL_TREE;
1273 nargs = gimple_call_num_args (stmt);
1274
1275   /* Bail out if the function has more than two arguments; we do not
1276      have interesting builtin functions to vectorize with more than two
1277      arguments.  Calls with no arguments are rejected as well.  */
1278 if (nargs == 0 || nargs > 2)
1279 return false;
1280
1281 for (i = 0; i < nargs; i++)
1282 {
1283 tree opvectype;
1284
1285 op = gimple_call_arg (stmt, i);
1286
1287 /* We can only handle calls with arguments of the same type. */
1288 if (rhs_type
1289 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1290 {
1291 if (vect_print_dump_info (REPORT_DETAILS))
1292 fprintf (vect_dump, "argument types differ.");
1293 return false;
1294 }
1295 if (!rhs_type)
1296 rhs_type = TREE_TYPE (op);
1297
1298 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1299 &def_stmt, &def, &dt[i], &opvectype))
1300 {
1301 if (vect_print_dump_info (REPORT_DETAILS))
1302 fprintf (vect_dump, "use not simple.");
1303 return false;
1304 }
1305
1306 if (!vectype_in)
1307 vectype_in = opvectype;
1308 else if (opvectype
1309 && opvectype != vectype_in)
1310 {
1311 if (vect_print_dump_info (REPORT_DETAILS))
1312 fprintf (vect_dump, "argument vector types differ.");
1313 return false;
1314 }
1315 }
1316   /* If all arguments are external or constant defs, use a vector type with
1317 the same size as the output vector type. */
1318 if (!vectype_in)
1319 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1320 if (vec_stmt)
1321 gcc_assert (vectype_in);
1322 if (!vectype_in)
1323 {
1324 if (vect_print_dump_info (REPORT_DETAILS))
1325 {
1326 fprintf (vect_dump, "no vectype for scalar type ");
1327 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1328 }
1329
1330 return false;
1331 }
1332
1333 /* FORNOW */
1334 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1335 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1336 if (nunits_in == nunits_out / 2)
1337 modifier = NARROW;
1338 else if (nunits_out == nunits_in)
1339 modifier = NONE;
1340 else if (nunits_out == nunits_in / 2)
1341 modifier = WIDEN;
1342 else
1343 return false;
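  /* Editorial illustration (not part of the original source): with a
     V8HI input type and a V4SI output type, nunits_in = 8 and
     nunits_out = 4, so the call is classified as WIDEN; with a V4SI
     input and a V8HI output it would be NARROW; equal unit counts give
     NONE.  */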
1344
1345 /* For now, we only vectorize functions if a target specific builtin
1346 is available. TODO -- in some cases, it might be profitable to
1347 insert the calls for pieces of the vector, in order to be able
1348 to vectorize other operations in the loop. */
1349 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1350 if (fndecl == NULL_TREE)
1351 {
1352 if (vect_print_dump_info (REPORT_DETAILS))
1353 fprintf (vect_dump, "function is not vectorizable.");
1354
1355 return false;
1356 }
1357
1358 gcc_assert (!gimple_vuse (stmt));
1359
1360 if (modifier == NARROW)
1361 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1362 else
1363 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1364
1365 /* Sanity check: make sure that at least one copy of the vectorized stmt
1366 needs to be generated. */
1367 gcc_assert (ncopies >= 1);
1368
1369 if (!vec_stmt) /* transformation not required. */
1370 {
1371 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1372 if (vect_print_dump_info (REPORT_DETAILS))
1373 fprintf (vect_dump, "=== vectorizable_call ===");
1374 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1375 return true;
1376 }
1377
1378 /** Transform. **/
1379
1380 if (vect_print_dump_info (REPORT_DETAILS))
1381 fprintf (vect_dump, "transform operation.");
1382
1383 /* Handle def. */
1384 scalar_dest = gimple_call_lhs (stmt);
1385 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1386
1387 prev_stmt_info = NULL;
1388 switch (modifier)
1389 {
1390 case NONE:
1391 for (j = 0; j < ncopies; ++j)
1392 {
1393 /* Build argument list for the vectorized call. */
1394 if (j == 0)
1395 vargs = VEC_alloc (tree, heap, nargs);
1396 else
1397 VEC_truncate (tree, vargs, 0);
1398
1399 for (i = 0; i < nargs; i++)
1400 {
1401 op = gimple_call_arg (stmt, i);
1402 if (j == 0)
1403 vec_oprnd0
1404 = vect_get_vec_def_for_operand (op, stmt, NULL);
1405 else
1406 {
1407 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1408 vec_oprnd0
1409 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1410 }
1411
1412 VEC_quick_push (tree, vargs, vec_oprnd0);
1413 }
1414
1415 new_stmt = gimple_build_call_vec (fndecl, vargs);
1416 new_temp = make_ssa_name (vec_dest, new_stmt);
1417 gimple_call_set_lhs (new_stmt, new_temp);
1418
1419 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1420 mark_symbols_for_renaming (new_stmt);
1421
1422 if (j == 0)
1423 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1424 else
1425 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1426
1427 prev_stmt_info = vinfo_for_stmt (new_stmt);
1428 }
1429
1430 break;
1431
1432 case NARROW:
1433 for (j = 0; j < ncopies; ++j)
1434 {
1435 /* Build argument list for the vectorized call. */
1436 if (j == 0)
1437 vargs = VEC_alloc (tree, heap, nargs * 2);
1438 else
1439 VEC_truncate (tree, vargs, 0);
1440
1441 for (i = 0; i < nargs; i++)
1442 {
1443 op = gimple_call_arg (stmt, i);
1444 if (j == 0)
1445 {
1446 vec_oprnd0
1447 = vect_get_vec_def_for_operand (op, stmt, NULL);
1448 vec_oprnd1
1449 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1450 }
1451 else
1452 {
1453 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1454 vec_oprnd0
1455 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1456 vec_oprnd1
1457 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1458 }
1459
1460 VEC_quick_push (tree, vargs, vec_oprnd0);
1461 VEC_quick_push (tree, vargs, vec_oprnd1);
1462 }
1463
1464 new_stmt = gimple_build_call_vec (fndecl, vargs);
1465 new_temp = make_ssa_name (vec_dest, new_stmt);
1466 gimple_call_set_lhs (new_stmt, new_temp);
1467
1468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1469 mark_symbols_for_renaming (new_stmt);
1470
1471 if (j == 0)
1472 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1473 else
1474 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1475
1476 prev_stmt_info = vinfo_for_stmt (new_stmt);
1477 }
1478
1479 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1480
1481 break;
1482
1483 case WIDEN:
1484 /* No current target implements this case. */
1485 return false;
1486 }
1487
1488 VEC_free (tree, heap, vargs);
1489
1490 /* Update the exception handling table with the vector stmt if necessary. */
1491 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1492 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1493
1494   /* The call in STMT might prevent it from being removed by DCE.  We,
1495      however, cannot remove it here, due to the way the SSA name it
1496      defines is mapped to the new definition.  So just replace the rhs
1497      of the statement with something harmless.  */
1498
1499 type = TREE_TYPE (scalar_dest);
1500 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1501 fold_convert (type, integer_zero_node));
1502 set_vinfo_for_stmt (new_stmt, stmt_info);
1503 set_vinfo_for_stmt (stmt, NULL);
1504 STMT_VINFO_STMT (stmt_info) = new_stmt;
1505 gsi_replace (gsi, new_stmt, false);
1506 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1507
1508 return true;
1509 }
1510
1511
1512 /* Function vect_gen_widened_results_half
1513
1514    Create a vector stmt whose code is CODE, whose number of arguments is
1515    determined by OP_TYPE, and whose result variable is VEC_DEST; its
1516    arguments are VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
1517 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1518 needs to be created (DECL is a function-decl of a target-builtin).
1519 STMT is the original scalar stmt that we are vectorizing. */
1520
1521 static gimple
1522 vect_gen_widened_results_half (enum tree_code code,
1523 tree decl,
1524 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1525 tree vec_dest, gimple_stmt_iterator *gsi,
1526 gimple stmt)
1527 {
1528 gimple new_stmt;
1529 tree new_temp;
1530
1531 /* Generate half of the widened result: */
1532 if (code == CALL_EXPR)
1533 {
1534 /* Target specific support */
1535 if (op_type == binary_op)
1536 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1537 else
1538 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1539 new_temp = make_ssa_name (vec_dest, new_stmt);
1540 gimple_call_set_lhs (new_stmt, new_temp);
1541 }
1542 else
1543 {
1544 /* Generic support */
1545 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1546 if (op_type != binary_op)
1547 vec_oprnd1 = NULL;
1548 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1549 vec_oprnd1);
1550 new_temp = make_ssa_name (vec_dest, new_stmt);
1551 gimple_assign_set_lhs (new_stmt, new_temp);
1552 }
1553 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1554
1555 return new_stmt;
1556 }
1557
1558
1559 /* Check if STMT performs a conversion operation that can be vectorized.
1560 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1561 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1562 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1563
1564 static bool
1565 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1566 gimple *vec_stmt, slp_tree slp_node)
1567 {
1568 tree vec_dest;
1569 tree scalar_dest;
1570 tree op0;
1571 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1572 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1573 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1574 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1575 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1576 tree new_temp;
1577 tree def;
1578 gimple def_stmt;
1579 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1580 gimple new_stmt = NULL;
1581 stmt_vec_info prev_stmt_info;
1582 int nunits_in;
1583 int nunits_out;
1584 tree vectype_out, vectype_in;
1585 int ncopies, j;
1586 tree rhs_type;
1587 tree builtin_decl;
1588 enum { NARROW, NONE, WIDEN } modifier;
1589 int i;
1590 VEC(tree,heap) *vec_oprnds0 = NULL;
1591 tree vop0;
1592 VEC(tree,heap) *dummy = NULL;
1593 int dummy_int;
1594
1595 /* Is STMT a vectorizable conversion? */
1596
1597 /* FORNOW: unsupported in basic block SLP. */
1598 gcc_assert (loop_vinfo);
1599
1600 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1601 return false;
1602
1603 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1604 return false;
1605
1606 if (!is_gimple_assign (stmt))
1607 return false;
1608
1609 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1610 return false;
1611
1612 code = gimple_assign_rhs_code (stmt);
1613 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1614 return false;
1615
1616 /* Check types of lhs and rhs. */
1617 scalar_dest = gimple_assign_lhs (stmt);
1618 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1619
1620 op0 = gimple_assign_rhs1 (stmt);
1621 rhs_type = TREE_TYPE (op0);
1622 /* Check the operands of the operation. */
1623 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1624 &def_stmt, &def, &dt[0], &vectype_in))
1625 {
1626 if (vect_print_dump_info (REPORT_DETAILS))
1627 fprintf (vect_dump, "use not simple.");
1628 return false;
1629 }
1630   /* If op0 is an external or constant def, use a vector type of
1631 the same size as the output vector type. */
1632 if (!vectype_in)
1633 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1634 if (vec_stmt)
1635 gcc_assert (vectype_in);
1636 if (!vectype_in)
1637 {
1638 if (vect_print_dump_info (REPORT_DETAILS))
1639 {
1640 fprintf (vect_dump, "no vectype for scalar type ");
1641 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1642 }
1643
1644 return false;
1645 }
1646
1647 /* FORNOW */
1648 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1649 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1650 if (nunits_in == nunits_out / 2)
1651 modifier = NARROW;
1652 else if (nunits_out == nunits_in)
1653 modifier = NONE;
1654 else if (nunits_out == nunits_in / 2)
1655 modifier = WIDEN;
1656 else
1657 return false;
1658
1659 if (modifier == NARROW)
1660 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1661 else
1662 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1663
1664 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1665 this, so we can safely override NCOPIES with 1 here. */
1666 if (slp_node)
1667 ncopies = 1;
1668
1669 /* Sanity check: make sure that at least one copy of the vectorized stmt
1670 needs to be generated. */
1671 gcc_assert (ncopies >= 1);
1672
1673 /* Supportable by target? */
1674 if ((modifier == NONE
1675 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1676 || (modifier == WIDEN
1677 && !supportable_widening_operation (code, stmt,
1678 vectype_out, vectype_in,
1679 &decl1, &decl2,
1680 &code1, &code2,
1681 &dummy_int, &dummy))
1682 || (modifier == NARROW
1683 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1684 &code1, &dummy_int, &dummy)))
1685 {
1686 if (vect_print_dump_info (REPORT_DETAILS))
1687 fprintf (vect_dump, "conversion not supported by target.");
1688 return false;
1689 }
1690
1691 if (modifier != NONE)
1692 {
1693 /* FORNOW: SLP not supported. */
1694 if (STMT_SLP_TYPE (stmt_info))
1695 return false;
1696 }
1697
1698 if (!vec_stmt) /* transformation not required. */
1699 {
1700 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1701 return true;
1702 }
1703
1704 /** Transform. **/
1705 if (vect_print_dump_info (REPORT_DETAILS))
1706 fprintf (vect_dump, "transform conversion.");
1707
1708 /* Handle def. */
1709 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1710
1711 if (modifier == NONE && !slp_node)
1712 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1713
1714 prev_stmt_info = NULL;
1715 switch (modifier)
1716 {
1717 case NONE:
1718 for (j = 0; j < ncopies; j++)
1719 {
1720 if (j == 0)
1721 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1722 else
1723 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1724
1725 builtin_decl =
1726 targetm.vectorize.builtin_conversion (code,
1727 vectype_out, vectype_in);
1728 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1729 {
1730               /* Arguments are ready.  Create the new vector stmt.  */
1731 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1732 new_temp = make_ssa_name (vec_dest, new_stmt);
1733 gimple_call_set_lhs (new_stmt, new_temp);
1734 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1735 if (slp_node)
1736 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1737 }
1738
1739 if (j == 0)
1740 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1741 else
1742 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1743 prev_stmt_info = vinfo_for_stmt (new_stmt);
1744 }
1745 break;
1746
1747 case WIDEN:
1748 /* In case the vectorization factor (VF) is bigger than the number
1749 of elements that we can fit in a vectype (nunits), we have to
1750          generate more than one vector stmt - i.e., we need to "unroll"
1751 the vector stmt by a factor VF/nunits. */
1752 for (j = 0; j < ncopies; j++)
1753 {
1754 if (j == 0)
1755 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1756 else
1757 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1758
1759 /* Generate first half of the widened result: */
1760 new_stmt
1761 = vect_gen_widened_results_half (code1, decl1,
1762 vec_oprnd0, vec_oprnd1,
1763 unary_op, vec_dest, gsi, stmt);
1764 if (j == 0)
1765 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1766 else
1767 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1768 prev_stmt_info = vinfo_for_stmt (new_stmt);
1769
1770 /* Generate second half of the widened result: */
1771 new_stmt
1772 = vect_gen_widened_results_half (code2, decl2,
1773 vec_oprnd0, vec_oprnd1,
1774 unary_op, vec_dest, gsi, stmt);
1775 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1776 prev_stmt_info = vinfo_for_stmt (new_stmt);
1777 }
1778 break;
1779
1780 case NARROW:
1781 /* In case the vectorization factor (VF) is bigger than the number
1782 of elements that we can fit in a vectype (nunits), we have to
1783 generate more than one vector stmt - i.e - we need to "unroll"
1784 the vector stmt by a factor VF/nunits. */
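      /* Each narrowing stmt below packs two input vectors into a single
         output vector, so every copy of the stmt consumes two vector defs
         (VEC_OPRND0 and VEC_OPRND1).  */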
1785 for (j = 0; j < ncopies; j++)
1786 {
1787 /* Handle uses. */
1788 if (j == 0)
1789 {
1790 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1791 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1792 }
1793 else
1794 {
1795 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1796 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1797 }
1798
1799 /* Arguments are ready. Create the new vector stmt. */
1800 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1801 vec_oprnd1);
1802 new_temp = make_ssa_name (vec_dest, new_stmt);
1803 gimple_assign_set_lhs (new_stmt, new_temp);
1804 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1805
1806 if (j == 0)
1807 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1808 else
1809 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1810
1811 prev_stmt_info = vinfo_for_stmt (new_stmt);
1812 }
1813
1814 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1815 }
1816
1817 if (vec_oprnds0)
1818 VEC_free (tree, heap, vec_oprnds0);
1819
1820 return true;
1821 }
1822 /* Function vectorizable_assignment.
1823
1824 Check if STMT performs an assignment (copy) that can be vectorized.
1825 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1826 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1827 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1828
1829 static bool
1830 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1831 gimple *vec_stmt, slp_tree slp_node)
1832 {
1833 tree vec_dest;
1834 tree scalar_dest;
1835 tree op;
1836 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1837 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1838 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1839 tree new_temp;
1840 tree def;
1841 gimple def_stmt;
1842 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1843 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1844 int ncopies;
1845 int i, j;
1846 VEC(tree,heap) *vec_oprnds = NULL;
1847 tree vop;
1848 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1849 gimple new_stmt = NULL;
1850 stmt_vec_info prev_stmt_info = NULL;
1851 enum tree_code code;
1852 tree vectype_in;
1853
1854 /* Multiple types in SLP are handled by creating the appropriate number of
1855 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1856 case of SLP. */
1857 if (slp_node)
1858 ncopies = 1;
1859 else
1860 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1861
1862 gcc_assert (ncopies >= 1);
1863
1864 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1865 return false;
1866
1867 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1868 return false;
1869
1870 /* Is vectorizable assignment? */
1871 if (!is_gimple_assign (stmt))
1872 return false;
1873
1874 scalar_dest = gimple_assign_lhs (stmt);
1875 if (TREE_CODE (scalar_dest) != SSA_NAME)
1876 return false;
1877
1878 code = gimple_assign_rhs_code (stmt);
1879 if (gimple_assign_single_p (stmt)
1880 || code == PAREN_EXPR
1881 || CONVERT_EXPR_CODE_P (code))
1882 op = gimple_assign_rhs1 (stmt);
1883 else
1884 return false;
1885
1886 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1887 &def_stmt, &def, &dt[0], &vectype_in))
1888 {
1889 if (vect_print_dump_info (REPORT_DETAILS))
1890 fprintf (vect_dump, "use not simple.");
1891 return false;
1892 }
1893
1894 /* We can handle NOP_EXPR conversions that do not change the number
1895 of elements or the vector size. */
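  /* For instance (illustrative), a cast from a vector of int to a vector of
     unsigned int keeps both the number of elements and the vector size, so
     it can be handled here.  */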
1896 if (CONVERT_EXPR_CODE_P (code)
1897 && (!vectype_in
1898 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1899 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1900 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1901 return false;
1902
1903 if (!vec_stmt) /* transformation not required. */
1904 {
1905 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1906 if (vect_print_dump_info (REPORT_DETAILS))
1907 fprintf (vect_dump, "=== vectorizable_assignment ===");
1908 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1909 return true;
1910 }
1911
1912 /** Transform. **/
1913 if (vect_print_dump_info (REPORT_DETAILS))
1914 fprintf (vect_dump, "transform assignment.");
1915
1916 /* Handle def. */
1917 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1918
1919 /* Handle use. */
1920 for (j = 0; j < ncopies; j++)
1921 {
1922 /* Handle uses. */
1923 if (j == 0)
1924 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1925 else
1926 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1927
1928 /* Arguments are ready. Create the new vector stmt. */
1929 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1930 {
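          /* A conversion that preserves both the element count and the
             vector size is a no-op at the bit level, so it is expressed as
             a VIEW_CONVERT_EXPR of the vector operand.  */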
1931 if (CONVERT_EXPR_CODE_P (code))
1932 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
1933 new_stmt = gimple_build_assign (vec_dest, vop);
1934 new_temp = make_ssa_name (vec_dest, new_stmt);
1935 gimple_assign_set_lhs (new_stmt, new_temp);
1936 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1937 if (slp_node)
1938 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1939 }
1940
1941 if (slp_node)
1942 continue;
1943
1944 if (j == 0)
1945 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1946 else
1947 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1948
1949 prev_stmt_info = vinfo_for_stmt (new_stmt);
1950 }
1951
1952 VEC_free (tree, heap, vec_oprnds);
1953 return true;
1954 }
1955
1956 /* Function vectorizable_operation.
1957
1958 Check if STMT performs a binary or unary operation that can be vectorized.
1959 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1960 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1961 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1962
1963 static bool
1964 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1965 gimple *vec_stmt, slp_tree slp_node)
1966 {
1967 tree vec_dest;
1968 tree scalar_dest;
1969 tree op0, op1 = NULL;
1970 tree vec_oprnd1 = NULL_TREE;
1971 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1972 tree vectype;
1973 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1974 enum tree_code code;
1975 enum machine_mode vec_mode;
1976 tree new_temp;
1977 int op_type;
1978 optab optab;
1979 int icode;
1980 enum machine_mode optab_op2_mode;
1981 tree def;
1982 gimple def_stmt;
1983 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1984 gimple new_stmt = NULL;
1985 stmt_vec_info prev_stmt_info;
1986 int nunits_in;
1987 int nunits_out;
1988 tree vectype_out;
1989 int ncopies;
1990 int j, i;
1991 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1992 tree vop0, vop1;
1993 unsigned int k;
1994 bool scalar_shift_arg = false;
1995 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1996 int vf;
1997
1998 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1999 return false;
2000
2001 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2002 return false;
2003
2004 /* Is STMT a vectorizable binary/unary operation? */
2005 if (!is_gimple_assign (stmt))
2006 return false;
2007
2008 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2009 return false;
2010
2011 code = gimple_assign_rhs_code (stmt);
2012
2013 /* For pointer addition, we should use the normal plus for
2014 the vector addition. */
2015 if (code == POINTER_PLUS_EXPR)
2016 code = PLUS_EXPR;
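  /* E.g. an address computation such as q_2 = p_1 + 4 (POINTER_PLUS_EXPR)
     is vectorized as an ordinary vector PLUS_EXPR (illustrative).  */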
2017
2018 /* Support only unary or binary operations. */
2019 op_type = TREE_CODE_LENGTH (code);
2020 if (op_type != unary_op && op_type != binary_op)
2021 {
2022 if (vect_print_dump_info (REPORT_DETAILS))
2023 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2024 return false;
2025 }
2026
2027 scalar_dest = gimple_assign_lhs (stmt);
2028 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2029
2030 op0 = gimple_assign_rhs1 (stmt);
2031 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2032 &def_stmt, &def, &dt[0], &vectype))
2033 {
2034 if (vect_print_dump_info (REPORT_DETAILS))
2035 fprintf (vect_dump, "use not simple.");
2036 return false;
2037 }
2038 /* If op0 is an external or constant def use a vector type with
2039 the same size as the output vector type. */
2040 if (!vectype)
2041 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2042 if (vec_stmt)
2043 gcc_assert (vectype);
2044 if (!vectype)
2045 {
2046 if (vect_print_dump_info (REPORT_DETAILS))
2047 {
2048 fprintf (vect_dump, "no vectype for scalar type ");
2049 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2050 }
2051
2052 return false;
2053 }
2054
2055 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2056 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2057 if (nunits_out != nunits_in)
2058 return false;
2059
2060 if (op_type == binary_op)
2061 {
2062 op1 = gimple_assign_rhs2 (stmt);
2063 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2064 &dt[1]))
2065 {
2066 if (vect_print_dump_info (REPORT_DETAILS))
2067 fprintf (vect_dump, "use not simple.");
2068 return false;
2069 }
2070 }
2071
2072 if (loop_vinfo)
2073 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2074 else
2075 vf = 1;
2076
2077 /* Multiple types in SLP are handled by creating the appropriate number of
2078 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2079 case of SLP. */
2080 if (slp_node)
2081 ncopies = 1;
2082 else
2083 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2084
2085 gcc_assert (ncopies >= 1);
2086
2087 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2088 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2089 shift optabs. */
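  /* For example, a[i] << b[i] needs the vector/vector shift optab, whereas
     a[i] << s with a constant or loop-invariant S may use the vector/scalar
     shift optab if the target provides one (illustrative).  */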
2090 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2091 || code == RROTATE_EXPR)
2092 {
2093 /* vector shifted by vector */
2094 if (dt[1] == vect_internal_def)
2095 {
2096 optab = optab_for_tree_code (code, vectype, optab_vector);
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "vector/vector shift/rotate found.");
2099 }
2100
2101 /* See if the machine has a vector shifted by scalar insn, and if not,
2102 then see if it has a vector shifted by vector insn. */
2103 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2104 {
2105 optab = optab_for_tree_code (code, vectype, optab_scalar);
2106 if (optab
2107 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2108 != CODE_FOR_nothing))
2109 {
2110 scalar_shift_arg = true;
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2113 }
2114 else
2115 {
2116 optab = optab_for_tree_code (code, vectype, optab_vector);
2117 if (optab
2118 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2119 != CODE_FOR_nothing))
2120 {
2121 if (vect_print_dump_info (REPORT_DETAILS))
2122 fprintf (vect_dump, "vector/vector shift/rotate found.");
2123
2124 /* Unlike the other binary operators, the rhs of a shift/rotate is an
2125 int rather than the same type as the lhs, so make sure the scalar
2126 has the right type if we are dealing with vectors of
2127 short/char. */
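              /* E.g. for a V8HI vector shifted by the integer constant 3,
                 the constant is folded to a short here (illustrative).  */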
2128 if (dt[1] == vect_constant_def)
2129 op1 = fold_convert (TREE_TYPE (vectype), op1);
2130 }
2131 }
2132 }
2133
2134 else
2135 {
2136 if (vect_print_dump_info (REPORT_DETAILS))
2137 fprintf (vect_dump, "operand mode requires invariant argument.");
2138 return false;
2139 }
2140 }
2141 else
2142 optab = optab_for_tree_code (code, vectype, optab_default);
2143
2144 /* Supportable by target? */
2145 if (!optab)
2146 {
2147 if (vect_print_dump_info (REPORT_DETAILS))
2148 fprintf (vect_dump, "no optab.");
2149 return false;
2150 }
2151 vec_mode = TYPE_MODE (vectype);
2152 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2153 if (icode == CODE_FOR_nothing)
2154 {
2155 if (vect_print_dump_info (REPORT_DETAILS))
2156 fprintf (vect_dump, "op not supported by target.");
2157 /* Check only during analysis. */
2158 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2159 || (vf < vect_min_worthwhile_factor (code)
2160 && !vec_stmt))
2161 return false;
2162 if (vect_print_dump_info (REPORT_DETAILS))
2163 fprintf (vect_dump, "proceeding using word mode.");
2164 }
2165
2166 /* Worthwhile without SIMD support? Check only during analysis. */
2167 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2168 && vf < vect_min_worthwhile_factor (code)
2169 && !vec_stmt)
2170 {
2171 if (vect_print_dump_info (REPORT_DETAILS))
2172 fprintf (vect_dump, "not worthwhile without SIMD support.");
2173 return false;
2174 }
2175
2176 if (!vec_stmt) /* transformation not required. */
2177 {
2178 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2179 if (vect_print_dump_info (REPORT_DETAILS))
2180 fprintf (vect_dump, "=== vectorizable_operation ===");
2181 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2182 return true;
2183 }
2184
2185 /** Transform. **/
2186
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 fprintf (vect_dump, "transform binary/unary operation.");
2189
2190 /* Handle def. */
2191 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2192
2193 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2194 created in the previous stages of the recursion, so no allocation is
2195 needed, except for the case of shift with scalar shift argument. In that
2196 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2197 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2198 In case of loop-based vectorization we allocate VECs of size 1. We
2199 allocate VEC_OPRNDS1 only in case of binary operation. */
2200 if (!slp_node)
2201 {
2202 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2203 if (op_type == binary_op)
2204 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2205 }
2206 else if (scalar_shift_arg)
2207 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2208
2209 /* In case the vectorization factor (VF) is bigger than the number
2210 of elements that we can fit in a vectype (nunits), we have to generate
2211 more than one vector stmt - i.e - we need to "unroll" the
2212 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2213 from one copy of the vector stmt to the next, in the field
2214 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2215 stages to find the correct vector defs to be used when vectorizing
2216 stmts that use the defs of the current stmt. The example below illustrates
2217 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2218 4 vectorized stmts):
2219
2220 before vectorization:
2221 RELATED_STMT VEC_STMT
2222 S1: x = memref - -
2223 S2: z = x + 1 - -
2224
2225 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2226 there):
2227 RELATED_STMT VEC_STMT
2228 VS1_0: vx0 = memref0 VS1_1 -
2229 VS1_1: vx1 = memref1 VS1_2 -
2230 VS1_2: vx2 = memref2 VS1_3 -
2231 VS1_3: vx3 = memref3 - -
2232 S1: x = load - VS1_0
2233 S2: z = x + 1 - -
2234
2235 step2: vectorize stmt S2 (done here):
2236 To vectorize stmt S2 we first need to find the relevant vector
2237 def for the first operand 'x'. This is, as usual, obtained from
2238 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2239 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2240 relevant vector def 'vx0'. Having found 'vx0' we can generate
2241 the vector stmt VS2_0, and as usual, record it in the
2242 STMT_VINFO_VEC_STMT of stmt S2.
2243 When creating the second copy (VS2_1), we obtain the relevant vector
2244 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2245 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2246 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2247 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2248 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2249 chain of stmts and pointers:
2250 RELATED_STMT VEC_STMT
2251 VS1_0: vx0 = memref0 VS1_1 -
2252 VS1_1: vx1 = memref1 VS1_2 -
2253 VS1_2: vx2 = memref2 VS1_3 -
2254 VS1_3: vx3 = memref3 - -
2255 S1: x = load - VS1_0
2256 VS2_0: vz0 = vx0 + v1 VS2_1 -
2257 VS2_1: vz1 = vx1 + v1 VS2_2 -
2258 VS2_2: vz2 = vx2 + v1 VS2_3 -
2259 VS2_3: vz3 = vx3 + v1 - -
2260 S2: z = x + 1 - VS2_0 */
2261
2262 prev_stmt_info = NULL;
2263 for (j = 0; j < ncopies; j++)
2264 {
2265 /* Handle uses. */
2266 if (j == 0)
2267 {
2268 if (op_type == binary_op && scalar_shift_arg)
2269 {
2270 /* Vector shl and shr insn patterns can be defined with scalar
2271 operand 2 (shift operand). In this case, use constant or loop
2272 invariant op1 directly, without extending it to vector mode
2273 first. */
2274 optab_op2_mode = insn_data[icode].operand[2].mode;
2275 if (!VECTOR_MODE_P (optab_op2_mode))
2276 {
2277 if (vect_print_dump_info (REPORT_DETAILS))
2278 fprintf (vect_dump, "operand 1 using scalar mode.");
2279 vec_oprnd1 = op1;
2280 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2281 if (slp_node)
2282 {
2283 /* Store vec_oprnd1 for every vector stmt to be created
2284 for SLP_NODE. We check during the analysis that all the
2285 shift arguments are the same.
2286 TODO: Allow different constants for different vector
2287 stmts generated for an SLP instance. */
2288 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2289 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2290 }
2291 }
2292 }
2293
2294 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2295 (a special case for certain kinds of vector shifts); otherwise,
2296 operand 1 should be of a vector type (the usual case). */
2297 if (op_type == binary_op && !vec_oprnd1)
2298 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2299 slp_node);
2300 else
2301 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2302 slp_node);
2303 }
2304 else
2305 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2306
2307 /* Arguments are ready. Create the new vector stmt. */
2308 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2309 {
2310 vop1 = ((op_type == binary_op)
2311 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2312 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2313 new_temp = make_ssa_name (vec_dest, new_stmt);
2314 gimple_assign_set_lhs (new_stmt, new_temp);
2315 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2316 if (slp_node)
2317 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2318 }
2319
2320 if (slp_node)
2321 continue;
2322
2323 if (j == 0)
2324 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2325 else
2326 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2327 prev_stmt_info = vinfo_for_stmt (new_stmt);
2328 }
2329
2330 VEC_free (tree, heap, vec_oprnds0);
2331 if (vec_oprnds1)
2332 VEC_free (tree, heap, vec_oprnds1);
2333
2334 return true;
2335 }
2336
2337
2338 /* Get vectorized definitions for loop-based vectorization. For the first
2339 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2340 scalar operand), and for the rest we get a copy with
2341 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2342 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2343 The vectors are collected into VEC_OPRNDS. */
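/* Each invocation pushes two vector defs and then recurses MULTI_STEP_CVT
   more times, so 2 * (MULTI_STEP_CVT + 1) vector defs are collected in
   total.  */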
2344
2345 static void
2346 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2347 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2348 {
2349 tree vec_oprnd;
2350
2351 /* Get first vector operand. */
2352 /* All the vector operands except the very first one (which is the
2353 scalar operand) are stmt copies. */
2354 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2355 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2356 else
2357 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2358
2359 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2360
2361 /* Get second vector operand. */
2362 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2363 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2364
2365 *oprnd = vec_oprnd;
2366
2367 /* For conversion in multiple steps, continue to get operands
2368 recursively. */
2369 if (multi_step_cvt)
2370 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2371 }
2372
2373
2374 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2375 For multi-step conversions store the resulting vectors and call the function
2376 recursively. */
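/* Illustrative example: on a target with 128-bit vectors, a two-step
   demotion from int to char pairs four V4SI inputs into two V8HI vectors,
   which are then paired into a single V16QI result; the intermediate types
   themselves come from supportable_narrowing_operation.  */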
2377
2378 static void
2379 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2380 int multi_step_cvt, gimple stmt,
2381 VEC (tree, heap) *vec_dsts,
2382 gimple_stmt_iterator *gsi,
2383 slp_tree slp_node, enum tree_code code,
2384 stmt_vec_info *prev_stmt_info)
2385 {
2386 unsigned int i;
2387 tree vop0, vop1, new_tmp, vec_dest;
2388 gimple new_stmt;
2389 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2390
2391 vec_dest = VEC_pop (tree, vec_dsts);
2392
2393 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2394 {
2395 /* Create demotion operation. */
2396 vop0 = VEC_index (tree, *vec_oprnds, i);
2397 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2398 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2399 new_tmp = make_ssa_name (vec_dest, new_stmt);
2400 gimple_assign_set_lhs (new_stmt, new_tmp);
2401 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2402
2403 if (multi_step_cvt)
2404 /* Store the resulting vector for next recursive call. */
2405 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2406 else
2407 {
2408 /* This is the last step of the conversion sequence. Store the
2409 vectors in SLP_NODE or in vector info of the scalar statement
2410 (or in STMT_VINFO_RELATED_STMT chain). */
2411 if (slp_node)
2412 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2413 else
2414 {
2415 if (!*prev_stmt_info)
2416 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2417 else
2418 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2419
2420 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2421 }
2422 }
2423 }
2424
2425 /* For multi-step demotion operations we first generate demotion operations
2426 from the source type to the intermediate types, and then combine the
2427 results (stored in VEC_OPRNDS) in a demotion operation to the
2428 destination type. */
2429 if (multi_step_cvt)
2430 {
2431 /* At each level of recursion we have half of the operands we had at the
2432 previous level. */
2433 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2434 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2435 stmt, vec_dsts, gsi, slp_node,
2436 code, prev_stmt_info);
2437 }
2438 }
2439
2440
2441 /* Function vectorizable_type_demotion
2442
2443 Check if STMT performs a binary or unary operation that involves
2444 type demotion, and if it can be vectorized.
2445 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2446 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2447 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2448
2449 static bool
2450 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2451 gimple *vec_stmt, slp_tree slp_node)
2452 {
2453 tree vec_dest;
2454 tree scalar_dest;
2455 tree op0;
2456 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2457 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2458 enum tree_code code, code1 = ERROR_MARK;
2459 tree def;
2460 gimple def_stmt;
2461 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2462 stmt_vec_info prev_stmt_info;
2463 int nunits_in;
2464 int nunits_out;
2465 tree vectype_out;
2466 int ncopies;
2467 int j, i;
2468 tree vectype_in;
2469 int multi_step_cvt = 0;
2470 VEC (tree, heap) *vec_oprnds0 = NULL;
2471 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2472 tree last_oprnd, intermediate_type;
2473
2474 /* FORNOW: not supported by basic block SLP vectorization. */
2475 gcc_assert (loop_vinfo);
2476
2477 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2478 return false;
2479
2480 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2481 return false;
2482
2483 /* Is STMT a vectorizable type-demotion operation? */
2484 if (!is_gimple_assign (stmt))
2485 return false;
2486
2487 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2488 return false;
2489
2490 code = gimple_assign_rhs_code (stmt);
2491 if (!CONVERT_EXPR_CODE_P (code))
2492 return false;
2493
2494 scalar_dest = gimple_assign_lhs (stmt);
2495 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2496
2497 /* Check the operands of the operation. */
2498 op0 = gimple_assign_rhs1 (stmt);
2499 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2500 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2501 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2502 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2503 && CONVERT_EXPR_CODE_P (code))))
2504 return false;
2505 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2506 &def_stmt, &def, &dt[0], &vectype_in))
2507 {
2508 if (vect_print_dump_info (REPORT_DETAILS))
2509 fprintf (vect_dump, "use not simple.");
2510 return false;
2511 }
2512 /* If op0 is an external def use a vector type with the
2513 same size as the output vector type if possible. */
2514 if (!vectype_in)
2515 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2516 if (vec_stmt)
2517 gcc_assert (vectype_in);
2518 if (!vectype_in)
2519 {
2520 if (vect_print_dump_info (REPORT_DETAILS))
2521 {
2522 fprintf (vect_dump, "no vectype for scalar type ");
2523 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2524 }
2525
2526 return false;
2527 }
2528
2529 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2530 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2531 if (nunits_in >= nunits_out)
2532 return false;
2533
2534 /* Multiple types in SLP are handled by creating the appropriate number of
2535 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2536 case of SLP. */
2537 if (slp_node)
2538 ncopies = 1;
2539 else
2540 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2541 gcc_assert (ncopies >= 1);
2542
2543 /* Supportable by target? */
2544 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2545 &code1, &multi_step_cvt, &interm_types))
2546 return false;
2547
2548 if (!vec_stmt) /* transformation not required. */
2549 {
2550 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2551 if (vect_print_dump_info (REPORT_DETAILS))
2552 fprintf (vect_dump, "=== vectorizable_demotion ===");
2553 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2554 return true;
2555 }
2556
2557 /** Transform. **/
2558 if (vect_print_dump_info (REPORT_DETAILS))
2559 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2560 ncopies);
2561
2562 /* In case of multi-step demotion, we first generate demotion operations to
2563 the intermediate types, and then from those types to the final one.
2564 We create vector destinations for the intermediate type (TYPES) received
2565 from supportable_narrowing_operation, and store them in the correct order
2566 for future use in vect_create_vectorized_demotion_stmts(). */
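  /* The destination for the final output type is pushed first, so it is
     popped last, after the intermediate-type destinations have been
     consumed by the recursive calls.  */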
2567 if (multi_step_cvt)
2568 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2569 else
2570 vec_dsts = VEC_alloc (tree, heap, 1);
2571
2572 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2573 VEC_quick_push (tree, vec_dsts, vec_dest);
2574
2575 if (multi_step_cvt)
2576 {
2577 for (i = VEC_length (tree, interm_types) - 1;
2578 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2579 {
2580 vec_dest = vect_create_destination_var (scalar_dest,
2581 intermediate_type);
2582 VEC_quick_push (tree, vec_dsts, vec_dest);
2583 }
2584 }
2585
2586 /* In case the vectorization factor (VF) is bigger than the number
2587 of elements that we can fit in a vectype (nunits), we have to generate
2588 more than one vector stmt - i.e - we need to "unroll" the
2589 vector stmt by a factor VF/nunits. */
2590 last_oprnd = op0;
2591 prev_stmt_info = NULL;
2592 for (j = 0; j < ncopies; j++)
2593 {
2594 /* Handle uses. */
2595 if (slp_node)
2596 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2597 else
2598 {
2599 VEC_free (tree, heap, vec_oprnds0);
2600 vec_oprnds0 = VEC_alloc (tree, heap,
2601 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2602 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2603 vect_pow2 (multi_step_cvt) - 1);
2604 }
2605
2606 /* Arguments are ready. Create the new vector stmts. */
2607 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2608 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2609 multi_step_cvt, stmt, tmp_vec_dsts,
2610 gsi, slp_node, code1,
2611 &prev_stmt_info);
2612 }
2613
2614 VEC_free (tree, heap, vec_oprnds0);
2615 VEC_free (tree, heap, vec_dsts);
2616 VEC_free (tree, heap, tmp_vec_dsts);
2617 VEC_free (tree, heap, interm_types);
2618
2619 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2620 return true;
2621 }
2622
2623
2624 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2625 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2626 the resulting vectors and call the function recursively. */
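/* Each input vector yields two output vectors, one from the "high" half
   and one from the "low" half of the widening operation; e.g. a single
   V8HI operand produces two V4SI results (illustrative).  */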
2627
2628 static void
2629 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2630 VEC (tree, heap) **vec_oprnds1,
2631 int multi_step_cvt, gimple stmt,
2632 VEC (tree, heap) *vec_dsts,
2633 gimple_stmt_iterator *gsi,
2634 slp_tree slp_node, enum tree_code code1,
2635 enum tree_code code2, tree decl1,
2636 tree decl2, int op_type,
2637 stmt_vec_info *prev_stmt_info)
2638 {
2639 int i;
2640 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2641 gimple new_stmt1, new_stmt2;
2642 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2643 VEC (tree, heap) *vec_tmp;
2644
2645 vec_dest = VEC_pop (tree, vec_dsts);
2646 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2647
2648 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2649 {
2650 if (op_type == binary_op)
2651 vop1 = VEC_index (tree, *vec_oprnds1, i);
2652 else
2653 vop1 = NULL_TREE;
2654
2655 /* Generate the two halves of the promotion operation. */
2656 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2657 op_type, vec_dest, gsi, stmt);
2658 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2659 op_type, vec_dest, gsi, stmt);
2660 if (is_gimple_call (new_stmt1))
2661 {
2662 new_tmp1 = gimple_call_lhs (new_stmt1);
2663 new_tmp2 = gimple_call_lhs (new_stmt2);
2664 }
2665 else
2666 {
2667 new_tmp1 = gimple_assign_lhs (new_stmt1);
2668 new_tmp2 = gimple_assign_lhs (new_stmt2);
2669 }
2670
2671 if (multi_step_cvt)
2672 {
2673 /* Store the results for the recursive call. */
2674 VEC_quick_push (tree, vec_tmp, new_tmp1);
2675 VEC_quick_push (tree, vec_tmp, new_tmp2);
2676 }
2677 else
2678 {
2679 /* Last step of the promotion sequence - store the results. */
2680 if (slp_node)
2681 {
2682 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2683 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2684 }
2685 else
2686 {
2687 if (!*prev_stmt_info)
2688 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2689 else
2690 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2691
2692 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2693 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2694 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2695 }
2696 }
2697 }
2698
2699 if (multi_step_cvt)
2700 {
2701 /* For a multi-step promotion operation we call the function recursively
2702 for every stage: we start from the input type, create promotion
2703 operations to the intermediate types, and then create promotions
2704 to the output type. */
2705 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2706 VEC_free (tree, heap, vec_tmp);
2707 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2708 multi_step_cvt - 1, stmt,
2709 vec_dsts, gsi, slp_node, code1,
2710 code2, decl1, decl2, op_type,
2711 prev_stmt_info);
2712 }
2713 }
2714
2715
2716 /* Function vectorizable_type_promotion
2717
2718 Check if STMT performs a binary or unary operation that involves
2719 type promotion, and if it can be vectorized.
2720 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2721 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2722 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2723
2724 static bool
2725 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2726 gimple *vec_stmt, slp_tree slp_node)
2727 {
2728 tree vec_dest;
2729 tree scalar_dest;
2730 tree op0, op1 = NULL;
2731 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2732 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2733 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2734 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2735 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2736 int op_type;
2737 tree def;
2738 gimple def_stmt;
2739 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2740 stmt_vec_info prev_stmt_info;
2741 int nunits_in;
2742 int nunits_out;
2743 tree vectype_out;
2744 int ncopies;
2745 int j, i;
2746 tree vectype_in;
2747 tree intermediate_type = NULL_TREE;
2748 int multi_step_cvt = 0;
2749 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2750 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2751
2752 /* FORNOW: not supported by basic block SLP vectorization. */
2753 gcc_assert (loop_vinfo);
2754
2755 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2756 return false;
2757
2758 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2759 return false;
2760
2761 /* Is STMT a vectorizable type-promotion operation? */
2762 if (!is_gimple_assign (stmt))
2763 return false;
2764
2765 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2766 return false;
2767
2768 code = gimple_assign_rhs_code (stmt);
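  /* Besides widening conversions, widening multiplication (WIDEN_MULT_EXPR,
     e.g. an int product of two shorts) is also handled here.  */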
2769 if (!CONVERT_EXPR_CODE_P (code)
2770 && code != WIDEN_MULT_EXPR)
2771 return false;
2772
2773 scalar_dest = gimple_assign_lhs (stmt);
2774 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2775
2776 /* Check the operands of the operation. */
2777 op0 = gimple_assign_rhs1 (stmt);
2778 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2779 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2780 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2781 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2782 && CONVERT_EXPR_CODE_P (code))))
2783 return false;
2784 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2785 &def_stmt, &def, &dt[0], &vectype_in))
2786 {
2787 if (vect_print_dump_info (REPORT_DETAILS))
2788 fprintf (vect_dump, "use not simple.");
2789 return false;
2790 }
2791 /* If op0 is an external or constant def use a vector type with
2792 the same size as the output vector type. */
2793 if (!vectype_in)
2794 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2795 if (vec_stmt)
2796 gcc_assert (vectype_in);
2797 if (!vectype_in)
2798 {
2799 if (vect_print_dump_info (REPORT_DETAILS))
2800 {
2801 fprintf (vect_dump, "no vectype for scalar type ");
2802 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2803 }
2804
2805 return false;
2806 }
2807
2808 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2809 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2810 if (nunits_in <= nunits_out)
2811 return false;
2812
2813 /* Multiple types in SLP are handled by creating the appropriate number of
2814 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2815 case of SLP. */
2816 if (slp_node)
2817 ncopies = 1;
2818 else
2819 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2820
2821 gcc_assert (ncopies >= 1);
2822
2823 op_type = TREE_CODE_LENGTH (code);
2824 if (op_type == binary_op)
2825 {
2826 op1 = gimple_assign_rhs2 (stmt);
2827 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2828 {
2829 if (vect_print_dump_info (REPORT_DETAILS))
2830 fprintf (vect_dump, "use not simple.");
2831 return false;
2832 }
2833 }
2834
2835 /* Supportable by target? */
2836 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2837 &decl1, &decl2, &code1, &code2,
2838 &multi_step_cvt, &interm_types))
2839 return false;
2840
2841 /* A binary widening operation can only be supported directly by the
2842 architecture. */
2843 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2844
2845 if (!vec_stmt) /* transformation not required. */
2846 {
2847 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2848 if (vect_print_dump_info (REPORT_DETAILS))
2849 fprintf (vect_dump, "=== vectorizable_promotion ===");
2850 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2851 return true;
2852 }
2853
2854 /** Transform. **/
2855
2856 if (vect_print_dump_info (REPORT_DETAILS))
2857 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2858 ncopies);
2859
2860 /* Handle def. */
2861 /* In case of multi-step promotion, we first generate promotion operations
2862 to the intermediate types, and then from those types to the final one.
2863 We store the vector destinations in VEC_DSTS in the correct order for
2864 recursive creation of promotion operations in
2865 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2866 according to TYPES received from supportable_widening_operation(). */
2867 if (multi_step_cvt)
2868 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2869 else
2870 vec_dsts = VEC_alloc (tree, heap, 1);
2871
2872 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2873 VEC_quick_push (tree, vec_dsts, vec_dest);
2874
2875 if (multi_step_cvt)
2876 {
2877 for (i = VEC_length (tree, interm_types) - 1;
2878 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2879 {
2880 vec_dest = vect_create_destination_var (scalar_dest,
2881 intermediate_type);
2882 VEC_quick_push (tree, vec_dsts, vec_dest);
2883 }
2884 }
2885
2886 if (!slp_node)
2887 {
2888 vec_oprnds0 = VEC_alloc (tree, heap,
2889 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2890 if (op_type == binary_op)
2891 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2892 }
2893
2894 /* In case the vectorization factor (VF) is bigger than the number
2895 of elements that we can fit in a vectype (nunits), we have to generate
2896 more than one vector stmt - i.e - we need to "unroll" the
2897 vector stmt by a factor VF/nunits. */
2898
2899 prev_stmt_info = NULL;
2900 for (j = 0; j < ncopies; j++)
2901 {
2902 /* Handle uses. */
2903 if (j == 0)
2904 {
2905 if (slp_node)
2906 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2907 else
2908 {
2909 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2910 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2911 if (op_type == binary_op)
2912 {
2913 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2914 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2915 }
2916 }
2917 }
2918 else
2919 {
2920 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2921 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2922 if (op_type == binary_op)
2923 {
2924 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2925 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2926 }
2927 }
2928
2929 /* Arguments are ready. Create the new vector stmts. */
2930 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2931 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2932 multi_step_cvt, stmt,
2933 tmp_vec_dsts,
2934 gsi, slp_node, code1, code2,
2935 decl1, decl2, op_type,
2936 &prev_stmt_info);
2937 }
2938
2939 VEC_free (tree, heap, vec_dsts);
2940 VEC_free (tree, heap, tmp_vec_dsts);
2941 VEC_free (tree, heap, interm_types);
2942 VEC_free (tree, heap, vec_oprnds0);
2943 VEC_free (tree, heap, vec_oprnds1);
2944
2945 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2946 return true;
2947 }
2948
2949
2950 /* Function vectorizable_store.
2951
2952 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2953 can be vectorized.
2954 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2955 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2956 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2957
2958 static bool
2959 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2960 slp_tree slp_node)
2961 {
2962 tree scalar_dest;
2963 tree data_ref;
2964 tree op;
2965 tree vec_oprnd = NULL_TREE;
2966 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2967 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2968 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2969 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2970 struct loop *loop = NULL;
2971 enum machine_mode vec_mode;
2972 tree dummy;
2973 enum dr_alignment_support alignment_support_scheme;
2974 tree def;
2975 gimple def_stmt;
2976 enum vect_def_type dt;
2977 stmt_vec_info prev_stmt_info = NULL;
2978 tree dataref_ptr = NULL_TREE;
2979 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2980 int ncopies;
2981 int j;
2982 gimple next_stmt, first_stmt = NULL;
2983 bool strided_store = false;
2984 unsigned int group_size, i;
2985 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2986 bool inv_p;
2987 VEC(tree,heap) *vec_oprnds = NULL;
2988 bool slp = (slp_node != NULL);
2989 unsigned int vec_num;
2990 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2991
2992 if (loop_vinfo)
2993 loop = LOOP_VINFO_LOOP (loop_vinfo);
2994
2995 /* Multiple types in SLP are handled by creating the appropriate number of
2996 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2997 case of SLP. */
2998 if (slp)
2999 ncopies = 1;
3000 else
3001 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3002
3003 gcc_assert (ncopies >= 1);
3004
3005 /* FORNOW. This restriction should be relaxed. */
3006 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3007 {
3008 if (vect_print_dump_info (REPORT_DETAILS))
3009 fprintf (vect_dump, "multiple types in nested loop.");
3010 return false;
3011 }
3012
3013 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3014 return false;
3015
3016 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3017 return false;
3018
3019 /* Is vectorizable store? */
3020
3021 if (!is_gimple_assign (stmt))
3022 return false;
3023
3024 scalar_dest = gimple_assign_lhs (stmt);
3025 if (TREE_CODE (scalar_dest) != ARRAY_REF
3026 && TREE_CODE (scalar_dest) != INDIRECT_REF
3027 && TREE_CODE (scalar_dest) != COMPONENT_REF
3028 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3029 && TREE_CODE (scalar_dest) != REALPART_EXPR
3030 && TREE_CODE (scalar_dest) != MEM_REF)
3031 return false;
3032
3033 gcc_assert (gimple_assign_single_p (stmt));
3034 op = gimple_assign_rhs1 (stmt);
3035 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3036 {
3037 if (vect_print_dump_info (REPORT_DETAILS))
3038 fprintf (vect_dump, "use not simple.");
3039 return false;
3040 }
3041
3042 /* The scalar rhs type needs to be trivially convertible to the vector
3043 component type. This should always be the case. */
3044 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3045 {
3046 if (vect_print_dump_info (REPORT_DETAILS))
3047 fprintf (vect_dump, "??? operands of different types");
3048 return false;
3049 }
3050
3051 vec_mode = TYPE_MODE (vectype);
3052 /* FORNOW. In some cases we can vectorize even if the data-type is not
3053 supported (e.g. - array initialization with 0). */
3054 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
3055 return false;
3056
3057 if (!STMT_VINFO_DATA_REF (stmt_info))
3058 return false;
3059
3060 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3061 {
3062 strided_store = true;
3063 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3064 if (!vect_strided_store_supported (vectype)
3065 && !PURE_SLP_STMT (stmt_info) && !slp)
3066 return false;
3067
3068 if (first_stmt == stmt)
3069 {
3070 /* STMT is the leader of the group. Check the operands of all the
3071 stmts of the group. */
3072 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3073 while (next_stmt)
3074 {
3075 gcc_assert (gimple_assign_single_p (next_stmt));
3076 op = gimple_assign_rhs1 (next_stmt);
3077 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3078 &def, &dt))
3079 {
3080 if (vect_print_dump_info (REPORT_DETAILS))
3081 fprintf (vect_dump, "use not simple.");
3082 return false;
3083 }
3084 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3085 }
3086 }
3087 }
3088
3089 if (!vec_stmt) /* transformation not required. */
3090 {
3091 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3092 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3093 return true;
3094 }
3095
3096 /** Transform. **/
3097
3098 if (strided_store)
3099 {
3100 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3101 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3102
3103 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3104
3105 /* FORNOW */
3106 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3107
3108 /* We vectorize all the stmts of the interleaving group when we
3109 reach the last stmt in the group. */
3110 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3111 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3112 && !slp)
3113 {
3114 *vec_stmt = NULL;
3115 return true;
3116 }
3117
3118 if (slp)
3119 {
3120 strided_store = false;
3121 /* VEC_NUM is the number of vect stmts to be created for this
3122 group. */
3123 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3124 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3125 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3126 }
3127 else
3128 /* VEC_NUM is the number of vect stmts to be created for this
3129 group. */
3130 vec_num = group_size;
3131 }
3132 else
3133 {
3134 first_stmt = stmt;
3135 first_dr = dr;
3136 group_size = vec_num = 1;
3137 }
3138
3139 if (vect_print_dump_info (REPORT_DETAILS))
3140 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3141
3142 dr_chain = VEC_alloc (tree, heap, group_size);
3143 oprnds = VEC_alloc (tree, heap, group_size);
3144
3145 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3146 gcc_assert (alignment_support_scheme);
3147
3148 /* In case the vectorization factor (VF) is bigger than the number
3149 of elements that we can fit in a vectype (nunits), we have to generate
3150 more than one vector stmt - i.e - we need to "unroll" the
3151 vector stmt by a factor VF/nunits. For more details see documentation in
3152 vect_get_vec_def_for_copy_stmt. */
3153
3154 /* In case of interleaving (non-unit strided access):
3155
3156 S1: &base + 2 = x2
3157 S2: &base = x0
3158 S3: &base + 1 = x1
3159 S4: &base + 3 = x3
3160
3161 We create vectorized stores starting from base address (the access of the
3162 first stmt in the chain (S2 in the above example), when the last store stmt
3163 of the chain (S4) is reached:
3164
3165 VS1: &base = vx2
3166 VS2: &base + vec_size*1 = vx0
3167 VS3: &base + vec_size*2 = vx1
3168 VS4: &base + vec_size*3 = vx3
3169
3170 Then permutation statements are generated:
3171
3172 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3173 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3174 ...
3175
3176 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3177 (the order of the data-refs in the output of vect_permute_store_chain
3178 corresponds to the order of scalar stmts in the interleaving chain - see
3179 the documentation of vect_permute_store_chain()).
3180
3181 In case of both multiple types and interleaving, above vector stores and
3182 permutation stmts are created for every copy. The result vector stmts are
3183 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3184 STMT_VINFO_RELATED_STMT for the next copies.
3185 */
3186
3187 prev_stmt_info = NULL;
3188 for (j = 0; j < ncopies; j++)
3189 {
3190 gimple new_stmt;
3191 gimple ptr_incr;
3192
3193 if (j == 0)
3194 {
3195 if (slp)
3196 {
3197 /* Get vectorized arguments for SLP_NODE. */
3198 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3199
3200 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3201 }
3202 else
3203 {
3204 /* For interleaved stores we collect vectorized defs for all the
3205 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3206 used as an input to vect_permute_store_chain(), and OPRNDS as
3207 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3208
3209 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3210 OPRNDS are of size 1. */
3211 next_stmt = first_stmt;
3212 for (i = 0; i < group_size; i++)
3213 {
3214 /* Since gaps are not supported for interleaved stores,
3215 GROUP_SIZE is the exact number of stmts in the chain.
3216 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3217 there is no interleaving, GROUP_SIZE is 1, and only one
3218 iteration of the loop will be executed. */
3219 gcc_assert (next_stmt
3220 && gimple_assign_single_p (next_stmt));
3221 op = gimple_assign_rhs1 (next_stmt);
3222
3223 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3224 NULL);
3225 VEC_quick_push(tree, dr_chain, vec_oprnd);
3226 VEC_quick_push(tree, oprnds, vec_oprnd);
3227 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3228 }
3229 }
3230
3231 /* We should have caught mismatched types earlier. */
3232 gcc_assert (useless_type_conversion_p (vectype,
3233 TREE_TYPE (vec_oprnd)));
3234 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3235 &dummy, &ptr_incr, false,
3236 &inv_p);
3237 gcc_assert (bb_vinfo || !inv_p);
3238 }
3239 else
3240 {
3241 /* For interleaved stores we created vectorized defs for all the
3242 defs stored in OPRNDS in the previous iteration (previous copy).
3243 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3244 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3245 next copy.
3246 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3247 OPRNDS are of size 1. */
3248 for (i = 0; i < group_size; i++)
3249 {
3250 op = VEC_index (tree, oprnds, i);
3251 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3252 &dt);
3253 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3254 VEC_replace(tree, dr_chain, i, vec_oprnd);
3255 VEC_replace(tree, oprnds, i, vec_oprnd);
3256 }
3257 dataref_ptr =
3258 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3259 }
3260
3261 if (strided_store)
3262 {
3263 result_chain = VEC_alloc (tree, heap, group_size);
3264 /* Permute. */
3265 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3266 &result_chain))
3267 return false;
3268 }
3269
3270 next_stmt = first_stmt;
3271 for (i = 0; i < vec_num; i++)
3272 {
3273 if (i > 0)
3274 /* Bump the vector pointer. */
3275 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3276 NULL_TREE);
3277
3278 if (slp)
3279 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3280 else if (strided_store)
3281 /* For strided stores vectorized defs are interleaved in
3282 vect_permute_store_chain(). */
3283 vec_oprnd = VEC_index (tree, result_chain, i);
3284
3285 if (aligned_access_p (first_dr))
3286 data_ref = build_simple_mem_ref (dataref_ptr);
3287 else
3288 {
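              /* DR_MISALIGNMENT is given in bytes (-1 when unknown);
                 convert it to bits for the MISALIGNED_INDIRECT_REF.  */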
3289 int mis = DR_MISALIGNMENT (first_dr);
3290 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3291 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3292 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3293 }
3294
3295 /* If accesses through a pointer to vectype do not alias the original
3296 memory reference we have a problem. This should never happen. */
3297 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3298 get_alias_set (gimple_assign_lhs (stmt))));
3299
3300 /* Arguments are ready. Create the new vector stmt. */
3301 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3302 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3303 mark_symbols_for_renaming (new_stmt);
3304
3305 if (slp)
3306 continue;
3307
3308 if (j == 0)
3309 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3310 else
3311 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3312
3313 prev_stmt_info = vinfo_for_stmt (new_stmt);
3314 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3315 if (!next_stmt)
3316 break;
3317 }
3318 }
3319
3320 VEC_free (tree, heap, dr_chain);
3321 VEC_free (tree, heap, oprnds);
3322 if (result_chain)
3323 VEC_free (tree, heap, result_chain);
3324
3325 return true;
3326 }
3327
3328 /* vectorizable_load.
3329
3330 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3331 can be vectorized.
3332 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3333 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3334 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3335
3336 static bool
3337 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3338 slp_tree slp_node, slp_instance slp_node_instance)
3339 {
3340 tree scalar_dest;
3341 tree vec_dest = NULL;
3342 tree data_ref = NULL;
3343 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3344 stmt_vec_info prev_stmt_info;
3345 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3346 struct loop *loop = NULL;
3347 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3348 bool nested_in_vect_loop = false;
3349 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3350 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3351 tree new_temp;
3352 int mode;
3353 gimple new_stmt = NULL;
3354 tree dummy;
3355 enum dr_alignment_support alignment_support_scheme;
3356 tree dataref_ptr = NULL_TREE;
3357 gimple ptr_incr;
3358 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3359 int ncopies;
3360 int i, j, group_size;
3361 tree msq = NULL_TREE, lsq;
3362 tree offset = NULL_TREE;
3363 tree realignment_token = NULL_TREE;
3364 gimple phi = NULL;
3365 VEC(tree,heap) *dr_chain = NULL;
3366 bool strided_load = false;
3367 gimple first_stmt;
3368 tree scalar_type;
3369 bool inv_p;
3370 bool compute_in_loop = false;
3371 struct loop *at_loop;
3372 int vec_num;
3373 bool slp = (slp_node != NULL);
3374 bool slp_perm = false;
3375 enum tree_code code;
3376 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3377 int vf;
3378
3379 if (loop_vinfo)
3380 {
3381 loop = LOOP_VINFO_LOOP (loop_vinfo);
3382 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3383 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3384 }
3385 else
3386 vf = 1;
3387
3388 /* Multiple types in SLP are handled by creating the appropriate number of
3389 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3390 case of SLP. */
3391 if (slp)
3392 ncopies = 1;
3393 else
3394 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3395
3396 gcc_assert (ncopies >= 1);
3397
3398 /* FORNOW. This restriction should be relaxed. */
3399 if (nested_in_vect_loop && ncopies > 1)
3400 {
3401 if (vect_print_dump_info (REPORT_DETAILS))
3402 fprintf (vect_dump, "multiple types in nested loop.");
3403 return false;
3404 }
3405
3406 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3407 return false;
3408
3409 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3410 return false;
3411
3412 /* Is vectorizable load? */
3413 if (!is_gimple_assign (stmt))
3414 return false;
3415
3416 scalar_dest = gimple_assign_lhs (stmt);
3417 if (TREE_CODE (scalar_dest) != SSA_NAME)
3418 return false;
3419
3420 code = gimple_assign_rhs_code (stmt);
3421 if (code != ARRAY_REF
3422 && code != INDIRECT_REF
3423 && code != COMPONENT_REF
3424 && code != IMAGPART_EXPR
3425 && code != REALPART_EXPR
3426 && code != MEM_REF)
3427 return false;
3428
3429 if (!STMT_VINFO_DATA_REF (stmt_info))
3430 return false;
3431
3432 scalar_type = TREE_TYPE (DR_REF (dr));
3433 mode = (int) TYPE_MODE (vectype);
3434
3435 /* FORNOW. In some cases we can vectorize even if the data-type is not
3436 supported (e.g. - data copies). */
3437 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3438 {
3439 if (vect_print_dump_info (REPORT_DETAILS))
3440 fprintf (vect_dump, "Aligned load, but unsupported type.");
3441 return false;
3442 }
3443
3444 /* The vector component type needs to be trivially convertible to the
3445 scalar lhs. This should always be the case. */
3446 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3447 {
3448 if (vect_print_dump_info (REPORT_DETAILS))
3449 fprintf (vect_dump, "??? operands of different types");
3450 return false;
3451 }
3452
3453 /* Check if the load is a part of an interleaving chain. */
3454 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3455 {
3456 strided_load = true;
3457 /* FORNOW */
3458 gcc_assert (! nested_in_vect_loop);
3459
3460 /* Check if interleaving is supported. */
3461 if (!vect_strided_load_supported (vectype)
3462 && !PURE_SLP_STMT (stmt_info) && !slp)
3463 return false;
3464 }
3465
3466 if (!vec_stmt) /* transformation not required. */
3467 {
3468 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3469 vect_model_load_cost (stmt_info, ncopies, NULL);
3470 return true;
3471 }
3472
3473 if (vect_print_dump_info (REPORT_DETAILS))
3474 fprintf (vect_dump, "transform load.");
3475
3476 /** Transform. **/
3477
3478 if (strided_load)
3479 {
3480 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3481 /* Check if the chain of loads is already vectorized. */
3482 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3483 {
3484 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3485 return true;
3486 }
3487 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3488 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3489
3490 /* VEC_NUM is the number of vect stmts to be created for this group. */
3491 if (slp)
3492 {
3493 strided_load = false;
3494 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3495 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3496 slp_perm = true;
3497 }
3498 else
3499 vec_num = group_size;
3500
3501 dr_chain = VEC_alloc (tree, heap, vec_num);
3502 }
3503 else
3504 {
3505 first_stmt = stmt;
3506 first_dr = dr;
3507 group_size = vec_num = 1;
3508 }
3509
3510 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3511 gcc_assert (alignment_support_scheme);
3512
3513 /* In case the vectorization factor (VF) is bigger than the number
3514 of elements that we can fit in a vectype (nunits), we have to generate
3515 more than one vector stmt - i.e - we need to "unroll" the
3516 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3517 from one copy of the vector stmt to the next, in the field
3518 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3519 stages to find the correct vector defs to be used when vectorizing
3520 stmts that use the defs of the current stmt. The example below illustrates
3521 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3522 4 vectorized stmts):
3523
3524 before vectorization:
3525 RELATED_STMT VEC_STMT
3526 S1: x = memref - -
3527 S2: z = x + 1 - -
3528
3529 step 1: vectorize stmt S1:
3530 We first create the vector stmt VS1_0, and, as usual, record a
3531 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3532 Next, we create the vector stmt VS1_1, and record a pointer to
3533 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3534 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3535 stmts and pointers:
3536 RELATED_STMT VEC_STMT
3537 VS1_0: vx0 = memref0 VS1_1 -
3538 VS1_1: vx1 = memref1 VS1_2 -
3539 VS1_2: vx2 = memref2 VS1_3 -
3540 VS1_3: vx3 = memref3 - -
3541 S1: x = load - VS1_0
3542 S2: z = x + 1 - -
3543
3544 See the documentation of vect_get_vec_def_for_stmt_copy for how the
3545 information we recorded in the RELATED_STMT field is used to vectorize
3546 stmt S2. */
3547
3548 /* In case of interleaving (non-unit strided access):
3549
3550 S1: x2 = &base + 2
3551 S2: x0 = &base
3552 S3: x1 = &base + 1
3553 S4: x3 = &base + 3
3554
3555 Vectorized loads are created in the order of memory accesses
3556 starting from the access of the first stmt of the chain:
3557
3558 VS1: vx0 = &base
3559 VS2: vx1 = &base + vec_size*1
3560 VS3: vx2 = &base + vec_size*2
3561 VS4: vx3 = &base + vec_size*3
3562
3563 Then permutation statements are generated:
3564
3565 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3566 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3567 ...
3568
3569 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3570 (the order of the data-refs in the output of vect_permute_load_chain
3571 corresponds to the order of scalar stmts in the interleaving chain - see
3572 the documentation of vect_permute_load_chain()).
3573 The generation of permutation stmts and recording them in
3574 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3575
3576 In case of both multiple types and interleaving, the vector loads and
3577 permutation stmts above are created for every copy. The result vector stmts
3578 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3579 STMT_VINFO_RELATED_STMT for the next copies. */
3580
3581 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3582 on a target that supports unaligned accesses (dr_unaligned_supported)
3583 we generate the following code:
3584 p = initial_addr;
3585 indx = 0;
3586 loop {
3587 p = p + indx * vectype_size;
3588 vec_dest = *(p);
3589 indx = indx + 1;
3590 }
3591
3592 Otherwise, the data reference is potentially unaligned on a target that
3593 does not support unaligned accesses (dr_explicit_realign_optimized) -
3594 then generate the following code, in which the data in each iteration is
3595 obtained by two vector loads, one from the previous iteration, and one
3596 from the current iteration:
3597 p1 = initial_addr;
3598 msq_init = *(floor(p1))
3599 p2 = initial_addr + VS - 1;
3600 realignment_token = call target_builtin;
3601 indx = 0;
3602 loop {
3603 p2 = p2 + indx * vectype_size
3604 lsq = *(floor(p2))
3605 vec_dest = realign_load (msq, lsq, realignment_token)
3606 indx = indx + 1;
3607 msq = lsq;
3608 } */
3609
3610 /* If the misalignment remains the same throughout the execution of the
3611 loop, we can create the init_addr and permutation mask at the loop
3612 preheader. Otherwise, it needs to be created inside the loop.
3613 This can only occur when vectorizing memory accesses in the inner-loop
3614 nested within an outer-loop that is being vectorized. */
3615
3616 if (loop && nested_in_vect_loop_p (loop, stmt)
3617 && (TREE_INT_CST_LOW (DR_STEP (dr))
3618 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3619 {
3620 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3621 compute_in_loop = true;
3622 }
3623
3624 if ((alignment_support_scheme == dr_explicit_realign_optimized
3625 || alignment_support_scheme == dr_explicit_realign)
3626 && !compute_in_loop)
3627 {
3628 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3629 alignment_support_scheme, NULL_TREE,
3630 &at_loop);
3631 if (alignment_support_scheme == dr_explicit_realign_optimized)
3632 {
3633 phi = SSA_NAME_DEF_STMT (msq);
3634 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3635 }
3636 }
3637 else
3638 at_loop = loop;
3639
3640 prev_stmt_info = NULL;
3641 for (j = 0; j < ncopies; j++)
3642 {
3643 /* 1. Create the vector pointer update chain. */
3644 if (j == 0)
3645 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3646 at_loop, offset,
3647 &dummy, &ptr_incr, false,
3648 &inv_p);
3649 else
3650 dataref_ptr =
3651 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3652
3653 for (i = 0; i < vec_num; i++)
3654 {
3655 if (i > 0)
3656 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3657 NULL_TREE);
3658
3659 /* 2. Create the vector-load in the loop. */
3660 switch (alignment_support_scheme)
3661 {
3662 case dr_aligned:
3663 gcc_assert (aligned_access_p (first_dr));
3664 data_ref = build_simple_mem_ref (dataref_ptr);
3665 break;
3666 case dr_unaligned_supported:
3667 {
3668 int mis = DR_MISALIGNMENT (first_dr);
3669 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3670
3671 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3672 data_ref =
3673 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3674 break;
3675 }
3676 case dr_explicit_realign:
3677 {
3678 tree ptr, bump;
3679 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3680
3681 if (compute_in_loop)
3682 msq = vect_setup_realignment (first_stmt, gsi,
3683 &realignment_token,
3684 dr_explicit_realign,
3685 dataref_ptr, NULL);
3686
3687 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3688 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3689 new_stmt = gimple_build_assign (vec_dest, data_ref);
3690 new_temp = make_ssa_name (vec_dest, new_stmt);
3691 gimple_assign_set_lhs (new_stmt, new_temp);
3692 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3693 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3694 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3695 msq = new_temp;
3696
3697 bump = size_binop (MULT_EXPR, vs_minus_1,
3698 TYPE_SIZE_UNIT (scalar_type));
3699 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3700 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3701 break;
3702 }
3703 case dr_explicit_realign_optimized:
3704 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3705 break;
3706 default:
3707 gcc_unreachable ();
3708 }
3709 /* If accesses through a pointer to vectype do not alias the original
3710 memory reference we have a problem. This should never happen. */
3711 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3712 get_alias_set (gimple_assign_rhs1 (stmt))));
3713 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3714 new_stmt = gimple_build_assign (vec_dest, data_ref);
3715 new_temp = make_ssa_name (vec_dest, new_stmt);
3716 gimple_assign_set_lhs (new_stmt, new_temp);
3717 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3718 mark_symbols_for_renaming (new_stmt);
3719
3720 /* 3. Handle explicit realignment if necessary/supported. Create in
3721 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3722 if (alignment_support_scheme == dr_explicit_realign_optimized
3723 || alignment_support_scheme == dr_explicit_realign)
3724 {
3725 tree tmp;
3726
3727 lsq = gimple_assign_lhs (new_stmt);
3728 if (!realignment_token)
3729 realignment_token = dataref_ptr;
3730 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3731 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3732 realignment_token);
3733 new_stmt = gimple_build_assign (vec_dest, tmp);
3734 new_temp = make_ssa_name (vec_dest, new_stmt);
3735 gimple_assign_set_lhs (new_stmt, new_temp);
3736 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3737
3738 if (alignment_support_scheme == dr_explicit_realign_optimized)
3739 {
3740 gcc_assert (phi);
3741 if (i == vec_num - 1 && j == ncopies - 1)
3742 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3743 UNKNOWN_LOCATION);
3744 msq = lsq;
3745 }
3746 }
3747
3748 /* 4. Handle invariant-load. */
3749 if (inv_p && !bb_vinfo)
3750 {
3751 gcc_assert (!strided_load);
3752 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3753 if (j == 0)
3754 {
3755 int k;
3756 tree t = NULL_TREE;
3757 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3758
3759 /* CHECKME: bitpos depends on endianness? */
3760 bitpos = bitsize_zero_node;
3761 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3762 bitsize, bitpos);
3763 vec_dest =
3764 vect_create_destination_var (scalar_dest, NULL_TREE);
3765 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3766 new_temp = make_ssa_name (vec_dest, new_stmt);
3767 gimple_assign_set_lhs (new_stmt, new_temp);
3768 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3769
3770 for (k = nunits - 1; k >= 0; --k)
3771 t = tree_cons (NULL_TREE, new_temp, t);
3772 /* FIXME: use build_constructor directly. */
3773 vec_inv = build_constructor_from_list (vectype, t);
3774 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3775 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3776 }
3777 else
3778 gcc_unreachable (); /* FORNOW. */
3779 }
3780
3781 /* Collect vector loads and later create their permutation in
3782 vect_transform_strided_load (). */
3783 if (strided_load || slp_perm)
3784 VEC_quick_push (tree, dr_chain, new_temp);
3785
3786 /* Store vector loads in the corresponding SLP_NODE. */
3787 if (slp && !slp_perm)
3788 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3789 }
3790
3791 if (slp && !slp_perm)
3792 continue;
3793
3794 if (slp_perm)
3795 {
3796 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3797 slp_node_instance, false))
3798 {
3799 VEC_free (tree, heap, dr_chain);
3800 return false;
3801 }
3802 }
3803 else
3804 {
3805 if (strided_load)
3806 {
3807 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3808 return false;
3809
3810 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3811 VEC_free (tree, heap, dr_chain);
3812 dr_chain = VEC_alloc (tree, heap, group_size);
3813 }
3814 else
3815 {
3816 if (j == 0)
3817 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3818 else
3819 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3820 prev_stmt_info = vinfo_for_stmt (new_stmt);
3821 }
3822 }
3823 }
3824
3825 if (dr_chain)
3826 VEC_free (tree, heap, dr_chain);
3827
3828 return true;
3829 }
3830
3831 /* Function vect_is_simple_cond.
3832
3833 Input:
3834 LOOP - the loop that is being vectorized.
3835 COND - Condition that is checked for simple use.
3836
3837 Returns whether a COND can be vectorized. Checks whether
3838 condition operands are supportable using vect_is_simple_use. */
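/* For illustration (an editorial sketch; the SSA names are hypothetical):
   a condition such as "a_5 < b_7", where a_5 and b_7 are SSA names whose
   defining stmts pass vect_is_simple_use, or integer/real/fixed-point
   constants, is considered simple; a condition whose operand has an
   unsupported def, or that is not a comparison at all, is rejected.  */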
3839
3840 static bool
3841 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3842 {
3843 tree lhs, rhs;
3844 tree def;
3845 enum vect_def_type dt;
3846
3847 if (!COMPARISON_CLASS_P (cond))
3848 return false;
3849
3850 lhs = TREE_OPERAND (cond, 0);
3851 rhs = TREE_OPERAND (cond, 1);
3852
3853 if (TREE_CODE (lhs) == SSA_NAME)
3854 {
3855 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3856 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3857 &dt))
3858 return false;
3859 }
3860 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3861 && TREE_CODE (lhs) != FIXED_CST)
3862 return false;
3863
3864 if (TREE_CODE (rhs) == SSA_NAME)
3865 {
3866 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3867 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3868 &dt))
3869 return false;
3870 }
3871 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3872 && TREE_CODE (rhs) != FIXED_CST)
3873 return false;
3874
3875 return true;
3876 }
3877
3878 /* vectorizable_condition.
3879
3880 Check if STMT is a conditional modify expression that can be vectorized.
3881 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3882 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3883 at GSI.
3884
3885 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3886 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3887 the else clause if it is 2).
3888
3889 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
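/* A sketch of the intended transformation, for illustration only (the
   vector names are hypothetical, not taken from the code below):

     scalar:      x_1 = a_2 < b_3 ? c_4 : d_5;
     vectorized:  vx = VEC_COND_EXPR <va < vb, vc, vd>;

   where va, vb, vc and vd denote the vector defs obtained for the
   corresponding scalar operands.  */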
3890
3891 bool
3892 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3893 gimple *vec_stmt, tree reduc_def, int reduc_index)
3894 {
3895 tree scalar_dest = NULL_TREE;
3896 tree vec_dest = NULL_TREE;
3897 tree op = NULL_TREE;
3898 tree cond_expr, then_clause, else_clause;
3899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3900 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3901 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3902 tree vec_compare, vec_cond_expr;
3903 tree new_temp;
3904 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3905 enum machine_mode vec_mode;
3906 tree def;
3907 enum vect_def_type dt;
3908 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3909 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3910 enum tree_code code;
3911
3912 /* FORNOW: unsupported in basic block SLP. */
3913 gcc_assert (loop_vinfo);
3914
3915 gcc_assert (ncopies >= 1);
3916 if (ncopies > 1)
3917 return false; /* FORNOW */
3918
3919 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3920 return false;
3921
3922 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3923 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3924 && reduc_def))
3925 return false;
3926
3927 /* FORNOW: SLP not supported. */
3928 if (STMT_SLP_TYPE (stmt_info))
3929 return false;
3930
3931 /* FORNOW: not yet supported. */
3932 if (STMT_VINFO_LIVE_P (stmt_info))
3933 {
3934 if (vect_print_dump_info (REPORT_DETAILS))
3935 fprintf (vect_dump, "value used after loop.");
3936 return false;
3937 }
3938
3939 /* Is vectorizable conditional operation? */
3940 if (!is_gimple_assign (stmt))
3941 return false;
3942
3943 code = gimple_assign_rhs_code (stmt);
3944
3945 if (code != COND_EXPR)
3946 return false;
3947
3948 gcc_assert (gimple_assign_single_p (stmt));
3949 op = gimple_assign_rhs1 (stmt);
3950 cond_expr = TREE_OPERAND (op, 0);
3951 then_clause = TREE_OPERAND (op, 1);
3952 else_clause = TREE_OPERAND (op, 2);
3953
3954 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3955 return false;
3956
3957 /* We do not handle two different vector types for the condition
3958 and the values. */
3959 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
3960 TREE_TYPE (vectype)))
3961 return false;
3962
3963 if (TREE_CODE (then_clause) == SSA_NAME)
3964 {
3965 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3966 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3967 &then_def_stmt, &def, &dt))
3968 return false;
3969 }
3970 else if (TREE_CODE (then_clause) != INTEGER_CST
3971 && TREE_CODE (then_clause) != REAL_CST
3972 && TREE_CODE (then_clause) != FIXED_CST)
3973 return false;
3974
3975 if (TREE_CODE (else_clause) == SSA_NAME)
3976 {
3977 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3978 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3979 &else_def_stmt, &def, &dt))
3980 return false;
3981 }
3982 else if (TREE_CODE (else_clause) != INTEGER_CST
3983 && TREE_CODE (else_clause) != REAL_CST
3984 && TREE_CODE (else_clause) != FIXED_CST)
3985 return false;
3986
3987
3988 vec_mode = TYPE_MODE (vectype);
3989
3990 if (!vec_stmt)
3991 {
3992 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3993 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3994 }
3995
3996 /* Transform */
3997
3998 /* Handle def. */
3999 scalar_dest = gimple_assign_lhs (stmt);
4000 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4001
4002 /* Handle cond expr. */
4003 vec_cond_lhs =
4004 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
4005 vec_cond_rhs =
4006 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
4007 if (reduc_index == 1)
4008 vec_then_clause = reduc_def;
4009 else
4010 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
4011 if (reduc_index == 2)
4012 vec_else_clause = reduc_def;
4013 else
4014 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
4015
4016 /* Arguments are ready. Create the new vector stmt. */
4017 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4018 vec_cond_lhs, vec_cond_rhs);
4019 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4020 vec_compare, vec_then_clause, vec_else_clause);
4021
4022 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4023 new_temp = make_ssa_name (vec_dest, *vec_stmt);
4024 gimple_assign_set_lhs (*vec_stmt, new_temp);
4025 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
4026
4027 return true;
4028 }
4029
4030
4031 /* Make sure the statement is vectorizable. */
4032
4033 bool
4034 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4035 {
4036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4037 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4038 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4039 bool ok;
4040 tree scalar_type, vectype;
4041
4042 if (vect_print_dump_info (REPORT_DETAILS))
4043 {
4044 fprintf (vect_dump, "==> examining statement: ");
4045 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4046 }
4047
4048 if (gimple_has_volatile_ops (stmt))
4049 {
4050 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4051 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4052
4053 return false;
4054 }
4055
4056 /* Skip stmts that do not need to be vectorized. In loops this is expected
4057 to include:
4058 - the COND_EXPR which is the loop exit condition
4059 - any LABEL_EXPRs in the loop
4060 - computations that are used only for array indexing or loop control.
4061 In basic blocks we only analyze statements that are a part of some SLP
4062 instance, and therefore all the statements are relevant. */
4063
4064 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4065 && !STMT_VINFO_LIVE_P (stmt_info))
4066 {
4067 if (vect_print_dump_info (REPORT_DETAILS))
4068 fprintf (vect_dump, "irrelevant.");
4069
4070 return true;
4071 }
4072
4073 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4074 {
4075 case vect_internal_def:
4076 break;
4077
4078 case vect_reduction_def:
4079 case vect_nested_cycle:
4080 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4081 || relevance == vect_used_in_outer_by_reduction
4082 || relevance == vect_unused_in_scope));
4083 break;
4084
4085 case vect_induction_def:
4086 case vect_constant_def:
4087 case vect_external_def:
4088 case vect_unknown_def_type:
4089 default:
4090 gcc_unreachable ();
4091 }
4092
4093 if (bb_vinfo)
4094 {
4095 gcc_assert (PURE_SLP_STMT (stmt_info));
4096
4097 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4098 if (vect_print_dump_info (REPORT_DETAILS))
4099 {
4100 fprintf (vect_dump, "get vectype for scalar type: ");
4101 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4102 }
4103
4104 vectype = get_vectype_for_scalar_type (scalar_type);
4105 if (!vectype)
4106 {
4107 if (vect_print_dump_info (REPORT_DETAILS))
4108 {
4109 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4110 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4111 }
4112 return false;
4113 }
4114
4115 if (vect_print_dump_info (REPORT_DETAILS))
4116 {
4117 fprintf (vect_dump, "vectype: ");
4118 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4119 }
4120
4121 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4122 }
4123
4124 if (STMT_VINFO_RELEVANT_P (stmt_info))
4125 {
4126 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4127 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4128 *need_to_vectorize = true;
4129 }
4130
4131 ok = true;
4132 if (!bb_vinfo
4133 && (STMT_VINFO_RELEVANT_P (stmt_info)
4134 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4135 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4136 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4137 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4138 || vectorizable_operation (stmt, NULL, NULL, NULL)
4139 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4140 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4141 || vectorizable_call (stmt, NULL, NULL)
4142 || vectorizable_store (stmt, NULL, NULL, NULL)
4143 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4144 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4145 else
4146 {
4147 if (bb_vinfo)
4148 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4149 || vectorizable_assignment (stmt, NULL, NULL, node)
4150 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4151 || vectorizable_store (stmt, NULL, NULL, node));
4152 }
4153
4154 if (!ok)
4155 {
4156 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4157 {
4158 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4159 fprintf (vect_dump, "supported: ");
4160 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4161 }
4162
4163 return false;
4164 }
4165
4166 if (bb_vinfo)
4167 return true;
4168
4169 /* Stmts that are (also) "live" (i.e. - that are used outside the loop)
4170 need extra handling, except for vectorizable reductions. */
4171 if (STMT_VINFO_LIVE_P (stmt_info)
4172 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4173 ok = vectorizable_live_operation (stmt, NULL, NULL);
4174
4175 if (!ok)
4176 {
4177 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4178 {
4179 fprintf (vect_dump, "not vectorized: live stmt not ");
4180 fprintf (vect_dump, "supported: ");
4181 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4182 }
4183
4184 return false;
4185 }
4186
4187 if (!PURE_SLP_STMT (stmt_info))
4188 {
4189 /* Groups of strided accesses whose size is not a power of 2 are not
4190 vectorizable yet using loop-vectorization. Therefore, if this stmt
4191 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4192 loop-based vectorized), the loop cannot be vectorized. */
4193 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4194 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4195 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4196 {
4197 if (vect_print_dump_info (REPORT_DETAILS))
4198 {
4199 fprintf (vect_dump, "not vectorized: the size of group "
4200 "of strided accesses is not a power of 2");
4201 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4202 }
4203
4204 return false;
4205 }
4206 }
4207
4208 return true;
4209 }
4210
4211
4212 /* Function vect_transform_stmt.
4213
4214 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4215
4216 bool
4217 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4218 bool *strided_store, slp_tree slp_node,
4219 slp_instance slp_node_instance)
4220 {
4221 bool is_store = false;
4222 gimple vec_stmt = NULL;
4223 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4224 gimple orig_stmt_in_pattern;
4225 bool done;
4226
4227 switch (STMT_VINFO_TYPE (stmt_info))
4228 {
4229 case type_demotion_vec_info_type:
4230 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4231 gcc_assert (done);
4232 break;
4233
4234 case type_promotion_vec_info_type:
4235 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4236 gcc_assert (done);
4237 break;
4238
4239 case type_conversion_vec_info_type:
4240 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4241 gcc_assert (done);
4242 break;
4243
4244 case induc_vec_info_type:
4245 gcc_assert (!slp_node);
4246 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4247 gcc_assert (done);
4248 break;
4249
4250 case op_vec_info_type:
4251 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4252 gcc_assert (done);
4253 break;
4254
4255 case assignment_vec_info_type:
4256 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4257 gcc_assert (done);
4258 break;
4259
4260 case load_vec_info_type:
4261 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4262 slp_node_instance);
4263 gcc_assert (done);
4264 break;
4265
4266 case store_vec_info_type:
4267 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4268 gcc_assert (done);
4269 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4270 {
4271 /* In case of interleaving, the whole chain is vectorized when the
4272 last store in the chain is reached. Store stmts before the last
4273 one are skipped, and their vec_stmt_info shouldn't be freed
4274 meanwhile. */
4275 *strided_store = true;
4276 if (STMT_VINFO_VEC_STMT (stmt_info))
4277 is_store = true;
4278 }
4279 else
4280 is_store = true;
4281 break;
4282
4283 case condition_vec_info_type:
4284 gcc_assert (!slp_node);
4285 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4286 gcc_assert (done);
4287 break;
4288
4289 case call_vec_info_type:
4290 gcc_assert (!slp_node);
4291 done = vectorizable_call (stmt, gsi, &vec_stmt);
4292 break;
4293
4294 case reduc_vec_info_type:
4295 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4296 gcc_assert (done);
4297 break;
4298
4299 default:
4300 if (!STMT_VINFO_LIVE_P (stmt_info))
4301 {
4302 if (vect_print_dump_info (REPORT_DETAILS))
4303 fprintf (vect_dump, "stmt not supported.");
4304 gcc_unreachable ();
4305 }
4306 }
4307
4308 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4309 is being vectorized, but outside the immediately enclosing loop. */
4310 if (vec_stmt
4311 && STMT_VINFO_LOOP_VINFO (stmt_info)
4312 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4313 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4314 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4315 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4316 || STMT_VINFO_RELEVANT (stmt_info) ==
4317 vect_used_in_outer_by_reduction))
4318 {
4319 struct loop *innerloop = LOOP_VINFO_LOOP (
4320 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4321 imm_use_iterator imm_iter;
4322 use_operand_p use_p;
4323 tree scalar_dest;
4324 gimple exit_phi;
4325
4326 if (vect_print_dump_info (REPORT_DETAILS))
4327 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4328
4329 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4330 (to be used when vectorizing outer-loop stmts that use the DEF of
4331 STMT). */
4332 if (gimple_code (stmt) == GIMPLE_PHI)
4333 scalar_dest = PHI_RESULT (stmt);
4334 else
4335 scalar_dest = gimple_assign_lhs (stmt);
4336
4337 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4338 {
4339 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4340 {
4341 exit_phi = USE_STMT (use_p);
4342 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4343 }
4344 }
4345 }
4346
4347 /* Handle stmts whose DEF is used outside the loop-nest that is
4348 being vectorized. */
4349 if (STMT_VINFO_LIVE_P (stmt_info)
4350 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4351 {
4352 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4353 gcc_assert (done);
4354 }
4355
4356 if (vec_stmt)
4357 {
4358 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4359 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4360 if (orig_stmt_in_pattern)
4361 {
4362 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4363 /* STMT was inserted by the vectorizer to replace a computation idiom.
4364 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4365 computed this idiom. We need to record a pointer to VEC_STMT in
4366 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4367 documentation of vect_pattern_recog. */
4368 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4369 {
4370 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4371 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4372 }
4373 }
4374 }
4375
4376 return is_store;
4377 }
4378
4379
4380 /* Remove a group of stores (for SLP or interleaving), free their
4381 stmt_vec_info. */
4382
4383 void
4384 vect_remove_stores (gimple first_stmt)
4385 {
4386 gimple next = first_stmt;
4387 gimple tmp;
4388 gimple_stmt_iterator next_si;
4389
4390 while (next)
4391 {
4392 /* Free the attached stmt_vec_info and remove the stmt. */
4393 next_si = gsi_for_stmt (next);
4394 gsi_remove (&next_si, true);
4395 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4396 free_stmt_vec_info (next);
4397 next = tmp;
4398 }
4399 }
4400
4401
4402 /* Function new_stmt_vec_info.
4403
4404 Create and initialize a new stmt_vec_info struct for STMT. */
4405
4406 stmt_vec_info
4407 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4408 bb_vec_info bb_vinfo)
4409 {
4410 stmt_vec_info res;
4411 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4412
4413 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4414 STMT_VINFO_STMT (res) = stmt;
4415 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4416 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4417 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4418 STMT_VINFO_LIVE_P (res) = false;
4419 STMT_VINFO_VECTYPE (res) = NULL;
4420 STMT_VINFO_VEC_STMT (res) = NULL;
4421 STMT_VINFO_VECTORIZABLE (res) = true;
4422 STMT_VINFO_IN_PATTERN_P (res) = false;
4423 STMT_VINFO_RELATED_STMT (res) = NULL;
4424 STMT_VINFO_DATA_REF (res) = NULL;
4425
4426 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4427 STMT_VINFO_DR_OFFSET (res) = NULL;
4428 STMT_VINFO_DR_INIT (res) = NULL;
4429 STMT_VINFO_DR_STEP (res) = NULL;
4430 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4431
4432 if (gimple_code (stmt) == GIMPLE_PHI
4433 && is_loop_header_bb_p (gimple_bb (stmt)))
4434 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4435 else
4436 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4437
4438 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4439 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4440 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4441 STMT_SLP_TYPE (res) = loop_vect;
4442 DR_GROUP_FIRST_DR (res) = NULL;
4443 DR_GROUP_NEXT_DR (res) = NULL;
4444 DR_GROUP_SIZE (res) = 0;
4445 DR_GROUP_STORE_COUNT (res) = 0;
4446 DR_GROUP_GAP (res) = 0;
4447 DR_GROUP_SAME_DR_STMT (res) = NULL;
4448 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4449
4450 return res;
4451 }
4452
4453
4454 /* Create the vector that maps stmt UIDs to stmt_vec_info structs. */
4455
4456 void
4457 init_stmt_vec_info_vec (void)
4458 {
4459 gcc_assert (!stmt_vec_info_vec);
4460 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4461 }
4462
4463
4464 /* Free the vector of stmt_vec_info structs. */
4465
4466 void
4467 free_stmt_vec_info_vec (void)
4468 {
4469 gcc_assert (stmt_vec_info_vec);
4470 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4471 }
4472
4473
4474 /* Free stmt vectorization related info. */
4475
4476 void
4477 free_stmt_vec_info (gimple stmt)
4478 {
4479 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4480
4481 if (!stmt_info)
4482 return;
4483
4484 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4485 set_vinfo_for_stmt (stmt, NULL);
4486 free (stmt_info);
4487 }
4488
4489
4490 /* Function get_vectype_for_scalar_type.
4491
4492 Returns the vector type corresponding to SCALAR_TYPE as supported
4493 by the target. */
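/* For illustration (assuming a target whose SIMD word is 16 bytes wide):
   a 4-byte "int" is mapped to a 4-element integer vector type, while a
   scalar type whose size is zero or at least the SIMD width yields
   NULL_TREE.  The concrete vector mode chosen depends on the target.  */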
4494
4495 tree
4496 get_vectype_for_scalar_type (tree scalar_type)
4497 {
4498 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4499 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4500 int nunits;
4501 tree vectype;
4502
4503 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4504 return NULL_TREE;
4505
4506 /* We can't build a vector type of elements with alignment bigger than
4507 their size. */
4508 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4509 return NULL_TREE;
4510
4511 /* If we'd build a vector type of elements whose mode precision doesn't
4512 match their type's precision, we'll get mismatched types on vector
4513 extracts via BIT_FIELD_REFs. This effectively means we disable
4514 vectorization of bool and/or enum types in some languages. */
4515 if (INTEGRAL_TYPE_P (scalar_type)
4516 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4517 return NULL_TREE;
4518
4519 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4520 is expected. */
4521 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4522
4523 vectype = build_vector_type (scalar_type, nunits);
4524 if (vect_print_dump_info (REPORT_DETAILS))
4525 {
4526 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4527 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4528 }
4529
4530 if (!vectype)
4531 return NULL_TREE;
4532
4533 if (vect_print_dump_info (REPORT_DETAILS))
4534 {
4535 fprintf (vect_dump, "vectype: ");
4536 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4537 }
4538
4539 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4540 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4541 {
4542 if (vect_print_dump_info (REPORT_DETAILS))
4543 fprintf (vect_dump, "mode not supported by target.");
4544 return NULL_TREE;
4545 }
4546
4547 return vectype;
4548 }
4549
4550 /* Function get_same_sized_vectype
4551
4552 Returns a vector type corresponding to SCALAR_TYPE of size
4553 VECTOR_TYPE if supported by the target. */
4554
4555 tree
4556 get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4557 {
4558 return get_vectype_for_scalar_type (scalar_type);
4559 }
4560
4561 /* Function vect_is_simple_use.
4562
4563 Input:
4564 LOOP_VINFO - the vect info of the loop that is being vectorized.
4565 BB_VINFO - the vect info of the basic block that is being vectorized.
4566 OPERAND - operand of a stmt in the loop or bb.
4567 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4568
4569 Returns whether a stmt with OPERAND can be vectorized.
4570 For loops, supportable operands are constants, loop invariants, and operands
4571 that are defined by the current iteration of the loop. Unsupportable
4572 operands are those that are defined by a previous iteration of the loop (as
4573 is the case in reduction/induction computations).
4574 For basic blocks, supportable operands are constants and bb invariants.
4575 For now, operands defined outside the basic block are not supported. */
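/* For illustration (a hypothetical loop, not taken from this file):

     for (i = 0; i < n; i++)
       a[i] = b[i] + x;

   the operand "x", defined before the loop, is classified as
   vect_external_def; a literal constant as vect_constant_def; and the
   value loaded from b[i], defined by a stmt inside the loop, typically
   as vect_internal_def.  */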
4576
4577 bool
4578 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4579 bb_vec_info bb_vinfo, gimple *def_stmt,
4580 tree *def, enum vect_def_type *dt)
4581 {
4582 basic_block bb;
4583 stmt_vec_info stmt_vinfo;
4584 struct loop *loop = NULL;
4585
4586 if (loop_vinfo)
4587 loop = LOOP_VINFO_LOOP (loop_vinfo);
4588
4589 *def_stmt = NULL;
4590 *def = NULL_TREE;
4591
4592 if (vect_print_dump_info (REPORT_DETAILS))
4593 {
4594 fprintf (vect_dump, "vect_is_simple_use: operand ");
4595 print_generic_expr (vect_dump, operand, TDF_SLIM);
4596 }
4597
4598 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4599 {
4600 *dt = vect_constant_def;
4601 return true;
4602 }
4603
4604 if (is_gimple_min_invariant (operand))
4605 {
4606 *def = operand;
4607 *dt = vect_external_def;
4608 return true;
4609 }
4610
4611 if (TREE_CODE (operand) == PAREN_EXPR)
4612 {
4613 if (vect_print_dump_info (REPORT_DETAILS))
4614 fprintf (vect_dump, "non-associatable copy.");
4615 operand = TREE_OPERAND (operand, 0);
4616 }
4617
4618 if (TREE_CODE (operand) != SSA_NAME)
4619 {
4620 if (vect_print_dump_info (REPORT_DETAILS))
4621 fprintf (vect_dump, "not ssa-name.");
4622 return false;
4623 }
4624
4625 *def_stmt = SSA_NAME_DEF_STMT (operand);
4626 if (*def_stmt == NULL)
4627 {
4628 if (vect_print_dump_info (REPORT_DETAILS))
4629 fprintf (vect_dump, "no def_stmt.");
4630 return false;
4631 }
4632
4633 if (vect_print_dump_info (REPORT_DETAILS))
4634 {
4635 fprintf (vect_dump, "def_stmt: ");
4636 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4637 }
4638
4639 /* An empty stmt is expected only in the case of a function argument.
4640 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4641 if (gimple_nop_p (*def_stmt))
4642 {
4643 *def = operand;
4644 *dt = vect_external_def;
4645 return true;
4646 }
4647
4648 bb = gimple_bb (*def_stmt);
4649
4650 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4651 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4652 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4653 *dt = vect_external_def;
4654 else
4655 {
4656 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4657 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4658 }
4659
4660 if (*dt == vect_unknown_def_type)
4661 {
4662 if (vect_print_dump_info (REPORT_DETAILS))
4663 fprintf (vect_dump, "Unsupported pattern.");
4664 return false;
4665 }
4666
4667 if (vect_print_dump_info (REPORT_DETAILS))
4668 fprintf (vect_dump, "type of def: %d.",*dt);
4669
4670 switch (gimple_code (*def_stmt))
4671 {
4672 case GIMPLE_PHI:
4673 *def = gimple_phi_result (*def_stmt);
4674 break;
4675
4676 case GIMPLE_ASSIGN:
4677 *def = gimple_assign_lhs (*def_stmt);
4678 break;
4679
4680 case GIMPLE_CALL:
4681 *def = gimple_call_lhs (*def_stmt);
4682 if (*def != NULL)
4683 break;
4684 /* FALLTHRU */
4685 default:
4686 if (vect_print_dump_info (REPORT_DETAILS))
4687 fprintf (vect_dump, "unsupported defining stmt: ");
4688 return false;
4689 }
4690
4691 return true;
4692 }
4693
4694 /* Function vect_is_simple_use_1.
4695
4696 Same as vect_is_simple_use but also determines the vector operand
4697 type of OPERAND and stores it to *VECTYPE. If the definition of
4698 OPERAND is vect_uninitialized_def, vect_constant_def or
4699 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
4700 is responsible for computing the best suited vector type for the
4701 scalar operand. */
4702
4703 bool
4704 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4705 bb_vec_info bb_vinfo, gimple *def_stmt,
4706 tree *def, enum vect_def_type *dt, tree *vectype)
4707 {
4708 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4709 return false;
4710
4711 /* Now get a vector type if the def is internal, otherwise supply
4712 NULL_TREE and leave it up to the caller to figure out a proper
4713 type for the use stmt. */
4714 if (*dt == vect_internal_def
4715 || *dt == vect_induction_def
4716 || *dt == vect_reduction_def
4717 || *dt == vect_double_reduction_def
4718 || *dt == vect_nested_cycle)
4719 {
4720 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4721 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4722 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4723 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4724 gcc_assert (*vectype != NULL_TREE);
4725 }
4726 else if (*dt == vect_uninitialized_def
4727 || *dt == vect_constant_def
4728 || *dt == vect_external_def)
4729 *vectype = NULL_TREE;
4730 else
4731 gcc_unreachable ();
4732
4733 return true;
4734 }
4735
4736
4737 /* Function supportable_widening_operation
4738
4739 Check whether an operation represented by the code CODE is a
4740 widening operation that is supported by the target platform in
4741 vector form (i.e., when operating on arguments of type VECTYPE_IN
4742 producing a result of type VECTYPE_OUT).
4743
4744 Widening operations we currently support are NOP (CONVERT), FLOAT
4745 and WIDEN_MULT. This function checks if these operations are supported
4746 by the target platform either directly (via vector tree-codes), or via
4747 target builtins.
4748
4749 Output:
4750 - CODE1 and CODE2 are codes of vector operations to be used when
4751 vectorizing the operation, if available.
4752 - DECL1 and DECL2 are decls of target builtin functions to be used
4753 when vectorizing the operation, if available. In this case,
4754 CODE1 and CODE2 are CALL_EXPR.
4755 - MULTI_STEP_CVT determines the number of required intermediate steps in
4756 case of multi-step conversion (like char->short->int - in that case
4757 MULTI_STEP_CVT will be 1).
4758 - INTERM_TYPES contains the intermediate type required to perform the
4759 widening operation (short in the above example). */
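/* For illustration (a sketch, assuming the target provides the needed
   unpack optabs): widening a vector of chars to vectors of ints goes
   through an intermediate vector-of-shorts step, so on success CODE1 and
   CODE2 are the VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair, MULTI_STEP_CVT
   is 1 and INTERM_TYPES holds the intermediate short vector type.  */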
4760
4761 bool
4762 supportable_widening_operation (enum tree_code code, gimple stmt,
4763 tree vectype_out, tree vectype_in,
4764 tree *decl1, tree *decl2,
4765 enum tree_code *code1, enum tree_code *code2,
4766 int *multi_step_cvt,
4767 VEC (tree, heap) **interm_types)
4768 {
4769 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4770 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4771 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4772 bool ordered_p;
4773 enum machine_mode vec_mode;
4774 enum insn_code icode1, icode2;
4775 optab optab1, optab2;
4776 tree vectype = vectype_in;
4777 tree wide_vectype = vectype_out;
4778 enum tree_code c1, c2;
4779
4780 /* The result of a vectorized widening operation usually requires two vectors
4781 (because the widened results do not fit into one vector). The generated
4782 vector results would normally be expected to be generated in the same
4783 order as in the original scalar computation, i.e. if 8 results are
4784 generated in each vector iteration, they are to be organized as follows:
4785 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4786
4787 However, in the special case that the result of the widening operation is
4788 used in a reduction computation only, the order doesn't matter (because
4789 when vectorizing a reduction we change the order of the computation).
4790 Some targets can take advantage of this and generate more efficient code.
4791 For example, targets like Altivec, which support widen_mult using a sequence
4792 of {mult_even,mult_odd}, generate the following vectors:
4793 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4794
4795 When vectorizing outer-loops, we execute the inner-loop sequentially
4796 (each vectorized inner-loop iteration contributes to VF outer-loop
4797 iterations in parallel). We therefore don't allow changing the order
4798 of the computation in the inner-loop during outer-loop vectorization. */
4799
4800 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4801 && !nested_in_vect_loop_p (vect_loop, stmt))
4802 ordered_p = false;
4803 else
4804 ordered_p = true;
4805
4806 if (!ordered_p
4807 && code == WIDEN_MULT_EXPR
4808 && targetm.vectorize.builtin_mul_widen_even
4809 && targetm.vectorize.builtin_mul_widen_even (vectype)
4810 && targetm.vectorize.builtin_mul_widen_odd
4811 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4812 {
4813 if (vect_print_dump_info (REPORT_DETAILS))
4814 fprintf (vect_dump, "Unordered widening operation detected.");
4815
4816 *code1 = *code2 = CALL_EXPR;
4817 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4818 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4819 return true;
4820 }
4821
4822 switch (code)
4823 {
4824 case WIDEN_MULT_EXPR:
4825 if (BYTES_BIG_ENDIAN)
4826 {
4827 c1 = VEC_WIDEN_MULT_HI_EXPR;
4828 c2 = VEC_WIDEN_MULT_LO_EXPR;
4829 }
4830 else
4831 {
4832 c2 = VEC_WIDEN_MULT_HI_EXPR;
4833 c1 = VEC_WIDEN_MULT_LO_EXPR;
4834 }
4835 break;
4836
4837 CASE_CONVERT:
4838 if (BYTES_BIG_ENDIAN)
4839 {
4840 c1 = VEC_UNPACK_HI_EXPR;
4841 c2 = VEC_UNPACK_LO_EXPR;
4842 }
4843 else
4844 {
4845 c2 = VEC_UNPACK_HI_EXPR;
4846 c1 = VEC_UNPACK_LO_EXPR;
4847 }
4848 break;
4849
4850 case FLOAT_EXPR:
4851 if (BYTES_BIG_ENDIAN)
4852 {
4853 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4854 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4855 }
4856 else
4857 {
4858 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4859 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4860 }
4861 break;
4862
4863 case FIX_TRUNC_EXPR:
4864 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4865 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4866 computing the operation. */
4867 return false;
4868
4869 default:
4870 gcc_unreachable ();
4871 }
4872
4873 if (code == FIX_TRUNC_EXPR)
4874 {
4875 /* The signedness is determined from output operand. */
4876 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4877 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
4878 }
4879 else
4880 {
4881 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4882 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4883 }
4884
4885 if (!optab1 || !optab2)
4886 return false;
4887
4888 vec_mode = TYPE_MODE (vectype);
4889 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4890 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4891 == CODE_FOR_nothing)
4892 return false;
4893
4894 /* Check if it's a multi-step conversion that can be done using intermediate
4895 types. */
4896 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4897 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4898 {
4899 int i;
4900 tree prev_type = vectype, intermediate_type;
4901 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4902 optab optab3, optab4;
4903
4904 if (!CONVERT_EXPR_CODE_P (code))
4905 return false;
4906
4907 *code1 = c1;
4908 *code2 = c2;
4909
4910 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4911 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4912 to get to WIDE_VECTYPE, and fail if we do not. */
4913 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4914 for (i = 0; i < 3; i++)
4915 {
4916 intermediate_mode = insn_data[icode1].operand[0].mode;
4917 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4918 TYPE_UNSIGNED (prev_type));
4919 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4920 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4921
4922 if (!optab3 || !optab4
4923 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4924 == CODE_FOR_nothing
4925 || insn_data[icode1].operand[0].mode != intermediate_mode
4926 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4927 == CODE_FOR_nothing
4928 || insn_data[icode2].operand[0].mode != intermediate_mode
4929 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4930 == CODE_FOR_nothing
4931 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4932 == CODE_FOR_nothing)
4933 return false;
4934
4935 VEC_quick_push (tree, *interm_types, intermediate_type);
4936 (*multi_step_cvt)++;
4937
4938 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4939 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4940 return true;
4941
4942 prev_type = intermediate_type;
4943 prev_mode = intermediate_mode;
4944 }
4945
4946 return false;
4947 }
4948
4949 *code1 = c1;
4950 *code2 = c2;
4951 return true;
4952 }
4953
4954
4955 /* Function supportable_narrowing_operation
4956
4957 Check whether an operation represented by the code CODE is a
4958 narrowing operation that is supported by the target platform in
4959 vector form (i.e., when operating on arguments of type VECTYPE_IN
4960 and producing a result of type VECTYPE_OUT).
4961
4962 Narrowing operations we currently support are NOP (CONVERT) and
4963 FIX_TRUNC. This function checks if these operations are supported by
4964 the target platform directly via vector tree-codes.
4965
4966 Output:
4967 - CODE1 is the code of a vector operation to be used when
4968 vectorizing the operation, if available.
4969 - MULTI_STEP_CVT determines the number of required intermediate steps in
4970 case of multi-step conversion (like int->short->char - in that case
4971 MULTI_STEP_CVT will be 1).
4972 - INTERM_TYPES contains the intermediate type required to perform the
4973 narrowing operation (short in the above example). */
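/* For illustration (a sketch, assuming the target provides the needed
   pack optab): narrowing a vector of ints to a vector of chars goes
   through an intermediate vector-of-shorts step, so on success CODE1 is
   VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT is 1 and INTERM_TYPES holds the
   intermediate short vector type.  */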
4974
4975 bool
4976 supportable_narrowing_operation (enum tree_code code,
4977 tree vectype_out, tree vectype_in,
4978 enum tree_code *code1, int *multi_step_cvt,
4979 VEC (tree, heap) **interm_types)
4980 {
4981 enum machine_mode vec_mode;
4982 enum insn_code icode1;
4983 optab optab1, interm_optab;
4984 tree vectype = vectype_in;
4985 tree narrow_vectype = vectype_out;
4986 enum tree_code c1;
4987 tree intermediate_type, prev_type;
4988 int i;
4989
4990 switch (code)
4991 {
4992 CASE_CONVERT:
4993 c1 = VEC_PACK_TRUNC_EXPR;
4994 break;
4995
4996 case FIX_TRUNC_EXPR:
4997 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4998 break;
4999
5000 case FLOAT_EXPR:
5001 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5002 tree code and optabs used for computing the operation. */
5003 return false;
5004
5005 default:
5006 gcc_unreachable ();
5007 }
5008
5009 if (code == FIX_TRUNC_EXPR)
5010 /* The signedness is determined from output operand. */
5011 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5012 else
5013 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5014
5015 if (!optab1)
5016 return false;
5017
5018 vec_mode = TYPE_MODE (vectype);
5019 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
5020 == CODE_FOR_nothing)
5021 return false;
5022
5023 /* Check if it's a multi-step conversion that can be done using intermediate
5024 types. */
5025 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5026 {
5027 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5028
5029 *code1 = c1;
5030 prev_type = vectype;
5031 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5032 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
5033 to get to NARROW_VECTYPE, and fail if we do not. */
5034 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5035 for (i = 0; i < 3; i++)
5036 {
5037 intermediate_mode = insn_data[icode1].operand[0].mode;
5038 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5039 TYPE_UNSIGNED (prev_type));
5040 interm_optab = optab_for_tree_code (c1, intermediate_type,
5041 optab_default);
5042 if (!interm_optab
5043 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
5044 == CODE_FOR_nothing
5045 || insn_data[icode1].operand[0].mode != intermediate_mode
5046 || (icode1
5047 = interm_optab->handlers[(int) intermediate_mode].insn_code)
5048 == CODE_FOR_nothing)
5049 return false;
5050
5051 VEC_quick_push (tree, *interm_types, intermediate_type);
5052 (*multi_step_cvt)++;
5053
5054 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5055 return true;
5056
5057 prev_type = intermediate_type;
5058 prev_mode = intermediate_mode;
5059 }
5060
5061 return false;
5062 }
5063
5064 *code1 = c1;
5065 return true;
5066 }