/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "tree-vectorizer.h"
#include "langhooks.h"


/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}


/* Function vect_stmt_relevant_p.

   Return true if STMT, in the loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
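
/* For example (illustrative, not from the original comments):

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;     <-- stores to memory (has a vdef): relevant
         s = s + b[i];        <-- S is also used after the loop: live
       }

   The store makes its stmt relevant; the reduction result S is live
   because it has a use outside the loop.  */
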
static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop-closed SSA form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}


/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */
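
/* For example (illustrative): given the stmt "a[i] = x", the use X is
   the copied RHS operand, so this returns true for X; the index I only
   feeds the address computation of a[i], so it returns false for I.  */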

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}


/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
     STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
     STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt,
     we skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant"
     will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
                      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
                         == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}


/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore
         the order of the results that they produce does not have to be
         kept.  */
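
      /* For example (illustrative): in "s += a[i] * b[i]" the multiply
         feeds only the reduction, so the order in which its partial
         vector results are combined into S does not matter.  */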

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}


/* Get the cost of one statement kind by calling the target's
   builtin_vectorization_cost hook.  */

static inline int
vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
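
/* Note (an assumption, not from the original comments): these costs are
   abstract units used only to compare vectorized against scalar code;
   targets that do not override the hook typically return small constant
   weights rather than cycle counts.  */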


/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of
   a single op.  Right now, this does not account for multiple insns that
   could be generated for the single vector op.  We will handle that
   shortly.  */
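
/* For example (illustrative): with ncopies == 2 and one constant operand,
   the inside-loop cost is 2 vector_stmt units and the outside cost is one
   vector_stmt unit for building the invariant vector.  */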

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}


/* Function vect_cost_strided_group_size.

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size
   is only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}


/* Function vect_model_store_cost.

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = DR_GROUP_FIRST_DR (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
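
  /* For example (illustrative): a group of 4 interleaved stores with
     ncopies == 1 is costed below as exact_log2 (4) * 4 == 8 interleave
     (vector_stmt) operations.  */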
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                          vectype,
                                                          DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_model_load_cost.

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
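
  /* For example (illustrative): as with stores, group_size == 4 gives
     exact_log2 (4) * 4 == 8 extract (vector_stmt) operations per copy.  */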
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
                      ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1
                       || slp_node),
                      &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                          vectype,
                                                          DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
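
/* For example (illustrative, with a hypothetical SSA numbering): given a
   V4SI constant vector {3,3,3,3} as VECTOR_VAR, this emits
   "cst_1 = {3,3,3,3};" in the loop preheader (or at GSI) and returns the
   new SSA name cst_1.  */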

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}


/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop.  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_get_vec_def_for_stmt_copy.

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a
   "copy" created in case the vectorized result cannot fit in one vector,
   and several copies of the vector-stmt are required.  In this case the
   vector-def is retrieved from the vector stmt recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and
   together computing 'VF' results in each iteration).  This function is
   called when vectorizing such a stmt (e.g. vectorizing S2 in the
   illustration below, in which VF=16 and nunits=4, so the number of copies
   required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It
   is obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}


/* Get vectorized definitions for the operands to create a copy of an
   original stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */
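
/* Typical use (illustrative): after vect_get_vec_defs has produced the
   defs for copy 0 of a vectorized stmt, each further copy j > 0 calls

     vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

   to replace the heads of the operand vectors with the defs of the
   next copy.  */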

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1 (from SLP_NODE if it is
   not NULL).  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_could_throw_p (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments; we do not have
     interesting builtin functions to vectorize with more than two
     arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
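
  /* Classify the call by comparing element counts of the input and output
     vector types.  For example (illustrative): V8HI arguments with a V4SI
     result mean each result vector consumes two input vectors (WIDEN),
     while V4SI arguments with a V8HI result mean each call consumes two
     input vectors per argument to fill one output vector (NARROW).  */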
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));
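
  /* Number of vector stmts needed to cover one vectorized iteration.
     For example (illustrative): VF == 8 with nunits_in == 4 needs two
     copies of the call; under NARROW the count is taken against the
     output vector, since each call already consumes two input vectors
     per argument.  */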
  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}


/* Function vect_gen_widened_results_half.

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support  */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}


/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
1648
1649static bool
1650vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1651 gimple *vec_stmt, slp_tree slp_node)
1652{
1653 tree vec_dest;
1654 tree scalar_dest;
1655 tree op0;
1656 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1658 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1659 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1660 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1661 tree new_temp;
1662 tree def;
1663 gimple def_stmt;
1664 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1665 gimple new_stmt = NULL;
1666 stmt_vec_info prev_stmt_info;
1667 int nunits_in;
1668 int nunits_out;
1669 tree vectype_out, vectype_in;
1670 int ncopies, j;
b690cc0f 1671 tree rhs_type;
ebfd146a
IR
1672 tree builtin_decl;
1673 enum { NARROW, NONE, WIDEN } modifier;
1674 int i;
1675 VEC(tree,heap) *vec_oprnds0 = NULL;
1676 tree vop0;
1677 VEC(tree,heap) *dummy = NULL;
1678 int dummy_int;
1679
1680 /* Is STMT a vectorizable conversion? */
1681
1682 /* FORNOW: unsupported in basic block SLP. */
1683 gcc_assert (loop_vinfo);
1684
1685 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1686 return false;
1687
 1688 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1689 return false;
1690
1691 if (!is_gimple_assign (stmt))
1692 return false;
1693
1694 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1695 return false;
1696
1697 code = gimple_assign_rhs_code (stmt);
1698 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1699 return false;
1700
1701 /* Check types of lhs and rhs. */
1702 scalar_dest = gimple_assign_lhs (stmt);
1703 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1704
1705 op0 = gimple_assign_rhs1 (stmt);
1706 rhs_type = TREE_TYPE (op0);
1707 /* Check the operands of the operation. */
1708 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1709 &def_stmt, &def, &dt[0], &vectype_in))
1710 {
1711 if (vect_print_dump_info (REPORT_DETAILS))
1712 fprintf (vect_dump, "use not simple.");
1713 return false;
1714 }
 1715 /* If op0 is an external or constant def, use a vector type of
 1716 the same size as the output vector type. */
 1717 if (!vectype_in)
 1718 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1719 if (vec_stmt)
1720 gcc_assert (vectype_in);
1721 if (!vectype_in)
1722 {
1723 if (vect_print_dump_info (REPORT_DETAILS))
1724 {
1725 fprintf (vect_dump, "no vectype for scalar type ");
1726 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1727 }
1728
1729 return false;
1730 }
1731
1732 /* FORNOW */
1733 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1734 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1735 if (nunits_in == nunits_out / 2)
1736 modifier = NARROW;
1737 else if (nunits_out == nunits_in)
1738 modifier = NONE;
1739 else if (nunits_out == nunits_in / 2)
1740 modifier = WIDEN;
1741 else
1742 return false;
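 /* For illustration only (hypothetical 128-bit target, not taken from this
 file): FLOAT_EXPR from V4SI to V4SF gives nunits_in == nunits_out (NONE);
 FIX_TRUNC_EXPR from V2DF to V4SI gives nunits_in == nunits_out / 2
 (NARROW); FLOAT_EXPR from V4SI to V2DF gives nunits_out == nunits_in / 2
 (WIDEN). */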
1743
1744 if (modifier == NARROW)
1745 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1746 else
1747 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1748
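 /* Worked example (hypothetical numbers): with a vectorization factor of 8,
 a NARROW conversion producing V4SI results needs ncopies = 8 / 4 = 2,
 each copy consuming two input vectors; for NONE or WIDEN with V4SI
 inputs, ncopies = 8 / 4 = 2 as well, counted by input vectors. */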
1749 /* Multiple types in SLP are handled by creating the appropriate number of
1750 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1751 case of SLP. */
1752 if (slp_node)
1753 ncopies = 1;
1754
1755 /* Sanity check: make sure that at least one copy of the vectorized stmt
1756 needs to be generated. */
1757 gcc_assert (ncopies >= 1);
1758
1759 /* Supportable by target? */
1760 if ((modifier == NONE
 1761 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
 1762 || (modifier == WIDEN
 1763 && !supportable_widening_operation (code, stmt,
 1764 vectype_out, vectype_in,
 1765 &decl1, &decl2,
 1766 &code1, &code2,
 1767 &dummy_int, &dummy))
 1768 || (modifier == NARROW
 1769 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1770 &code1, &dummy_int, &dummy)))
1771 {
1772 if (vect_print_dump_info (REPORT_DETAILS))
1773 fprintf (vect_dump, "conversion not supported by target.");
1774 return false;
1775 }
1776
1777 if (modifier != NONE)
1778 {
1779 /* FORNOW: SLP not supported. */
1780 if (STMT_SLP_TYPE (stmt_info))
 1781 return false;
1782 }
1783
1784 if (!vec_stmt) /* transformation not required. */
1785 {
1786 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1787 return true;
1788 }
1789
1790 /** Transform. **/
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "transform conversion.");
1793
1794 /* Handle def. */
1795 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1796
1797 if (modifier == NONE && !slp_node)
1798 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1799
1800 prev_stmt_info = NULL;
1801 switch (modifier)
1802 {
1803 case NONE:
1804 for (j = 0; j < ncopies; j++)
1805 {
 1806 if (j == 0)
 1807 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1808 else
1809 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1810
1811 builtin_decl =
1812 targetm.vectorize.builtin_conversion (code,
1813 vectype_out, vectype_in);
 1814 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
 1815 {
 1816 /* Arguments are ready. Create the new vector stmt. */
1817 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1818 new_temp = make_ssa_name (vec_dest, new_stmt);
1819 gimple_call_set_lhs (new_stmt, new_temp);
1820 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1821 if (slp_node)
1822 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1823 }
1824
1825 if (j == 0)
1826 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1827 else
1828 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1829 prev_stmt_info = vinfo_for_stmt (new_stmt);
1830 }
1831 break;
1832
1833 case WIDEN:
1834 /* In case the vectorization factor (VF) is bigger than the number
1835 of elements that we can fit in a vectype (nunits), we have to
1836 generate more than one vector stmt - i.e - we need to "unroll"
1837 the vector stmt by a factor VF/nunits. */
1838 for (j = 0; j < ncopies; j++)
1839 {
1840 if (j == 0)
1841 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1842 else
1843 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1844
1845 /* Generate first half of the widened result: */
1846 new_stmt
 1847 = vect_gen_widened_results_half (code1, decl1,
1848 vec_oprnd0, vec_oprnd1,
1849 unary_op, vec_dest, gsi, stmt);
1850 if (j == 0)
1851 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1852 else
1853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1854 prev_stmt_info = vinfo_for_stmt (new_stmt);
1855
1856 /* Generate second half of the widened result: */
1857 new_stmt
1858 = vect_gen_widened_results_half (code2, decl2,
1859 vec_oprnd0, vec_oprnd1,
1860 unary_op, vec_dest, gsi, stmt);
1861 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1862 prev_stmt_info = vinfo_for_stmt (new_stmt);
1863 }
1864 break;
1865
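 /* Sketch of one WIDEN copy (illustrative; the actual codes come from
 supportable_widening_operation), e.g. for V4SI -> V2DF:

 vd_lo = VEC_UNPACK_FLOAT_LO_EXPR <vx0>;
 vd_hi = VEC_UNPACK_FLOAT_HI_EXPR <vx0>;

 so each copy produces two vector stmts chained via
 STMT_VINFO_RELATED_STMT. */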
1866 case NARROW:
1867 /* In case the vectorization factor (VF) is bigger than the number
1868 of elements that we can fit in a vectype (nunits), we have to
1869 generate more than one vector stmt - i.e - we need to "unroll"
1870 the vector stmt by a factor VF/nunits. */
1871 for (j = 0; j < ncopies; j++)
1872 {
1873 /* Handle uses. */
1874 if (j == 0)
1875 {
1876 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1877 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1878 }
1879 else
1880 {
1881 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1882 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1883 }
1884
1885 /* Arguments are ready. Create the new vector stmt. */
1886 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1887 vec_oprnd1);
1888 new_temp = make_ssa_name (vec_dest, new_stmt);
1889 gimple_assign_set_lhs (new_stmt, new_temp);
1890 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1891
1892 if (j == 0)
1893 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1894 else
1895 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1896
1897 prev_stmt_info = vinfo_for_stmt (new_stmt);
1898 }
1899
1900 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1901 }
1902
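 /* Sketch of one NARROW copy (illustrative): two input defs are packed into
 one output def, e.g. for V2DF -> V4SI:

 vs = VEC_PACK_FIX_TRUNC_EXPR <vec_oprnd0, vec_oprnd1>;

 which is why vec_oprnd0 and vec_oprnd1 leapfrog through the stmt copies
 in the loop above. */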
1903 if (vec_oprnds0)
 1904 VEC_free (tree, heap, vec_oprnds0);
1905
1906 return true;
1907}
1908
1909
1910/* Function vectorizable_assignment.
1911
1912 Check if STMT performs an assignment (copy) that can be vectorized.
1913 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
 1914 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1915 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1916
1917static bool
1918vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1919 gimple *vec_stmt, slp_tree slp_node)
1920{
1921 tree vec_dest;
1922 tree scalar_dest;
1923 tree op;
1924 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1925 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1926 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1927 tree new_temp;
1928 tree def;
1929 gimple def_stmt;
1930 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
 1931 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
 1932 int ncopies;
 1933 int i, j;
1934 VEC(tree,heap) *vec_oprnds = NULL;
1935 tree vop;
 1936 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1937 gimple new_stmt = NULL;
1938 stmt_vec_info prev_stmt_info = NULL;
1939 enum tree_code code;
1940 tree vectype_in;
1941
1942 /* Multiple types in SLP are handled by creating the appropriate number of
1943 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1944 case of SLP. */
1945 if (slp_node)
1946 ncopies = 1;
1947 else
1948 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1949
1950 gcc_assert (ncopies >= 1);
1951
 1952 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1953 return false;
1954
 1955 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1956 return false;
1957
1958 /* Is vectorizable assignment? */
1959 if (!is_gimple_assign (stmt))
1960 return false;
1961
1962 scalar_dest = gimple_assign_lhs (stmt);
1963 if (TREE_CODE (scalar_dest) != SSA_NAME)
1964 return false;
1965
 1966 code = gimple_assign_rhs_code (stmt);
 1967 if (gimple_assign_single_p (stmt)
1968 || code == PAREN_EXPR
1969 || CONVERT_EXPR_CODE_P (code))
1970 op = gimple_assign_rhs1 (stmt);
1971 else
1972 return false;
1973
1974 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1975 &def_stmt, &def, &dt[0], &vectype_in))
1976 {
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 fprintf (vect_dump, "use not simple.");
1979 return false;
1980 }
1981
1982 /* We can handle NOP_EXPR conversions that do not change the number
1983 of elements or the vector size. */
1984 if (CONVERT_EXPR_CODE_P (code)
1985 && (!vectype_in
1986 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1987 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1988 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1989 return false;
1990
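 /* For illustration (hypothetical types): an int -> unsigned int copy keeps
 V4SI's element count and mode size, so it is accepted and later emitted
 through a VIEW_CONVERT_EXPR; int -> short (V4SI -> V8HI) changes the
 element count and is rejected here, leaving it to the type-demotion
 path. */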
1991 if (!vec_stmt) /* transformation not required. */
1992 {
1993 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1994 if (vect_print_dump_info (REPORT_DETAILS))
1995 fprintf (vect_dump, "=== vectorizable_assignment ===");
1996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1997 return true;
1998 }
1999
2000 /** Transform. **/
2001 if (vect_print_dump_info (REPORT_DETAILS))
2002 fprintf (vect_dump, "transform assignment.");
2003
2004 /* Handle def. */
2005 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2006
2007 /* Handle use. */
 2008 for (j = 0; j < ncopies; j++)
 2009 {
2010 /* Handle uses. */
2011 if (j == 0)
2012 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2013 else
2014 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2015
 2016 /* Arguments are ready. Create the new vector stmt. */
 2017 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
 2018 {
 2019 if (CONVERT_EXPR_CODE_P (code))
 2020 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2021 new_stmt = gimple_build_assign (vec_dest, vop);
2022 new_temp = make_ssa_name (vec_dest, new_stmt);
2023 gimple_assign_set_lhs (new_stmt, new_temp);
2024 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2025 if (slp_node)
2026 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2027 }
2028
2029 if (slp_node)
2030 continue;
2031
2032 if (j == 0)
2033 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2034 else
2035 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2036
2037 prev_stmt_info = vinfo_for_stmt (new_stmt);
2038 }
2039
2040 VEC_free (tree, heap, vec_oprnds);
2041 return true;
2042}
2043
2044/* Function vectorizable_operation.
2045
2046 Check if STMT performs a binary or unary operation that can be vectorized.
2047 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
 2048 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2049 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2050
2051static bool
2052vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2053 gimple *vec_stmt, slp_tree slp_node)
2054{
2055 tree vec_dest;
2056 tree scalar_dest;
2057 tree op0, op1 = NULL;
2058 tree vec_oprnd1 = NULL_TREE;
2059 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
 2060 tree vectype;
2061 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2062 enum tree_code code;
2063 enum machine_mode vec_mode;
2064 tree new_temp;
2065 int op_type;
2066 optab optab;
2067 int icode;
2068 enum machine_mode optab_op2_mode;
2069 tree def;
2070 gimple def_stmt;
2071 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2072 gimple new_stmt = NULL;
2073 stmt_vec_info prev_stmt_info;
 2074 int nunits_in;
2075 int nunits_out;
2076 tree vectype_out;
2077 int ncopies;
2078 int j, i;
2079 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2080 tree vop0, vop1;
2081 unsigned int k;
 2082 bool scalar_shift_arg = false;
2083 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2084 int vf;
2085
 2086 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2087 return false;
2088
 2089 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2090 return false;
2091
2092 /* Is STMT a vectorizable binary/unary operation? */
2093 if (!is_gimple_assign (stmt))
2094 return false;
2095
2096 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2097 return false;
2098
2099 code = gimple_assign_rhs_code (stmt);
2100
2101 /* For pointer addition, we should use the normal plus for
2102 the vector addition. */
2103 if (code == POINTER_PLUS_EXPR)
2104 code = PLUS_EXPR;
2105
2106 /* Support only unary or binary operations. */
2107 op_type = TREE_CODE_LENGTH (code);
2108 if (op_type != unary_op && op_type != binary_op)
2109 {
2110 if (vect_print_dump_info (REPORT_DETAILS))
2111 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2112 return false;
2113 }
2114
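 /* E.g. TREE_CODE_LENGTH (PLUS_EXPR) == 2 (binary_op) and
 TREE_CODE_LENGTH (NEGATE_EXPR) == 1 (unary_op); ternary codes such as
 COND_EXPR are rejected here and handled elsewhere. */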
2115 scalar_dest = gimple_assign_lhs (stmt);
2116 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2117
 2118 op0 = gimple_assign_rhs1 (stmt);
2119 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2120 &def_stmt, &def, &dt[0], &vectype))
2121 {
2122 if (vect_print_dump_info (REPORT_DETAILS))
2123 fprintf (vect_dump, "use not simple.");
2124 return false;
2125 }
 2126 /* If op0 is an external or constant def, use a vector type with
 2127 the same size as the output vector type. */
2128 if (!vectype)
2129 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2130 if (vec_stmt)
2131 gcc_assert (vectype);
2132 if (!vectype)
2133 {
2134 if (vect_print_dump_info (REPORT_DETAILS))
2135 {
2136 fprintf (vect_dump, "no vectype for scalar type ");
2137 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2138 }
2139
2140 return false;
2141 }
2142
2143 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2144 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2145 if (nunits_out != nunits_in)
2146 return false;
2147
2148 if (op_type == binary_op)
2149 {
2150 op1 = gimple_assign_rhs2 (stmt);
 2151 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
 2152 &dt[1]))
2153 {
2154 if (vect_print_dump_info (REPORT_DETAILS))
2155 fprintf (vect_dump, "use not simple.");
2156 return false;
2157 }
2158 }
2159
2160 if (loop_vinfo)
2161 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2162 else
2163 vf = 1;
2164
2165 /* Multiple types in SLP are handled by creating the appropriate number of
 2166 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2167 case of SLP. */
2168 if (slp_node)
2169 ncopies = 1;
2170 else
2171 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2172
2173 gcc_assert (ncopies >= 1);
2174
 2175 /* If this is a shift/rotate, determine whether the shift amount is a vector
 2176 or a scalar. If the shift/rotate amount is a vector, use the vector/vector
2177 shift optabs. */
2178 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2179 || code == RROTATE_EXPR)
2180 {
 2181 /* vector shifted by vector */
 2182 if (dt[1] == vect_internal_def)
2183 {
2184 optab = optab_for_tree_code (code, vectype, optab_vector);
2185 if (vect_print_dump_info (REPORT_DETAILS))
2186 fprintf (vect_dump, "vector/vector shift/rotate found.");
2187 }
2188
 2189 /* See if the machine has a vector shifted by scalar insn, and if not,
 2190 then see if it has a vector shifted by vector insn. */
 2191 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2192 {
2193 optab = optab_for_tree_code (code, vectype, optab_scalar);
2194 if (optab
 2195 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2196 {
2197 scalar_shift_arg = true;
2198 if (vect_print_dump_info (REPORT_DETAILS))
2199 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2200 }
2201 else
2202 {
2203 optab = optab_for_tree_code (code, vectype, optab_vector);
 2204 if (optab
 2205 && (optab_handler (optab, TYPE_MODE (vectype))
 2206 != CODE_FOR_nothing))
2207 {
2208 if (vect_print_dump_info (REPORT_DETAILS))
2209 fprintf (vect_dump, "vector/vector shift/rotate found.");
2210
2211 /* Unlike the other binary operators, shifts/rotates have
2212 the rhs being int, instead of the same type as the lhs,
2213 so make sure the scalar is the right type if we are
2214 dealing with vectors of short/char. */
2215 if (dt[1] == vect_constant_def)
2216 op1 = fold_convert (TREE_TYPE (vectype), op1);
2217 }
2218 }
2219 }
2220
2221 else
2222 {
2223 if (vect_print_dump_info (REPORT_DETAILS))
2224 fprintf (vect_dump, "operand mode requires invariant argument.");
2225 return false;
2226 }
2227 }
2228 else
2229 optab = optab_for_tree_code (code, vectype, optab_default);
2230
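 /* For illustration (hypothetical loops): in a[i] = b[i] << 3 the shift
 amount is vect_constant_def, so the vector/scalar optab is tried first
 and op1 may stay scalar; in a[i] = b[i] << c[i] it is vect_internal_def
 and the vector/vector optab must be supported. */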
2231 /* Supportable by target? */
2232 if (!optab)
2233 {
2234 if (vect_print_dump_info (REPORT_DETAILS))
2235 fprintf (vect_dump, "no optab.");
2236 return false;
2237 }
2238 vec_mode = TYPE_MODE (vectype);
 2239 icode = (int) optab_handler (optab, vec_mode);
2240 if (icode == CODE_FOR_nothing)
2241 {
2242 if (vect_print_dump_info (REPORT_DETAILS))
2243 fprintf (vect_dump, "op not supported by target.");
2244 /* Check only during analysis. */
2245 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
 2246 || (vf < vect_min_worthwhile_factor (code)
2247 && !vec_stmt))
2248 return false;
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "proceeding using word mode.");
2251 }
2252
 2253 /* Worthwhile without SIMD support? Check only during analysis. */
 2254 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
 2255 && vf < vect_min_worthwhile_factor (code)
2256 && !vec_stmt)
2257 {
2258 if (vect_print_dump_info (REPORT_DETAILS))
2259 fprintf (vect_dump, "not worthwhile without SIMD support.");
2260 return false;
2261 }
2262
2263 if (!vec_stmt) /* transformation not required. */
2264 {
2265 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2266 if (vect_print_dump_info (REPORT_DETAILS))
2267 fprintf (vect_dump, "=== vectorizable_operation ===");
2268 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2269 return true;
2270 }
2271
2272 /** Transform. **/
2273
2274 if (vect_print_dump_info (REPORT_DETAILS))
2275 fprintf (vect_dump, "transform binary/unary operation.");
2276
2277 /* Handle def. */
2278 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2279
 2280 /* Allocate VECs for vector operands. In case of SLP, vector operands are
 2281 created in the previous stages of the recursion, so no allocation is
 2282 needed, except for the case of shift with scalar shift argument. In that
 2283 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
 2284 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
 2285 In case of loop-based vectorization we allocate VECs of size 1. We
 2286 allocate VEC_OPRNDS1 only in case of binary operation. */
2287 if (!slp_node)
2288 {
2289 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2290 if (op_type == binary_op)
2291 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2292 }
2293 else if (scalar_shift_arg)
 2294 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2295
2296 /* In case the vectorization factor (VF) is bigger than the number
2297 of elements that we can fit in a vectype (nunits), we have to generate
2298 more than one vector stmt - i.e - we need to "unroll" the
 2299 vector stmt by a factor VF/nunits. In doing so, we record a pointer
 2300 from one copy of the vector stmt to the next, in the field
 2301 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
 2302 stages to find the correct vector defs to be used when vectorizing
2303 stmts that use the defs of the current stmt. The example below
2304 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2305 we need to create 4 vectorized stmts):
2306
2307 before vectorization:
2308 RELATED_STMT VEC_STMT
2309 S1: x = memref - -
2310 S2: z = x + 1 - -
2311
2312 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2313 there):
2314 RELATED_STMT VEC_STMT
2315 VS1_0: vx0 = memref0 VS1_1 -
2316 VS1_1: vx1 = memref1 VS1_2 -
2317 VS1_2: vx2 = memref2 VS1_3 -
2318 VS1_3: vx3 = memref3 - -
2319 S1: x = load - VS1_0
2320 S2: z = x + 1 - -
2321
2322 step2: vectorize stmt S2 (done here):
2323 To vectorize stmt S2 we first need to find the relevant vector
 2324 def for the first operand 'x'. This is, as usual, obtained from
 2325 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2326 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2327 relevant vector def 'vx0'. Having found 'vx0' we can generate
2328 the vector stmt VS2_0, and as usual, record it in the
2329 STMT_VINFO_VEC_STMT of stmt S2.
2330 When creating the second copy (VS2_1), we obtain the relevant vector
2331 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2332 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2333 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
 2334 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
 2335 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2336 chain of stmts and pointers:
2337 RELATED_STMT VEC_STMT
2338 VS1_0: vx0 = memref0 VS1_1 -
2339 VS1_1: vx1 = memref1 VS1_2 -
2340 VS1_2: vx2 = memref2 VS1_3 -
2341 VS1_3: vx3 = memref3 - -
2342 S1: x = load - VS1_0
2343 VS2_0: vz0 = vx0 + v1 VS2_1 -
2344 VS2_1: vz1 = vx1 + v1 VS2_2 -
2345 VS2_2: vz2 = vx2 + v1 VS2_3 -
2346 VS2_3: vz3 = vx3 + v1 - -
2347 S2: z = x + 1 - VS2_0 */
2348
2349 prev_stmt_info = NULL;
2350 for (j = 0; j < ncopies; j++)
2351 {
2352 /* Handle uses. */
2353 if (j == 0)
2354 {
2355 if (op_type == binary_op && scalar_shift_arg)
2356 {
 2357 /* Vector shl and shr insn patterns can be defined with scalar
 2358 operand 2 (shift operand). In this case, use constant or loop
 2359 invariant op1 directly, without extending it to vector mode
2360 first. */
2361 optab_op2_mode = insn_data[icode].operand[2].mode;
2362 if (!VECTOR_MODE_P (optab_op2_mode))
2363 {
2364 if (vect_print_dump_info (REPORT_DETAILS))
2365 fprintf (vect_dump, "operand 1 using scalar mode.");
2366 vec_oprnd1 = op1;
2367 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2368 if (slp_node)
2369 {
2370 /* Store vec_oprnd1 for every vector stmt to be created
2371 for SLP_NODE. We check during the analysis that all
2372 the shift arguments are the same.
2373 TODO: Allow different constants for different vector
2374 stmts generated for an SLP instance. */
2375 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2376 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2377 }
2378 }
2379 }
2380
2381 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2382 (a special case for certain kind of vector shifts); otherwise,
2383 operand 1 should be of a vector type (the usual case). */
2384 if (op_type == binary_op && !vec_oprnd1)
 2385 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2386 slp_node);
2387 else
 2388 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2389 slp_node);
2390 }
2391 else
2392 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2393
2394 /* Arguments are ready. Create the new vector stmt. */
 2395 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2396 {
2397 vop1 = ((op_type == binary_op)
2398 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2399 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2400 new_temp = make_ssa_name (vec_dest, new_stmt);
2401 gimple_assign_set_lhs (new_stmt, new_temp);
2402 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2403 if (slp_node)
2404 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2405 }
2406
2407 if (slp_node)
2408 continue;
2409
2410 if (j == 0)
2411 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2412 else
2413 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2414 prev_stmt_info = vinfo_for_stmt (new_stmt);
2415 }
2416
2417 VEC_free (tree, heap, vec_oprnds0);
2418 if (vec_oprnds1)
2419 VEC_free (tree, heap, vec_oprnds1);
2420
2421 return true;
2422}
2423
2424
2425/* Get vectorized definitions for loop-based vectorization. For the first
2426 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2427 scalar operand), and for the rest we get a copy with
2428 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2429 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2430 The vectors are collected into VEC_OPRNDS. */
2431
2432static void
2433vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2434 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2435{
2436 tree vec_oprnd;
2437
2438 /* Get first vector operand. */
2439 /* All the vector operands except the very first one (that is scalar oprnd)
2440 are stmt copies. */
 2441 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2442 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2443 else
2444 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2445
2446 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2447
2448 /* Get second vector operand. */
2449 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2450 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2451
2452 *oprnd = vec_oprnd;
2453
 2454 /* For conversion in multiple steps, continue to get operands
2455 recursively. */
2456 if (multi_step_cvt)
 2457 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2458}
2459
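/* Worked example (hypothetical numbers): for a two-step narrowing (one
 intermediate type) the caller passes vect_pow2 (1) - 1 == 1 as
 MULTI_STEP_CVT here, so the function runs twice and collects four vector
 defs in VEC_OPRNDS - two per invocation: the def built from the scalar
 operand (or from the previous copy) plus one stmt copy of it. */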
2460
2461/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
 2462 For multi-step conversions store the resulting vectors and call the function
2463 recursively. */
2464
2465static void
2466vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2467 int multi_step_cvt, gimple stmt,
2468 VEC (tree, heap) *vec_dsts,
2469 gimple_stmt_iterator *gsi,
2470 slp_tree slp_node, enum tree_code code,
2471 stmt_vec_info *prev_stmt_info)
2472{
2473 unsigned int i;
2474 tree vop0, vop1, new_tmp, vec_dest;
2475 gimple new_stmt;
2476 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2477
 2478 vec_dest = VEC_pop (tree, vec_dsts);
2479
2480 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2481 {
2482 /* Create demotion operation. */
2483 vop0 = VEC_index (tree, *vec_oprnds, i);
2484 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2485 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2486 new_tmp = make_ssa_name (vec_dest, new_stmt);
2487 gimple_assign_set_lhs (new_stmt, new_tmp);
2488 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2489
2490 if (multi_step_cvt)
2491 /* Store the resulting vector for next recursive call. */
 2492 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2493 else
2494 {
 2495 /* This is the last step of the conversion sequence. Store the
2496 vectors in SLP_NODE or in vector info of the scalar statement
2497 (or in STMT_VINFO_RELATED_STMT chain). */
2498 if (slp_node)
2499 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2500 else
2501 {
2502 if (!*prev_stmt_info)
2503 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2504 else
2505 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2506
2507 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2508 }
2509 }
2510 }
2511
 2512 /* For multi-step demotion operations we first generate demotion operations
 2513 from the source type to the intermediate types, and then combine the
 2514 results (stored in VEC_OPRNDS) in a demotion operation to the destination
 2515 type. */
2516 if (multi_step_cvt)
2517 {
 2518 /* At each level of recursion we have half of the operands we had at the
 2519 previous level. */
2520 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
 2521 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2522 stmt, vec_dsts, gsi, slp_node,
2523 code, prev_stmt_info);
2524 }
2525}
2526
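/* Worked example (hypothetical types, 128-bit vectors): narrowing int to
 char through the intermediate type short (MULTI_STEP_CVT == 1) first
 packs four V4SI operands pairwise into two V8HI vectors, truncates
 VEC_OPRNDS to those two, and then recurses once to pack them into the
 final V16QI result. */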
2527
2528/* Function vectorizable_type_demotion
2529
2530 Check if STMT performs a binary or unary operation that involves
2531 type demotion, and if it can be vectorized.
2532 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
 2533 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2534 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2535
2536static bool
2537vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2538 gimple *vec_stmt, slp_tree slp_node)
2539{
2540 tree vec_dest;
2541 tree scalar_dest;
2542 tree op0;
2543 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2544 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2545 enum tree_code code, code1 = ERROR_MARK;
2546 tree def;
2547 gimple def_stmt;
2548 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2549 stmt_vec_info prev_stmt_info;
2550 int nunits_in;
2551 int nunits_out;
2552 tree vectype_out;
2553 int ncopies;
2554 int j, i;
2555 tree vectype_in;
2556 int multi_step_cvt = 0;
2557 VEC (tree, heap) *vec_oprnds0 = NULL;
2558 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2559 tree last_oprnd, intermediate_type;
2560
2561 /* FORNOW: not supported by basic block SLP vectorization. */
2562 gcc_assert (loop_vinfo);
2563
2564 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2565 return false;
2566
 2567 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2568 return false;
2569
2570 /* Is STMT a vectorizable type-demotion operation? */
2571 if (!is_gimple_assign (stmt))
2572 return false;
2573
2574 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2575 return false;
2576
2577 code = gimple_assign_rhs_code (stmt);
2578 if (!CONVERT_EXPR_CODE_P (code))
2579 return false;
2580
2581 scalar_dest = gimple_assign_lhs (stmt);
2582 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2583
2584 /* Check the operands of the operation. */
 2585 op0 = gimple_assign_rhs1 (stmt);
2586 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2587 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2588 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2589 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2590 && CONVERT_EXPR_CODE_P (code))))
2591 return false;
2592 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2593 &def_stmt, &def, &dt[0], &vectype_in))
2594 {
2595 if (vect_print_dump_info (REPORT_DETAILS))
2596 fprintf (vect_dump, "use not simple.");
2597 return false;
2598 }
 2599 /* If op0 is an external def, use a vector type with the
 2600 same size as the output vector type if possible. */
2601 if (!vectype_in)
2602 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2603 if (vec_stmt)
2604 gcc_assert (vectype_in);
 2605 if (!vectype_in)
2606 {
2607 if (vect_print_dump_info (REPORT_DETAILS))
2608 {
2609 fprintf (vect_dump, "no vectype for scalar type ");
2610 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2611 }
2612
2613 return false;
2614 }
2615
 2616 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2617 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2618 if (nunits_in >= nunits_out)
2619 return false;
2620
2621 /* Multiple types in SLP are handled by creating the appropriate number of
 2622 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2623 case of SLP. */
2624 if (slp_node)
2625 ncopies = 1;
2626 else
2627 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2628 gcc_assert (ncopies >= 1);
2629
 2630 /* Supportable by target? */
2631 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2632 &code1, &multi_step_cvt, &interm_types))
2633 return false;
2634
2635 if (!vec_stmt) /* transformation not required. */
2636 {
2637 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2638 if (vect_print_dump_info (REPORT_DETAILS))
2639 fprintf (vect_dump, "=== vectorizable_demotion ===");
2640 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2641 return true;
2642 }
2643
2644 /** Transform. **/
2645 if (vect_print_dump_info (REPORT_DETAILS))
2646 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2647 ncopies);
2648
2649 /* In case of multi-step demotion, we first generate demotion operations to
 2650 the intermediate types, and then from those types to the final one.
 2651 We create vector destinations for the intermediate type (TYPES) received
 2652 from supportable_narrowing_operation, and store them in the correct order
2653 for future use in vect_create_vectorized_demotion_stmts(). */
2654 if (multi_step_cvt)
2655 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2656 else
2657 vec_dsts = VEC_alloc (tree, heap, 1);
2658
2659 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2660 VEC_quick_push (tree, vec_dsts, vec_dest);
2661
2662 if (multi_step_cvt)
2663 {
 2664 for (i = VEC_length (tree, interm_types) - 1;
2665 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2666 {
 2667 vec_dest = vect_create_destination_var (scalar_dest,
2668 intermediate_type);
2669 VEC_quick_push (tree, vec_dsts, vec_dest);
2670 }
2671 }
2672
2673 /* In case the vectorization factor (VF) is bigger than the number
2674 of elements that we can fit in a vectype (nunits), we have to generate
2675 more than one vector stmt - i.e - we need to "unroll" the
2676 vector stmt by a factor VF/nunits. */
2677 last_oprnd = op0;
2678 prev_stmt_info = NULL;
2679 for (j = 0; j < ncopies; j++)
2680 {
2681 /* Handle uses. */
2682 if (slp_node)
 2683 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2684 else
2685 {
2686 VEC_free (tree, heap, vec_oprnds0);
2687 vec_oprnds0 = VEC_alloc (tree, heap,
2688 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
 2689 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2690 vect_pow2 (multi_step_cvt) - 1);
2691 }
2692
2693 /* Arguments are ready. Create the new vector stmts. */
2694 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
 2695 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
 2696 multi_step_cvt, stmt, tmp_vec_dsts,
 2697 gsi, slp_node, code1,
2698 &prev_stmt_info);
2699 }
2700
2701 VEC_free (tree, heap, vec_oprnds0);
2702 VEC_free (tree, heap, vec_dsts);
2703 VEC_free (tree, heap, tmp_vec_dsts);
2704 VEC_free (tree, heap, interm_types);
2705
2706 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2707 return true;
2708}
2709
2710
2711/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
 2712 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2713 the resulting vectors and call the function recursively. */
2714
2715static void
2716vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2717 VEC (tree, heap) **vec_oprnds1,
2718 int multi_step_cvt, gimple stmt,
2719 VEC (tree, heap) *vec_dsts,
2720 gimple_stmt_iterator *gsi,
2721 slp_tree slp_node, enum tree_code code1,
 2722 enum tree_code code2, tree decl1,
2723 tree decl2, int op_type,
2724 stmt_vec_info *prev_stmt_info)
2725{
2726 int i;
2727 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2728 gimple new_stmt1, new_stmt2;
2729 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2730 VEC (tree, heap) *vec_tmp;
2731
2732 vec_dest = VEC_pop (tree, vec_dsts);
2733 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2734
 2735 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2736 {
2737 if (op_type == binary_op)
2738 vop1 = VEC_index (tree, *vec_oprnds1, i);
2739 else
2740 vop1 = NULL_TREE;
2741
2742 /* Generate the two halves of promotion operation. */
 2743 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2744 op_type, vec_dest, gsi, stmt);
2745 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2746 op_type, vec_dest, gsi, stmt);
2747 if (is_gimple_call (new_stmt1))
2748 {
2749 new_tmp1 = gimple_call_lhs (new_stmt1);
2750 new_tmp2 = gimple_call_lhs (new_stmt2);
2751 }
2752 else
2753 {
2754 new_tmp1 = gimple_assign_lhs (new_stmt1);
2755 new_tmp2 = gimple_assign_lhs (new_stmt2);
2756 }
2757
2758 if (multi_step_cvt)
2759 {
2760 /* Store the results for the recursive call. */
2761 VEC_quick_push (tree, vec_tmp, new_tmp1);
2762 VEC_quick_push (tree, vec_tmp, new_tmp2);
2763 }
2764 else
2765 {
 2766 /* Last step of promotion sequence - store the results. */
2767 if (slp_node)
2768 {
2769 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2770 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2771 }
2772 else
2773 {
2774 if (!*prev_stmt_info)
2775 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2776 else
2777 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2778
2779 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2780 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2781 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2782 }
2783 }
2784 }
2785
2786 if (multi_step_cvt)
2787 {
 2788 /* For a multi-step promotion operation we call the function
 2789 recursively for every stage. We start from the input type,
 2790 create promotion operations to the intermediate types, and then
 2791 create promotions to the output type. */
2792 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2793 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2794 multi_step_cvt - 1, stmt,
2795 vec_dsts, gsi, slp_node, code1,
 2796 code2, decl1, decl2, op_type,
2797 prev_stmt_info);
2798 }
2799
2800 VEC_free (tree, heap, vec_tmp);
2801}
2802
2803
2804/* Function vectorizable_type_promotion
2805
2806 Check if STMT performs a binary or unary operation that involves
2807 type promotion, and if it can be vectorized.
2808 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
 2809 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2810 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2811
2812static bool
2813vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2814 gimple *vec_stmt, slp_tree slp_node)
2815{
2816 tree vec_dest;
2817 tree scalar_dest;
2818 tree op0, op1 = NULL;
2819 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2820 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2821 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2822 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2823 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
 2824 int op_type;
2825 tree def;
2826 gimple def_stmt;
2827 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2828 stmt_vec_info prev_stmt_info;
2829 int nunits_in;
2830 int nunits_out;
2831 tree vectype_out;
2832 int ncopies;
2833 int j, i;
2834 tree vectype_in;
2835 tree intermediate_type = NULL_TREE;
2836 int multi_step_cvt = 0;
2837 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2838 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2839
2840 /* FORNOW: not supported by basic block SLP vectorization. */
2841 gcc_assert (loop_vinfo);
2842
2843 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2844 return false;
2845
 2846 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2847 return false;
2848
2849 /* Is STMT a vectorizable type-promotion operation? */
2850 if (!is_gimple_assign (stmt))
2851 return false;
2852
2853 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2854 return false;
2855
2856 code = gimple_assign_rhs_code (stmt);
2857 if (!CONVERT_EXPR_CODE_P (code)
2858 && code != WIDEN_MULT_EXPR)
2859 return false;
2860
2861 scalar_dest = gimple_assign_lhs (stmt);
2862 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2863
2864 /* Check the operands of the operation. */
 2865 op0 = gimple_assign_rhs1 (stmt);
2866 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2867 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2868 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2869 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2870 && CONVERT_EXPR_CODE_P (code))))
2871 return false;
2872 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2873 &def_stmt, &def, &dt[0], &vectype_in))
2874 {
2875 if (vect_print_dump_info (REPORT_DETAILS))
2876 fprintf (vect_dump, "use not simple.");
2877 return false;
2878 }
 2879 /* If op0 is an external or constant def, use a vector type with
2880 the same size as the output vector type. */
2881 if (!vectype_in)
2882 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2883 if (vec_stmt)
2884 gcc_assert (vectype_in);
 2885 if (!vectype_in)
2886 {
2887 if (vect_print_dump_info (REPORT_DETAILS))
2888 {
2889 fprintf (vect_dump, "no vectype for scalar type ");
2890 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2891 }
2892
2893 return false;
2894 }
2895
 2896 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2897 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2898 if (nunits_in <= nunits_out)
2899 return false;
2900
2901 /* Multiple types in SLP are handled by creating the appropriate number of
 2902 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2903 case of SLP. */
2904 if (slp_node)
2905 ncopies = 1;
2906 else
2907 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2908
2909 gcc_assert (ncopies >= 1);
2910
2911 op_type = TREE_CODE_LENGTH (code);
2912 if (op_type == binary_op)
2913 {
2914 op1 = gimple_assign_rhs2 (stmt);
 2915 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2916 {
2917 if (vect_print_dump_info (REPORT_DETAILS))
2918 fprintf (vect_dump, "use not simple.");
2919 return false;
2920 }
2921 }
2922
2923 /* Supportable by target? */
 2924 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2925 &decl1, &decl2, &code1, &code2,
2926 &multi_step_cvt, &interm_types))
2927 return false;
2928
2929 /* Binary widening operation can only be supported directly by the
2930 architecture. */
2931 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2932
2933 if (!vec_stmt) /* transformation not required. */
2934 {
2935 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2936 if (vect_print_dump_info (REPORT_DETAILS))
2937 fprintf (vect_dump, "=== vectorizable_promotion ===");
2938 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2939 return true;
2940 }
2941
2942 /** Transform. **/
2943
2944 if (vect_print_dump_info (REPORT_DETAILS))
2945 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2946 ncopies);
2947
2948 /* Handle def. */
 2949 /* In case of multi-step promotion, we first generate promotion operations
 2950 to the intermediate types, and then from those types to the final one.
 2951 We store the vector destinations in VEC_DSTS in the correct order for
2952 recursive creation of promotion operations in
2953 vect_create_vectorized_promotion_stmts(). Vector destinations are created
 2954 according to TYPES received from supportable_widening_operation(). */
2955 if (multi_step_cvt)
2956 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2957 else
2958 vec_dsts = VEC_alloc (tree, heap, 1);
2959
2960 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2961 VEC_quick_push (tree, vec_dsts, vec_dest);
2962
2963 if (multi_step_cvt)
2964 {
2965 for (i = VEC_length (tree, interm_types) - 1;
2966 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2967 {
2968 vec_dest = vect_create_destination_var (scalar_dest,
2969 intermediate_type);
2970 VEC_quick_push (tree, vec_dsts, vec_dest);
2971 }
2972 }
2973
2974 if (!slp_node)
2975 {
 2976 vec_oprnds0 = VEC_alloc (tree, heap,
2977 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2978 if (op_type == binary_op)
2979 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2980 }
2981
2982 /* In case the vectorization factor (VF) is bigger than the number
2983 of elements that we can fit in a vectype (nunits), we have to generate
2984 more than one vector stmt - i.e - we need to "unroll" the
2985 vector stmt by a factor VF/nunits. */
2986
2987 prev_stmt_info = NULL;
2988 for (j = 0; j < ncopies; j++)
2989 {
2990 /* Handle uses. */
2991 if (j == 0)
2992 {
2993 if (slp_node)
 2994 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2995 else
2996 {
2997 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2998 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2999 if (op_type == binary_op)
3000 {
3001 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3002 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3003 }
3004 }
3005 }
3006 else
3007 {
3008 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3009 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3010 if (op_type == binary_op)
3011 {
3012 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3013 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3014 }
3015 }
3016
3017 /* Arguments are ready. Create the new vector stmts. */
3018 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3019 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
 3020 multi_step_cvt, stmt,
3021 tmp_vec_dsts,
3022 gsi, slp_node, code1, code2,
3023 decl1, decl2, op_type,
3024 &prev_stmt_info);
3025 }
3026
3027 VEC_free (tree, heap, vec_dsts);
3028 VEC_free (tree, heap, tmp_vec_dsts);
3029 VEC_free (tree, heap, interm_types);
3030 VEC_free (tree, heap, vec_oprnds0);
3031 VEC_free (tree, heap, vec_oprnds1);
3032
3033 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3034 return true;
3035}
3036
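/* Sketch of the overall effect (illustrative): for
 short b[N]; int a[N]; a[i] = (int) b[i];
 with 128-bit vectors, each V8HI def is widened into two V4SI defs (the
 "low" and "high" halves produced via CODE1/CODE2), so twice as many
 vector stmts are produced as consumed - the dual of the demotion case
 above. */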
3037
3038/* Function vectorizable_store.
3039
 3040 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3041 can be vectorized.
3042 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
 3043 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3044 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3045
3046static bool
3047vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3048 slp_tree slp_node)
3049{
3050 tree scalar_dest;
3051 tree data_ref;
3052 tree op;
3053 tree vec_oprnd = NULL_TREE;
3054 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3055 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3056 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3057 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
 3058 struct loop *loop = NULL;
3059 enum machine_mode vec_mode;
3060 tree dummy;
3061 enum dr_alignment_support alignment_support_scheme;
3062 tree def;
3063 gimple def_stmt;
3064 enum vect_def_type dt;
3065 stmt_vec_info prev_stmt_info = NULL;
3066 tree dataref_ptr = NULL_TREE;
3067 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3068 int ncopies;
3069 int j;
3070 gimple next_stmt, first_stmt = NULL;
3071 bool strided_store = false;
3072 unsigned int group_size, i;
3073 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3074 bool inv_p;
3075 VEC(tree,heap) *vec_oprnds = NULL;
3076 bool slp = (slp_node != NULL);
 3077 unsigned int vec_num;
3078 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3079
3080 if (loop_vinfo)
3081 loop = LOOP_VINFO_LOOP (loop_vinfo);
3082
3083 /* Multiple types in SLP are handled by creating the appropriate number of
3084 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3085 case of SLP. */
3086 if (slp)
3087 ncopies = 1;
3088 else
3089 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3090
3091 gcc_assert (ncopies >= 1);
3092
3093 /* FORNOW. This restriction should be relaxed. */
 3094 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3095 {
3096 if (vect_print_dump_info (REPORT_DETAILS))
3097 fprintf (vect_dump, "multiple types in nested loop.");
3098 return false;
3099 }
3100
 3101 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3102 return false;
3103
 3104 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3105 return false;
3106
3107 /* Is vectorizable store? */
3108
3109 if (!is_gimple_assign (stmt))
3110 return false;
3111
3112 scalar_dest = gimple_assign_lhs (stmt);
3113 if (TREE_CODE (scalar_dest) != ARRAY_REF
3114 && TREE_CODE (scalar_dest) != INDIRECT_REF
3115 && TREE_CODE (scalar_dest) != COMPONENT_REF
3116 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3117 && TREE_CODE (scalar_dest) != REALPART_EXPR
3118 && TREE_CODE (scalar_dest) != MEM_REF)
3119 return false;
3120
3121 gcc_assert (gimple_assign_single_p (stmt));
3122 op = gimple_assign_rhs1 (stmt);
 3123 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3124 {
3125 if (vect_print_dump_info (REPORT_DETAILS))
3126 fprintf (vect_dump, "use not simple.");
3127 return false;
3128 }
3129
3130 /* The scalar rhs type needs to be trivially convertible to the vector
3131 component type. This should always be the case. */
3132 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
 3133 {
3134 if (vect_print_dump_info (REPORT_DETAILS))
3135 fprintf (vect_dump, "??? operands of different types");
3136 return false;
3137 }
3138
3139 vec_mode = TYPE_MODE (vectype);
3140 /* FORNOW. In some cases can vectorize even if data-type not supported
3141 (e.g. - array initialization with 0). */
 3142 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3143 return false;
3144
3145 if (!STMT_VINFO_DATA_REF (stmt_info))
3146 return false;
3147
3148 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3149 {
3150 strided_store = true;
3151 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3152 if (!vect_strided_store_supported (vectype)
3153 && !PURE_SLP_STMT (stmt_info) && !slp)
3154 return false;
3155
3156 if (first_stmt == stmt)
3157 {
3158 /* STMT is the leader of the group. Check the operands of all the
3159 stmts of the group. */
3160 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3161 while (next_stmt)
3162 {
3163 gcc_assert (gimple_assign_single_p (next_stmt));
3164 op = gimple_assign_rhs1 (next_stmt);
 3165 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
 3166 &def, &dt))
3167 {
3168 if (vect_print_dump_info (REPORT_DETAILS))
3169 fprintf (vect_dump, "use not simple.");
3170 return false;
3171 }
3172 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3173 }
3174 }
3175 }
3176
3177 if (!vec_stmt) /* transformation not required. */
3178 {
3179 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3180 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3181 return true;
3182 }
3183
3184 /** Transform. **/
3185
3186 if (strided_store)
3187 {
3188 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3189 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3190
3191 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3192
3193 /* FORNOW */
 3194 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3195
3196 /* We vectorize all the stmts of the interleaving group when we
3197 reach the last stmt in the group. */
 3198 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3199 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3200 && !slp)
3201 {
3202 *vec_stmt = NULL;
3203 return true;
3204 }
3205
3206 if (slp)
3207 {
3208 strided_store = false;
3209 /* VEC_NUM is the number of vect stmts to be created for this
3210 group. */
3211 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3212 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3213 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3214 }
 3215 else
3216 /* VEC_NUM is the number of vect stmts to be created for this
3217 group. */
3218 vec_num = group_size;
3219 }
 3220 else
3221 {
3222 first_stmt = stmt;
3223 first_dr = dr;
3224 group_size = vec_num = 1;
 3225 }
3226
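 /* For illustration (hypothetical numbers): an interleaved group of
 GROUP_SIZE == 2 int stores with VF == 4 and V4SI gives ncopies == 1 and
 vec_num == 2, so two vector stores plus the permutation stmts below are
 emitted once the last store of the group is reached. */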
3227 if (vect_print_dump_info (REPORT_DETAILS))
3228 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3229
3230 dr_chain = VEC_alloc (tree, heap, group_size);
3231 oprnds = VEC_alloc (tree, heap, group_size);
3232
 3233 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
 3234 gcc_assert (alignment_support_scheme);
3235
3236 /* In case the vectorization factor (VF) is bigger than the number
3237 of elements that we can fit in a vectype (nunits), we have to generate
3238 more than one vector stmt - i.e - we need to "unroll" the
 3239 vector stmt by a factor VF/nunits. For more details see documentation in
3240 vect_get_vec_def_for_copy_stmt. */
3241
3242 /* In case of interleaving (non-unit strided access):
3243
3244 S1: &base + 2 = x2
3245 S2: &base = x0
3246 S3: &base + 1 = x1
3247 S4: &base + 3 = x3
3248
3249 We create vectorized stores starting from base address (the access of the
3250 first stmt in the chain (S2 in the above example), when the last store stmt
3251 of the chain (S4) is reached:
3252
3253 VS1: &base = vx2
3254 VS2: &base + vec_size*1 = vx0
3255 VS3: &base + vec_size*2 = vx1
3256 VS4: &base + vec_size*3 = vx3
3257
3258 Then permutation statements are generated:
3259
3260 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3261 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3262 ...
3263
3264 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3265 (the order of the data-refs in the output of vect_permute_store_chain
3266 corresponds to the order of scalar stmts in the interleaving chain - see
3267 the documentation of vect_permute_store_chain()).
3268
3269 In case of both multiple types and interleaving, above vector stores and
ff802fa1 3270 permutation stmts are created for every copy. The result vector stmts are
ebfd146a 3271 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
b8698a0f 3272 STMT_VINFO_RELATED_STMT for the next copies.
3273 */
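 /* Illustrative example (added commentary, not from the original sources):
    a scalar loop whose stores form an interleaving group of size 2,
    matching the S1..S4 pattern sketched above; names are hypothetical:

        void
        store_interleaved (int *a, const int *x, const int *y, int n)
        {
          int i;
          for (i = 0; i < n; i++)
            {
              a[2 * i] = x[i];        <-- the "&base" store of the group
              a[2 * i + 1] = y[i];    <-- the "&base + 1" store
            }
        }
 */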
3274
3275 prev_stmt_info = NULL;
3276 for (j = 0; j < ncopies; j++)
3277 {
3278 gimple new_stmt;
3279 gimple ptr_incr;
3280
3281 if (j == 0)
3282 {
3283 if (slp)
3284 {
3285 /* Get vectorized arguments for SLP_NODE. */
b5aeb3bb 3286 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3287
3288 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3289 }
3290 else
3291 {
3292 /* For interleaved stores we collect vectorized defs for all the
3293 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3294 used as an input to vect_permute_store_chain(), and OPRNDS as
3295 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3296
3297 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3298 OPRNDS are of size 1. */
b8698a0f 3299 next_stmt = first_stmt;
3300 for (i = 0; i < group_size; i++)
3301 {
3302 /* Since gaps are not supported for interleaved stores,
3303 GROUP_SIZE is the exact number of stmts in the chain.
3304 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3305 there is no interleaving, GROUP_SIZE is 1, and only one
3306 iteration of the loop will be executed. */
3307 gcc_assert (next_stmt
3308 && gimple_assign_single_p (next_stmt));
3309 op = gimple_assign_rhs1 (next_stmt);
3310
b8698a0f 3311 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
ebfd146a 3312 NULL);
3313 VEC_quick_push(tree, dr_chain, vec_oprnd);
3314 VEC_quick_push(tree, oprnds, vec_oprnd);
3315 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3316 }
3317 }
3318
 3319 /* We should have caught mismatched types earlier. */
3320 gcc_assert (useless_type_conversion_p (vectype,
3321 TREE_TYPE (vec_oprnd)));
3322 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3323 &dummy, &ptr_incr, false,
5006671f 3324 &inv_p);
a70d6342 3325 gcc_assert (bb_vinfo || !inv_p);
ebfd146a 3326 }
b8698a0f 3327 else
ebfd146a 3328 {
3329 /* For interleaved stores we created vectorized defs for all the
3330 defs stored in OPRNDS in the previous iteration (previous copy).
3331 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3332 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3333 next copy.
3334 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3335 OPRNDS are of size 1. */
3336 for (i = 0; i < group_size; i++)
3337 {
3338 op = VEC_index (tree, oprnds, i);
b8698a0f 3339 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
a70d6342 3340 &dt);
b8698a0f 3341 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3342 VEC_replace(tree, dr_chain, i, vec_oprnd);
3343 VEC_replace(tree, oprnds, i, vec_oprnd);
3344 }
b8698a0f 3345 dataref_ptr =
3346 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3347 }
3348
3349 if (strided_store)
3350 {
b8698a0f 3351 result_chain = VEC_alloc (tree, heap, group_size);
3352 /* Permute. */
3353 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3354 &result_chain))
3355 return false;
3356 }
3357
3358 next_stmt = first_stmt;
3359 for (i = 0; i < vec_num; i++)
3360 {
3361 struct ptr_info_def *pi;
3362
3363 if (i > 0)
3364 /* Bump the vector pointer. */
3365 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3366 NULL_TREE);
3367
3368 if (slp)
3369 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3370 else if (strided_store)
b8698a0f 3371 /* For strided stores vectorized defs are interleaved in
3372 vect_permute_store_chain(). */
3373 vec_oprnd = VEC_index (tree, result_chain, i);
3374
3375 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3376 build_int_cst (reference_alias_ptr_type
3377 (DR_REF (first_dr)), 0));
3378 pi = get_ptr_info (dataref_ptr);
3379 pi->align = TYPE_ALIGN_UNIT (vectype);
8f439681 3380 if (aligned_access_p (first_dr))
3381 pi->misalign = 0;
3382 else if (DR_MISALIGNMENT (first_dr) == -1)
3383 {
3384 TREE_TYPE (data_ref)
3385 = build_aligned_type (TREE_TYPE (data_ref),
3386 TYPE_ALIGN (TREE_TYPE (vectype)));
3387 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3388 pi->misalign = 0;
3389 }
3390 else
3391 {
3392 TREE_TYPE (data_ref)
3393 = build_aligned_type (TREE_TYPE (data_ref),
3394 TYPE_ALIGN (TREE_TYPE (vectype)));
3395 pi->misalign = DR_MISALIGNMENT (first_dr);
3396 }
8f439681 3397
3398 /* Arguments are ready. Create the new vector stmt. */
3399 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3400 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3401 mark_symbols_for_renaming (new_stmt);
3402
3403 if (slp)
3404 continue;
b8698a0f 3405
3406 if (j == 0)
3407 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3408 else
3409 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3410
3411 prev_stmt_info = vinfo_for_stmt (new_stmt);
3412 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3413 if (!next_stmt)
3414 break;
3415 }
3416 }
3417
3418 VEC_free (tree, heap, dr_chain);
3419 VEC_free (tree, heap, oprnds);
ebfd146a 3420 if (result_chain)
b8698a0f 3421 VEC_free (tree, heap, result_chain);
3422 if (vec_oprnds)
3423 VEC_free (tree, heap, vec_oprnds);
3424
3425 return true;
3426}
3427
3428/* vectorizable_load.
3429
3430 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3431 can be vectorized.
3432 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3433 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3434 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3435
3436static bool
3437vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3438 slp_tree slp_node, slp_instance slp_node_instance)
3439{
3440 tree scalar_dest;
3441 tree vec_dest = NULL;
3442 tree data_ref = NULL;
3443 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b8698a0f 3444 stmt_vec_info prev_stmt_info;
ebfd146a 3445 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 3446 struct loop *loop = NULL;
ebfd146a 3447 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
a70d6342 3448 bool nested_in_vect_loop = false;
3449 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3450 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3451 tree new_temp;
947131ba 3452 enum machine_mode mode;
3453 gimple new_stmt = NULL;
3454 tree dummy;
3455 enum dr_alignment_support alignment_support_scheme;
3456 tree dataref_ptr = NULL_TREE;
3457 gimple ptr_incr;
3458 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3459 int ncopies;
3460 int i, j, group_size;
3461 tree msq = NULL_TREE, lsq;
3462 tree offset = NULL_TREE;
3463 tree realignment_token = NULL_TREE;
3464 gimple phi = NULL;
3465 VEC(tree,heap) *dr_chain = NULL;
3466 bool strided_load = false;
3467 gimple first_stmt;
3468 tree scalar_type;
3469 bool inv_p;
3470 bool compute_in_loop = false;
3471 struct loop *at_loop;
3472 int vec_num;
3473 bool slp = (slp_node != NULL);
3474 bool slp_perm = false;
3475 enum tree_code code;
3476 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3477 int vf;
3478
3479 if (loop_vinfo)
3480 {
3481 loop = LOOP_VINFO_LOOP (loop_vinfo);
3482 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3483 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3484 }
3485 else
3533e503 3486 vf = 1;
3487
3488 /* Multiple types in SLP are handled by creating the appropriate number of
ff802fa1 3489 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3490 case of SLP. */
3491 if (slp)
3492 ncopies = 1;
3493 else
3494 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3495
3496 gcc_assert (ncopies >= 1);
3497
3498 /* FORNOW. This restriction should be relaxed. */
3499 if (nested_in_vect_loop && ncopies > 1)
3500 {
3501 if (vect_print_dump_info (REPORT_DETAILS))
3502 fprintf (vect_dump, "multiple types in nested loop.");
3503 return false;
3504 }
3505
a70d6342 3506 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3507 return false;
3508
8644a673 3509 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3510 return false;
3511
3512 /* Is vectorizable load? */
3513 if (!is_gimple_assign (stmt))
3514 return false;
3515
3516 scalar_dest = gimple_assign_lhs (stmt);
3517 if (TREE_CODE (scalar_dest) != SSA_NAME)
3518 return false;
3519
3520 code = gimple_assign_rhs_code (stmt);
3521 if (code != ARRAY_REF
3522 && code != INDIRECT_REF
3523 && code != COMPONENT_REF
3524 && code != IMAGPART_EXPR
3525 && code != REALPART_EXPR
3526 && code != MEM_REF)
3527 return false;
3528
3529 if (!STMT_VINFO_DATA_REF (stmt_info))
3530 return false;
3531
3532 scalar_type = TREE_TYPE (DR_REF (dr));
947131ba 3533 mode = TYPE_MODE (vectype);
3534
 3535 /* FORNOW. In some cases we can vectorize even if the data-type is not
 3536 supported (e.g. - data copies). */
947131ba 3537 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3538 {
3539 if (vect_print_dump_info (REPORT_DETAILS))
3540 fprintf (vect_dump, "Aligned load, but unsupported type.");
3541 return false;
3542 }
3543
3544 /* The vector component type needs to be trivially convertible to the
3545 scalar lhs. This should always be the case. */
3546 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
b8698a0f 3547 {
3548 if (vect_print_dump_info (REPORT_DETAILS))
3549 fprintf (vect_dump, "??? operands of different types");
3550 return false;
3551 }
3552
3553 /* Check if the load is a part of an interleaving chain. */
3554 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3555 {
3556 strided_load = true;
3557 /* FORNOW */
3558 gcc_assert (! nested_in_vect_loop);
3559
3560 /* Check if interleaving is supported. */
3561 if (!vect_strided_load_supported (vectype)
3562 && !PURE_SLP_STMT (stmt_info) && !slp)
3563 return false;
3564 }
3565
3566 if (!vec_stmt) /* transformation not required. */
3567 {
3568 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3569 vect_model_load_cost (stmt_info, ncopies, NULL);
3570 return true;
3571 }
3572
3573 if (vect_print_dump_info (REPORT_DETAILS))
3574 fprintf (vect_dump, "transform load.");
3575
3576 /** Transform. **/
3577
3578 if (strided_load)
3579 {
3580 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3581 /* Check if the chain of loads is already vectorized. */
3582 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3583 {
3584 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3585 return true;
3586 }
3587 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3588 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3589
3590 /* VEC_NUM is the number of vect stmts to be created for this group. */
3591 if (slp)
3592 {
3593 strided_load = false;
3594 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3595 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3596 slp_perm = true;
3597 }
3598 else
3599 vec_num = group_size;
3600
3601 dr_chain = VEC_alloc (tree, heap, vec_num);
3602 }
3603 else
3604 {
3605 first_stmt = stmt;
3606 first_dr = dr;
3607 group_size = vec_num = 1;
3608 }
3609
720f5239 3610 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3611 gcc_assert (alignment_support_scheme);
3612
3613 /* In case the vectorization factor (VF) is bigger than the number
3614 of elements that we can fit in a vectype (nunits), we have to generate
3615 more than one vector stmt - i.e - we need to "unroll" the
ff802fa1 3616 vector stmt by a factor VF/nunits. In doing so, we record a pointer
ebfd146a 3617 from one copy of the vector stmt to the next, in the field
ff802fa1 3618 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
ebfd146a 3619 stages to find the correct vector defs to be used when vectorizing
3620 stmts that use the defs of the current stmt. The example below
3621 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3622 need to create 4 vectorized stmts):
3623
3624 before vectorization:
3625 RELATED_STMT VEC_STMT
3626 S1: x = memref - -
3627 S2: z = x + 1 - -
3628
3629 step 1: vectorize stmt S1:
3630 We first create the vector stmt VS1_0, and, as usual, record a
3631 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3632 Next, we create the vector stmt VS1_1, and record a pointer to
3633 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
ff802fa1 3634 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3635 stmts and pointers:
3636 RELATED_STMT VEC_STMT
3637 VS1_0: vx0 = memref0 VS1_1 -
3638 VS1_1: vx1 = memref1 VS1_2 -
3639 VS1_2: vx2 = memref2 VS1_3 -
3640 VS1_3: vx3 = memref3 - -
3641 S1: x = load - VS1_0
3642 S2: z = x + 1 - -
3643
3644 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3645 information we recorded in RELATED_STMT field is used to vectorize
3646 stmt S2. */
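 /* Added sketch (hedged, not from the original sources): the VS1_0..VS1_3
    chain above corresponds to a 4-way unrolled vector loop.  Written with
    GNU C vector extensions for VF=16 and nunits=4:

        typedef int v4si __attribute__ ((vector_size (16)));

        void
        f (int *z, const int *x, int n)
        {
          v4si one = { 1, 1, 1, 1 };
          int i;
          for (i = 0; i < n; i += 16)
            {
              v4si vx0 = *(v4si *) &x[i];          <-- VS1_0
              v4si vx1 = *(v4si *) &x[i + 4];      <-- VS1_1
              v4si vx2 = *(v4si *) &x[i + 8];      <-- VS1_2
              v4si vx3 = *(v4si *) &x[i + 12];     <-- VS1_3
              *(v4si *) &z[i] = vx0 + one;         <-- S2, copy 0
              *(v4si *) &z[i + 4] = vx1 + one;     <-- S2, copy 1
              *(v4si *) &z[i + 8] = vx2 + one;     <-- S2, copy 2
              *(v4si *) &z[i + 12] = vx3 + one;    <-- S2, copy 3
            }
        }
 */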
3647
3648 /* In case of interleaving (non-unit strided access):
3649
3650 S1: x2 = &base + 2
3651 S2: x0 = &base
3652 S3: x1 = &base + 1
3653 S4: x3 = &base + 3
3654
b8698a0f 3655 Vectorized loads are created in the order of memory accesses
3656 starting from the access of the first stmt of the chain:
3657
3658 VS1: vx0 = &base
3659 VS2: vx1 = &base + vec_size*1
3660 VS3: vx3 = &base + vec_size*2
3661 VS4: vx4 = &base + vec_size*3
3662
3663 Then permutation statements are generated:
3664
3665 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3666 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3667 ...
3668
3669 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3670 (the order of the data-refs in the output of vect_permute_load_chain
3671 corresponds to the order of scalar stmts in the interleaving chain - see
3672 the documentation of vect_permute_load_chain()).
3673 The generation of permutation stmts and recording them in
3674 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3675
b8698a0f 3676 In case of both multiple types and interleaving, the vector loads and
3677 permutation stmts above are created for every copy. The result vector
3678 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
3679 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
3680
3681 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3682 on a target that supports unaligned accesses (dr_unaligned_supported)
3683 we generate the following code:
3684 p = initial_addr;
3685 indx = 0;
3686 loop {
3687 p = p + indx * vectype_size;
3688 vec_dest = *(p);
3689 indx = indx + 1;
3690 }
3691
3692 Otherwise, the data reference is potentially unaligned on a target that
b8698a0f 3693 does not support unaligned accesses (dr_explicit_realign_optimized) -
3694 then generate the following code, in which the data in each iteration is
3695 obtained by two vector loads, one from the previous iteration, and one
3696 from the current iteration:
3697 p1 = initial_addr;
3698 msq_init = *(floor(p1))
3699 p2 = initial_addr + VS - 1;
3700 realignment_token = call target_builtin;
3701 indx = 0;
3702 loop {
3703 p2 = p2 + indx * vectype_size
3704 lsq = *(floor(p2))
3705 vec_dest = realign_load (msq, lsq, realignment_token)
3706 indx = indx + 1;
3707 msq = lsq;
3708 } */
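 /* Added note (an illustration, not from the original sources):
    "floor (p)" above is the usual power-of-two rounding that the
    transformation below emits as a BIT_AND_EXPR with
    -TYPE_ALIGN_UNIT (vectype).  For a 16-byte vector type:

        floor (p) == (void *) ((uintptr_t) p & -(uintptr_t) 16)

    i.e. p with its low four address bits cleared, so both msq and lsq
    are loaded from 16-byte-aligned addresses.  */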
3709
3710 /* If the misalignment remains the same throughout the execution of the
3711 loop, we can create the init_addr and permutation mask at the loop
ff802fa1 3712 preheader. Otherwise, it needs to be created inside the loop.
3713 This can only occur when vectorizing memory accesses in the inner-loop
3714 nested within an outer-loop that is being vectorized. */
3715
a70d6342 3716 if (loop && nested_in_vect_loop_p (loop, stmt)
3717 && (TREE_INT_CST_LOW (DR_STEP (dr))
3718 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3719 {
3720 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3721 compute_in_loop = true;
3722 }
3723
3724 if ((alignment_support_scheme == dr_explicit_realign_optimized
3725 || alignment_support_scheme == dr_explicit_realign)
3726 && !compute_in_loop)
3727 {
3728 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3729 alignment_support_scheme, NULL_TREE,
3730 &at_loop);
3731 if (alignment_support_scheme == dr_explicit_realign_optimized)
3732 {
3733 phi = SSA_NAME_DEF_STMT (msq);
3734 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3735 }
3736 }
3737 else
3738 at_loop = loop;
3739
3740 prev_stmt_info = NULL;
3741 for (j = 0; j < ncopies; j++)
b8698a0f 3742 {
3743 /* 1. Create the vector pointer update chain. */
3744 if (j == 0)
3745 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3746 at_loop, offset,
3747 &dummy, &ptr_incr, false,
5006671f 3748 &inv_p);
ebfd146a 3749 else
b8698a0f 3750 dataref_ptr =
3751 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3752
3753 for (i = 0; i < vec_num; i++)
3754 {
3755 if (i > 0)
3756 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3757 NULL_TREE);
3758
3759 /* 2. Create the vector-load in the loop. */
3760 switch (alignment_support_scheme)
3761 {
3762 case dr_aligned:
3763 case dr_unaligned_supported:
3764 {
3765 struct ptr_info_def *pi;
3766 data_ref
3767 = build2 (MEM_REF, vectype, dataref_ptr,
3768 build_int_cst (reference_alias_ptr_type
3769 (DR_REF (first_dr)), 0));
3770 pi = get_ptr_info (dataref_ptr);
3771 pi->align = TYPE_ALIGN_UNIT (vectype);
3772 if (alignment_support_scheme == dr_aligned)
3773 {
3774 gcc_assert (aligned_access_p (first_dr));
3775 pi->misalign = 0;
3776 }
3777 else if (DR_MISALIGNMENT (first_dr) == -1)
3778 {
3779 TREE_TYPE (data_ref)
3780 = build_aligned_type (TREE_TYPE (data_ref),
3781 TYPE_ALIGN (TREE_TYPE (vectype)));
3782 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3783 pi->misalign = 0;
3784 }
3785 else
3786 {
3787 TREE_TYPE (data_ref)
3788 = build_aligned_type (TREE_TYPE (data_ref),
3789 TYPE_ALIGN (TREE_TYPE (vectype)));
3790 pi->misalign = DR_MISALIGNMENT (first_dr);
3791 }
3792 break;
3793 }
3794 case dr_explicit_realign:
3795 {
3796 tree ptr, bump;
3797 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3798
3799 if (compute_in_loop)
3800 msq = vect_setup_realignment (first_stmt, gsi,
3801 &realignment_token,
b8698a0f 3802 dr_explicit_realign,
3803 dataref_ptr, NULL);
3804
3805 new_stmt = gimple_build_assign_with_ops
3806 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3807 build_int_cst
3808 (TREE_TYPE (dataref_ptr),
3809 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3810 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3811 gimple_assign_set_lhs (new_stmt, ptr);
3812 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3813 data_ref
3814 = build2 (MEM_REF, vectype, ptr,
3815 build_int_cst (reference_alias_ptr_type
3816 (DR_REF (first_dr)), 0));
3817 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3818 new_stmt = gimple_build_assign (vec_dest, data_ref);
3819 new_temp = make_ssa_name (vec_dest, new_stmt);
3820 gimple_assign_set_lhs (new_stmt, new_temp);
3821 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3822 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
ebfd146a 3823 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3824 msq = new_temp;
3825
3826 bump = size_binop (MULT_EXPR, vs_minus_1,
3827 TYPE_SIZE_UNIT (scalar_type));
3828 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3829 new_stmt = gimple_build_assign_with_ops
3830 (BIT_AND_EXPR, NULL_TREE, ptr,
3831 build_int_cst
3832 (TREE_TYPE (ptr),
3833 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3834 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3835 gimple_assign_set_lhs (new_stmt, ptr);
3836 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3837 data_ref
3838 = build2 (MEM_REF, vectype, ptr,
3839 build_int_cst (reference_alias_ptr_type
3840 (DR_REF (first_dr)), 0));
3841 break;
3842 }
3843 case dr_explicit_realign_optimized:
3844 new_stmt = gimple_build_assign_with_ops
3845 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3846 build_int_cst
3847 (TREE_TYPE (dataref_ptr),
3848 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3849 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3850 gimple_assign_set_lhs (new_stmt, new_temp);
3851 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3852 data_ref
3853 = build2 (MEM_REF, vectype, new_temp,
3854 build_int_cst (reference_alias_ptr_type
3855 (DR_REF (first_dr)), 0));
3856 break;
3857 default:
3858 gcc_unreachable ();
3859 }
3860 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3861 new_stmt = gimple_build_assign (vec_dest, data_ref);
3862 new_temp = make_ssa_name (vec_dest, new_stmt);
3863 gimple_assign_set_lhs (new_stmt, new_temp);
3864 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3865 mark_symbols_for_renaming (new_stmt);
3866
ff802fa1 3867 /* 3. Handle explicit realignment if necessary/supported. Create in
3868 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3869 if (alignment_support_scheme == dr_explicit_realign_optimized
3870 || alignment_support_scheme == dr_explicit_realign)
3871 {
3872 tree tmp;
3873
3874 lsq = gimple_assign_lhs (new_stmt);
3875 if (!realignment_token)
3876 realignment_token = dataref_ptr;
3877 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3878 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3879 realignment_token);
3880 new_stmt = gimple_build_assign (vec_dest, tmp);
3881 new_temp = make_ssa_name (vec_dest, new_stmt);
3882 gimple_assign_set_lhs (new_stmt, new_temp);
3883 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3884
3885 if (alignment_support_scheme == dr_explicit_realign_optimized)
3886 {
3887 gcc_assert (phi);
3888 if (i == vec_num - 1 && j == ncopies - 1)
3889 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3890 UNKNOWN_LOCATION);
3891 msq = lsq;
3892 }
3893 }
3894
3895 /* 4. Handle invariant-load. */
a70d6342 3896 if (inv_p && !bb_vinfo)
3897 {
3898 gcc_assert (!strided_load);
3899 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3900 if (j == 0)
3901 {
3902 int k;
3903 tree t = NULL_TREE;
3904 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3905
 3906 /* CHECKME: bitpos depends on endianness? */
3907 bitpos = bitsize_zero_node;
b8698a0f 3908 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
ebfd146a 3909 bitsize, bitpos);
b8698a0f 3910 vec_dest =
3911 vect_create_destination_var (scalar_dest, NULL_TREE);
3912 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3913 new_temp = make_ssa_name (vec_dest, new_stmt);
3914 gimple_assign_set_lhs (new_stmt, new_temp);
3915 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3916
3917 for (k = nunits - 1; k >= 0; --k)
3918 t = tree_cons (NULL_TREE, new_temp, t);
3919 /* FIXME: use build_constructor directly. */
3920 vec_inv = build_constructor_from_list (vectype, t);
3921 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3922 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3923 }
3924 else
3925 gcc_unreachable (); /* FORNOW. */
3926 }
3927
3928 /* Collect vector loads and later create their permutation in
3929 vect_transform_strided_load (). */
3930 if (strided_load || slp_perm)
3931 VEC_quick_push (tree, dr_chain, new_temp);
3932
3933 /* Store vector loads in the corresponding SLP_NODE. */
3934 if (slp && !slp_perm)
3935 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3936 }
3937
3938 if (slp && !slp_perm)
3939 continue;
3940
3941 if (slp_perm)
3942 {
a70d6342 3943 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3944 slp_node_instance, false))
3945 {
3946 VEC_free (tree, heap, dr_chain);
3947 return false;
3948 }
3949 }
3950 else
3951 {
3952 if (strided_load)
3953 {
3954 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
b8698a0f 3955 return false;
3956
3957 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3958 VEC_free (tree, heap, dr_chain);
3959 dr_chain = VEC_alloc (tree, heap, group_size);
3960 }
3961 else
3962 {
3963 if (j == 0)
3964 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3965 else
3966 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3967 prev_stmt_info = vinfo_for_stmt (new_stmt);
3968 }
3969 }
3970 }
3971
3972 if (dr_chain)
3973 VEC_free (tree, heap, dr_chain);
3974
3975 return true;
3976}
3977
3978/* Function vect_is_simple_cond.
b8698a0f 3979
3980 Input:
3981 LOOP - the loop that is being vectorized.
3982 COND - Condition that is checked for simple use.
3983
3984 Returns whether a COND can be vectorized. Checks whether
 3985 condition operands are supportable using vect_is_simple_use. */
3986
3987static bool
3988vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3989{
3990 tree lhs, rhs;
3991 tree def;
3992 enum vect_def_type dt;
3993
3994 if (!COMPARISON_CLASS_P (cond))
3995 return false;
3996
3997 lhs = TREE_OPERAND (cond, 0);
3998 rhs = TREE_OPERAND (cond, 1);
3999
4000 if (TREE_CODE (lhs) == SSA_NAME)
4001 {
4002 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
b8698a0f 4003 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
a70d6342 4004 &dt))
4005 return false;
4006 }
4007 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4008 && TREE_CODE (lhs) != FIXED_CST)
4009 return false;
4010
4011 if (TREE_CODE (rhs) == SSA_NAME)
4012 {
4013 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
b8698a0f 4014 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
a70d6342 4015 &dt))
4016 return false;
4017 }
4018 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4019 && TREE_CODE (rhs) != FIXED_CST)
4020 return false;
4021
4022 return true;
4023}
4024
4025/* vectorizable_condition.
4026
 4027 Check if STMT is a conditional modify expression that can be vectorized.
4028 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4029 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4030 at GSI.
4031
 4032 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
 4033 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
 4034 the else clause if it is 2).
4035
4036 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
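 /* Added illustration (hedged, not from the original sources): in a
    conditional reduction such as

        for (i = 0; i < n; i++)
          last = (a[i] > 0) ? a[i] : last;

    the reduction variable LAST feeds the else clause of the COND_EXPR,
    so the caller would pass REDUC_INDEX == 2.  */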
4037
4bbe8262 4038bool
ebfd146a 4039vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4bbe8262 4040 gimple *vec_stmt, tree reduc_def, int reduc_index)
4041{
4042 tree scalar_dest = NULL_TREE;
4043 tree vec_dest = NULL_TREE;
4044 tree op = NULL_TREE;
4045 tree cond_expr, then_clause, else_clause;
4046 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4047 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4048 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4049 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4050 tree vec_compare, vec_cond_expr;
4051 tree new_temp;
4052 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4053 enum machine_mode vec_mode;
4054 tree def;
a855b1b1 4055 enum vect_def_type dt, dts[4];
4056 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4057 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4058 enum tree_code code;
4059 stmt_vec_info prev_stmt_info = NULL;
4060 int j;
ebfd146a 4061
4062 /* FORNOW: unsupported in basic block SLP. */
4063 gcc_assert (loop_vinfo);
b8698a0f 4064
ebfd146a 4065 gcc_assert (ncopies >= 1);
a855b1b1 4066 if (reduc_index && ncopies > 1)
4067 return false; /* FORNOW */
4068
4069 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4070 return false;
4071
4072 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4073 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4074 && reduc_def))
4075 return false;
4076
4077 /* FORNOW: SLP not supported. */
4078 if (STMT_SLP_TYPE (stmt_info))
4079 return false;
4080
4081 /* FORNOW: not yet supported. */
b8698a0f 4082 if (STMT_VINFO_LIVE_P (stmt_info))
4083 {
4084 if (vect_print_dump_info (REPORT_DETAILS))
4085 fprintf (vect_dump, "value used after loop.");
4086 return false;
4087 }
4088
4089 /* Is vectorizable conditional operation? */
4090 if (!is_gimple_assign (stmt))
4091 return false;
4092
4093 code = gimple_assign_rhs_code (stmt);
4094
4095 if (code != COND_EXPR)
4096 return false;
4097
4098 gcc_assert (gimple_assign_single_p (stmt));
4099 op = gimple_assign_rhs1 (stmt);
4100 cond_expr = TREE_OPERAND (op, 0);
4101 then_clause = TREE_OPERAND (op, 1);
4102 else_clause = TREE_OPERAND (op, 2);
4103
4104 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4105 return false;
4106
4107 /* We do not handle two different vector types for the condition
4108 and the values. */
4109 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4110 TREE_TYPE (vectype)))
4111 return false;
4112
4113 if (TREE_CODE (then_clause) == SSA_NAME)
4114 {
4115 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
b8698a0f 4116 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4117 &then_def_stmt, &def, &dt))
4118 return false;
4119 }
b8698a0f 4120 else if (TREE_CODE (then_clause) != INTEGER_CST
4121 && TREE_CODE (then_clause) != REAL_CST
4122 && TREE_CODE (then_clause) != FIXED_CST)
4123 return false;
4124
4125 if (TREE_CODE (else_clause) == SSA_NAME)
4126 {
4127 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
a70d6342 4128 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4129 &else_def_stmt, &def, &dt))
4130 return false;
4131 }
b8698a0f 4132 else if (TREE_CODE (else_clause) != INTEGER_CST
4133 && TREE_CODE (else_clause) != REAL_CST
4134 && TREE_CODE (else_clause) != FIXED_CST)
4135 return false;
4136
4137
4138 vec_mode = TYPE_MODE (vectype);
4139
b8698a0f 4140 if (!vec_stmt)
4141 {
4142 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8e7aa1f9 4143 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4144 }
4145
4146 /* Transform */
4147
4148 /* Handle def. */
4149 scalar_dest = gimple_assign_lhs (stmt);
4150 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4151
4152 /* Handle cond expr. */
4153 for (j = 0; j < ncopies; j++)
4154 {
4155 gimple new_stmt;
4156 if (j == 0)
4157 {
4158 gimple gtemp;
4159 vec_cond_lhs =
4160 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4161 stmt, NULL);
4162 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4163 NULL, &gtemp, &def, &dts[0]);
4164 vec_cond_rhs =
4165 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4166 stmt, NULL);
4167 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4168 NULL, &gtemp, &def, &dts[1]);
4169 if (reduc_index == 1)
4170 vec_then_clause = reduc_def;
4171 else
4172 {
4173 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4174 stmt, NULL);
4175 vect_is_simple_use (then_clause, loop_vinfo,
4176 NULL, &gtemp, &def, &dts[2]);
4177 }
4178 if (reduc_index == 2)
4179 vec_else_clause = reduc_def;
4180 else
4181 {
4182 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4183 stmt, NULL);
4184 vect_is_simple_use (else_clause, loop_vinfo,
4185 NULL, &gtemp, &def, &dts[3]);
4186 }
4187 }
4188 else
4189 {
4190 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4191 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4192 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4193 vec_then_clause);
4194 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4195 vec_else_clause);
4196 }
4197
4198 /* Arguments are ready. Create the new vector stmt. */
4199 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4200 vec_cond_lhs, vec_cond_rhs);
4201 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4202 vec_compare, vec_then_clause, vec_else_clause);
4203
4204 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4205 new_temp = make_ssa_name (vec_dest, new_stmt);
4206 gimple_assign_set_lhs (new_stmt, new_temp);
4207 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4208 if (j == 0)
4209 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4210 else
4211 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4212
4213 prev_stmt_info = vinfo_for_stmt (new_stmt);
4214 }
b8698a0f 4215
4216 return true;
4217}
4218
4219
8644a673 4220/* Make sure the statement is vectorizable. */
4221
4222bool
a70d6342 4223vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
ebfd146a 4224{
8644a673 4225 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
a70d6342 4226 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 4227 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
ebfd146a 4228 bool ok;
a70d6342 4229 tree scalar_type, vectype;
4230
4231 if (vect_print_dump_info (REPORT_DETAILS))
ebfd146a 4232 {
4233 fprintf (vect_dump, "==> examining statement: ");
4234 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4235 }
ebfd146a 4236
1825a1f3 4237 if (gimple_has_volatile_ops (stmt))
b8698a0f 4238 {
4239 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4240 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4241
4242 return false;
4243 }
4244
4245 /* Skip stmts that do not need to be vectorized. In loops this is expected
4246 to include:
4247 - the COND_EXPR which is the loop exit condition
4248 - any LABEL_EXPRs in the loop
b8698a0f 4249 - computations that are used only for array indexing or loop control.
4250 In basic blocks we only analyze statements that are a part of some SLP
4251 instance, therefore, all the statements are relevant. */
ebfd146a 4252
b8698a0f 4253 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 4254 && !STMT_VINFO_LIVE_P (stmt_info))
4255 {
4256 if (vect_print_dump_info (REPORT_DETAILS))
8644a673 4257 fprintf (vect_dump, "irrelevant.");
ebfd146a 4258
4259 return true;
4260 }
ebfd146a 4261
4262 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4263 {
4264 case vect_internal_def:
4265 break;
ebfd146a 4266
8644a673 4267 case vect_reduction_def:
7c5222ff 4268 case vect_nested_cycle:
a70d6342 4269 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
8644a673 4270 || relevance == vect_used_in_outer_by_reduction
a70d6342 4271 || relevance == vect_unused_in_scope));
4272 break;
4273
4274 case vect_induction_def:
4275 case vect_constant_def:
4276 case vect_external_def:
4277 case vect_unknown_def_type:
4278 default:
4279 gcc_unreachable ();
4280 }
ebfd146a 4281
4282 if (bb_vinfo)
4283 {
4284 gcc_assert (PURE_SLP_STMT (stmt_info));
4285
b690cc0f 4286 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4287 if (vect_print_dump_info (REPORT_DETAILS))
4288 {
4289 fprintf (vect_dump, "get vectype for scalar type: ");
4290 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4291 }
4292
4293 vectype = get_vectype_for_scalar_type (scalar_type);
4294 if (!vectype)
4295 {
4296 if (vect_print_dump_info (REPORT_DETAILS))
4297 {
4298 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4299 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4300 }
4301 return false;
4302 }
4303
4304 if (vect_print_dump_info (REPORT_DETAILS))
4305 {
4306 fprintf (vect_dump, "vectype: ");
4307 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4308 }
4309
4310 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4311 }
4312
8644a673 4313 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 4314 {
4315 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4316 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4317 *need_to_vectorize = true;
4318 }
4319
8644a673 4320 ok = true;
b8698a0f 4321 if (!bb_vinfo
4322 && (STMT_VINFO_RELEVANT_P (stmt_info)
4323 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4324 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4325 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4326 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4327 || vectorizable_operation (stmt, NULL, NULL, NULL)
4328 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4329 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4330 || vectorizable_call (stmt, NULL, NULL)
4331 || vectorizable_store (stmt, NULL, NULL, NULL)
b5aeb3bb 4332 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4bbe8262 4333 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4334 else
4335 {
4336 if (bb_vinfo)
4337 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4338 || vectorizable_assignment (stmt, NULL, NULL, node)
4339 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4340 || vectorizable_store (stmt, NULL, NULL, node));
b8698a0f 4341 }
4342
4343 if (!ok)
ebfd146a 4344 {
4345 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4346 {
4347 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4348 fprintf (vect_dump, "supported: ");
4349 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4350 }
b8698a0f 4351
4352 return false;
4353 }
4354
4355 if (bb_vinfo)
4356 return true;
4357
4358 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4359 need extra handling, except for vectorizable reductions. */
4360 if (STMT_VINFO_LIVE_P (stmt_info)
4361 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4362 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 4363
8644a673 4364 if (!ok)
ebfd146a 4365 {
4366 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4367 {
4368 fprintf (vect_dump, "not vectorized: live stmt not ");
4369 fprintf (vect_dump, "supported: ");
4370 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4371 }
b8698a0f 4372
8644a673 4373 return false;
4374 }
4375
8644a673 4376 if (!PURE_SLP_STMT (stmt_info))
ebfd146a 4377 {
b8698a0f 4378 /* Groups of strided accesses whose size is not a power of 2 are not
ff802fa1 4379 vectorizable yet using loop-vectorization. Therefore, if this stmt
b8698a0f 4380 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
a70d6342 4381 loop-based vectorized), the loop cannot be vectorized. */
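 /* Added example (hedged, not from the original sources): a group of
    three strided accesses such as

        for (i = 0; i < n; i++)
          b[i] = a[3 * i] + a[3 * i + 1] + a[3 * i + 2];

    gives DR_GROUP_SIZE == 3, and exact_log2 (3) == -1, so the check
    below rejects loop-based vectorization of the group.  */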
4382 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4383 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4384 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
ebfd146a 4385 {
4386 if (vect_print_dump_info (REPORT_DETAILS))
4387 {
4388 fprintf (vect_dump, "not vectorized: the size of group "
4389 "of strided accesses is not a power of 2");
4390 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4391 }
4392
4393 return false;
4394 }
4395 }
b8698a0f 4396
4397 return true;
4398}
4399
4400
4401/* Function vect_transform_stmt.
4402
4403 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4404
4405bool
4406vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
b8698a0f 4407 bool *strided_store, slp_tree slp_node,
4408 slp_instance slp_node_instance)
4409{
4410 bool is_store = false;
4411 gimple vec_stmt = NULL;
4412 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
039d9ea1 4413 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
ebfd146a 4414 bool done;
4415
4416 switch (STMT_VINFO_TYPE (stmt_info))
4417 {
4418 case type_demotion_vec_info_type:
4419 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4420 gcc_assert (done);
4421 break;
4422
4423 case type_promotion_vec_info_type:
4424 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4425 gcc_assert (done);
4426 break;
4427
4428 case type_conversion_vec_info_type:
4429 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4430 gcc_assert (done);
4431 break;
4432
4433 case induc_vec_info_type:
4434 gcc_assert (!slp_node);
4435 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4436 gcc_assert (done);
4437 break;
4438
4439 case op_vec_info_type:
4440 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4441 gcc_assert (done);
4442 break;
4443
4444 case assignment_vec_info_type:
4445 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4446 gcc_assert (done);
4447 break;
4448
4449 case load_vec_info_type:
b8698a0f 4450 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4451 slp_node_instance);
4452 gcc_assert (done);
4453 break;
4454
4455 case store_vec_info_type:
4456 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4457 gcc_assert (done);
4458 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4459 {
4460 /* In case of interleaving, the whole chain is vectorized when the
ff802fa1 4461 last store in the chain is reached. Store stmts before the last
 4462 one are skipped, and their vec_stmt_info shouldn't be freed
4463 meanwhile. */
4464 *strided_store = true;
4465 if (STMT_VINFO_VEC_STMT (stmt_info))
4466 is_store = true;
4467 }
4468 else
4469 is_store = true;
4470 break;
4471
4472 case condition_vec_info_type:
4473 gcc_assert (!slp_node);
4bbe8262 4474 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4475 gcc_assert (done);
4476 break;
4477
4478 case call_vec_info_type:
4479 gcc_assert (!slp_node);
4480 done = vectorizable_call (stmt, gsi, &vec_stmt);
039d9ea1 4481 stmt = gsi_stmt (*gsi);
4482 break;
4483
4484 case reduc_vec_info_type:
b5aeb3bb 4485 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4486 gcc_assert (done);
4487 break;
4488
4489 default:
4490 if (!STMT_VINFO_LIVE_P (stmt_info))
4491 {
4492 if (vect_print_dump_info (REPORT_DETAILS))
4493 fprintf (vect_dump, "stmt not supported.");
4494 gcc_unreachable ();
4495 }
4496 }
4497
4498 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4499 is being vectorized, but outside the immediately enclosing loop. */
4500 if (vec_stmt
4501 && STMT_VINFO_LOOP_VINFO (stmt_info)
4502 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4503 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4504 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4505 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
b8698a0f 4506 || STMT_VINFO_RELEVANT (stmt_info) ==
a70d6342 4507 vect_used_in_outer_by_reduction))
ebfd146a 4508 {
4509 struct loop *innerloop = LOOP_VINFO_LOOP (
4510 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4511 imm_use_iterator imm_iter;
4512 use_operand_p use_p;
4513 tree scalar_dest;
4514 gimple exit_phi;
4515
4516 if (vect_print_dump_info (REPORT_DETAILS))
a70d6342 4517 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4518
 4519 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4520 (to be used when vectorizing outer-loop stmts that use the DEF of
4521 STMT). */
4522 if (gimple_code (stmt) == GIMPLE_PHI)
4523 scalar_dest = PHI_RESULT (stmt);
4524 else
4525 scalar_dest = gimple_assign_lhs (stmt);
4526
4527 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4528 {
4529 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4530 {
4531 exit_phi = USE_STMT (use_p);
4532 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4533 }
4534 }
4535 }
4536
4537 /* Handle stmts whose DEF is used outside the loop-nest that is
4538 being vectorized. */
4539 if (STMT_VINFO_LIVE_P (stmt_info)
4540 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4541 {
4542 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4543 gcc_assert (done);
4544 }
4545
4546 if (vec_stmt)
4547 {
4548 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4549 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4550 if (orig_stmt_in_pattern)
4551 {
4552 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4553 /* STMT was inserted by the vectorizer to replace a computation idiom.
4554 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4555 computed this idiom. We need to record a pointer to VEC_STMT in
4556 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4557 documentation of vect_pattern_recog. */
4558 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4559 {
4560 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4561 == orig_scalar_stmt);
4562 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4563 }
4564 }
4565 }
4566
b8698a0f 4567 return is_store;
4568}
4569
4570
b8698a0f 4571/* Remove a group of stores (for SLP or interleaving), free their
4572 stmt_vec_info. */
4573
4574void
4575vect_remove_stores (gimple first_stmt)
4576{
4577 gimple next = first_stmt;
4578 gimple tmp;
4579 gimple_stmt_iterator next_si;
4580
4581 while (next)
4582 {
4583 /* Free the attached stmt_vec_info and remove the stmt. */
4584 next_si = gsi_for_stmt (next);
4585 gsi_remove (&next_si, true);
4586 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4587 free_stmt_vec_info (next);
4588 next = tmp;
4589 }
4590}
4591
4592
4593/* Function new_stmt_vec_info.
4594
4595 Create and initialize a new stmt_vec_info struct for STMT. */
4596
4597stmt_vec_info
b8698a0f 4598new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
a70d6342 4599 bb_vec_info bb_vinfo)
4600{
4601 stmt_vec_info res;
4602 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4603
4604 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4605 STMT_VINFO_STMT (res) = stmt;
4606 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
a70d6342 4607 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
8644a673 4608 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4609 STMT_VINFO_LIVE_P (res) = false;
4610 STMT_VINFO_VECTYPE (res) = NULL;
4611 STMT_VINFO_VEC_STMT (res) = NULL;
4b5caab7 4612 STMT_VINFO_VECTORIZABLE (res) = true;
4613 STMT_VINFO_IN_PATTERN_P (res) = false;
4614 STMT_VINFO_RELATED_STMT (res) = NULL;
4615 STMT_VINFO_DATA_REF (res) = NULL;
4616
4617 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4618 STMT_VINFO_DR_OFFSET (res) = NULL;
4619 STMT_VINFO_DR_INIT (res) = NULL;
4620 STMT_VINFO_DR_STEP (res) = NULL;
4621 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4622
4623 if (gimple_code (stmt) == GIMPLE_PHI
4624 && is_loop_header_bb_p (gimple_bb (stmt)))
4625 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4626 else
4627 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4628
4629 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4630 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4631 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
32e8bb8e 4632 STMT_SLP_TYPE (res) = loop_vect;
4633 DR_GROUP_FIRST_DR (res) = NULL;
4634 DR_GROUP_NEXT_DR (res) = NULL;
4635 DR_GROUP_SIZE (res) = 0;
4636 DR_GROUP_STORE_COUNT (res) = 0;
4637 DR_GROUP_GAP (res) = 0;
4638 DR_GROUP_SAME_DR_STMT (res) = NULL;
4639 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4640
4641 return res;
4642}
4643
4644
 4645 /* Create a vector for stmt_vec_info. */
4646
4647void
4648init_stmt_vec_info_vec (void)
4649{
4650 gcc_assert (!stmt_vec_info_vec);
4651 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4652}
4653
4654
 4655 /* Free the vector of stmt_vec_info. */
4656
4657void
4658free_stmt_vec_info_vec (void)
4659{
4660 gcc_assert (stmt_vec_info_vec);
4661 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4662}
4663
4664
4665/* Free stmt vectorization related info. */
4666
4667void
4668free_stmt_vec_info (gimple stmt)
4669{
4670 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4671
4672 if (!stmt_info)
4673 return;
4674
4675 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4676 set_vinfo_for_stmt (stmt, NULL);
4677 free (stmt_info);
4678}
4679
4680
4681/* Function get_vectype_for_scalar_type.
4682
4683 Returns the vector type corresponding to SCALAR_TYPE as supported
4684 by the target. */
4685
4686tree
4687get_vectype_for_scalar_type (tree scalar_type)
4688{
4689 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
2f816591 4690 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4691 int nunits;
4692 tree vectype;
4693
4694 if (nbytes == 0
4695 || (nbytes >= targetm.vectorize.units_per_simd_word (inner_mode)))
4696 return NULL_TREE;
4697
4698 /* We can't build a vector type of elements with alignment bigger than
4699 their size. */
4700 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4701 return NULL_TREE;
4702
4703 /* If we'd build a vector type of elements whose mode precision doesn't
4704 match their types precision we'll get mismatched types on vector
4705 extracts via BIT_FIELD_REFs. This effectively means we disable
4706 vectorization of bool and/or enum types in some languages. */
4707 if (INTEGRAL_TYPE_P (scalar_type)
4708 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4709 return NULL_TREE;
4710
4711 /* FORNOW: Only a single vector size per mode
4712 (TARGET_VECTORIZE_UNITS_PER_SIMD_WORD) is expected. */
4713 nunits = targetm.vectorize.units_per_simd_word (inner_mode) / nbytes;
4714
4715 vectype = build_vector_type (scalar_type, nunits);
4716 if (vect_print_dump_info (REPORT_DETAILS))
4717 {
4718 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4719 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4720 }
4721
4722 if (!vectype)
4723 return NULL_TREE;
4724
4725 if (vect_print_dump_info (REPORT_DETAILS))
4726 {
4727 fprintf (vect_dump, "vectype: ");
4728 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4729 }
4730
4731 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4732 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4733 {
4734 if (vect_print_dump_info (REPORT_DETAILS))
4735 fprintf (vect_dump, "mode not supported by target.");
4736 return NULL_TREE;
4737 }
4738
4739 return vectype;
4740}
4741
4742/* Function get_same_sized_vectype
4743
4744 Returns a vector type corresponding to SCALAR_TYPE of size
4745 VECTOR_TYPE if supported by the target. */
4746
4747tree
4748get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4749{
4750 return get_vectype_for_scalar_type (scalar_type);
4751}
4752
4753/* Function vect_is_simple_use.
4754
4755 Input:
4756 LOOP_VINFO - the vect info of the loop that is being vectorized.
4757 BB_VINFO - the vect info of the basic block that is being vectorized.
4758 OPERAND - operand of a stmt in the loop or bb.
4759 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4760
4761 Returns whether a stmt with OPERAND can be vectorized.
b8698a0f 4762 For loops, supportable operands are constants, loop invariants, and operands
ff802fa1 4763 that are defined by the current iteration of the loop. Unsupportable
b8698a0f 4764 operands are those that are defined by a previous iteration of the loop (as
4765 is the case in reduction/induction computations).
4766 For basic blocks, supportable operands are constants and bb invariants.
4767 For now, operands defined outside the basic block are not supported. */
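 /* Added examples (hedged, not from the original sources): for
    "x_1 = a_2 + 3" inside the vectorized loop, the constant 3 is
    classified as vect_constant_def, a use of A_2 defined by another
    stmt of the loop as vect_internal_def, and an SSA name defined
    before the loop (e.g. a function argument) as vect_external_def.  */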
4768
4769bool
b8698a0f 4770vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
a70d6342 4771 bb_vec_info bb_vinfo, gimple *def_stmt,
ebfd146a 4772 tree *def, enum vect_def_type *dt)
b8698a0f 4773{
4774 basic_block bb;
4775 stmt_vec_info stmt_vinfo;
a70d6342 4776 struct loop *loop = NULL;
b8698a0f 4777
4778 if (loop_vinfo)
4779 loop = LOOP_VINFO_LOOP (loop_vinfo);
4780
4781 *def_stmt = NULL;
4782 *def = NULL_TREE;
b8698a0f 4783
4784 if (vect_print_dump_info (REPORT_DETAILS))
4785 {
4786 fprintf (vect_dump, "vect_is_simple_use: operand ");
4787 print_generic_expr (vect_dump, operand, TDF_SLIM);
4788 }
b8698a0f 4789
4790 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4791 {
4792 *dt = vect_constant_def;
4793 return true;
4794 }
b8698a0f 4795
4796 if (is_gimple_min_invariant (operand))
4797 {
4798 *def = operand;
8644a673 4799 *dt = vect_external_def;
4800 return true;
4801 }
4802
4803 if (TREE_CODE (operand) == PAREN_EXPR)
4804 {
4805 if (vect_print_dump_info (REPORT_DETAILS))
4806 fprintf (vect_dump, "non-associatable copy.");
4807 operand = TREE_OPERAND (operand, 0);
4808 }
b8698a0f 4809
4810 if (TREE_CODE (operand) != SSA_NAME)
4811 {
4812 if (vect_print_dump_info (REPORT_DETAILS))
4813 fprintf (vect_dump, "not ssa-name.");
4814 return false;
4815 }
b8698a0f 4816
4817 *def_stmt = SSA_NAME_DEF_STMT (operand);
4818 if (*def_stmt == NULL)
4819 {
4820 if (vect_print_dump_info (REPORT_DETAILS))
4821 fprintf (vect_dump, "no def_stmt.");
4822 return false;
4823 }
4824
4825 if (vect_print_dump_info (REPORT_DETAILS))
4826 {
4827 fprintf (vect_dump, "def_stmt: ");
4828 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4829 }
4830
8644a673 4831 /* Empty stmt is expected only in case of a function argument.
4832 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4833 if (gimple_nop_p (*def_stmt))
4834 {
4835 *def = operand;
8644a673 4836 *dt = vect_external_def;
4837 return true;
4838 }
4839
4840 bb = gimple_bb (*def_stmt);
4841
4842 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4843 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
b8698a0f 4844 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
8644a673 4845 *dt = vect_external_def;
4846 else
4847 {
4848 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4849 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4850 }
4851
4852 if (*dt == vect_unknown_def_type)
4853 {
4854 if (vect_print_dump_info (REPORT_DETAILS))
4855 fprintf (vect_dump, "Unsupported pattern.");
4856 return false;
4857 }
4858
4859 if (vect_print_dump_info (REPORT_DETAILS))
4860 fprintf (vect_dump, "type of def: %d.",*dt);
4861
4862 switch (gimple_code (*def_stmt))
4863 {
4864 case GIMPLE_PHI:
4865 *def = gimple_phi_result (*def_stmt);
4866 break;
4867
4868 case GIMPLE_ASSIGN:
4869 *def = gimple_assign_lhs (*def_stmt);
4870 break;
4871
4872 case GIMPLE_CALL:
4873 *def = gimple_call_lhs (*def_stmt);
4874 if (*def != NULL)
4875 break;
4876 /* FALLTHRU */
4877 default:
4878 if (vect_print_dump_info (REPORT_DETAILS))
4879 fprintf (vect_dump, "unsupported defining stmt: ");
4880 return false;
4881 }
4882
4883 return true;
4884}
4885
4886/* Function vect_is_simple_use_1.
4887
 4888 Same as vect_is_simple_use but also determines the vector operand
4889 type of OPERAND and stores it to *VECTYPE. If the definition of
4890 OPERAND is vect_uninitialized_def, vect_constant_def or
4891 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
4892 is responsible to compute the best suited vector type for the
4893 scalar operand. */
4894
4895bool
4896vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4897 bb_vec_info bb_vinfo, gimple *def_stmt,
4898 tree *def, enum vect_def_type *dt, tree *vectype)
4899{
4900 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4901 return false;
4902
4903 /* Now get a vector type if the def is internal, otherwise supply
4904 NULL_TREE and leave it up to the caller to figure out a proper
4905 type for the use stmt. */
4906 if (*dt == vect_internal_def
4907 || *dt == vect_induction_def
4908 || *dt == vect_reduction_def
4909 || *dt == vect_double_reduction_def
4910 || *dt == vect_nested_cycle)
4911 {
4912 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4913 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4914 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4915 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4916 gcc_assert (*vectype != NULL_TREE);
4917 }
4918 else if (*dt == vect_uninitialized_def
4919 || *dt == vect_constant_def
4920 || *dt == vect_external_def)
4921 *vectype = NULL_TREE;
4922 else
4923 gcc_unreachable ();
4924
4925 return true;
4926}
4927
4928
4929/* Function supportable_widening_operation
4930
4931 Check whether an operation represented by the code CODE is a
4932 widening operation that is supported by the target platform in
b690cc0f
RG
4933 vector form (i.e., when operating on arguments of type VECTYPE_IN
4934 producing a result of type VECTYPE_OUT).
b8698a0f 4935
ebfd146a
IR
4936 Widening operations we currently support are NOP (CONVERT), FLOAT
4937 and WIDEN_MULT. This function checks if these operations are supported
4938 by the target platform either directly (via vector tree-codes), or via
4939 target builtins.
4940
4941 Output:
b8698a0f
L
4942 - CODE1 and CODE2 are codes of vector operations to be used when
4943 vectorizing the operation, if available.
ebfd146a 4944 - DECL1 and DECL2 are decls of target builtin functions to be used
ff802fa1 4945 when vectorizing the operation, if available. In this case,
b8698a0f 4946 CODE1 and CODE2 are CALL_EXPR.
ebfd146a
IR
4947 - MULTI_STEP_CVT determines the number of required intermediate steps in
4948 case of multi-step conversion (like char->short->int - in that case
4949 MULTI_STEP_CVT will be 1).
b8698a0f
L
4950 - INTERM_TYPES contains the intermediate type required to perform the
4951 widening operation (short in the above example). */
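
/* For instance (illustrative values, assuming 128-bit vectors): a
   char->int conversion with VECTYPE_IN == vector(16) char and
   VECTYPE_OUT == vector(4) int is done in two steps - char->short and
   short->int, each via VEC_UNPACK_{HI,LO}_EXPR - so MULTI_STEP_CVT is 1
   and INTERM_TYPES holds the single intermediate type
   vector(8) short.  */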

bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two
     vectors (because the widened results do not fit in one vector).  The
     generated vector results would normally be expected to be generated
     in the same order as in the original scalar computation, i.e. if 8
     results are generated in each vector iteration, they are to be
     organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening
     operation is used in a reduction computation only, the order doesn't
     matter (because when vectorizing a reduction we change the order of
     the computation).  Some targets can take advantage of this and
     generate more efficient code.  For example, targets like Altivec,
     that support widen_mult using a sequence of {mult_even,mult_odd},
     generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop
     vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
        return false;

      *code1 = c1;
      *code2 = c2;

      /* We assume here that there will not be more than
         MAX_INTERM_CVT_STEPS intermediate steps in the promotion
         sequence.  We try MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE,
         and fail if we do not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
          optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

          /* The step is usable only if the previous mode widens into the
             intermediate mode, and the intermediate mode in turn has
             hi/lo widening support of its own.  */
          if (!optab3 || !optab4
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode2 = optab_handler (optab2, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode2].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (optab3, intermediate_mode))
                  == CODE_FOR_nothing)
              || ((icode2 = optab_handler (optab4, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
              && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
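
/* Usage sketch (hypothetical caller, in the style of the
   vectorizable_* routines, with CODE, STMT and the vector types
   already computed):

     enum tree_code code1, code2;
     tree decl1 = NULL_TREE, decl2 = NULL_TREE;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (!supportable_widening_operation (code, stmt, vectype_out,
                                          vectype_in, &decl1, &decl2,
                                          &code1, &code2, &multi_step_cvt,
                                          &interm_types))
       return false;

   With MULTI_STEP_CVT == 0 a single hi/lo pair of statements is
   generated; otherwise one pair per step, going through the types
   recorded in INTERM_TYPES.  */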


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      *code1 = c1;
      prev_type = vectype;
      /* We assume here that there will not be more than
         MAX_INTERM_CVT_STEPS intermediate steps in the narrowing
         sequence.  We try MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE,
         and fail if we do not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          interm_optab = optab_for_tree_code (c1, intermediate_type,
                                              optab_default);
          /* The step is usable only if the previous mode packs into the
             intermediate mode, and the intermediate mode has packing
             support of its own.  */
          if (!interm_optab
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  return true;
}