/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "toplev.h"
#include "tree-vectorizer.h"
#include "langhooks.h"


/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}


/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
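
/* Illustrative sketch (an addition, not from the original source): in

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + x;    <-- alters memory (has a vdef): relevant
         s = s + b[i];       <-- s is used after the loop: live
       }

   vect_stmt_relevant_p would flag the store as vect_used_in_scope via the
   vdef check above, and would set *live_p for the definition of s that
   feeds the loop-exit phi (the variable names here are hypothetical).  */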


/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is in one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
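
/* Illustrative sketch (a hypothetical example, not from the original
   source): for a store of form -1- such as "a[i_7] = x_3", the use "x_3"
   is the copied rhs, so this function returns true for it; the use "i_7"
   only feeds the array-index computation, so the function returns false
   and the def of i_7 is not marked relevant on x_3's account.  */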


/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}


/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
          live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
          relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}


int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
  {
  case load_vec_info_type:
    return TARG_SCALAR_LOAD_COST;
  case store_vec_info_type:
    return TARG_SCALAR_STORE_COST;
  case op_vec_info_type:
  case condition_vec_info_type:
  case assignment_vec_info_type:
  case reduc_vec_info_type:
  case induc_vec_info_type:
  case type_promotion_vec_info_type:
  case type_demotion_vec_info_type:
  case type_conversion_vec_info_type:
  case call_vec_info_type:
    return TARG_SCALAR_STMT_COST;
  case undef_vec_info_type:
  default:
    gcc_unreachable ();
  }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * TARG_VEC_STMT_COST;

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += TARG_SCALAR_TO_VEC_COST;
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
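
/* Worked example (illustrative, with hypothetical cost parameters): for
   "z = x + y" with ncopies = 2, where x is defined inside the loop and y
   is a loop invariant (vect_external_def), the model above yields
     inside_cost  = 2 * TARG_VEC_STMT_COST
     outside_cost = 1 * TARG_SCALAR_TO_VEC_COST  (one broadcast of y),
   i.e. inside_cost = 2 and outside_cost = 1 when both parameters are 1.  */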


/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}


/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = TARG_SCALAR_TO_VEC_COST;

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
    group_size = vect_cost_strided_group_size (stmt_info);
  /* Not a strided access.  */
  else
    group_size = 1;

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);

    }

  /* Costs of the stores.  */
  inside_cost += ncopies * TARG_VEC_STORE_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
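
/* Worked example (illustrative): the first store of an interleaved group
   with group_size = 4 and ncopies = 1, assuming TARG_VEC_STMT_COST and
   TARG_VEC_STORE_COST are both 1, is charged
     permutes: 1 * log2(4) * 4 * 1 = 8
     stores:   1 * 1              = 1
   for inside_cost = 9, while the remaining stores of the group take the
   group_size = 1 path and are charged only their own store.  */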


/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)

{
  int group_size;
  int alignment_support_scheme;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2(group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);

    }

  /* The loads themselves.  */
  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        inside_cost += ncopies * TARG_VEC_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          inside_cost += TARG_VEC_STMT_COST;

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
          {
            outside_cost = 2*TARG_VEC_STMT_COST;
            if (targetm.vectorize.builtin_mask_for_load)
              outside_cost += TARG_VEC_STMT_COST;
          }

        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        break;
      }

    default:
      gcc_unreachable ();
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
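
/* Worked example (illustrative): a non-strided load with ncopies = 2 that
   hits the dr_aligned case costs inside_cost = 2 * TARG_VEC_LOAD_COST and
   outside_cost = 0; the same load under dr_explicit_realign_optimized
   costs inside_cost = 2 * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST) plus
   the one-time "priming" outside_cost computed above.  */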


/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
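
/* Illustrative sketch (hypothetical names): when vectorizing "y = x + 3"
   with a V4SI vector type, case 1 of vect_get_vec_def_for_operand below
   builds the constant vector {3,3,3,3} and calls vect_init_vector with
   GSI == NULL, so an init stmt assigning {3,3,3,3} to a fresh "cst_"
   variable is emitted once in the loop preheader instead of once per
   iteration.  */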


/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def =  ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, "  def_stmt =  ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def )
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}


/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy() for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
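
/* Illustrative sketch (the builtin and modes are hypothetical examples):
   for "y = sqrtf (x)" in a loop, FNDECL is the BUILT_IN_SQRTF decl; a
   target whose builtin_vectorized_function hook maps it to, say, a
   V4SF -> V4SF routine makes the call vectorizable, while a call that
   reads or clobbers memory fails the ECF_CONST | ECF_NOVOPS check above
   and yields NULL_TREE.  */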

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments; we do not have
     interesting builtin functions to vectorize with more than two
     arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}


/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
1546
1547
b8698a0f
L
1548/* Check if STMT performs a conversion operation, that can be vectorized.
1549 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
ebfd146a
IR
1550 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1551 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1552
1553static bool
1554vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1555 gimple *vec_stmt, slp_tree slp_node)
1556{
1557 tree vec_dest;
1558 tree scalar_dest;
1559 tree op0;
1560 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1561 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1562 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1563 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1564 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1565 tree new_temp;
1566 tree def;
1567 gimple def_stmt;
1568 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1569 gimple new_stmt = NULL;
1570 stmt_vec_info prev_stmt_info;
1571 int nunits_in;
1572 int nunits_out;
1573 tree vectype_out, vectype_in;
1574 int ncopies, j;
b690cc0f 1575 tree rhs_type;
ebfd146a
IR
1576 tree builtin_decl;
1577 enum { NARROW, NONE, WIDEN } modifier;
1578 int i;
1579 VEC(tree,heap) *vec_oprnds0 = NULL;
1580 tree vop0;
ebfd146a
IR
1581 VEC(tree,heap) *dummy = NULL;
1582 int dummy_int;
1583
1584 /* Is STMT a vectorizable conversion? */
1585
a70d6342
IR
1586 /* FORNOW: unsupported in basic block SLP. */
1587 gcc_assert (loop_vinfo);
b8698a0f 1588
ebfd146a
IR
1589 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1590 return false;
1591
8644a673 1592 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
ebfd146a
IR
1593 return false;
1594
1595 if (!is_gimple_assign (stmt))
1596 return false;
1597
1598 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1599 return false;
1600
1601 code = gimple_assign_rhs_code (stmt);
1602 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1603 return false;
1604
1605 /* Check types of lhs and rhs. */
b690cc0f
RG
1606 scalar_dest = gimple_assign_lhs (stmt);
1607 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1608
ebfd146a
IR
1609 op0 = gimple_assign_rhs1 (stmt);
1610 rhs_type = TREE_TYPE (op0);
b690cc0f
RG
1611 /* Check the operands of the operation. */
1612 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1613 &def_stmt, &def, &dt[0], &vectype_in))
1614 {
1615 if (vect_print_dump_info (REPORT_DETAILS))
1616 fprintf (vect_dump, "use not simple.");
1617 return false;
1618 }
1619 /* If op0 is an external or constant defs use a vector type of
1620 the same size as the output vector type. */
ebfd146a 1621 if (!vectype_in)
b690cc0f 1622 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
7d8930a0
IR
1623 if (vec_stmt)
1624 gcc_assert (vectype_in);
1625 if (!vectype_in)
1626 {
1627 if (vect_print_dump_info (REPORT_DETAILS))
1628 {
1629 fprintf (vect_dump, "no vectype for scalar type ");
1630 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1631 }
1632
1633 return false;
1634 }
ebfd146a
IR
1635
1636 /* FORNOW */
b690cc0f
RG
1637 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1638 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
ebfd146a
IR
1639 if (nunits_in == nunits_out / 2)
1640 modifier = NARROW;
1641 else if (nunits_out == nunits_in)
1642 modifier = NONE;
1643 else if (nunits_out == nunits_in / 2)
1644 modifier = WIDEN;
1645 else
1646 return false;
1647
ebfd146a
IR
1648 if (modifier == NARROW)
1649 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1650 else
1651 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1652
1653 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1654 this, so we can safely override NCOPIES with 1 here. */
1655 if (slp_node)
1656 ncopies = 1;
b8698a0f 1657
ebfd146a
IR
1658 /* Sanity check: make sure that at least one copy of the vectorized stmt
1659 needs to be generated. */
1660 gcc_assert (ncopies >= 1);
1661
ebfd146a
IR
1662 /* Supportable by target? */
1663 if ((modifier == NONE
88dd7150 1664 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
ebfd146a 1665 || (modifier == WIDEN
b690cc0f
RG
1666 && !supportable_widening_operation (code, stmt,
1667 vectype_out, vectype_in,
ebfd146a
IR
1668 &decl1, &decl2,
1669 &code1, &code2,
1670 &dummy_int, &dummy))
1671 || (modifier == NARROW
b690cc0f 1672 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
ebfd146a
IR
1673 &code1, &dummy_int, &dummy)))
1674 {
1675 if (vect_print_dump_info (REPORT_DETAILS))
1676 fprintf (vect_dump, "conversion not supported by target.");
1677 return false;
1678 }
1679
1680 if (modifier != NONE)
1681 {
ebfd146a
IR
1682 /* FORNOW: SLP not supported. */
1683 if (STMT_SLP_TYPE (stmt_info))
1684         return false;
1685 }
1686
1687 if (!vec_stmt) /* transformation not required. */
1688 {
1689 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1690 return true;
1691 }
1692
1693 /** Transform. **/
1694 if (vect_print_dump_info (REPORT_DETAILS))
1695 fprintf (vect_dump, "transform conversion.");
1696
1697 /* Handle def. */
1698 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1699
1700 if (modifier == NONE && !slp_node)
1701 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1702
1703 prev_stmt_info = NULL;
1704 switch (modifier)
1705 {
1706 case NONE:
1707 for (j = 0; j < ncopies; j++)
1708 {
1709       if (j == 0)
1710         vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1711 else
1712 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1713
1714 builtin_decl =
1715 targetm.vectorize.builtin_conversion (code,
1716 vectype_out, vectype_in);
1717       for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1718         {
1719           /* Arguments are ready.  Create the new vector stmt.  */
1720 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1721 new_temp = make_ssa_name (vec_dest, new_stmt);
1722 gimple_call_set_lhs (new_stmt, new_temp);
1723 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1724 if (slp_node)
1725 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1726 }
1727
1728 if (j == 0)
1729 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1730 else
1731 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1732 prev_stmt_info = vinfo_for_stmt (new_stmt);
1733 }
1734 break;
1735
1736 case WIDEN:
1737 /* In case the vectorization factor (VF) is bigger than the number
1738 of elements that we can fit in a vectype (nunits), we have to
1739 generate more than one vector stmt - i.e - we need to "unroll"
1740 the vector stmt by a factor VF/nunits. */
1741 for (j = 0; j < ncopies; j++)
1742 {
1743 if (j == 0)
1744 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1745 else
1746 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1747
1748 /* Generate first half of the widened result: */
1749 new_stmt
1750             = vect_gen_widened_results_half (code1, decl1,
1751 vec_oprnd0, vec_oprnd1,
1752 unary_op, vec_dest, gsi, stmt);
1753 if (j == 0)
1754 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1755 else
1756 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1757 prev_stmt_info = vinfo_for_stmt (new_stmt);
1758
1759 /* Generate second half of the widened result: */
1760 new_stmt
1761 = vect_gen_widened_results_half (code2, decl2,
1762 vec_oprnd0, vec_oprnd1,
1763 unary_op, vec_dest, gsi, stmt);
1764 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1765 prev_stmt_info = vinfo_for_stmt (new_stmt);
1766 }
1767 break;
1768
1769 case NARROW:
1770 /* In case the vectorization factor (VF) is bigger than the number
1771 of elements that we can fit in a vectype (nunits), we have to
1772 generate more than one vector stmt - i.e - we need to "unroll"
1773 the vector stmt by a factor VF/nunits. */
1774 for (j = 0; j < ncopies; j++)
1775 {
1776 /* Handle uses. */
1777 if (j == 0)
1778 {
1779 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1780 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1781 }
1782 else
1783 {
1784 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1785 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1786 }
1787
1788 /* Arguments are ready. Create the new vector stmt. */
1789 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1790 vec_oprnd1);
1791 new_temp = make_ssa_name (vec_dest, new_stmt);
1792 gimple_assign_set_lhs (new_stmt, new_temp);
1793 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1794
1795 if (j == 0)
1796 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1797 else
1798 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1799
1800 prev_stmt_info = vinfo_for_stmt (new_stmt);
1801 }
1802
1803 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1804 }
1805
1806 if (vec_oprnds0)
1807     VEC_free (tree, heap, vec_oprnds0);
1808
1809 return true;
1810 }
1811 /* Function vectorizable_assignment.
1812
1813 Check if STMT performs an assignment (copy) that can be vectorized.
1814 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1815 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1816 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1817
1818 static bool
1819 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1820                          gimple *vec_stmt, slp_tree slp_node)
1821 {
1822 tree vec_dest;
1823 tree scalar_dest;
1824 tree op;
1825 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1826 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1827 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1828 tree new_temp;
1829 tree def;
1830 gimple def_stmt;
1831 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1832 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1833 int ncopies;
1834   int i, j;
1835 VEC(tree,heap) *vec_oprnds = NULL;
1836 tree vop;
1837   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1838 gimple new_stmt = NULL;
1839 stmt_vec_info prev_stmt_info = NULL;
1840
1841 /* Multiple types in SLP are handled by creating the appropriate number of
1842 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1843 case of SLP. */
1844 if (slp_node)
1845 ncopies = 1;
1846 else
1847 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1848
1849 gcc_assert (ncopies >= 1);
1850
1851   if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1852 return false;
1853
1854   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1855 return false;
1856
1857 /* Is vectorizable assignment? */
1858 if (!is_gimple_assign (stmt))
1859 return false;
1860
1861 scalar_dest = gimple_assign_lhs (stmt);
1862 if (TREE_CODE (scalar_dest) != SSA_NAME)
1863 return false;
1864
1865 if (gimple_assign_single_p (stmt)
1866 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1867 op = gimple_assign_rhs1 (stmt);
1868 else
1869 return false;
1870
1871   if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1872 {
1873 if (vect_print_dump_info (REPORT_DETAILS))
1874 fprintf (vect_dump, "use not simple.");
1875 return false;
1876 }
1877
1878 if (!vec_stmt) /* transformation not required. */
1879 {
1880 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1881 if (vect_print_dump_info (REPORT_DETAILS))
1882 fprintf (vect_dump, "=== vectorizable_assignment ===");
1883 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1884 return true;
1885 }
1886
1887 /** Transform. **/
1888 if (vect_print_dump_info (REPORT_DETAILS))
1889 fprintf (vect_dump, "transform assignment.");
1890
1891 /* Handle def. */
1892 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1893
1894 /* Handle use. */
1895   for (j = 0; j < ncopies; j++)
1896     {
1897 /* Handle uses. */
1898 if (j == 0)
1899 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1900 else
1901 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1902
1903       /* Arguments are ready.  Create the new vector stmt.  */
1904 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1905 {
1906 new_stmt = gimple_build_assign (vec_dest, vop);
1907 new_temp = make_ssa_name (vec_dest, new_stmt);
1908 gimple_assign_set_lhs (new_stmt, new_temp);
1909 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1910 if (slp_node)
1911 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1912 }
1913
1914 if (slp_node)
1915 continue;
1916
1917 if (j == 0)
1918 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1919 else
1920 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1921
1922 prev_stmt_info = vinfo_for_stmt (new_stmt);
1923 }
1924
1925 VEC_free (tree, heap, vec_oprnds);
1926 return true;
1927 }
1928
1929 /* Function vectorizable_operation.
1930
1931 Check if STMT performs a binary or unary operation that can be vectorized.
1932 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1933 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1934 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1935
1936 static bool
1937 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1938                         gimple *vec_stmt, slp_tree slp_node)
1939 {
1940 tree vec_dest;
1941 tree scalar_dest;
1942 tree op0, op1 = NULL;
1943 tree vec_oprnd1 = NULL_TREE;
1944 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1945   tree vectype;
1946 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1947 enum tree_code code;
1948 enum machine_mode vec_mode;
1949 tree new_temp;
1950 int op_type;
1951 optab optab;
1952 int icode;
1953 enum machine_mode optab_op2_mode;
1954 tree def;
1955 gimple def_stmt;
1956 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1957 gimple new_stmt = NULL;
1958 stmt_vec_info prev_stmt_info;
1959   int nunits_in;
1960 int nunits_out;
1961 tree vectype_out;
1962 int ncopies;
1963 int j, i;
1964 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1965 tree vop0, vop1;
1966 unsigned int k;
1967   bool scalar_shift_arg = false;
1968 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1969 int vf;
1970
1971   if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1972 return false;
1973
1974   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1975 return false;
1976
1977 /* Is STMT a vectorizable binary/unary operation? */
1978 if (!is_gimple_assign (stmt))
1979 return false;
1980
1981 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1982 return false;
1983
1984 code = gimple_assign_rhs_code (stmt);
1985
1986 /* For pointer addition, we should use the normal plus for
1987 the vector addition. */
1988 if (code == POINTER_PLUS_EXPR)
1989 code = PLUS_EXPR;
1990
1991 /* Support only unary or binary operations. */
1992 op_type = TREE_CODE_LENGTH (code);
1993 if (op_type != unary_op && op_type != binary_op)
1994 {
1995 if (vect_print_dump_info (REPORT_DETAILS))
1996 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1997 return false;
1998 }
1999
2000 scalar_dest = gimple_assign_lhs (stmt);
2001 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2002
2003   op0 = gimple_assign_rhs1 (stmt);
2004 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2005 &def_stmt, &def, &dt[0], &vectype))
2006 {
2007 if (vect_print_dump_info (REPORT_DETAILS))
2008 fprintf (vect_dump, "use not simple.");
2009 return false;
2010 }
2011 /* If op0 is an external or constant def use a vector type with
2012 the same size as the output vector type. */
2013 if (!vectype)
2014 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2015 if (vec_stmt)
2016 gcc_assert (vectype);
2017 if (!vectype)
2018 {
2019 if (vect_print_dump_info (REPORT_DETAILS))
2020 {
2021 fprintf (vect_dump, "no vectype for scalar type ");
2022 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2023 }
2024
2025 return false;
2026 }
2027
2028 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2029 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2030 if (nunits_out != nunits_in)
2031 return false;
2032
2033 if (op_type == binary_op)
2034 {
2035 op1 = gimple_assign_rhs2 (stmt);
2036       if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2037                                &dt[1]))
2038 {
2039 if (vect_print_dump_info (REPORT_DETAILS))
2040 fprintf (vect_dump, "use not simple.");
2041 return false;
2042 }
2043 }
2044
2045 if (loop_vinfo)
2046 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2047 else
2048 vf = 1;
2049
2050 /* Multiple types in SLP are handled by creating the appropriate number of
2051 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2052 case of SLP. */
2053 if (slp_node)
2054 ncopies = 1;
2055 else
2056 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2057
2058 gcc_assert (ncopies >= 1);
2059
2060 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2061 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2062 shift optabs. */
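  /* E.g., "x[i] << s[i]" needs the vector/vector optab because the shift
     amount varies per element, while "x[i] << 3" shifts all elements by
     the same amount and can use the vector/scalar optab, which takes a
     single scalar count operand.  */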
2063 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2064 || code == RROTATE_EXPR)
2065 {
2066       /* vector shifted by vector */
2067       if (dt[1] == vect_internal_def)
2068 {
2069 optab = optab_for_tree_code (code, vectype, optab_vector);
2070 if (vect_print_dump_info (REPORT_DETAILS))
2071 fprintf (vect_dump, "vector/vector shift/rotate found.");
2072 }
2073
2074       /* See if the machine has a vector shifted by scalar insn and if not
2075          then see if it has a vector shifted by vector insn.  */
2076       else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2077 {
2078 optab = optab_for_tree_code (code, vectype, optab_scalar);
2079 if (optab
2080 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2081 != CODE_FOR_nothing))
2082 {
2083 scalar_shift_arg = true;
2084 if (vect_print_dump_info (REPORT_DETAILS))
2085 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2086 }
2087 else
2088 {
2089 optab = optab_for_tree_code (code, vectype, optab_vector);
2090               if (optab
2091 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2092 != CODE_FOR_nothing))
2093 {
2094 if (vect_print_dump_info (REPORT_DETAILS))
2095 fprintf (vect_dump, "vector/vector shift/rotate found.");
2096
2097 /* Unlike the other binary operators, shifts/rotates have
2098 the rhs being int, instead of the same type as the lhs,
2099 so make sure the scalar is the right type if we are
2100 dealing with vectors of short/char. */
2101 if (dt[1] == vect_constant_def)
2102 op1 = fold_convert (TREE_TYPE (vectype), op1);
2103 }
2104 }
2105 }
2106
2107 else
2108 {
2109 if (vect_print_dump_info (REPORT_DETAILS))
2110 fprintf (vect_dump, "operand mode requires invariant argument.");
2111 return false;
2112 }
2113 }
2114 else
2115 optab = optab_for_tree_code (code, vectype, optab_default);
2116
2117 /* Supportable by target? */
2118 if (!optab)
2119 {
2120 if (vect_print_dump_info (REPORT_DETAILS))
2121 fprintf (vect_dump, "no optab.");
2122 return false;
2123 }
2124 vec_mode = TYPE_MODE (vectype);
2125 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2126 if (icode == CODE_FOR_nothing)
2127 {
2128 if (vect_print_dump_info (REPORT_DETAILS))
2129 fprintf (vect_dump, "op not supported by target.");
2130 /* Check only during analysis. */
2131 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2132           || (vf < vect_min_worthwhile_factor (code)
2133 && !vec_stmt))
2134 return false;
2135 if (vect_print_dump_info (REPORT_DETAILS))
2136 fprintf (vect_dump, "proceeding using word mode.");
2137 }
2138
2139 /* Worthwhile without SIMD support? Check only during analysis. */
2140 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2141       && vf < vect_min_worthwhile_factor (code)
2142 && !vec_stmt)
2143 {
2144 if (vect_print_dump_info (REPORT_DETAILS))
2145 fprintf (vect_dump, "not worthwhile without SIMD support.");
2146 return false;
2147 }
2148
2149 if (!vec_stmt) /* transformation not required. */
2150 {
2151 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2152 if (vect_print_dump_info (REPORT_DETAILS))
2153 fprintf (vect_dump, "=== vectorizable_operation ===");
2154 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2155 return true;
2156 }
2157
2158 /** Transform. **/
2159
2160 if (vect_print_dump_info (REPORT_DETAILS))
2161 fprintf (vect_dump, "transform binary/unary operation.");
2162
2163 /* Handle def. */
2164 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2165
2166   /* Allocate VECs for vector operands. In case of SLP, vector operands are
2167 created in the previous stages of the recursion, so no allocation is
2168 needed, except for the case of shift with scalar shift argument. In that
2169 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2170 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2171 In case of loop-based vectorization we allocate VECs of size 1. We
2172 allocate VEC_OPRNDS1 only in case of binary operation. */
2173 if (!slp_node)
2174 {
2175 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2176 if (op_type == binary_op)
2177 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2178 }
2179 else if (scalar_shift_arg)
2180     vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2181
2182 /* In case the vectorization factor (VF) is bigger than the number
2183 of elements that we can fit in a vectype (nunits), we have to generate
2184 more than one vector stmt - i.e - we need to "unroll" the
2185 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2186 from one copy of the vector stmt to the next, in the field
2187 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2188 stages to find the correct vector defs to be used when vectorizing
2189 stmts that use the defs of the current stmt. The example below illustrates
2190 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2191 4 vectorized stmts):
2192
2193 before vectorization:
2194 RELATED_STMT VEC_STMT
2195 S1: x = memref - -
2196 S2: z = x + 1 - -
2197
2198 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2199 there):
2200 RELATED_STMT VEC_STMT
2201 VS1_0: vx0 = memref0 VS1_1 -
2202 VS1_1: vx1 = memref1 VS1_2 -
2203 VS1_2: vx2 = memref2 VS1_3 -
2204 VS1_3: vx3 = memref3 - -
2205 S1: x = load - VS1_0
2206 S2: z = x + 1 - -
2207
2208 step2: vectorize stmt S2 (done here):
2209 To vectorize stmt S2 we first need to find the relevant vector
2210 def for the first operand 'x'. This is, as usual, obtained from
2211 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2212 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2213 relevant vector def 'vx0'. Having found 'vx0' we can generate
2214 the vector stmt VS2_0, and as usual, record it in the
2215 STMT_VINFO_VEC_STMT of stmt S2.
2216 When creating the second copy (VS2_1), we obtain the relevant vector
2217 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2218 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2219 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2220 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2221 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2222 chain of stmts and pointers:
2223 RELATED_STMT VEC_STMT
2224 VS1_0: vx0 = memref0 VS1_1 -
2225 VS1_1: vx1 = memref1 VS1_2 -
2226 VS1_2: vx2 = memref2 VS1_3 -
2227 VS1_3: vx3 = memref3 - -
2228 S1: x = load - VS1_0
2229 VS2_0: vz0 = vx0 + v1 VS2_1 -
2230 VS2_1: vz1 = vx1 + v1 VS2_2 -
2231 VS2_2: vz2 = vx2 + v1 VS2_3 -
2232 VS2_3: vz3 = vx3 + v1 - -
2233 S2: z = x + 1 - VS2_0 */
2234
2235 prev_stmt_info = NULL;
2236 for (j = 0; j < ncopies; j++)
2237 {
2238 /* Handle uses. */
2239 if (j == 0)
2240 {
2241 if (op_type == binary_op && scalar_shift_arg)
2242 {
2243 /* Vector shl and shr insn patterns can be defined with scalar
2244 operand 2 (shift operand). In this case, use constant or loop
2245 invariant op1 directly, without extending it to vector mode
2246 first. */
2247 optab_op2_mode = insn_data[icode].operand[2].mode;
2248 if (!VECTOR_MODE_P (optab_op2_mode))
2249 {
2250 if (vect_print_dump_info (REPORT_DETAILS))
2251 fprintf (vect_dump, "operand 1 using scalar mode.");
2252 vec_oprnd1 = op1;
2253 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2254 if (slp_node)
2255 {
2256 /* Store vec_oprnd1 for every vector stmt to be created
2257 for SLP_NODE. We check during the analysis that all the
2258 shift arguments are the same.
2259 TODO: Allow different constants for different vector
2260 stmts generated for an SLP instance. */
2261 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2262 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2263 }
2264 }
2265 }
2266
2267 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2268 (a special case for certain kind of vector shifts); otherwise,
2269 operand 1 should be of a vector type (the usual case). */
2270 if (op_type == binary_op && !vec_oprnd1)
2271             vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2272 slp_node);
2273 else
2274             vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2275 slp_node);
2276 }
2277 else
2278 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2279
2280 /* Arguments are ready. Create the new vector stmt. */
2281 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2282 {
2283 vop1 = ((op_type == binary_op)
2284 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2285 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2286 new_temp = make_ssa_name (vec_dest, new_stmt);
2287 gimple_assign_set_lhs (new_stmt, new_temp);
2288 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2289 if (slp_node)
2290 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2291 }
2292
2293 if (slp_node)
2294 continue;
2295
2296 if (j == 0)
2297 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2298 else
2299 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2300 prev_stmt_info = vinfo_for_stmt (new_stmt);
2301 }
2302
2303 VEC_free (tree, heap, vec_oprnds0);
2304 if (vec_oprnds1)
2305 VEC_free (tree, heap, vec_oprnds1);
2306
2307 return true;
2308 }
2309
2310
2311/* Get vectorized definitions for loop-based vectorization. For the first
2312 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2313 scalar operand), and for the rest we get a copy with
2314 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2315 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2316 The vectors are collected into VEC_OPRNDS. */
2317
2318 static void
2319 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2320 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2321{
2322 tree vec_oprnd;
2323
2324 /* Get first vector operand. */
2325 /* All the vector operands except the very first one (that is scalar oprnd)
2326 are stmt copies. */
2327   if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2328 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2329 else
2330 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2331
2332 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2333
2334 /* Get second vector operand. */
2335 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2336 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2337
2338 *oprnd = vec_oprnd;
2339
2340   /* For conversion in multiple steps, continue to get operands
2341 recursively. */
2342 if (multi_step_cvt)
2343     vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2344 }
2345
2346
2347/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2348    For multi-step conversions store the resulting vectors and call the function
2349 recursively. */
2350
2351 static void
2352 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2353 int multi_step_cvt, gimple stmt,
2354 VEC (tree, heap) *vec_dsts,
2355 gimple_stmt_iterator *gsi,
2356 slp_tree slp_node, enum tree_code code,
2357 stmt_vec_info *prev_stmt_info)
2358 {
2359 unsigned int i;
2360 tree vop0, vop1, new_tmp, vec_dest;
2361 gimple new_stmt;
2362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2363
2364   vec_dest = VEC_pop (tree, vec_dsts);
2365
2366 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2367 {
2368 /* Create demotion operation. */
2369 vop0 = VEC_index (tree, *vec_oprnds, i);
2370 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2371 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2372 new_tmp = make_ssa_name (vec_dest, new_stmt);
2373 gimple_assign_set_lhs (new_stmt, new_tmp);
2374 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2375
2376 if (multi_step_cvt)
2377 /* Store the resulting vector for next recursive call. */
2378         VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2379 else
2380 {
2381           /* This is the last step of the conversion sequence. Store the
2382 vectors in SLP_NODE or in vector info of the scalar statement
2383 (or in STMT_VINFO_RELATED_STMT chain). */
2384 if (slp_node)
2385 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2386 else
2387 {
2388 if (!*prev_stmt_info)
2389 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2390 else
2391 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2392
2393 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2394 }
2395 }
2396 }
2397
2398 /* For multi-step demotion operations we first generate demotion operations
2399      from the source type to the intermediate types, and then combine the
2400 results (stored in VEC_OPRNDS) in demotion operation to the destination
2401 type. */
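  /* E.g., a two-step demotion from int to char with 128-bit vectors goes
     V4SI -> V8HI -> V16QI: four V4SI operands are packed pairwise into
     two V8HI vectors, which are then packed into the single V16QI
     result.  */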
2402 if (multi_step_cvt)
2403 {
2404       /* At each level of recursion we have half of the operands we had at the
2405          previous level.  */
2406 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2407       vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2408 stmt, vec_dsts, gsi, slp_node,
2409 code, prev_stmt_info);
2410 }
2411 }
2412
2413
2414 /* Function vectorizable_type_demotion
2415
2416 Check if STMT performs a binary or unary operation that involves
2417 type demotion, and if it can be vectorized.
2418 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2419 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2420 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2421
2422 static bool
2423 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2424                             gimple *vec_stmt, slp_tree slp_node)
2425 {
2426 tree vec_dest;
2427 tree scalar_dest;
2428 tree op0;
2429 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2430 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2431 enum tree_code code, code1 = ERROR_MARK;
2432 tree def;
2433 gimple def_stmt;
2434 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2435 stmt_vec_info prev_stmt_info;
2436 int nunits_in;
2437 int nunits_out;
2438 tree vectype_out;
2439 int ncopies;
2440 int j, i;
2441 tree vectype_in;
2442 int multi_step_cvt = 0;
2443 VEC (tree, heap) *vec_oprnds0 = NULL;
2444 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2445 tree last_oprnd, intermediate_type;
2446
2447 /* FORNOW: not supported by basic block SLP vectorization. */
2448 gcc_assert (loop_vinfo);
2449
2450 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2451 return false;
2452
2453   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2454 return false;
2455
2456 /* Is STMT a vectorizable type-demotion operation? */
2457 if (!is_gimple_assign (stmt))
2458 return false;
2459
2460 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2461 return false;
2462
2463 code = gimple_assign_rhs_code (stmt);
2464 if (!CONVERT_EXPR_CODE_P (code))
2465 return false;
2466
2467 scalar_dest = gimple_assign_lhs (stmt);
2468 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2469
2470 /* Check the operands of the operation. */
2471   op0 = gimple_assign_rhs1 (stmt);
2472 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2473 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2474 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2475 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2476 && CONVERT_EXPR_CODE_P (code))))
2477 return false;
2478 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2479 &def_stmt, &def, &dt[0], &vectype_in))
2480 {
2481 if (vect_print_dump_info (REPORT_DETAILS))
2482 fprintf (vect_dump, "use not simple.");
2483 return false;
2484 }
2485 /* If op0 is an external def use a vector type with the
2486 same size as the output vector type if possible. */
2487 if (!vectype_in)
2488 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2489 if (vec_stmt)
2490 gcc_assert (vectype_in);
2491   if (!vectype_in)
2492 {
2493 if (vect_print_dump_info (REPORT_DETAILS))
2494 {
2495 fprintf (vect_dump, "no vectype for scalar type ");
2496 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2497 }
2498
2499 return false;
2500 }
2501
2502   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2503 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2504 if (nunits_in >= nunits_out)
2505 return false;
2506
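  /* E.g., demoting int to short with 128-bit vectors has vectype_in V4SI
     (nunits_in == 4) and vectype_out V8HI (nunits_out == 8), satisfying
     the nunits_in < nunits_out requirement above.  */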
2507 /* Multiple types in SLP are handled by creating the appropriate number of
2508 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2509 case of SLP. */
2510 if (slp_node)
2511 ncopies = 1;
2512 else
2513 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2514 gcc_assert (ncopies >= 1);
2515
2516   /* Supportable by target? */
2517 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2518 &code1, &multi_step_cvt, &interm_types))
2519 return false;
2520
2521 if (!vec_stmt) /* transformation not required. */
2522 {
2523 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2524 if (vect_print_dump_info (REPORT_DETAILS))
2525 fprintf (vect_dump, "=== vectorizable_demotion ===");
2526 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2527 return true;
2528 }
2529
2530 /** Transform. **/
2531 if (vect_print_dump_info (REPORT_DETAILS))
2532 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2533 ncopies);
2534
2535 /* In case of multi-step demotion, we first generate demotion operations to
2536      the intermediate types, and then from those types to the final one.
2537      We create vector destinations for the intermediate type (TYPES) received
2538      from supportable_narrowing_operation, and store them in the correct order
2539 for future use in vect_create_vectorized_demotion_stmts(). */
2540 if (multi_step_cvt)
2541 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2542 else
2543 vec_dsts = VEC_alloc (tree, heap, 1);
2544
2545 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2546 VEC_quick_push (tree, vec_dsts, vec_dest);
2547
2548 if (multi_step_cvt)
2549 {
2550       for (i = VEC_length (tree, interm_types) - 1;
2551 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2552 {
2553           vec_dest = vect_create_destination_var (scalar_dest,
2554 intermediate_type);
2555 VEC_quick_push (tree, vec_dsts, vec_dest);
2556 }
2557 }
2558
2559 /* In case the vectorization factor (VF) is bigger than the number
2560 of elements that we can fit in a vectype (nunits), we have to generate
2561 more than one vector stmt - i.e - we need to "unroll" the
2562 vector stmt by a factor VF/nunits. */
2563 last_oprnd = op0;
2564 prev_stmt_info = NULL;
2565 for (j = 0; j < ncopies; j++)
2566 {
2567 /* Handle uses. */
2568 if (slp_node)
2569         vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2570 else
2571 {
2572 VEC_free (tree, heap, vec_oprnds0);
2573 vec_oprnds0 = VEC_alloc (tree, heap,
2574 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2575           vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2576 vect_pow2 (multi_step_cvt) - 1);
2577 }
2578
2579 /* Arguments are ready. Create the new vector stmts. */
2580 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2581       vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2582                                              multi_step_cvt, stmt, tmp_vec_dsts,
2583                                              gsi, slp_node, code1,
2584 &prev_stmt_info);
2585 }
2586
2587 VEC_free (tree, heap, vec_oprnds0);
2588 VEC_free (tree, heap, vec_dsts);
2589 VEC_free (tree, heap, tmp_vec_dsts);
2590 VEC_free (tree, heap, interm_types);
2591
2592 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2593 return true;
2594 }
2595
2596
2597/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2598    and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2599 the resulting vectors and call the function recursively. */
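/* For example, promoting V8HI to V4SI: CODE1 and CODE2 are typically
   VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR, so every input vector
   produces two result vectors - one from each half of its elements.  */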
2600
2601 static void
2602 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2603 VEC (tree, heap) **vec_oprnds1,
2604 int multi_step_cvt, gimple stmt,
2605 VEC (tree, heap) *vec_dsts,
2606 gimple_stmt_iterator *gsi,
2607 slp_tree slp_node, enum tree_code code1,
2608                                         enum tree_code code2, tree decl1,
2609 tree decl2, int op_type,
2610 stmt_vec_info *prev_stmt_info)
2611 {
2612 int i;
2613 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2614 gimple new_stmt1, new_stmt2;
2615 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2616 VEC (tree, heap) *vec_tmp;
2617
2618 vec_dest = VEC_pop (tree, vec_dsts);
2619 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2620
2621 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2622 {
2623 if (op_type == binary_op)
2624 vop1 = VEC_index (tree, *vec_oprnds1, i);
2625 else
2626 vop1 = NULL_TREE;
2627
2628 /* Generate the two halves of promotion operation. */
2629       new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2630 op_type, vec_dest, gsi, stmt);
2631 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2632 op_type, vec_dest, gsi, stmt);
2633 if (is_gimple_call (new_stmt1))
2634 {
2635 new_tmp1 = gimple_call_lhs (new_stmt1);
2636 new_tmp2 = gimple_call_lhs (new_stmt2);
2637 }
2638 else
2639 {
2640 new_tmp1 = gimple_assign_lhs (new_stmt1);
2641 new_tmp2 = gimple_assign_lhs (new_stmt2);
2642 }
2643
2644 if (multi_step_cvt)
2645 {
2646 /* Store the results for the recursive call. */
2647 VEC_quick_push (tree, vec_tmp, new_tmp1);
2648 VEC_quick_push (tree, vec_tmp, new_tmp2);
2649 }
2650 else
2651 {
2652           /* Last step of promotion sequence - store the results.  */
2653 if (slp_node)
2654 {
2655 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2656 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2657 }
2658 else
2659 {
2660 if (!*prev_stmt_info)
2661 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2662 else
2663 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2664
2665 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2666 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2667 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2668 }
2669 }
2670 }
2671
2672 if (multi_step_cvt)
2673 {
2674       /* For a multi-step promotion operation we call the function
2675          recursively for every stage.  We start from the input type,
2676          create promotion operations to the intermediate types, and then
2677          create promotions to the output type.  */
2678 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2679 VEC_free (tree, heap, vec_tmp);
2680 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2681 multi_step_cvt - 1, stmt,
2682 vec_dsts, gsi, slp_node, code1,
2683                                               code2, decl1, decl2, op_type,
2684 prev_stmt_info);
2685 }
2686 }
2687
2688
2689 /* Function vectorizable_type_promotion
2690
2691 Check if STMT performs a binary or unary operation that involves
2692 type promotion, and if it can be vectorized.
2693 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2694 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2695 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2696
2697 static bool
2698 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2699                              gimple *vec_stmt, slp_tree slp_node)
2700 {
2701 tree vec_dest;
2702 tree scalar_dest;
2703 tree op0, op1 = NULL;
2704 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2705 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2706 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2707 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2708 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2709   int op_type;
2710 tree def;
2711 gimple def_stmt;
2712 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2713 stmt_vec_info prev_stmt_info;
2714 int nunits_in;
2715 int nunits_out;
2716 tree vectype_out;
2717 int ncopies;
2718 int j, i;
2719 tree vectype_in;
2720 tree intermediate_type = NULL_TREE;
2721 int multi_step_cvt = 0;
2722 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2723 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2724
2725 /* FORNOW: not supported by basic block SLP vectorization. */
2726 gcc_assert (loop_vinfo);
2727
2728 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2729 return false;
2730
2731   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2732 return false;
2733
2734 /* Is STMT a vectorizable type-promotion operation? */
2735 if (!is_gimple_assign (stmt))
2736 return false;
2737
2738 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2739 return false;
2740
2741 code = gimple_assign_rhs_code (stmt);
2742 if (!CONVERT_EXPR_CODE_P (code)
2743 && code != WIDEN_MULT_EXPR)
2744 return false;
2745
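  /* E.g., WIDEN_MULT_EXPR covers widening multiplies such as
     "int_prod = (int) short_a * (int) short_b", typically vectorized with
     the VEC_WIDEN_MULT_LO_EXPR / VEC_WIDEN_MULT_HI_EXPR pair when the
     target supports it.  */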
2746 scalar_dest = gimple_assign_lhs (stmt);
2747 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2748
2749 /* Check the operands of the operation. */
2750   op0 = gimple_assign_rhs1 (stmt);
2751 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2752 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2753 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2754 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2755 && CONVERT_EXPR_CODE_P (code))))
2756 return false;
2757 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2758 &def_stmt, &def, &dt[0], &vectype_in))
2759 {
2760 if (vect_print_dump_info (REPORT_DETAILS))
2761 fprintf (vect_dump, "use not simple.");
2762 return false;
2763 }
2764 /* If op0 is an external or constant def use a vector type with
2765 the same size as the output vector type. */
2766 if (!vectype_in)
2767 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2768 if (vec_stmt)
2769 gcc_assert (vectype_in);
2770   if (!vectype_in)
2771 {
2772 if (vect_print_dump_info (REPORT_DETAILS))
2773 {
2774 fprintf (vect_dump, "no vectype for scalar type ");
2775 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2776 }
2777
2778 return false;
2779 }
2780
2781   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2782 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2783 if (nunits_in <= nunits_out)
2784 return false;
2785
2786 /* Multiple types in SLP are handled by creating the appropriate number of
2787 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2788 case of SLP. */
2789 if (slp_node)
2790 ncopies = 1;
2791 else
2792 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2793
2794 gcc_assert (ncopies >= 1);
2795
2796 op_type = TREE_CODE_LENGTH (code);
2797 if (op_type == binary_op)
2798 {
2799 op1 = gimple_assign_rhs2 (stmt);
2800       if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2801 {
2802 if (vect_print_dump_info (REPORT_DETAILS))
2803 fprintf (vect_dump, "use not simple.");
2804 return false;
2805 }
2806 }
2807
2808 /* Supportable by target? */
2809   if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2810 &decl1, &decl2, &code1, &code2,
2811 &multi_step_cvt, &interm_types))
2812 return false;
2813
2814 /* Binary widening operation can only be supported directly by the
2815 architecture. */
2816 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2817
2818 if (!vec_stmt) /* transformation not required. */
2819 {
2820 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2821 if (vect_print_dump_info (REPORT_DETAILS))
2822 fprintf (vect_dump, "=== vectorizable_promotion ===");
2823 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2824 return true;
2825 }
2826
2827 /** Transform. **/
2828
2829 if (vect_print_dump_info (REPORT_DETAILS))
2830 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2831 ncopies);
2832
2833 /* Handle def. */
2834   /* In case of multi-step promotion, we first generate promotion operations
2835      to the intermediate types, and then from those types to the final one.
2836 We store vector destination in VEC_DSTS in the correct order for
2837 recursive creation of promotion operations in
2838 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2839      according to TYPES received from supportable_widening_operation(). */
2840 if (multi_step_cvt)
2841 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2842 else
2843 vec_dsts = VEC_alloc (tree, heap, 1);
2844
2845 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2846 VEC_quick_push (tree, vec_dsts, vec_dest);
2847
2848 if (multi_step_cvt)
2849 {
2850 for (i = VEC_length (tree, interm_types) - 1;
2851 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2852 {
2853 vec_dest = vect_create_destination_var (scalar_dest,
2854 intermediate_type);
2855 VEC_quick_push (tree, vec_dsts, vec_dest);
2856 }
2857 }
2858
2859 if (!slp_node)
2860 {
2861       vec_oprnds0 = VEC_alloc (tree, heap,
2862 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2863 if (op_type == binary_op)
2864 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2865 }
2866
2867 /* In case the vectorization factor (VF) is bigger than the number
2868 of elements that we can fit in a vectype (nunits), we have to generate
2869 more than one vector stmt - i.e - we need to "unroll" the
2870 vector stmt by a factor VF/nunits. */
2871
2872 prev_stmt_info = NULL;
2873 for (j = 0; j < ncopies; j++)
2874 {
2875 /* Handle uses. */
2876 if (j == 0)
2877 {
2878 if (slp_node)
2879             vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2880 else
2881 {
2882 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2883 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2884 if (op_type == binary_op)
2885 {
2886 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2887 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2888 }
2889 }
2890 }
2891 else
2892 {
2893 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2894 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2895 if (op_type == binary_op)
2896 {
2897 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2898 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2899 }
2900 }
2901
2902 /* Arguments are ready. Create the new vector stmts. */
2903 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2904 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2905                                               multi_step_cvt, stmt,
2906 tmp_vec_dsts,
2907 gsi, slp_node, code1, code2,
2908 decl1, decl2, op_type,
2909 &prev_stmt_info);
2910 }
2911
2912 VEC_free (tree, heap, vec_dsts);
2913 VEC_free (tree, heap, tmp_vec_dsts);
2914 VEC_free (tree, heap, interm_types);
2915 VEC_free (tree, heap, vec_oprnds0);
2916 VEC_free (tree, heap, vec_oprnds1);
2917
2918 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2919 return true;
2920 }
2921
2922
2923 /* Function vectorizable_store.
2924
2925 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2926 can be vectorized.
2927 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2928 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2929 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2930
2931 static bool
2932 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2933                     slp_tree slp_node)
2934 {
2935 tree scalar_dest;
2936 tree data_ref;
2937 tree op;
2938 tree vec_oprnd = NULL_TREE;
2939 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2940 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2941 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2942 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2943   struct loop *loop = NULL;
2944 enum machine_mode vec_mode;
2945 tree dummy;
2946 enum dr_alignment_support alignment_support_scheme;
2947 tree def;
2948 gimple def_stmt;
2949 enum vect_def_type dt;
2950 stmt_vec_info prev_stmt_info = NULL;
2951 tree dataref_ptr = NULL_TREE;
2952 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2953 int ncopies;
2954 int j;
2955 gimple next_stmt, first_stmt = NULL;
2956 bool strided_store = false;
2957 unsigned int group_size, i;
2958 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2959 bool inv_p;
2960 VEC(tree,heap) *vec_oprnds = NULL;
2961 bool slp = (slp_node != NULL);
2962   unsigned int vec_num;
2963 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2964
2965 if (loop_vinfo)
2966 loop = LOOP_VINFO_LOOP (loop_vinfo);
2967
2968 /* Multiple types in SLP are handled by creating the appropriate number of
2969 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2970 case of SLP. */
2971 if (slp)
2972 ncopies = 1;
2973 else
2974 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2975
2976 gcc_assert (ncopies >= 1);
2977
2978 /* FORNOW. This restriction should be relaxed. */
2979   if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2980 {
2981 if (vect_print_dump_info (REPORT_DETAILS))
2982 fprintf (vect_dump, "multiple types in nested loop.");
2983 return false;
2984 }
2985
2986   if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2987 return false;
2988
2989   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2990 return false;
2991
2992 /* Is vectorizable store? */
2993
2994 if (!is_gimple_assign (stmt))
2995 return false;
2996
2997 scalar_dest = gimple_assign_lhs (stmt);
2998 if (TREE_CODE (scalar_dest) != ARRAY_REF
2999 && TREE_CODE (scalar_dest) != INDIRECT_REF
3000 && TREE_CODE (scalar_dest) != COMPONENT_REF
3001 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3002 && TREE_CODE (scalar_dest) != REALPART_EXPR)
3003 return false;
3004
3005 gcc_assert (gimple_assign_single_p (stmt));
3006 op = gimple_assign_rhs1 (stmt);
3007   if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3008 {
3009 if (vect_print_dump_info (REPORT_DETAILS))
3010 fprintf (vect_dump, "use not simple.");
3011 return false;
3012 }
3013
3014 /* The scalar rhs type needs to be trivially convertible to the vector
3015 component type. This should always be the case. */
3016 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3017     {
3018 if (vect_print_dump_info (REPORT_DETAILS))
3019 fprintf (vect_dump, "??? operands of different types");
3020 return false;
3021 }
3022
3023 vec_mode = TYPE_MODE (vectype);
3024 /* FORNOW. In some cases can vectorize even if data-type not supported
3025 (e.g. - array initialization with 0). */
3026 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
3027 return false;
3028
3029 if (!STMT_VINFO_DATA_REF (stmt_info))
3030 return false;
3031
3032 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3033 {
3034 strided_store = true;
3035 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3036 if (!vect_strided_store_supported (vectype)
3037 && !PURE_SLP_STMT (stmt_info) && !slp)
3038 return false;
3039
3040 if (first_stmt == stmt)
3041 {
3042 /* STMT is the leader of the group. Check the operands of all the
3043 stmts of the group. */
3044 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3045 while (next_stmt)
3046 {
3047 gcc_assert (gimple_assign_single_p (next_stmt));
3048 op = gimple_assign_rhs1 (next_stmt);
3049               if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3050                                        &def, &dt))
3051 {
3052 if (vect_print_dump_info (REPORT_DETAILS))
3053 fprintf (vect_dump, "use not simple.");
3054 return false;
3055 }
3056 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3057 }
3058 }
3059 }
3060
3061 if (!vec_stmt) /* transformation not required. */
3062 {
3063 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3064 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3065 return true;
3066 }
3067
3068 /** Transform. **/
3069
3070 if (strided_store)
3071 {
3072 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3073 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3074
3075 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3076
3077 /* FORNOW */
3078       gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3079
3080 /* We vectorize all the stmts of the interleaving group when we
3081 reach the last stmt in the group. */
3082       if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3083 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3084 && !slp)
3085 {
3086 *vec_stmt = NULL;
3087 return true;
3088 }
3089
3090 if (slp)
3091 {
3092 strided_store = false;
3093 /* VEC_NUM is the number of vect stmts to be created for this
3094 group. */
3095 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3096 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3097 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3098 }
3099       else
3100 /* VEC_NUM is the number of vect stmts to be created for this
3101 group. */
3102 vec_num = group_size;
3103 }
3104   else
3105 {
3106 first_stmt = stmt;
3107 first_dr = dr;
3108 group_size = vec_num = 1;
3109     }
3110
3111 if (vect_print_dump_info (REPORT_DETAILS))
3112 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3113
3114 dr_chain = VEC_alloc (tree, heap, group_size);
3115 oprnds = VEC_alloc (tree, heap, group_size);
3116
3117 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3118 gcc_assert (alignment_support_scheme);
3119
3120 /* In case the vectorization factor (VF) is bigger than the number
3121 of elements that we can fit in a vectype (nunits), we have to generate
3122 more than one vector stmt - i.e - we need to "unroll" the
3123      vector stmt by a factor VF/nunits. For more details see documentation in
3124 vect_get_vec_def_for_copy_stmt. */
3125
3126 /* In case of interleaving (non-unit strided access):
3127
3128 S1: &base + 2 = x2
3129 S2: &base = x0
3130 S3: &base + 1 = x1
3131 S4: &base + 3 = x3
3132
3133 We create vectorized stores starting from base address (the access of the
3134 first stmt in the chain (S2 in the above example), when the last store stmt
3135 of the chain (S4) is reached:
3136
3137 VS1: &base = vx2
3138 VS2: &base + vec_size*1 = vx0
3139 VS3: &base + vec_size*2 = vx1
3140 VS4: &base + vec_size*3 = vx3
3141
3142 Then permutation statements are generated:
3143
3144 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3145 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3146 ...
3147
3148 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3149 (the order of the data-refs in the output of vect_permute_store_chain
3150 corresponds to the order of scalar stmts in the interleaving chain - see
3151 the documentation of vect_permute_store_chain()).
3152
3153 In case of both multiple types and interleaving, above vector stores and
3154 permutation stmts are created for every copy. The result vector stmts are
3155 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3156      STMT_VINFO_RELATED_STMT for the next copies.
3157 */
3158
3159 prev_stmt_info = NULL;
3160 for (j = 0; j < ncopies; j++)
3161 {
3162 gimple new_stmt;
3163 gimple ptr_incr;
3164
3165 if (j == 0)
3166 {
3167 if (slp)
3168 {
3169 /* Get vectorized arguments for SLP_NODE. */
3170               vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3171
3172 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3173 }
3174 else
3175 {
3176 /* For interleaved stores we collect vectorized defs for all the
3177 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3178 used as an input to vect_permute_store_chain(), and OPRNDS as
3179 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3180
3181 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3182 OPRNDS are of size 1. */
3183               next_stmt = first_stmt;
3184 for (i = 0; i < group_size; i++)
3185 {
3186 /* Since gaps are not supported for interleaved stores,
3187 GROUP_SIZE is the exact number of stmts in the chain.
3188 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3189 there is no interleaving, GROUP_SIZE is 1, and only one
ebfd146a
IR
3190 iteration of the loop will be executed. */
3191 gcc_assert (next_stmt
3192 && gimple_assign_single_p (next_stmt));
3193 op = gimple_assign_rhs1 (next_stmt);
3194
3195                   vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3196                                                             NULL);
3197 VEC_quick_push(tree, dr_chain, vec_oprnd);
3198 VEC_quick_push(tree, oprnds, vec_oprnd);
3199 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3200 }
3201 }
3202
3203           /* We should have caught mismatched types earlier.  */
3204 gcc_assert (useless_type_conversion_p (vectype,
3205 TREE_TYPE (vec_oprnd)));
3206 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3207 &dummy, &ptr_incr, false,
3208                                                   &inv_p);
3209           gcc_assert (bb_vinfo || !inv_p);
3210         }
3211       else
3212         {
3213 /* For interleaved stores we created vectorized defs for all the
3214 defs stored in OPRNDS in the previous iteration (previous copy).
3215 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3216 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3217 next copy.
3218 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3219 OPRNDS are of size 1. */
3220 for (i = 0; i < group_size; i++)
3221 {
3222 op = VEC_index (tree, oprnds, i);
3223               vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3224                                   &dt);
3225               vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3226 VEC_replace(tree, dr_chain, i, vec_oprnd);
3227 VEC_replace(tree, oprnds, i, vec_oprnd);
3228 }
3229           dataref_ptr =
3230 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3231 }
3232
3233 if (strided_store)
3234 {
3235           result_chain = VEC_alloc (tree, heap, group_size);
3236 /* Permute. */
3237 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3238 &result_chain))
3239 return false;
3240 }
3241
3242 next_stmt = first_stmt;
3243 for (i = 0; i < vec_num; i++)
3244 {
3245 if (i > 0)
3246 /* Bump the vector pointer. */
3247 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3248 NULL_TREE);
3249
3250 if (slp)
3251 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3252 else if (strided_store)
3253             /* For strided stores vectorized defs are interleaved in
3254 vect_permute_store_chain(). */
3255 vec_oprnd = VEC_index (tree, result_chain, i);
3256
3257 if (aligned_access_p (first_dr))
3258 data_ref = build_fold_indirect_ref (dataref_ptr);
3259 else
3260 {
3261 int mis = DR_MISALIGNMENT (first_dr);
3262 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3263 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3264 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3265 }
3266
3267 /* If accesses through a pointer to vectype do not alias the original
3268 memory reference we have a problem. This should never happen. */
3269 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3270 get_alias_set (gimple_assign_lhs (stmt))));
ebfd146a
IR
3271
3272 /* Arguments are ready. Create the new vector stmt. */
3273 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3274 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3275 mark_symbols_for_renaming (new_stmt);
3276
3277 if (slp)
3278 continue;
b8698a0f 3279
ebfd146a
IR
3280 if (j == 0)
3281 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3282 else
3283 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3284
3285 prev_stmt_info = vinfo_for_stmt (new_stmt);
3286 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3287 if (!next_stmt)
3288 break;
3289 }
3290 }
3291
b8698a0f
L
3292 VEC_free (tree, heap, dr_chain);
3293 VEC_free (tree, heap, oprnds);
ebfd146a 3294 if (result_chain)
b8698a0f 3295 VEC_free (tree, heap, result_chain);
ebfd146a
IR
3296
3297 return true;
3298}
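
/* Illustrative note (not from the original source): for an interleaved
   store group of two stmts, a[2*i] = x and a[2*i+1] = y, the vectorized
   defs {x0,x1,x2,x3} and {y0,y1,y2,y3} collected in DR_CHAIN are first
   permuted by vect_permute_store_chain () into interleaved vectors such
   as {x0,y0,x1,y1} and {x2,y2,x3,y3}, which are then written back with
   two vector stores in the loop above.  */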

/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
		   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree new_temp;
  int mode;
  gimple new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  gimple ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j, group_size;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gimple phi = NULL;
  VEC(tree,heap) *dr_chain = NULL;
  bool strided_load = false;
  gimple first_stmt;
  tree scalar_type;
  bool inv_p;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

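  /* E.g. (an illustration, not in the original source): with VF == 8 and a
     four-element vectype (nunits == 4), ncopies == 2, i.e. two vector load
     stmts are generated for the scalar load and chained via
     STMT_VINFO_RELATED_STMT, as described in the big comment further
     below.  */
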
  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  scalar_type = TREE_TYPE (DR_REF (dr));
  mode = (int) TYPE_MODE (vectype);

  /* FORNOW.  In some cases we can vectorize even if the data-type is not
     supported (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Aligned load, but unsupported type.");
      return false;
    }

  /* The vector component type needs to be trivially convertible to the
     scalar lhs.  This should always be the case.  */
  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "??? operands of different types");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop);

      /* Check if interleaving is supported.  */
      if (!vect_strided_load_supported (vectype)
	  && !PURE_SLP_STMT (stmt_info) && !slp)
	return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, NULL);
      return true;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load.");

  /** Transform.  **/

  if (strided_load)
    {
      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  strided_load = false;
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
	    slp_perm = true;
	}
      else
	vec_num = group_size;

      dr_chain = VEC_alloc (tree, heap, vec_num);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
  gcc_assert (alignment_support_scheme);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit strided access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
     VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain ()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load ().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = SSA_NAME_DEF_STMT (msq);
	  offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
	}
    }
  else
    at_loop = loop;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector pointer update chain.  */
      if (j == 0)
	dataref_ptr = vect_create_data_ref_ptr (first_stmt,
						at_loop, offset,
						&dummy, &ptr_incr, false,
						&inv_p);
      else
	dataref_ptr =
	  bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);

      for (i = 0; i < vec_num; i++)
	{
	  if (i > 0)
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   NULL_TREE);

	  /* 2. Create the vector-load in the loop.  */
	  switch (alignment_support_scheme)
	    {
	    case dr_aligned:
	      gcc_assert (aligned_access_p (first_dr));
	      data_ref = build_fold_indirect_ref (dataref_ptr);
	      break;
	    case dr_unaligned_supported:
	      {
		int mis = DR_MISALIGNMENT (first_dr);
		tree tmis = (mis == -1 ? size_zero_node : size_int (mis));

		tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
		data_ref =
		  build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
		break;
	      }
	    case dr_explicit_realign:
	      {
		tree ptr, bump;
		tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

		if (compute_in_loop)
		  msq = vect_setup_realignment (first_stmt, gsi,
						&realignment_token,
						dr_explicit_realign,
						dataref_ptr, NULL);

		data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
		vec_dest = vect_create_destination_var (scalar_dest, vectype);
		new_stmt = gimple_build_assign (vec_dest, data_ref);
		new_temp = make_ssa_name (vec_dest, new_stmt);
		gimple_assign_set_lhs (new_stmt, new_temp);
		gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		msq = new_temp;

		bump = size_binop (MULT_EXPR, vs_minus_1,
				   TYPE_SIZE_UNIT (scalar_type));
		ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
		break;
	      }
	    case dr_explicit_realign_optimized:
	      data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  /* If accesses through a pointer to vectype do not alias the original
	     memory reference we have a problem.  This should never happen.  */
	  gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
					     get_alias_set (gimple_assign_rhs1 (stmt))));
	  vec_dest = vect_create_destination_var (scalar_dest, vectype);
	  new_stmt = gimple_build_assign (vec_dest, data_ref);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  mark_symbols_for_renaming (new_stmt);

	  /* 3. Handle explicit realignment if necessary/supported.  Create in
	     loop: vec_dest = realign_load (msq, lsq, realignment_token)  */
	  if (alignment_support_scheme == dr_explicit_realign_optimized
	      || alignment_support_scheme == dr_explicit_realign)
	    {
	      tree tmp;

	      lsq = gimple_assign_lhs (new_stmt);
	      if (!realignment_token)
		realignment_token = dataref_ptr;
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
			    realignment_token);
	      new_stmt = gimple_build_assign (vec_dest, tmp);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (alignment_support_scheme == dr_explicit_realign_optimized)
		{
		  gcc_assert (phi);
		  if (i == vec_num - 1 && j == ncopies - 1)
		    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
				 UNKNOWN_LOCATION);
		  msq = lsq;
		}
	    }

	  /* 4. Handle invariant-load.  */
	  if (inv_p && !bb_vinfo)
	    {
	      gcc_assert (!strided_load);
	      gcc_assert (nested_in_vect_loop_p (loop, stmt));
	      if (j == 0)
		{
		  int k;
		  tree t = NULL_TREE;
		  tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);

		  /* CHECKME: bitpos depends on endianness?  */
		  bitpos = bitsize_zero_node;
		  vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
				    bitsize, bitpos);
		  vec_dest =
		    vect_create_destination_var (scalar_dest, NULL_TREE);
		  new_stmt = gimple_build_assign (vec_dest, vec_inv);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  for (k = nunits - 1; k >= 0; --k)
		    t = tree_cons (NULL_TREE, new_temp, t);
		  /* FIXME: use build_constructor directly.  */
		  vec_inv = build_constructor_from_list (vectype, t);
		  new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}
	      else
		gcc_unreachable (); /* FORNOW.  */
	    }

	  /* Collect vector loads and later create their permutation in
	     vect_transform_strided_load ().  */
	  if (strided_load || slp_perm)
	    VEC_quick_push (tree, dr_chain, new_temp);

	  /* Store vector loads in the corresponding SLP_NODE.  */
	  if (slp && !slp_perm)
	    VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
	}

      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
					     slp_node_instance, false))
	    {
	      VEC_free (tree, heap, dr_chain);
	      return false;
	    }
	}
      else
	{
	  if (strided_load)
	    {
	      if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
		return false;

	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	      VEC_free (tree, heap, dr_chain);
	      dr_chain = VEC_alloc (tree, heap, group_size);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  if (dr_chain)
    VEC_free (tree, heap, dr_chain);

  return true;
}

/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

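/* Illustrative examples (not from the original source): conditions like
   a_1 < b_2 or x_3 != 0.0, whose operands are SSA_NAMEs or constants,
   are simple; a TRUTH_AND_EXPR of two comparisons is not, since it is
   not COMPARISON_CLASS_P.  */
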
static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
			       &dt))
	return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
	   && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
			       &dt))
	return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
	   && TREE_CODE (rhs) != FIXED_CST)
    return false;

  return true;
}

/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector
   variable to be used at REDUC_INDEX (in the then clause if REDUC_INDEX
   is 1, and in the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

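/* Illustrative example (not from the original source): a scalar stmt
   x_5 = a_1 < b_2 ? c_3 : d_4; is transformed below into
   vx = VEC_COND_EXPR <va < vb, vc, vd>;  */
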
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
			gimple *vec_stmt, tree reduc_def, int reduc_index)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree op = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum machine_mode vec_mode;
  tree def;
  enum vect_def_type dt;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  enum tree_code code;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  gcc_assert (ncopies >= 1);
  if (ncopies > 1)
    return false; /* FORNOW */

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false;

  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
			   TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
			       &then_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
	   && TREE_CODE (then_clause) != REAL_CST
	   && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
			       &else_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
	   && TREE_CODE (else_clause) != REAL_CST
	   && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }

  /* Transform.  */

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  vec_cond_lhs =
    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
  vec_cond_rhs =
    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
  if (reduc_index == 1)
    vec_then_clause = reduc_def;
  else
    vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
  if (reduc_index == 2)
    vec_else_clause = reduc_def;
  else
    vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);

  /* Arguments are ready.  Create the new vector stmt.  */
  vec_compare = build2 (TREE_CODE (cond_expr), vectype,
			vec_cond_lhs, vec_cond_rhs);
  vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
			  vec_compare, vec_then_clause, vec_else_clause);

  *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
  new_temp = make_ssa_name (vec_dest, *vec_stmt);
  gimple_assign_set_lhs (*vec_stmt, new_temp);
  vect_finish_stmt_generation (stmt, *vec_stmt, gsi);

  return true;
}


/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	fprintf (vect_dump, "not vectorized: stmt has volatile operands");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "irrelevant.");

      return true;
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
				|| relevance == vect_used_in_outer_by_reduction
				|| relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "get vectype for scalar type: ");
	  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "not SLPed: unsupported data-type ");
	      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
	    }
	  return false;
	}

      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "vectype: ");
	  print_generic_expr (vect_dump, vectype, TDF_SLIM);
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
      *need_to_vectorize = true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
	  || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
	  || vectorizable_conversion (stmt, NULL, NULL, NULL)
	  || vectorizable_operation (stmt, NULL, NULL, NULL)
	  || vectorizable_assignment (stmt, NULL, NULL, NULL)
	  || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
	  || vectorizable_call (stmt, NULL, NULL)
	  || vectorizable_store (stmt, NULL, NULL, NULL)
	  || vectorizable_reduction (stmt, NULL, NULL, NULL)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_store (stmt, NULL, NULL, node));
    }

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	{
	  fprintf (vect_dump, "not vectorized: relevant stmt not ");
	  fprintf (vect_dump, "supported: ");
	  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	{
	  fprintf (vect_dump, "not vectorized: live stmt not ");
	  fprintf (vect_dump, "supported: ");
	  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	}

      return false;
    }

  if (!PURE_SLP_STMT (stmt_info))
    {
      /* Groups of strided accesses whose size is not a power of 2 are not
	 vectorizable yet using loop-vectorization.  Therefore, if this stmt
	 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
	 loop-based vectorized), the loop cannot be vectorized.  */
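      /* For instance (illustrative, not in the original source): a group
	 of three interleaved accesses has DR_GROUP_SIZE == 3 and
	 exact_log2 (3) == -1, so it is rejected here unless the stmt is
	 purely SLPed.  */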
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
	  && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
			   DR_GROUP_FIRST_DR (stmt_info)))) == -1)
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "not vectorized: the size of group "
		       "of strided accesses is not a power of 2");
	      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	    }

	  return false;
	}
    }

  return true;
}


/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
		     bool *strided_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple orig_stmt_in_pattern;
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
      done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_promotion_vec_info_type:
      done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *strided_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_call (stmt, gsi, &vec_stmt);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    fprintf (vect_dump, "stmt not supported.");
	  gcc_unreachable ();
	}
    }

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info)
	     == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    {
      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
      if (orig_stmt_in_pattern)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
	  /* STMT was inserted by the vectorizer to replace a computation
	     idiom.  ORIG_STMT_IN_PATTERN is a stmt in the original sequence
	     that computed this idiom.  We need to record a pointer to
	     VEC_STMT in the stmt_info of ORIG_STMT_IN_PATTERN.  See more
	     details in the documentation of vect_pattern_recog.  */
	  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
	    {
	      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
	      STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
	    }
	}
    }

  return is_store;
}


/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}


/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
		   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = loop_vect;
  DR_GROUP_FIRST_DR (res) = NULL;
  DR_GROUP_NEXT_DR (res) = NULL;
  DR_GROUP_SIZE (res) = 0;
  DR_GROUP_STORE_COUNT (res) = 0;
  DR_GROUP_GAP (res) = 0;
  DR_GROUP_SAME_DR_STMT (res) = NULL;
  DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}


/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec);
  stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  gcc_assert (stmt_vec_info_vec);
  VEC_free (vec_void_p, heap, stmt_vec_info_vec);
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}


/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

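/* Illustrative example (not from the original source): on a target whose
   SIMD word is 16 bytes, a 4-byte "int" maps to a 4-unit vector type
   (e.g. V4SI) and an 8-byte "double" to a 2-unit one (e.g. V2DF), per
   the UNITS_PER_SIMD_WORD / nbytes computation below.  */
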
tree
get_vectype_for_scalar_type (tree scalar_type)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
    return NULL_TREE;

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    return NULL_TREE;

  /* If we'd build a vector type of elements whose mode precision doesn't
     match their type's precision we'll get mismatched types on vector
     extracts via BIT_FIELD_REFs.  This effectively means we disable
     vectorization of bool and/or enum types in some languages.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
    return NULL_TREE;

  /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
     is expected.  */
  nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
{
  return get_vectype_for_scalar_type (scalar_type);
}

/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of a stmt in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and
   operands that are defined by the current iteration of the loop.
   Unsupportable operands are those that are defined by a previous
   iteration of the loop (as is the case in reduction/induction
   computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

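/* Illustrative summary (not from the original source) of the
   classification performed below: an INTEGER_CST or REAL_CST yields
   vect_constant_def; a gimple invariant or a function argument yields
   vect_external_def; an SSA_NAME defined inside the loop/bb being
   vectorized gets the def-type recorded in its stmt_vec_info
   (e.g. vect_internal_def or vect_reduction_def).  */
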
bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
		    bb_vec_info bb_vinfo, gimple *def_stmt,
		    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL;
  *def = NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "non-associatable copy.");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "no def_stmt.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.", *dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      if (*def != NULL)
	break;
      /* FALLTHRU */
    default:
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "unsupported defining stmt: ");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
		      bb_vec_info bb_vinfo, gimple *def_stmt,
		      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */

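/* Illustrative example (not from the original source): widening a vector
   of 16 chars to ints takes two steps.  Each step applies the LO/HI pair
   selected below, so the 16 chars first become two vectors of 8 shorts
   (the intermediate type recorded in INTERM_TYPES), and each of those
   becomes two vectors of 4 ints; MULTI_STEP_CVT is 1 in this case.  */
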
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
				tree vectype_out, tree vectype_in,
				tree *decl1, tree *decl2,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two
     vectors (because the widened results do not fit in one vector).  The
     generated vector results would normally be expected to be generated in
     the same order as in the original scalar computation, i.e. if 8 results
     are generated in each vector iteration, they are to be organized as
     follows:
	vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation
     is used in a reduction computation only, the order doesn't matter
     (because when vectorizing a reduction we change the order of the
     computation).  Some targets can take advantage of this and generate
     more efficient code.  For example, targets like Altivec, that support
     widen_mult using a sequence of {mult_even,mult_odd} generate the
     following vectors:
	vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_WIDEN_MULT_HI_EXPR;
	  c2 = VEC_WIDEN_MULT_LO_EXPR;
	}
      else
	{
	  c2 = VEC_WIDEN_MULT_HI_EXPR;
	  c1 = VEC_WIDEN_MULT_LO_EXPR;
	}
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_UNPACK_HI_EXPR;
	  c2 = VEC_UNPACK_LO_EXPR;
	}
      else
	{
	  c2 = VEC_UNPACK_HI_EXPR;
	  c1 = VEC_UNPACK_LO_EXPR;
	}
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_UNPACK_FLOAT_HI_EXPR;
	  c2 = VEC_UNPACK_FLOAT_LO_EXPR;
	}
      else
	{
	  c2 = VEC_UNPACK_FLOAT_HI_EXPR;
	  c1 = VEC_UNPACK_FLOAT_LO_EXPR;
	}
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
	 == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
	return false;

      *code1 = c1;
      *code2 = c2;

      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
	 intermediate steps in the promotion sequence.  We try
	 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
	 not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
	{
	  intermediate_mode = insn_data[icode1].operand[0].mode;
	  intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
							      TYPE_UNSIGNED (prev_type));
	  optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
	  optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

	  if (!optab3 || !optab4
	      || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
		 == CODE_FOR_nothing
	      || insn_data[icode1].operand[0].mode != intermediate_mode
	      || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
		 == CODE_FOR_nothing
	      || insn_data[icode2].operand[0].mode != intermediate_mode
	      || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
		 == CODE_FOR_nothing
	      || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
		 == CODE_FOR_nothing)
	    return false;

	  VEC_quick_push (tree, *interm_types, intermediate_type);
	  (*multi_step_cvt)++;

	  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	    return true;

	  prev_type = intermediate_type;
	  prev_mode = intermediate_mode;
	}

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
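  /* Unlike widening, narrowing needs only one tree code: a single
     VEC_PACK_* operation consumes two input vectors and produces one
     vector of narrower elements.  */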

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
       == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      *code1 = c1;
      prev_type = vectype;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
	 intermediate steps in the narrowing sequence.  We try
	 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we
	 do not.  */
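      /* For example, narrowing int to char when the target only packs
	 between adjacent modes goes int -> short -> char (e.g. V4SI ->
	 V8HI -> V16QI): the loop records the short vector type in
	 *INTERM_TYPES and leaves *MULTI_STEP_CVT at 1.  */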
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
	{
	  intermediate_mode = insn_data[icode1].operand[0].mode;
	  intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
						     TYPE_UNSIGNED (prev_type));
	  interm_optab = optab_for_tree_code (c1, intermediate_type,
					      optab_default);
	  if (!interm_optab
	      || (icode1 = optab_handler (optab1, prev_mode)->insn_code)
		 == CODE_FOR_nothing
	      || insn_data[icode1].operand[0].mode != intermediate_mode
	      || (icode1 = optab_handler (interm_optab,
					  intermediate_mode)->insn_code)
		 == CODE_FOR_nothing)
	    return false;

	  VEC_quick_push (tree, *interm_types, intermediate_type);
	  (*multi_step_cvt)++;

	  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	    return true;

	  prev_type = intermediate_type;
	  prev_mode = intermediate_mode;
	}

      return false;
    }

  *code1 = c1;
  return true;
}
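
/* Illustrative sketch, not part of the original file: how a caller such as
   vectorizable_type_demotion might use the routine above to test an
   int -> char conversion.  VECTYPE_IN and VECTYPE_OUT would come from
   get_vectype_for_scalar_type; everything else follows the signature
   defined in this file.  */
#if 0
static bool
example_check_narrowing (tree vectype_out, tree vectype_in)
{
  enum tree_code code1;
  int multi_step_cvt = 0;
  VEC (tree, heap) *interm_types = NULL;
  bool ok;

  ok = supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code1, &multi_step_cvt,
					&interm_types);
  /* On success, CODE1 is VEC_PACK_TRUNC_EXPR; with only adjacent-mode
     packing available, MULTI_STEP_CVT is 1 and INTERM_TYPES holds the
     short vector type.  */
  VEC_free (tree, heap, interm_types);
  return ok;
}
#endif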