gcc/tree-vect-stmts.c
1/* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7This file is part of GCC.
8
9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
11Software Foundation; either version 3, or (at your option) any later
12version.
13
14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17for more details.
18
19You should have received a copy of the GNU General Public License
20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "ggc.h"
28#include "tree.h"
29#include "target.h"
30#include "basic-block.h"
31#include "tree-pretty-print.h"
32#include "gimple-pretty-print.h"
33#include "tree-flow.h"
34#include "tree-dump.h"
35#include "cfgloop.h"
36#include "cfglayout.h"
37#include "expr.h"
38#include "recog.h"
39#include "optabs.h"
40#include "diagnostic-core.h"
41#include "toplev.h"
42#include "tree-vectorizer.h"
43#include "langhooks.h"
44
45
46/* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47
48/* Function vect_mark_relevant.
49
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51
52static void
53vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
55{
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 {
65 gimple pattern_stmt;
66
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
71
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
81 }
82
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 {
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
93 }
94
95 VEC_safe_push (gimple, heap, *worklist, stmt);
96}
97
98
99/* Function vect_stmt_relevant_p.
100
101 Return true if STMT in loop that is represented by LOOP_VINFO is
102 "relevant for vectorization".
103
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
107 - it is a control stmt in the loop (except for the exit condition).
108
109 CHECKME: what other side effects would the vectorizer allow? */
110
111static bool
112vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
114{
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
120
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
123
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
129
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
133 {
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
137 }
138
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 {
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 {
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
146 {
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
152
153 /* We expect all such uses to be in the loop exit phis
154 (because of loop closed form) */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
157
158 *live_p = true;
159 }
160 }
161 }
162
163 return (*live_p || *relevant);
164}
165
166
167/* Function exist_non_indexing_operands_for_use_p
168
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
171
172static bool
173exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174{
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
183
184 /* STMT has a data_ref. FORNOW this means that it's of one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
189
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
193
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
196
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
204
205 if (operand == use)
206 return true;
207
208 return false;
209}
210
211
212/*
213 Function process_use.
214
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
231 skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
234
235 Return true if everything is as expected. Return false otherwise. */
236
237static bool
238process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240{
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
248
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
253
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 {
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
259 }
260
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
263
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
266 {
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
270 }
271
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
293 }
294
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 {
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306
307 switch (relevant)
308 {
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
313
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
318
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
323
324 case vect_used_in_scope:
325 break;
326
327 default:
328 gcc_unreachable ();
329 }
330 }
331
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
334 ...
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 {
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343
344 switch (relevant)
345 {
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
351
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
355
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
359
360 default:
361 gcc_unreachable ();
362 }
363 }
364
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
367}
368
369
370/* Function vect_mark_stmts_to_be_vectorized.
371
372 Not all stmts in the loop need to be vectorized. For example:
373
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
378
379 3. j = j + 1
380
381 Stmt 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
383
384 This pass detects such stmts. */
385
386bool
387vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388{
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
402
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405
406 worklist = VEC_alloc (gimple, heap, 64);
407
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
410 {
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 {
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 }
420
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 }
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 {
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
428 {
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 }
432
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 }
436 }
437
438 /* 2. Process_worklist */
439 while (VEC_length (gimple, worklist) > 0)
440 {
441 use_operand_p use_p;
442 ssa_op_iter iter;
443
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 {
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 }
450
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, and therefore the
471 order of the results that they produce does not have to be kept. */
472
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
476 {
477 case vect_reduction_def:
478 switch (tmp_relevant)
479 {
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
483
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
488
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
492
493 VEC_free (gimple, heap, worklist);
494 return false;
495 }
496
497 live_p = false;
498 break;
499
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
504 {
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
507
508 VEC_free (gimple, heap, worklist);
509 return false;
510 }
511
512 live_p = false;
513 break;
514
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
518 {
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
521
522 VEC_free (gimple, heap, worklist);
523 return false;
524 }
525
526 live_p = false;
527 break;
528
529 default:
530 break;
531 }
532
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 {
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 {
538 VEC_free (gimple, heap, worklist);
539 return false;
540 }
541 }
542 } /* while worklist */
543
544 VEC_free (gimple, heap, worklist);
545 return true;
546}
547
548
549/* Get cost by calling cost target builtin. */
550
551static inline
552int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553{
554 tree dummy_type = NULL;
555 int dummy = 0;
556
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
559}
560
561int
562cost_for_stmt (gimple stmt)
563{
564 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
565
566 switch (STMT_VINFO_TYPE (stmt_info))
567 {
568 case load_vec_info_type:
569 return vect_get_stmt_cost (scalar_load);
570 case store_vec_info_type:
571 return vect_get_stmt_cost (scalar_store);
572 case op_vec_info_type:
573 case condition_vec_info_type:
574 case assignment_vec_info_type:
575 case reduc_vec_info_type:
576 case induc_vec_info_type:
577 case type_promotion_vec_info_type:
578 case type_demotion_vec_info_type:
579 case type_conversion_vec_info_type:
580 case call_vec_info_type:
581 return vect_get_stmt_cost (scalar_stmt);
582 case undef_vec_info_type:
583 default:
584 gcc_unreachable ();
585 }
586}
587
588/* Function vect_model_simple_cost.
589
590 Models cost for simple operations, i.e. those that only emit ncopies of a
591 single op. Right now, this does not account for multiple insns that could
592 be generated for the single vector op. We will handle that shortly. */
593
594void
595vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
596 enum vect_def_type *dt, slp_tree slp_node)
597{
598 int i;
599 int inside_cost = 0, outside_cost = 0;
600
601 /* The SLP costs were already calculated during SLP tree build. */
602 if (PURE_SLP_STMT (stmt_info))
603 return;
604
605 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
606
607 /* FORNOW: Assuming maximum 2 args per stmts. */
608 for (i = 0; i < 2; i++)
609 {
610 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
611 outside_cost += vect_get_stmt_cost (vector_stmt);
612 }
613
614 if (vect_print_dump_info (REPORT_COST))
615 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
616 "outside_cost = %d .", inside_cost, outside_cost);
617
618 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
619 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
620 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
621}
622
623
624/* Function vect_cost_strided_group_size
625
626 For strided load or store, return the group_size only if it is the first
627 load or store of a group, else return 1. This ensures that group size is
628 only returned once per group. */
629
630static int
631vect_cost_strided_group_size (stmt_vec_info stmt_info)
632{
633 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
634
635 if (first_stmt == STMT_VINFO_STMT (stmt_info))
636 return DR_GROUP_SIZE (stmt_info);
637
638 return 1;
639}
640
641
642/* Function vect_model_store_cost
643
644 Models cost for stores. In the case of strided accesses, one access
645 has the overhead of the strided access attributed to it. */
646
647void
648vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
649 enum vect_def_type dt, slp_tree slp_node)
650{
651 int group_size;
652 unsigned int inside_cost = 0, outside_cost = 0;
653 struct data_reference *first_dr;
654 gimple first_stmt;
655
656 /* The SLP costs were already calculated during SLP tree build. */
657 if (PURE_SLP_STMT (stmt_info))
658 return;
659
660 if (dt == vect_constant_def || dt == vect_external_def)
661 outside_cost = vect_get_stmt_cost (scalar_to_vec);
662
663 /* Strided access? */
664 if (DR_GROUP_FIRST_DR (stmt_info))
665 {
666 if (slp_node)
667 {
668 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
669 group_size = 1;
670 }
671 else
672 {
673 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
674 group_size = vect_cost_strided_group_size (stmt_info);
675 }
676
677 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
678 }
679 /* Not a strided access. */
680 else
681 {
682 group_size = 1;
683 first_dr = STMT_VINFO_DATA_REF (stmt_info);
684 }
685
686 /* Is this an access in a group of stores, which provide strided access?
687 If so, add in the cost of the permutes. */
688 if (group_size > 1)
689 {
690 /* Uses a high and low interleave operation for each needed permute. */
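     /* For instance, with ncopies == 1 and group_size == 4 (illustrative
        figures), exact_log2 (4) == 2, so 2 * 4 == 8 vector stmts are
        charged below for the interleaving permutes.  */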
691 inside_cost = ncopies * exact_log2(group_size) * group_size
692 * vect_get_stmt_cost (vector_stmt);
693
694 if (vect_print_dump_info (REPORT_COST))
695 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
696 group_size);
697
698 }
699
700 /* Costs of the stores. */
701 vect_get_store_cost (first_dr, ncopies, &inside_cost);
702
703 if (vect_print_dump_info (REPORT_COST))
704 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
705 "outside_cost = %d .", inside_cost, outside_cost);
706
707 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
708 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
709 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
710}
711
712
713/* Calculate cost of DR's memory access. */
714void
715vect_get_store_cost (struct data_reference *dr, int ncopies,
716 unsigned int *inside_cost)
717{
718 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
719
720 switch (alignment_support_scheme)
721 {
722 case dr_aligned:
723 {
724 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
725
726 if (vect_print_dump_info (REPORT_COST))
727 fprintf (vect_dump, "vect_model_store_cost: aligned.");
728
729 break;
730 }
731
732 case dr_unaligned_supported:
733 {
734 gimple stmt = DR_STMT (dr);
735 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
736 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
737
738 /* Here, we assign an additional cost for the unaligned store. */
739 *inside_cost += ncopies
740 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
741 vectype, DR_MISALIGNMENT (dr));
742
743 if (vect_print_dump_info (REPORT_COST))
744 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
745 "hardware.");
746
747 break;
748 }
749
750 default:
751 gcc_unreachable ();
752 }
753}
754
755
756/* Function vect_model_load_cost
757
758 Models cost for loads. In the case of strided accesses, the last access
759 has the overhead of the strided access attributed to it. Since unaligned
760 accesses are supported for loads, we also account for the costs of the
761 access scheme chosen. */
762
763void
764vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
765
766{
767 int group_size;
768 gimple first_stmt;
769 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
770 unsigned int inside_cost = 0, outside_cost = 0;
771
772 /* The SLP costs were already calculated during SLP tree build. */
773 if (PURE_SLP_STMT (stmt_info))
774 return;
775
776 /* Strided accesses? */
777 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
778 if (first_stmt && !slp_node)
779 {
780 group_size = vect_cost_strided_group_size (stmt_info);
781 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
782 }
783 /* Not a strided access. */
784 else
785 {
786 group_size = 1;
787 first_dr = dr;
788 }
789
790 /* Is this an access in a group of loads providing strided access?
791 If so, add in the cost of the permutes. */
792 if (group_size > 1)
793 {
794 /* Uses even and odd extract operations for each needed permute. */
795 inside_cost = ncopies * exact_log2(group_size) * group_size
796 * vect_get_stmt_cost (vector_stmt);
797
798 if (vect_print_dump_info (REPORT_COST))
799 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
800 group_size);
801 }
802
803 /* The loads themselves. */
804 vect_get_load_cost (first_dr, ncopies,
805 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
806 &inside_cost, &outside_cost);
807
808 if (vect_print_dump_info (REPORT_COST))
809 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
810 "outside_cost = %d .", inside_cost, outside_cost);
811
812 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
813 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
814 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
815}
816
817
818/* Calculate cost of DR's memory access. */
819void
820vect_get_load_cost (struct data_reference *dr, int ncopies,
821 bool add_realign_cost, unsigned int *inside_cost,
822 unsigned int *outside_cost)
823{
824 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
825
826 switch (alignment_support_scheme)
827 {
828 case dr_aligned:
829 {
830 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
831
832 if (vect_print_dump_info (REPORT_COST))
833 fprintf (vect_dump, "vect_model_load_cost: aligned.");
834
835 break;
836 }
837 case dr_unaligned_supported:
838 {
839 gimple stmt = DR_STMT (dr);
840 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
841 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
842
843 /* Here, we assign an additional cost for the unaligned load. */
844 *inside_cost += ncopies
845 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
846 vectype, DR_MISALIGNMENT (dr));
847 if (vect_print_dump_info (REPORT_COST))
848 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
849 "hardware.");
850
851 break;
852 }
853 case dr_explicit_realign:
854 {
855 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
856 + vect_get_stmt_cost (vector_stmt));
857
858 /* FIXME: If the misalignment remains fixed across the iterations of
859 the containing loop, the following cost should be added to the
860 outside costs. */
861 if (targetm.vectorize.builtin_mask_for_load)
862 *inside_cost += vect_get_stmt_cost (vector_stmt);
863
864 break;
865 }
866 case dr_explicit_realign_optimized:
867 {
868 if (vect_print_dump_info (REPORT_COST))
869 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
870 "pipelined.");
871
872 /* Unaligned software pipeline has a load of an address, an initial
873 load, and possibly a mask operation to "prime" the loop. However,
874 if this is an access in a group of loads, which provide strided
875 access, then the above cost should only be considered for one
876 access in the group. Inside the loop, there is a load op
877 and a realignment op. */
878
879 if (add_realign_cost)
880 {
881 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
882 if (targetm.vectorize.builtin_mask_for_load)
883 *outside_cost += vect_get_stmt_cost (vector_stmt);
884 }
885
886 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
887 + vect_get_stmt_cost (vector_stmt));
888 break;
889 }
890
891 default:
892 gcc_unreachable ();
893 }
894}
895
896
897/* Function vect_init_vector.
898
899 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
900 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
901 is not NULL. Otherwise, place the initialization at the loop preheader.
902 Return the DEF of INIT_STMT.
903 It will be used in the vectorization of STMT. */
904
905tree
906vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
907 gimple_stmt_iterator *gsi)
908{
909 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
910 tree new_var;
911 gimple init_stmt;
912 tree vec_oprnd;
913 edge pe;
914 tree new_temp;
915 basic_block new_bb;
916
917 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
918 add_referenced_var (new_var);
919 init_stmt = gimple_build_assign (new_var, vector_var);
920 new_temp = make_ssa_name (new_var, init_stmt);
921 gimple_assign_set_lhs (init_stmt, new_temp);
922
923 if (gsi)
924 vect_finish_stmt_generation (stmt, init_stmt, gsi);
925 else
926 {
927 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
928
929 if (loop_vinfo)
930 {
931 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
932
933 if (nested_in_vect_loop_p (loop, stmt))
934 loop = loop->inner;
935
936 pe = loop_preheader_edge (loop);
937 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
938 gcc_assert (!new_bb);
939 }
940 else
941 {
942 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
943 basic_block bb;
944 gimple_stmt_iterator gsi_bb_start;
945
946 gcc_assert (bb_vinfo);
947 bb = BB_VINFO_BB (bb_vinfo);
948 gsi_bb_start = gsi_after_labels (bb);
949 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
950 }
951 }
952
953 if (vect_print_dump_info (REPORT_DETAILS))
954 {
955 fprintf (vect_dump, "created new init_stmt: ");
956 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
957 }
958
959 vec_oprnd = gimple_assign_lhs (init_stmt);
960 return vec_oprnd;
961}
962
963
964/* Function vect_get_vec_def_for_operand.
965
966 OP is an operand in STMT. This function returns a (vector) def that will be
967 used in the vectorized stmt for STMT.
968
969 In the case that OP is an SSA_NAME which is defined in the loop, then
970 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
971
972 In case OP is an invariant or constant, a new stmt that creates a vector def
973 needs to be introduced. */
974
975tree
976vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
977{
978 tree vec_oprnd;
979 gimple vec_stmt;
980 gimple def_stmt;
981 stmt_vec_info def_stmt_info = NULL;
982 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
983 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
984 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
985 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
986 tree vec_inv;
987 tree vec_cst;
988 tree t = NULL_TREE;
989 tree def;
990 int i;
991 enum vect_def_type dt;
992 bool is_simple_use;
993 tree vector_type;
994
995 if (vect_print_dump_info (REPORT_DETAILS))
996 {
997 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
998 print_generic_expr (vect_dump, op, TDF_SLIM);
999 }
1000
1001 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1002 &dt);
1003 gcc_assert (is_simple_use);
1004 if (vect_print_dump_info (REPORT_DETAILS))
1005 {
1006 if (def)
1007 {
1008 fprintf (vect_dump, "def = ");
1009 print_generic_expr (vect_dump, def, TDF_SLIM);
1010 }
1011 if (def_stmt)
1012 {
1013 fprintf (vect_dump, " def_stmt = ");
1014 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1015 }
1016 }
1017
1018 switch (dt)
1019 {
1020 /* Case 1: operand is a constant. */
1021 case vect_constant_def:
1022 {
1023 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1024 gcc_assert (vector_type);
1025
1026 if (scalar_def)
1027 *scalar_def = op;
1028
1029 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1030 if (vect_print_dump_info (REPORT_DETAILS))
1031 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1032
1033 for (i = nunits - 1; i >= 0; --i)
1034 {
1035 t = tree_cons (NULL_TREE, op, t);
1036 }
1037 vec_cst = build_vector (vector_type, t);
1038 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1039 }
1040
1041 /* Case 2: operand is defined outside the loop - loop invariant. */
1042 case vect_external_def:
1043 {
1044 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1045 gcc_assert (vector_type);
1046 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1047
1048 if (scalar_def)
1049 *scalar_def = def;
1050
1051 /* Create 'vec_inv = {inv,inv,..,inv}' */
1052 if (vect_print_dump_info (REPORT_DETAILS))
1053 fprintf (vect_dump, "Create vector_inv.");
1054
1055 for (i = nunits - 1; i >= 0; --i)
1056 {
1057 t = tree_cons (NULL_TREE, def, t);
1058 }
1059
1060 /* FIXME: use build_constructor directly. */
1061 vec_inv = build_constructor_from_list (vector_type, t);
1062 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1063 }
1064
1065 /* Case 3: operand is defined inside the loop. */
1066 case vect_internal_def:
1067 {
1068 if (scalar_def)
1069 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1070
1071 /* Get the def from the vectorized stmt. */
1072 def_stmt_info = vinfo_for_stmt (def_stmt);
1073 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1074 gcc_assert (vec_stmt);
1075 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1076 vec_oprnd = PHI_RESULT (vec_stmt);
1077 else if (is_gimple_call (vec_stmt))
1078 vec_oprnd = gimple_call_lhs (vec_stmt);
1079 else
1080 vec_oprnd = gimple_assign_lhs (vec_stmt);
1081 return vec_oprnd;
1082 }
1083
1084 /* Case 4: operand is defined by a loop header phi - reduction */
1085 case vect_reduction_def:
1086 case vect_double_reduction_def:
1087 case vect_nested_cycle:
1088 {
1089 struct loop *loop;
1090
1091 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1092 loop = (gimple_bb (def_stmt))->loop_father;
1093
1094 /* Get the def before the loop */
1095 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1096 return get_initial_def_for_reduction (stmt, op, scalar_def);
1097 }
1098
1099 /* Case 5: operand is defined by loop-header phi - induction. */
1100 case vect_induction_def:
1101 {
1102 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1103
1104 /* Get the def from the vectorized stmt. */
1105 def_stmt_info = vinfo_for_stmt (def_stmt);
1106 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1107 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1108 vec_oprnd = PHI_RESULT (vec_stmt);
1109 return vec_oprnd;
1110 }
1111
1112 default:
1113 gcc_unreachable ();
1114 }
1115}
1116
1117
1118/* Function vect_get_vec_def_for_stmt_copy
1119
1120 Return a vector-def for an operand. This function is used when the
1121 vectorized stmt to be created (by the caller to this function) is a "copy"
1122 created in case the vectorized result cannot fit in one vector, and several
1123 copies of the vector-stmt are required. In this case the vector-def is
1124 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1125 of the stmt that defines VEC_OPRND.
1126 DT is the type of the vector def VEC_OPRND.
1127
1128 Context:
1129 In case the vectorization factor (VF) is bigger than the number
1130 of elements that can fit in a vectype (nunits), we have to generate
1131 more than one vector stmt to vectorize the scalar stmt. This situation
1132 arises when there are multiple data-types operated upon in the loop; the
1133 smallest data-type determines the VF, and as a result, when vectorizing
1134 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1135 vector stmt (each computing a vector of 'nunits' results, and together
1136 computing 'VF' results in each iteration). This function is called when
1137 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1138 which VF=16 and nunits=4, so the number of copies required is 4):
1139
1140 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1141
1142 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1143 VS1.1: vx.1 = memref1 VS1.2
1144 VS1.2: vx.2 = memref2 VS1.3
1145 VS1.3: vx.3 = memref3
1146
1147 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1148 VSnew.1: vz1 = vx.1 + ... VSnew.2
1149 VSnew.2: vz2 = vx.2 + ... VSnew.3
1150 VSnew.3: vz3 = vx.3 + ...
1151
1152 The vectorization of S1 is explained in vectorizable_load.
1153 The vectorization of S2:
1154 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1155 the function 'vect_get_vec_def_for_operand' is called to
1156 get the relevant vector-def for each operand of S2. For operand x it
1157 returns the vector-def 'vx.0'.
1158
1159 To create the remaining copies of the vector-stmt (VSnew.j), this
1160 function is called to get the relevant vector-def for each operand. It is
1161 obtained from the respective VS1.j stmt, which is recorded in the
1162 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1163
1164 For example, to obtain the vector-def 'vx.1' in order to create the
1165 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1166 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1167 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1168 and return its def ('vx.1').
1169 Overall, to create the above sequence this function will be called 3 times:
1170 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1171 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1172 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1173
1174tree
1175vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1176{
1177 gimple vec_stmt_for_operand;
1178 stmt_vec_info def_stmt_info;
1179
1180 /* Do nothing; can reuse same def. */
1181 if (dt == vect_external_def || dt == vect_constant_def )
1182 return vec_oprnd;
1183
1184 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1185 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1186 gcc_assert (def_stmt_info);
1187 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1188 gcc_assert (vec_stmt_for_operand);
1189 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1190 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1191 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1192 else
1193 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1194 return vec_oprnd;
1195}
1196
1197
1198/* Get vectorized definitions for the operands to create a copy of an original
1199 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1200
1201static void
1202vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1203 VEC(tree,heap) **vec_oprnds0,
1204 VEC(tree,heap) **vec_oprnds1)
1205{
1206 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1207
1208 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1209 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1210
1211 if (vec_oprnds1 && *vec_oprnds1)
1212 {
1213 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1214 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1215 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1216 }
1217}
1218
1219
1220/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1221
1222static void
1223vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1224 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1225 slp_tree slp_node)
1226{
1227 if (slp_node)
1228 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
1229 else
1230 {
1231 tree vec_oprnd;
1232
1233 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1234 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1235 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1236
1237 if (op1)
1238 {
1239 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1240 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1241 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1242 }
1243 }
1244}
1245
1246
1247/* Function vect_finish_stmt_generation.
1248
1249 Insert a new stmt. */
1250
1251void
1252vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1253 gimple_stmt_iterator *gsi)
1254{
1255 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1256 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1257 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1258
1259 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1260
1261 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1262
1263 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1264 bb_vinfo));
1265
1266 if (vect_print_dump_info (REPORT_DETAILS))
1267 {
1268 fprintf (vect_dump, "add new stmt: ");
1269 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1270 }
1271
1272 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1273}
1274
1275/* Checks if CALL can be vectorized in type VECTYPE. Returns
1276 a function declaration if the target has a vectorized version
1277 of the function, or NULL_TREE if the function cannot be vectorized. */
1278
1279tree
1280vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1281{
1282 tree fndecl = gimple_call_fndecl (call);
1283
1284 /* We only handle functions that do not read or clobber memory -- i.e.
1285 const or novops ones. */
1286 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1287 return NULL_TREE;
1288
1289 if (!fndecl
1290 || TREE_CODE (fndecl) != FUNCTION_DECL
1291 || !DECL_BUILT_IN (fndecl))
1292 return NULL_TREE;
1293
1294 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1295 vectype_in);
1296}
1297
1298/* Function vectorizable_call.
1299
1300 Check if STMT performs a function call that can be vectorized.
1301 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1302 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1303 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1304
1305static bool
1306vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1307{
1308 tree vec_dest;
1309 tree scalar_dest;
1310 tree op, type;
1311 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1312 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1313 tree vectype_out, vectype_in;
1314 int nunits_in;
1315 int nunits_out;
1316 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1317 tree fndecl, new_temp, def, rhs_type;
1318 gimple def_stmt;
1319 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1320 gimple new_stmt = NULL;
1321 int ncopies, j;
1322 VEC(tree, heap) *vargs = NULL;
1323 enum { NARROW, NONE, WIDEN } modifier;
1324 size_t i, nargs;
1325
1326 /* FORNOW: unsupported in basic block SLP. */
1327 gcc_assert (loop_vinfo);
1328
1329 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1330 return false;
1331
1332 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1333 return false;
1334
1335 /* FORNOW: SLP not supported. */
1336 if (STMT_SLP_TYPE (stmt_info))
1337 return false;
1338
1339 /* Is STMT a vectorizable call? */
1340 if (!is_gimple_call (stmt))
1341 return false;
1342
1343 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1344 return false;
1345
1346 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1347
1348 /* Process function arguments. */
1349 rhs_type = NULL_TREE;
1350 vectype_in = NULL_TREE;
1351 nargs = gimple_call_num_args (stmt);
1352
1353 /* Bail out if the function has more than two arguments; we
1354 do not have interesting builtin functions to vectorize with
1355 more than two arguments. No arguments is also not good. */
1356 if (nargs == 0 || nargs > 2)
1357 return false;
1358
1359 for (i = 0; i < nargs; i++)
1360 {
1361 tree opvectype;
1362
1363 op = gimple_call_arg (stmt, i);
1364
1365 /* We can only handle calls with arguments of the same type. */
1366 if (rhs_type
1367 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1368 {
1369 if (vect_print_dump_info (REPORT_DETAILS))
1370 fprintf (vect_dump, "argument types differ.");
1371 return false;
1372 }
1373 if (!rhs_type)
1374 rhs_type = TREE_TYPE (op);
1375
1376 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1377 &def_stmt, &def, &dt[i], &opvectype))
1378 {
1379 if (vect_print_dump_info (REPORT_DETAILS))
1380 fprintf (vect_dump, "use not simple.");
1381 return false;
1382 }
1383
1384 if (!vectype_in)
1385 vectype_in = opvectype;
1386 else if (opvectype
1387 && opvectype != vectype_in)
1388 {
1389 if (vect_print_dump_info (REPORT_DETAILS))
1390 fprintf (vect_dump, "argument vector types differ.");
1391 return false;
1392 }
1393 }
1394 /* If all arguments are external or constant defs use a vector type with
1395 the same size as the output vector type. */
1396 if (!vectype_in)
1397 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1398 if (vec_stmt)
1399 gcc_assert (vectype_in);
1400 if (!vectype_in)
1401 {
1402 if (vect_print_dump_info (REPORT_DETAILS))
1403 {
1404 fprintf (vect_dump, "no vectype for scalar type ");
1405 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1406 }
1407
1408 return false;
1409 }
1410
1411 /* FORNOW */
1412 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1413 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1414 if (nunits_in == nunits_out / 2)
1415 modifier = NARROW;
1416 else if (nunits_out == nunits_in)
1417 modifier = NONE;
1418 else if (nunits_out == nunits_in / 2)
1419 modifier = WIDEN;
1420 else
1421 return false;
1422
1423 /* For now, we only vectorize functions if a target specific builtin
1424 is available. TODO -- in some cases, it might be profitable to
1425 insert the calls for pieces of the vector, in order to be able
1426 to vectorize other operations in the loop. */
1427 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1428 if (fndecl == NULL_TREE)
1429 {
1430 if (vect_print_dump_info (REPORT_DETAILS))
1431 fprintf (vect_dump, "function is not vectorizable.");
1432
1433 return false;
1434 }
1435
1436 gcc_assert (!gimple_vuse (stmt));
1437
1438 if (modifier == NARROW)
1439 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1440 else
1441 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
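  /* For instance, with a vectorization factor of 8 and nunits_out == 4
     (illustrative figures), a narrowing call gets ncopies == 8 / 4 == 2;
     each copy consumes two input vectors per argument and produces one
     output vector.  */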
1442
1443 /* Sanity check: make sure that at least one copy of the vectorized stmt
1444 needs to be generated. */
1445 gcc_assert (ncopies >= 1);
1446
1447 if (!vec_stmt) /* transformation not required. */
1448 {
1449 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1450 if (vect_print_dump_info (REPORT_DETAILS))
1451 fprintf (vect_dump, "=== vectorizable_call ===");
1452 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1453 return true;
1454 }
1455
1456 /** Transform. **/
1457
1458 if (vect_print_dump_info (REPORT_DETAILS))
1459 fprintf (vect_dump, "transform operation.");
1460
1461 /* Handle def. */
1462 scalar_dest = gimple_call_lhs (stmt);
1463 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1464
1465 prev_stmt_info = NULL;
1466 switch (modifier)
1467 {
1468 case NONE:
1469 for (j = 0; j < ncopies; ++j)
1470 {
1471 /* Build argument list for the vectorized call. */
1472 if (j == 0)
1473 vargs = VEC_alloc (tree, heap, nargs);
1474 else
1475 VEC_truncate (tree, vargs, 0);
1476
1477 for (i = 0; i < nargs; i++)
1478 {
1479 op = gimple_call_arg (stmt, i);
1480 if (j == 0)
1481 vec_oprnd0
1482 = vect_get_vec_def_for_operand (op, stmt, NULL);
1483 else
1484 {
1485 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1486 vec_oprnd0
1487 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1488 }
1489
1490 VEC_quick_push (tree, vargs, vec_oprnd0);
1491 }
1492
1493 new_stmt = gimple_build_call_vec (fndecl, vargs);
1494 new_temp = make_ssa_name (vec_dest, new_stmt);
1495 gimple_call_set_lhs (new_stmt, new_temp);
1496
1497 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1498 mark_symbols_for_renaming (new_stmt);
1499
1500 if (j == 0)
1501 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1502 else
1503 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1504
1505 prev_stmt_info = vinfo_for_stmt (new_stmt);
1506 }
1507
1508 break;
1509
1510 case NARROW:
1511 for (j = 0; j < ncopies; ++j)
1512 {
1513 /* Build argument list for the vectorized call. */
1514 if (j == 0)
1515 vargs = VEC_alloc (tree, heap, nargs * 2);
1516 else
1517 VEC_truncate (tree, vargs, 0);
1518
1519 for (i = 0; i < nargs; i++)
1520 {
1521 op = gimple_call_arg (stmt, i);
1522 if (j == 0)
1523 {
1524 vec_oprnd0
1525 = vect_get_vec_def_for_operand (op, stmt, NULL);
1526 vec_oprnd1
1527 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1528 }
1529 else
1530 {
1531 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1532 vec_oprnd0
1533 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1534 vec_oprnd1
1535 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1536 }
1537
1538 VEC_quick_push (tree, vargs, vec_oprnd0);
1539 VEC_quick_push (tree, vargs, vec_oprnd1);
1540 }
1541
1542 new_stmt = gimple_build_call_vec (fndecl, vargs);
1543 new_temp = make_ssa_name (vec_dest, new_stmt);
1544 gimple_call_set_lhs (new_stmt, new_temp);
1545
1546 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1547 mark_symbols_for_renaming (new_stmt);
1548
1549 if (j == 0)
1550 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1551 else
1552 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1553
1554 prev_stmt_info = vinfo_for_stmt (new_stmt);
1555 }
1556
1557 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1558
1559 break;
1560
1561 case WIDEN:
1562 /* No current target implements this case. */
1563 return false;
1564 }
1565
1566 VEC_free (tree, heap, vargs);
1567
1568 /* Update the exception handling table with the vector stmt if necessary. */
1569 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1570 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1571
1572 /* The call in STMT might prevent it from being removed in dce.
1573 We however cannot remove it here, due to the way the ssa name
1574 it defines is mapped to the new definition. So just replace
1575 rhs of the statement with something harmless. */
1576
1577 type = TREE_TYPE (scalar_dest);
1578 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1579 fold_convert (type, integer_zero_node));
1580 set_vinfo_for_stmt (new_stmt, stmt_info);
1581 set_vinfo_for_stmt (stmt, NULL);
1582 STMT_VINFO_STMT (stmt_info) = new_stmt;
1583 gsi_replace (gsi, new_stmt, false);
1584 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1585
1586 return true;
1587}
1588
1589
1590/* Function vect_gen_widened_results_half
1591
1592 Create a vector stmt whose code, type, number of arguments, and result
1593 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1594 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1595 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1596 needs to be created (DECL is a function-decl of a target-builtin).
1597 STMT is the original scalar stmt that we are vectorizing. */
1598
1599static gimple
1600vect_gen_widened_results_half (enum tree_code code,
1601 tree decl,
1602 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1603 tree vec_dest, gimple_stmt_iterator *gsi,
1604 gimple stmt)
1605{
1606 gimple new_stmt;
1607 tree new_temp;
1608
1609 /* Generate half of the widened result: */
1610 if (code == CALL_EXPR)
1611 {
1612 /* Target specific support */
1613 if (op_type == binary_op)
1614 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1615 else
1616 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1617 new_temp = make_ssa_name (vec_dest, new_stmt);
1618 gimple_call_set_lhs (new_stmt, new_temp);
1619 }
1620 else
1621 {
1622 /* Generic support */
1623 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1624 if (op_type != binary_op)
1625 vec_oprnd1 = NULL;
1626 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1627 vec_oprnd1);
1628 new_temp = make_ssa_name (vec_dest, new_stmt);
1629 gimple_assign_set_lhs (new_stmt, new_temp);
1630 }
1631 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1632
1633 return new_stmt;
1634}
1635
1636
1637/* Check if STMT performs a conversion operation that can be vectorized.
1638 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
ebfd146a
IR
1639 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1640 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1641
1642static bool
1643vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1644 gimple *vec_stmt, slp_tree slp_node)
1645{
1646 tree vec_dest;
1647 tree scalar_dest;
1648 tree op0;
1649 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1650 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1651 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1652 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1653 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1654 tree new_temp;
1655 tree def;
1656 gimple def_stmt;
1657 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1658 gimple new_stmt = NULL;
1659 stmt_vec_info prev_stmt_info;
1660 int nunits_in;
1661 int nunits_out;
1662 tree vectype_out, vectype_in;
1663 int ncopies, j;
1664 tree rhs_type;
1665 tree builtin_decl;
1666 enum { NARROW, NONE, WIDEN } modifier;
1667 int i;
1668 VEC(tree,heap) *vec_oprnds0 = NULL;
1669 tree vop0;
1670 VEC(tree,heap) *dummy = NULL;
1671 int dummy_int;
1672
1673 /* Is STMT a vectorizable conversion? */
1674
1675 /* FORNOW: unsupported in basic block SLP. */
1676 gcc_assert (loop_vinfo);
1677
1678 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1679 return false;
1680
8644a673 1681 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1682 return false;
1683
1684 if (!is_gimple_assign (stmt))
1685 return false;
1686
1687 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1688 return false;
1689
1690 code = gimple_assign_rhs_code (stmt);
1691 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1692 return false;
1693
1694 /* Check types of lhs and rhs. */
1695 scalar_dest = gimple_assign_lhs (stmt);
1696 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1697
1698 op0 = gimple_assign_rhs1 (stmt);
1699 rhs_type = TREE_TYPE (op0);
1700 /* Check the operands of the operation. */
1701 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1702 &def_stmt, &def, &dt[0], &vectype_in))
1703 {
1704 if (vect_print_dump_info (REPORT_DETAILS))
1705 fprintf (vect_dump, "use not simple.");
1706 return false;
1707 }
1708 /* If op0 is an external or constant defs use a vector type of
1709 the same size as the output vector type. */
ebfd146a 1710 if (!vectype_in)
b690cc0f 1711 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1712 if (vec_stmt)
1713 gcc_assert (vectype_in);
1714 if (!vectype_in)
1715 {
1716 if (vect_print_dump_info (REPORT_DETAILS))
1717 {
1718 fprintf (vect_dump, "no vectype for scalar type ");
1719 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1720 }
1721
1722 return false;
1723 }
1724
1725 /* FORNOW */
1726 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1727 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1728 if (nunits_in == nunits_out / 2)
1729 modifier = NARROW;
1730 else if (nunits_out == nunits_in)
1731 modifier = NONE;
1732 else if (nunits_out == nunits_in / 2)
1733 modifier = WIDEN;
1734 else
1735 return false;
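  /* Illustrative examples (assumed vector types, not from the original
     sources): float -> double halves the element count (e.g. V4SF -> V2DF),
     so nunits_out == nunits_in / 2 and MODIFIER is WIDEN; double -> float is
     the opposite case and is a NARROW; int -> float keeps the element count
     (V4SI -> V4SF), so MODIFIER is NONE.  */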
1736
1737 if (modifier == NARROW)
1738 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1739 else
1740 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1741
1742 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1743 this, so we can safely override NCOPIES with 1 here. */
1744 if (slp_node)
1745 ncopies = 1;
b8698a0f 1746
1747 /* Sanity check: make sure that at least one copy of the vectorized stmt
1748 needs to be generated. */
1749 gcc_assert (ncopies >= 1);
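  /* Example of the computation above (hypothetical numbers): with a
     vectorization factor of 8 and nunits_out == 4, a NARROW conversion needs
     ncopies == 2 copies of the vector stmt; SLP overrides this with 1.  */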
1750
1751 /* Supportable by target? */
1752 if ((modifier == NONE
88dd7150 1753 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
ebfd146a 1754 || (modifier == WIDEN
1755 && !supportable_widening_operation (code, stmt,
1756 vectype_out, vectype_in,
1757 &decl1, &decl2,
1758 &code1, &code2,
1759 &dummy_int, &dummy))
1760 || (modifier == NARROW
b690cc0f 1761 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1762 &code1, &dummy_int, &dummy)))
1763 {
1764 if (vect_print_dump_info (REPORT_DETAILS))
1765 fprintf (vect_dump, "conversion not supported by target.");
1766 return false;
1767 }
1768
1769 if (modifier != NONE)
1770 {
1771 /* FORNOW: SLP not supported. */
1772 if (STMT_SLP_TYPE (stmt_info))
b8698a0f 1773 return false;
1774 }
1775
1776 if (!vec_stmt) /* transformation not required. */
1777 {
1778 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1779 return true;
1780 }
1781
1782 /** Transform. **/
1783 if (vect_print_dump_info (REPORT_DETAILS))
1784 fprintf (vect_dump, "transform conversion.");
1785
1786 /* Handle def. */
1787 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1788
1789 if (modifier == NONE && !slp_node)
1790 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1791
1792 prev_stmt_info = NULL;
1793 switch (modifier)
1794 {
1795 case NONE:
1796 for (j = 0; j < ncopies; j++)
1797 {
ebfd146a 1798 if (j == 0)
b8698a0f 1799 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1800 else
1801 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1802
1803 builtin_decl =
1804 targetm.vectorize.builtin_conversion (code,
1805 vectype_out, vectype_in);
ac47786e 1806 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
b8698a0f 1807 {
 1808 /* Arguments are ready. Create the new vector stmt. */
1809 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1810 new_temp = make_ssa_name (vec_dest, new_stmt);
1811 gimple_call_set_lhs (new_stmt, new_temp);
1812 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1813 if (slp_node)
1814 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1815 }
1816
1817 if (j == 0)
1818 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1819 else
1820 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1821 prev_stmt_info = vinfo_for_stmt (new_stmt);
1822 }
1823 break;
1824
1825 case WIDEN:
1826 /* In case the vectorization factor (VF) is bigger than the number
1827 of elements that we can fit in a vectype (nunits), we have to
1828 generate more than one vector stmt - i.e - we need to "unroll"
1829 the vector stmt by a factor VF/nunits. */
1830 for (j = 0; j < ncopies; j++)
1831 {
1832 if (j == 0)
1833 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1834 else
1835 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1836
1837 /* Generate first half of the widened result: */
1838 new_stmt
b8698a0f 1839 = vect_gen_widened_results_half (code1, decl1,
1840 vec_oprnd0, vec_oprnd1,
1841 unary_op, vec_dest, gsi, stmt);
1842 if (j == 0)
1843 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1844 else
1845 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1846 prev_stmt_info = vinfo_for_stmt (new_stmt);
1847
1848 /* Generate second half of the widened result: */
1849 new_stmt
1850 = vect_gen_widened_results_half (code2, decl2,
1851 vec_oprnd0, vec_oprnd1,
1852 unary_op, vec_dest, gsi, stmt);
1853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1854 prev_stmt_info = vinfo_for_stmt (new_stmt);
1855 }
1856 break;
1857
1858 case NARROW:
1859 /* In case the vectorization factor (VF) is bigger than the number
1860 of elements that we can fit in a vectype (nunits), we have to
1861 generate more than one vector stmt - i.e - we need to "unroll"
1862 the vector stmt by a factor VF/nunits. */
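      /* Illustrative sketch (hypothetical types): a double -> int conversion
         packs two V2DF defs into one V4SI result, roughly

           vi = VEC_PACK_FIX_TRUNC_EXPR <vd0, vd1>;

         where the actual CODE1 is whatever supportable_narrowing_operation
         reported above.  */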
1863 for (j = 0; j < ncopies; j++)
1864 {
1865 /* Handle uses. */
1866 if (j == 0)
1867 {
1868 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1869 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1870 }
1871 else
1872 {
1873 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1874 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1875 }
1876
1877 /* Arguments are ready. Create the new vector stmt. */
1878 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1879 vec_oprnd1);
1880 new_temp = make_ssa_name (vec_dest, new_stmt);
1881 gimple_assign_set_lhs (new_stmt, new_temp);
1882 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1883
1884 if (j == 0)
1885 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1886 else
1887 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1888
1889 prev_stmt_info = vinfo_for_stmt (new_stmt);
1890 }
1891
1892 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1893 }
1894
1895 if (vec_oprnds0)
b8698a0f 1896 VEC_free (tree, heap, vec_oprnds0);
1897
1898 return true;
1899}
1900/* Function vectorizable_assignment.
1901
1902 Check if STMT performs an assignment (copy) that can be vectorized.
1903 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1904 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1905 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1906
1907static bool
1908vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1909 gimple *vec_stmt, slp_tree slp_node)
1910{
1911 tree vec_dest;
1912 tree scalar_dest;
1913 tree op;
1914 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1915 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1916 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1917 tree new_temp;
1918 tree def;
1919 gimple def_stmt;
1920 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
fde9c428 1921 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
ebfd146a 1922 int ncopies;
f18b55bd 1923 int i, j;
1924 VEC(tree,heap) *vec_oprnds = NULL;
1925 tree vop;
a70d6342 1926 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1927 gimple new_stmt = NULL;
1928 stmt_vec_info prev_stmt_info = NULL;
1929 enum tree_code code;
1930 tree vectype_in;
1931
1932 /* Multiple types in SLP are handled by creating the appropriate number of
1933 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1934 case of SLP. */
1935 if (slp_node)
1936 ncopies = 1;
1937 else
1938 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1939
1940 gcc_assert (ncopies >= 1);
ebfd146a 1941
a70d6342 1942 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1943 return false;
1944
8644a673 1945 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1946 return false;
1947
1948 /* Is vectorizable assignment? */
1949 if (!is_gimple_assign (stmt))
1950 return false;
1951
1952 scalar_dest = gimple_assign_lhs (stmt);
1953 if (TREE_CODE (scalar_dest) != SSA_NAME)
1954 return false;
1955
fde9c428 1956 code = gimple_assign_rhs_code (stmt);
ebfd146a 1957 if (gimple_assign_single_p (stmt)
1958 || code == PAREN_EXPR
1959 || CONVERT_EXPR_CODE_P (code))
1960 op = gimple_assign_rhs1 (stmt);
1961 else
1962 return false;
1963
1964 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1965 &def_stmt, &def, &dt[0], &vectype_in))
1966 {
1967 if (vect_print_dump_info (REPORT_DETAILS))
1968 fprintf (vect_dump, "use not simple.");
1969 return false;
1970 }
1971
1972 /* We can handle NOP_EXPR conversions that do not change the number
1973 of elements or the vector size. */
1974 if (CONVERT_EXPR_CODE_P (code)
1975 && (!vectype_in
1976 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1977 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1978 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1979 return false;
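  /* For instance (hypothetical types), a conversion between signed and
     unsigned V4SI vectors passes the check above and is vectorized below as
     a plain copy through a VIEW_CONVERT_EXPR, while a V8HI -> V4SI
     conversion changes the number of elements and is rejected here.  */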
1980
1981 if (!vec_stmt) /* transformation not required. */
1982 {
1983 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1984 if (vect_print_dump_info (REPORT_DETAILS))
1985 fprintf (vect_dump, "=== vectorizable_assignment ===");
1986 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1987 return true;
1988 }
1989
1990 /** Transform. **/
1991 if (vect_print_dump_info (REPORT_DETAILS))
1992 fprintf (vect_dump, "transform assignment.");
1993
1994 /* Handle def. */
1995 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1996
1997 /* Handle use. */
f18b55bd 1998 for (j = 0; j < ncopies; j++)
ebfd146a 1999 {
2000 /* Handle uses. */
2001 if (j == 0)
2002 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2003 else
2004 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2005
 2006 /* Arguments are ready. Create the new vector stmt. */
ac47786e 2007 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
f18b55bd 2008 {
fde9c428 2009 if (CONVERT_EXPR_CODE_P (code))
4a73490d 2010 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2011 new_stmt = gimple_build_assign (vec_dest, vop);
2012 new_temp = make_ssa_name (vec_dest, new_stmt);
2013 gimple_assign_set_lhs (new_stmt, new_temp);
2014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2015 if (slp_node)
2016 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2017 }
2018
2019 if (slp_node)
2020 continue;
2021
2022 if (j == 0)
2023 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2024 else
2025 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2026
2027 prev_stmt_info = vinfo_for_stmt (new_stmt);
2028 }
2029
2030 VEC_free (tree, heap, vec_oprnds);
2031 return true;
2032}
2033
2034/* Function vectorizable_operation.
2035
2036 Check if STMT performs a binary or unary operation that can be vectorized.
2037 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2038 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2039 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2040
2041static bool
2042vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2043 gimple *vec_stmt, slp_tree slp_node)
2044{
2045 tree vec_dest;
2046 tree scalar_dest;
2047 tree op0, op1 = NULL;
2048 tree vec_oprnd1 = NULL_TREE;
2049 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b690cc0f 2050 tree vectype;
2051 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2052 enum tree_code code;
2053 enum machine_mode vec_mode;
2054 tree new_temp;
2055 int op_type;
2056 optab optab;
2057 int icode;
2058 enum machine_mode optab_op2_mode;
2059 tree def;
2060 gimple def_stmt;
2061 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2062 gimple new_stmt = NULL;
2063 stmt_vec_info prev_stmt_info;
b690cc0f 2064 int nunits_in;
2065 int nunits_out;
2066 tree vectype_out;
2067 int ncopies;
2068 int j, i;
2069 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2070 tree vop0, vop1;
2071 unsigned int k;
ebfd146a 2072 bool scalar_shift_arg = false;
2073 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2074 int vf;
2075
a70d6342 2076 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2077 return false;
2078
8644a673 2079 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2080 return false;
2081
2082 /* Is STMT a vectorizable binary/unary operation? */
2083 if (!is_gimple_assign (stmt))
2084 return false;
2085
2086 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2087 return false;
2088
2089 code = gimple_assign_rhs_code (stmt);
2090
2091 /* For pointer addition, we should use the normal plus for
2092 the vector addition. */
2093 if (code == POINTER_PLUS_EXPR)
2094 code = PLUS_EXPR;
2095
2096 /* Support only unary or binary operations. */
2097 op_type = TREE_CODE_LENGTH (code);
2098 if (op_type != unary_op && op_type != binary_op)
2099 {
2100 if (vect_print_dump_info (REPORT_DETAILS))
2101 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2102 return false;
2103 }
2104
2105 scalar_dest = gimple_assign_lhs (stmt);
2106 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2107
ebfd146a 2108 op0 = gimple_assign_rhs1 (stmt);
2109 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2110 &def_stmt, &def, &dt[0], &vectype))
2111 {
2112 if (vect_print_dump_info (REPORT_DETAILS))
2113 fprintf (vect_dump, "use not simple.");
2114 return false;
2115 }
2116 /* If op0 is an external or constant def use a vector type with
2117 the same size as the output vector type. */
2118 if (!vectype)
2119 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2120 if (vec_stmt)
2121 gcc_assert (vectype);
2122 if (!vectype)
2123 {
2124 if (vect_print_dump_info (REPORT_DETAILS))
2125 {
2126 fprintf (vect_dump, "no vectype for scalar type ");
2127 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2128 }
2129
2130 return false;
2131 }
2132
2133 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2134 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2135 if (nunits_out != nunits_in)
2136 return false;
2137
2138 if (op_type == binary_op)
2139 {
2140 op1 = gimple_assign_rhs2 (stmt);
b8698a0f 2141 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
a70d6342 2142 &dt[1]))
2143 {
2144 if (vect_print_dump_info (REPORT_DETAILS))
2145 fprintf (vect_dump, "use not simple.");
2146 return false;
2147 }
2148 }
2149
2150 if (loop_vinfo)
2151 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2152 else
2153 vf = 1;
2154
2155 /* Multiple types in SLP are handled by creating the appropriate number of
2156 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2157 case of SLP. */
2158 if (slp_node)
2159 ncopies = 1;
2160 else
2161 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2162
2163 gcc_assert (ncopies >= 1);
2164
2165 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2166 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2167 shift optabs. */
2168 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2169 || code == RROTATE_EXPR)
2170 {
ebfd146a 2171 /* vector shifted by vector */
8644a673 2172 if (dt[1] == vect_internal_def)
2173 {
2174 optab = optab_for_tree_code (code, vectype, optab_vector);
2175 if (vect_print_dump_info (REPORT_DETAILS))
2176 fprintf (vect_dump, "vector/vector shift/rotate found.");
2177 }
2178
 2179 /* See if the machine has a vector shifted by scalar insn, and if not,
 2180 then see if it has a vector shifted by vector insn. */
8644a673 2181 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2182 {
2183 optab = optab_for_tree_code (code, vectype, optab_scalar);
2184 if (optab
947131ba 2185 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2186 {
2187 scalar_shift_arg = true;
2188 if (vect_print_dump_info (REPORT_DETAILS))
2189 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2190 }
2191 else
2192 {
2193 optab = optab_for_tree_code (code, vectype, optab_vector);
ad6c0864 2194 if (optab
947131ba 2195 && (optab_handler (optab, TYPE_MODE (vectype))
ebfd146a 2196 != CODE_FOR_nothing))
2197 {
2198 if (vect_print_dump_info (REPORT_DETAILS))
2199 fprintf (vect_dump, "vector/vector shift/rotate found.");
2200
2201 /* Unlike the other binary operators, shifts/rotates have
2202 the rhs being int, instead of the same type as the lhs,
2203 so make sure the scalar is the right type if we are
2204 dealing with vectors of short/char. */
2205 if (dt[1] == vect_constant_def)
2206 op1 = fold_convert (TREE_TYPE (vectype), op1);
2207 }
2208 }
2209 }
2210
2211 else
2212 {
2213 if (vect_print_dump_info (REPORT_DETAILS))
2214 fprintf (vect_dump, "operand mode requires invariant argument.");
2215 return false;
2216 }
2217 }
2218 else
2219 optab = optab_for_tree_code (code, vectype, optab_default);
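  /* Illustrative example (not from the original comments): for
       x = y >> 3
     the shift amount is a constant, so the optab_scalar variant is tried
     first and SCALAR_SHIFT_ARG is set if the target supports it; for
       x = y >> z
     with Z defined inside the loop (dt[1] == vect_internal_def) the
     vector/vector shift optab is used instead.  */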
2220
2221 /* Supportable by target? */
2222 if (!optab)
2223 {
2224 if (vect_print_dump_info (REPORT_DETAILS))
2225 fprintf (vect_dump, "no optab.");
2226 return false;
2227 }
2228 vec_mode = TYPE_MODE (vectype);
947131ba 2229 icode = (int) optab_handler (optab, vec_mode);
2230 if (icode == CODE_FOR_nothing)
2231 {
2232 if (vect_print_dump_info (REPORT_DETAILS))
2233 fprintf (vect_dump, "op not supported by target.");
2234 /* Check only during analysis. */
2235 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
a70d6342 2236 || (vf < vect_min_worthwhile_factor (code)
2237 && !vec_stmt))
2238 return false;
2239 if (vect_print_dump_info (REPORT_DETAILS))
2240 fprintf (vect_dump, "proceeding using word mode.");
2241 }
2242
2243 /* Worthwhile without SIMD support? Check only during analysis. */
2244 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
a70d6342 2245 && vf < vect_min_worthwhile_factor (code)
2246 && !vec_stmt)
2247 {
2248 if (vect_print_dump_info (REPORT_DETAILS))
2249 fprintf (vect_dump, "not worthwhile without SIMD support.");
2250 return false;
2251 }
2252
2253 if (!vec_stmt) /* transformation not required. */
2254 {
2255 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2256 if (vect_print_dump_info (REPORT_DETAILS))
2257 fprintf (vect_dump, "=== vectorizable_operation ===");
2258 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2259 return true;
2260 }
2261
2262 /** Transform. **/
2263
2264 if (vect_print_dump_info (REPORT_DETAILS))
2265 fprintf (vect_dump, "transform binary/unary operation.");
2266
2267 /* Handle def. */
2268 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2269
b8698a0f 2270 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2271 created in the previous stages of the recursion, so no allocation is
2272 needed, except for the case of shift with scalar shift argument. In that
2273 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2274 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2275 In case of loop-based vectorization we allocate VECs of size 1. We
2276 allocate VEC_OPRNDS1 only in case of binary operation. */
2277 if (!slp_node)
2278 {
2279 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2280 if (op_type == binary_op)
2281 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2282 }
2283 else if (scalar_shift_arg)
b8698a0f 2284 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2285
2286 /* In case the vectorization factor (VF) is bigger than the number
2287 of elements that we can fit in a vectype (nunits), we have to generate
2288 more than one vector stmt - i.e - we need to "unroll" the
2289 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2290 from one copy of the vector stmt to the next, in the field
2291 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2292 stages to find the correct vector defs to be used when vectorizing
2293 stmts that use the defs of the current stmt. The example below illustrates
2294 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2295 4 vectorized stmts):
2296
2297 before vectorization:
2298 RELATED_STMT VEC_STMT
2299 S1: x = memref - -
2300 S2: z = x + 1 - -
2301
2302 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2303 there):
2304 RELATED_STMT VEC_STMT
2305 VS1_0: vx0 = memref0 VS1_1 -
2306 VS1_1: vx1 = memref1 VS1_2 -
2307 VS1_2: vx2 = memref2 VS1_3 -
2308 VS1_3: vx3 = memref3 - -
2309 S1: x = load - VS1_0
2310 S2: z = x + 1 - -
2311
2312 step2: vectorize stmt S2 (done here):
2313 To vectorize stmt S2 we first need to find the relevant vector
2314 def for the first operand 'x'. This is, as usual, obtained from
2315 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2316 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2317 relevant vector def 'vx0'. Having found 'vx0' we can generate
2318 the vector stmt VS2_0, and as usual, record it in the
2319 STMT_VINFO_VEC_STMT of stmt S2.
2320 When creating the second copy (VS2_1), we obtain the relevant vector
2321 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2322 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2323 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2324 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2325 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2326 chain of stmts and pointers:
2327 RELATED_STMT VEC_STMT
2328 VS1_0: vx0 = memref0 VS1_1 -
2329 VS1_1: vx1 = memref1 VS1_2 -
2330 VS1_2: vx2 = memref2 VS1_3 -
2331 VS1_3: vx3 = memref3 - -
2332 S1: x = load - VS1_0
2333 VS2_0: vz0 = vx0 + v1 VS2_1 -
2334 VS2_1: vz1 = vx1 + v1 VS2_2 -
2335 VS2_2: vz2 = vx2 + v1 VS2_3 -
2336 VS2_3: vz3 = vx3 + v1 - -
2337 S2: z = x + 1 - VS2_0 */
2338
2339 prev_stmt_info = NULL;
2340 for (j = 0; j < ncopies; j++)
2341 {
2342 /* Handle uses. */
2343 if (j == 0)
2344 {
2345 if (op_type == binary_op && scalar_shift_arg)
2346 {
2347 /* Vector shl and shr insn patterns can be defined with scalar
2348 operand 2 (shift operand). In this case, use constant or loop
2349 invariant op1 directly, without extending it to vector mode
2350 first. */
2351 optab_op2_mode = insn_data[icode].operand[2].mode;
2352 if (!VECTOR_MODE_P (optab_op2_mode))
2353 {
2354 if (vect_print_dump_info (REPORT_DETAILS))
2355 fprintf (vect_dump, "operand 1 using scalar mode.");
2356 vec_oprnd1 = op1;
2357 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2358 if (slp_node)
2359 {
2360 /* Store vec_oprnd1 for every vector stmt to be created
2361 for SLP_NODE. We check during the analysis that all the
2362 shift arguments are the same.
2363 TODO: Allow different constants for different vector
2364 stmts generated for an SLP instance. */
2365 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2366 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2367 }
2368 }
2369 }
2370
2371 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2372 (a special case for certain kind of vector shifts); otherwise,
2373 operand 1 should be of a vector type (the usual case). */
2374 if (op_type == binary_op && !vec_oprnd1)
b8698a0f 2375 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2376 slp_node);
2377 else
b8698a0f 2378 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2379 slp_node);
2380 }
2381 else
2382 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2383
2384 /* Arguments are ready. Create the new vector stmt. */
ac47786e 2385 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2386 {
2387 vop1 = ((op_type == binary_op)
2388 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2389 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2390 new_temp = make_ssa_name (vec_dest, new_stmt);
2391 gimple_assign_set_lhs (new_stmt, new_temp);
2392 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2393 if (slp_node)
2394 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2395 }
2396
2397 if (slp_node)
2398 continue;
2399
2400 if (j == 0)
2401 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2402 else
2403 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2404 prev_stmt_info = vinfo_for_stmt (new_stmt);
2405 }
2406
2407 VEC_free (tree, heap, vec_oprnds0);
2408 if (vec_oprnds1)
2409 VEC_free (tree, heap, vec_oprnds1);
2410
2411 return true;
2412}
2413
2414
2415/* Get vectorized definitions for loop-based vectorization. For the first
2416 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2417 scalar operand), and for the rest we get a copy with
2418 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2419 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2420 The vectors are collected into VEC_OPRNDS. */
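/* For example (illustrative): when called with MULTI_STEP_CVT == 1 this
   routine pushes two vector defs, recurses once and pushes two more, so four
   defs end up in VEC_OPRNDS for a two-step narrowing of one operand.  */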
2421
2422static void
b8698a0f 2423vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2424 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2425{
2426 tree vec_oprnd;
2427
2428 /* Get first vector operand. */
2429 /* All the vector operands except the very first one (that is scalar oprnd)
2430 are stmt copies. */
b8698a0f 2431 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2432 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2433 else
2434 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2435
2436 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2437
2438 /* Get second vector operand. */
2439 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2440 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
b8698a0f 2441
2442 *oprnd = vec_oprnd;
2443
b8698a0f 2444 /* For conversion in multiple steps, continue to get operands
2445 recursively. */
2446 if (multi_step_cvt)
b8698a0f 2447 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2448}
2449
2450
2451/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
b8698a0f 2452 For multi-step conversions store the resulting vectors and call the function
2453 recursively. */
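/* Sketch of a single step (hypothetical types): adjacent operands are
   paired, so four V4SI inputs produce two statements such as

     vh0 = VEC_PACK_TRUNC_EXPR <vi0, vi1>;
     vh1 = VEC_PACK_TRUNC_EXPR <vi2, vi3>;

   whose results either feed the next recursive step or are recorded as the
   final vector stmts.  */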
2454
2455static void
2456vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2457 int multi_step_cvt, gimple stmt,
2458 VEC (tree, heap) *vec_dsts,
2459 gimple_stmt_iterator *gsi,
2460 slp_tree slp_node, enum tree_code code,
2461 stmt_vec_info *prev_stmt_info)
2462{
2463 unsigned int i;
2464 tree vop0, vop1, new_tmp, vec_dest;
2465 gimple new_stmt;
2466 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2467
b8698a0f 2468 vec_dest = VEC_pop (tree, vec_dsts);
2469
2470 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2471 {
2472 /* Create demotion operation. */
2473 vop0 = VEC_index (tree, *vec_oprnds, i);
2474 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2475 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2476 new_tmp = make_ssa_name (vec_dest, new_stmt);
2477 gimple_assign_set_lhs (new_stmt, new_tmp);
2478 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2479
2480 if (multi_step_cvt)
2481 /* Store the resulting vector for next recursive call. */
b8698a0f 2482 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2483 else
2484 {
b8698a0f 2485 /* This is the last step of the conversion sequence. Store the
2486 vectors in SLP_NODE or in vector info of the scalar statement
2487 (or in STMT_VINFO_RELATED_STMT chain). */
2488 if (slp_node)
2489 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2490 else
2491 {
2492 if (!*prev_stmt_info)
2493 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2494 else
2495 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2496
2497 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2498 }
2499 }
2500 }
2501
 2502 /* For multi-step demotion operations we first generate demotion operations
 b8698a0f 2503 from the source type to the intermediate types, and then combine the
 2504 results (stored in VEC_OPRNDS) in a demotion operation to the destination
 2505 type. */
2506 if (multi_step_cvt)
2507 {
 2508 /* At each level of recursion we have half of the operands we had at the
 2509 previous level. */
2510 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
b8698a0f 2511 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2512 stmt, vec_dsts, gsi, slp_node,
2513 code, prev_stmt_info);
2514 }
2515}
2516
2517
2518/* Function vectorizable_type_demotion
2519
2520 Check if STMT performs a binary or unary operation that involves
2521 type demotion, and if it can be vectorized.
2522 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2523 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2524 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
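/* Illustrative example (assumed types): for
     short_var = (short) int_var;
   in a loop, vectype_in is V4SI and vectype_out is V8HI, so every output
   vector consumes two input vectors; if the gap between the types is larger
   the demotion is done in several steps through intermediate types
   (MULTI_STEP_CVT).  */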
2525
2526static bool
2527vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2528 gimple *vec_stmt, slp_tree slp_node)
2529{
2530 tree vec_dest;
2531 tree scalar_dest;
2532 tree op0;
2533 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2534 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2535 enum tree_code code, code1 = ERROR_MARK;
2536 tree def;
2537 gimple def_stmt;
2538 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2539 stmt_vec_info prev_stmt_info;
2540 int nunits_in;
2541 int nunits_out;
2542 tree vectype_out;
2543 int ncopies;
2544 int j, i;
2545 tree vectype_in;
2546 int multi_step_cvt = 0;
2547 VEC (tree, heap) *vec_oprnds0 = NULL;
2548 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2549 tree last_oprnd, intermediate_type;
2550
2551 /* FORNOW: not supported by basic block SLP vectorization. */
2552 gcc_assert (loop_vinfo);
2553
2554 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2555 return false;
2556
8644a673 2557 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2558 return false;
2559
2560 /* Is STMT a vectorizable type-demotion operation? */
2561 if (!is_gimple_assign (stmt))
2562 return false;
2563
2564 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2565 return false;
2566
2567 code = gimple_assign_rhs_code (stmt);
2568 if (!CONVERT_EXPR_CODE_P (code))
2569 return false;
2570
2571 scalar_dest = gimple_assign_lhs (stmt);
2572 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2573
2574 /* Check the operands of the operation. */
ebfd146a 2575 op0 = gimple_assign_rhs1 (stmt);
2576 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2577 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2578 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2579 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2580 && CONVERT_EXPR_CODE_P (code))))
2581 return false;
2582 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2583 &def_stmt, &def, &dt[0], &vectype_in))
2584 {
2585 if (vect_print_dump_info (REPORT_DETAILS))
2586 fprintf (vect_dump, "use not simple.");
2587 return false;
2588 }
2589 /* If op0 is an external def use a vector type with the
2590 same size as the output vector type if possible. */
2591 if (!vectype_in)
2592 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2593 if (vec_stmt)
2594 gcc_assert (vectype_in);
ebfd146a 2595 if (!vectype_in)
2596 {
2597 if (vect_print_dump_info (REPORT_DETAILS))
2598 {
2599 fprintf (vect_dump, "no vectype for scalar type ");
2600 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2601 }
2602
2603 return false;
2604 }
ebfd146a 2605
b690cc0f 2606 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2607 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2608 if (nunits_in >= nunits_out)
2609 return false;
2610
2611 /* Multiple types in SLP are handled by creating the appropriate number of
2612 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2613 case of SLP. */
2614 if (slp_node)
2615 ncopies = 1;
2616 else
2617 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2618 gcc_assert (ncopies >= 1);
2619
ebfd146a 2620 /* Supportable by target? */
2621 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2622 &code1, &multi_step_cvt, &interm_types))
2623 return false;
2624
2625 if (!vec_stmt) /* transformation not required. */
2626 {
2627 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2628 if (vect_print_dump_info (REPORT_DETAILS))
2629 fprintf (vect_dump, "=== vectorizable_demotion ===");
2630 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2631 return true;
2632 }
2633
2634 /** Transform. **/
2635 if (vect_print_dump_info (REPORT_DETAILS))
2636 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2637 ncopies);
2638
2639 /* In case of multi-step demotion, we first generate demotion operations to
 2640 the intermediate types, and then from those types to the final one.
ebfd146a 2641 We create vector destinations for the intermediate type (TYPES) received
b8698a0f 2642 from supportable_narrowing_operation, and store them in the correct order
2643 for future use in vect_create_vectorized_demotion_stmts(). */
2644 if (multi_step_cvt)
2645 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2646 else
2647 vec_dsts = VEC_alloc (tree, heap, 1);
b8698a0f 2648
2649 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2650 VEC_quick_push (tree, vec_dsts, vec_dest);
2651
2652 if (multi_step_cvt)
2653 {
b8698a0f 2654 for (i = VEC_length (tree, interm_types) - 1;
2655 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2656 {
b8698a0f 2657 vec_dest = vect_create_destination_var (scalar_dest,
2658 intermediate_type);
2659 VEC_quick_push (tree, vec_dsts, vec_dest);
2660 }
2661 }
2662
2663 /* In case the vectorization factor (VF) is bigger than the number
2664 of elements that we can fit in a vectype (nunits), we have to generate
2665 more than one vector stmt - i.e - we need to "unroll" the
2666 vector stmt by a factor VF/nunits. */
2667 last_oprnd = op0;
2668 prev_stmt_info = NULL;
2669 for (j = 0; j < ncopies; j++)
2670 {
2671 /* Handle uses. */
2672 if (slp_node)
b5aeb3bb 2673 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2674 else
2675 {
2676 VEC_free (tree, heap, vec_oprnds0);
2677 vec_oprnds0 = VEC_alloc (tree, heap,
2678 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
b8698a0f 2679 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2680 vect_pow2 (multi_step_cvt) - 1);
2681 }
2682
2683 /* Arguments are ready. Create the new vector stmts. */
2684 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
b8698a0f 2685 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
ebfd146a 2686 multi_step_cvt, stmt, tmp_vec_dsts,
b8698a0f 2687 gsi, slp_node, code1,
2688 &prev_stmt_info);
2689 }
2690
2691 VEC_free (tree, heap, vec_oprnds0);
2692 VEC_free (tree, heap, vec_dsts);
2693 VEC_free (tree, heap, tmp_vec_dsts);
2694 VEC_free (tree, heap, interm_types);
2695
2696 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2697 return true;
2698}
2699
2700
2701/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
b8698a0f 2702 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2703 the resulting vectors and call the function recursively. */
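/* Sketch of a single step (hypothetical types): every V8HI operand yields
   two V4SI results, one from the "low" half and one from the "high" half,
   both produced by vect_gen_widened_results_half; they are either recorded
   as the final vector stmts or fed to the next recursion level for a
   further promotion step.  */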
2704
2705static void
2706vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2707 VEC (tree, heap) **vec_oprnds1,
2708 int multi_step_cvt, gimple stmt,
2709 VEC (tree, heap) *vec_dsts,
2710 gimple_stmt_iterator *gsi,
2711 slp_tree slp_node, enum tree_code code1,
b8698a0f 2712 enum tree_code code2, tree decl1,
ebfd146a
IR
2713 tree decl2, int op_type,
2714 stmt_vec_info *prev_stmt_info)
2715{
2716 int i;
2717 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2718 gimple new_stmt1, new_stmt2;
2719 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2720 VEC (tree, heap) *vec_tmp;
2721
2722 vec_dest = VEC_pop (tree, vec_dsts);
2723 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2724
ac47786e 2725 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2726 {
2727 if (op_type == binary_op)
2728 vop1 = VEC_index (tree, *vec_oprnds1, i);
2729 else
2730 vop1 = NULL_TREE;
2731
2732 /* Generate the two halves of promotion operation. */
b8698a0f 2733 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2734 op_type, vec_dest, gsi, stmt);
2735 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2736 op_type, vec_dest, gsi, stmt);
2737 if (is_gimple_call (new_stmt1))
2738 {
2739 new_tmp1 = gimple_call_lhs (new_stmt1);
2740 new_tmp2 = gimple_call_lhs (new_stmt2);
2741 }
2742 else
2743 {
2744 new_tmp1 = gimple_assign_lhs (new_stmt1);
2745 new_tmp2 = gimple_assign_lhs (new_stmt2);
2746 }
2747
2748 if (multi_step_cvt)
2749 {
2750 /* Store the results for the recursive call. */
2751 VEC_quick_push (tree, vec_tmp, new_tmp1);
2752 VEC_quick_push (tree, vec_tmp, new_tmp2);
2753 }
2754 else
2755 {
 2756 /* Last step of the promotion sequence - store the results. */
2757 if (slp_node)
2758 {
2759 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2760 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2761 }
2762 else
2763 {
2764 if (!*prev_stmt_info)
2765 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2766 else
2767 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2768
2769 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2770 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2771 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2772 }
2773 }
2774 }
2775
2776 if (multi_step_cvt)
2777 {
 b8698a0f 2778 /* For a multi-step promotion operation we call the function
 2779 recursively for every stage. We start from the input type,
 2780 create promotion operations to the intermediate types, and then
 2781 create promotions to the output type. */
2782 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2783 VEC_free (tree, heap, vec_tmp);
2784 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2785 multi_step_cvt - 1, stmt,
2786 vec_dsts, gsi, slp_node, code1,
2787 code2, decl2, decl2, op_type,
2788 prev_stmt_info);
2789 }
2790}
b8698a0f 2791
2792
2793/* Function vectorizable_type_promotion
2794
2795 Check if STMT performs a binary or unary operation that involves
2796 type promotion, and if it can be vectorized.
2797 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2798 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2799 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2800
2801static bool
2802vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2803 gimple *vec_stmt, slp_tree slp_node)
2804{
2805 tree vec_dest;
2806 tree scalar_dest;
2807 tree op0, op1 = NULL;
2808 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2809 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2810 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2811 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2812 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
b8698a0f 2813 int op_type;
2814 tree def;
2815 gimple def_stmt;
2816 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2817 stmt_vec_info prev_stmt_info;
2818 int nunits_in;
2819 int nunits_out;
2820 tree vectype_out;
2821 int ncopies;
2822 int j, i;
2823 tree vectype_in;
2824 tree intermediate_type = NULL_TREE;
2825 int multi_step_cvt = 0;
2826 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2827 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
b8698a0f 2828
2829 /* FORNOW: not supported by basic block SLP vectorization. */
2830 gcc_assert (loop_vinfo);
b8698a0f 2831
2832 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2833 return false;
2834
8644a673 2835 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2836 return false;
2837
2838 /* Is STMT a vectorizable type-promotion operation? */
2839 if (!is_gimple_assign (stmt))
2840 return false;
2841
2842 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2843 return false;
2844
2845 code = gimple_assign_rhs_code (stmt);
2846 if (!CONVERT_EXPR_CODE_P (code)
2847 && code != WIDEN_MULT_EXPR)
2848 return false;
2849
2850 scalar_dest = gimple_assign_lhs (stmt);
2851 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2852
2853 /* Check the operands of the operation. */
ebfd146a 2854 op0 = gimple_assign_rhs1 (stmt);
2855 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2856 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2857 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2858 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2859 && CONVERT_EXPR_CODE_P (code))))
2860 return false;
2861 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2862 &def_stmt, &def, &dt[0], &vectype_in))
2863 {
2864 if (vect_print_dump_info (REPORT_DETAILS))
2865 fprintf (vect_dump, "use not simple.");
2866 return false;
2867 }
2868 /* If op0 is an external or constant def use a vector type with
2869 the same size as the output vector type. */
2870 if (!vectype_in)
2871 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2872 if (vec_stmt)
2873 gcc_assert (vectype_in);
ebfd146a 2874 if (!vectype_in)
2875 {
2876 if (vect_print_dump_info (REPORT_DETAILS))
2877 {
2878 fprintf (vect_dump, "no vectype for scalar type ");
2879 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2880 }
2881
2882 return false;
2883 }
ebfd146a 2884
b690cc0f 2885 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2886 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2887 if (nunits_in <= nunits_out)
2888 return false;
2889
2890 /* Multiple types in SLP are handled by creating the appropriate number of
2891 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2892 case of SLP. */
2893 if (slp_node)
2894 ncopies = 1;
2895 else
2896 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2897
2898 gcc_assert (ncopies >= 1);
2899
2900 op_type = TREE_CODE_LENGTH (code);
2901 if (op_type == binary_op)
2902 {
2903 op1 = gimple_assign_rhs2 (stmt);
a70d6342 2904 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2905 {
2906 if (vect_print_dump_info (REPORT_DETAILS))
2907 fprintf (vect_dump, "use not simple.");
2908 return false;
2909 }
2910 }
2911
2912 /* Supportable by target? */
b690cc0f 2913 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2914 &decl1, &decl2, &code1, &code2,
2915 &multi_step_cvt, &interm_types))
2916 return false;
2917
2918 /* Binary widening operation can only be supported directly by the
2919 architecture. */
2920 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2921
2922 if (!vec_stmt) /* transformation not required. */
2923 {
2924 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2925 if (vect_print_dump_info (REPORT_DETAILS))
2926 fprintf (vect_dump, "=== vectorizable_promotion ===");
2927 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2928 return true;
2929 }
2930
2931 /** Transform. **/
2932
2933 if (vect_print_dump_info (REPORT_DETAILS))
2934 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2935 ncopies);
2936
2937 /* Handle def. */
b8698a0f 2938 /* In case of multi-step promotion, we first generate promotion operations
ebfd146a 2939 to the intermediate types, and then from that types to the final one.
2940 We store vector destination in VEC_DSTS in the correct order for
2941 recursive creation of promotion operations in
2942 vect_create_vectorized_promotion_stmts(). Vector destinations are created
 2943 according to TYPES received from supportable_widening_operation(). */
2944 if (multi_step_cvt)
2945 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2946 else
2947 vec_dsts = VEC_alloc (tree, heap, 1);
2948
2949 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2950 VEC_quick_push (tree, vec_dsts, vec_dest);
2951
2952 if (multi_step_cvt)
2953 {
2954 for (i = VEC_length (tree, interm_types) - 1;
2955 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2956 {
2957 vec_dest = vect_create_destination_var (scalar_dest,
2958 intermediate_type);
2959 VEC_quick_push (tree, vec_dsts, vec_dest);
2960 }
2961 }
b8698a0f 2962
2963 if (!slp_node)
2964 {
b8698a0f 2965 vec_oprnds0 = VEC_alloc (tree, heap,
2966 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2967 if (op_type == binary_op)
2968 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2969 }
2970
2971 /* In case the vectorization factor (VF) is bigger than the number
2972 of elements that we can fit in a vectype (nunits), we have to generate
2973 more than one vector stmt - i.e - we need to "unroll" the
2974 vector stmt by a factor VF/nunits. */
2975
2976 prev_stmt_info = NULL;
2977 for (j = 0; j < ncopies; j++)
2978 {
2979 /* Handle uses. */
2980 if (j == 0)
2981 {
2982 if (slp_node)
b5aeb3bb 2983 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2984 else
2985 {
2986 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2987 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2988 if (op_type == binary_op)
2989 {
2990 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2991 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2992 }
2993 }
2994 }
2995 else
2996 {
2997 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2998 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2999 if (op_type == binary_op)
3000 {
3001 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3002 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3003 }
3004 }
3005
3006 /* Arguments are ready. Create the new vector stmts. */
3007 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3008 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
b8698a0f 3009 multi_step_cvt, stmt,
3010 tmp_vec_dsts,
3011 gsi, slp_node, code1, code2,
3012 decl1, decl2, op_type,
3013 &prev_stmt_info);
3014 }
3015
3016 VEC_free (tree, heap, vec_dsts);
3017 VEC_free (tree, heap, tmp_vec_dsts);
3018 VEC_free (tree, heap, interm_types);
3019 VEC_free (tree, heap, vec_oprnds0);
3020 VEC_free (tree, heap, vec_oprnds1);
3021
3022 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3023 return true;
3024}
3025
3026
3027/* Function vectorizable_store.
3028
3029 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3030 can be vectorized.
3031 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3032 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3033 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3034
3035static bool
3036vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3037 slp_tree slp_node)
3038{
3039 tree scalar_dest;
3040 tree data_ref;
3041 tree op;
3042 tree vec_oprnd = NULL_TREE;
3043 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3044 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3045 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3046 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 3047 struct loop *loop = NULL;
3048 enum machine_mode vec_mode;
3049 tree dummy;
3050 enum dr_alignment_support alignment_support_scheme;
3051 tree def;
3052 gimple def_stmt;
3053 enum vect_def_type dt;
3054 stmt_vec_info prev_stmt_info = NULL;
3055 tree dataref_ptr = NULL_TREE;
3056 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3057 int ncopies;
3058 int j;
3059 gimple next_stmt, first_stmt = NULL;
3060 bool strided_store = false;
3061 unsigned int group_size, i;
3062 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3063 bool inv_p;
3064 VEC(tree,heap) *vec_oprnds = NULL;
3065 bool slp = (slp_node != NULL);
ebfd146a 3066 unsigned int vec_num;
3067 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3068
3069 if (loop_vinfo)
3070 loop = LOOP_VINFO_LOOP (loop_vinfo);
3071
3072 /* Multiple types in SLP are handled by creating the appropriate number of
3073 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3074 case of SLP. */
3075 if (slp)
3076 ncopies = 1;
3077 else
3078 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3079
3080 gcc_assert (ncopies >= 1);
3081
3082 /* FORNOW. This restriction should be relaxed. */
a70d6342 3083 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3084 {
3085 if (vect_print_dump_info (REPORT_DETAILS))
3086 fprintf (vect_dump, "multiple types in nested loop.");
3087 return false;
3088 }
3089
a70d6342 3090 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3091 return false;
3092
8644a673 3093 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3094 return false;
3095
3096 /* Is vectorizable store? */
3097
3098 if (!is_gimple_assign (stmt))
3099 return false;
3100
3101 scalar_dest = gimple_assign_lhs (stmt);
3102 if (TREE_CODE (scalar_dest) != ARRAY_REF
3103 && TREE_CODE (scalar_dest) != INDIRECT_REF
3104 && TREE_CODE (scalar_dest) != COMPONENT_REF
3105 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3106 && TREE_CODE (scalar_dest) != REALPART_EXPR
3107 && TREE_CODE (scalar_dest) != MEM_REF)
3108 return false;
3109
3110 gcc_assert (gimple_assign_single_p (stmt));
3111 op = gimple_assign_rhs1 (stmt);
a70d6342 3112 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3113 {
3114 if (vect_print_dump_info (REPORT_DETAILS))
3115 fprintf (vect_dump, "use not simple.");
3116 return false;
3117 }
3118
3119 /* The scalar rhs type needs to be trivially convertible to the vector
3120 component type. This should always be the case. */
3121 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
b8698a0f 3122 {
3123 if (vect_print_dump_info (REPORT_DETAILS))
3124 fprintf (vect_dump, "??? operands of different types");
3125 return false;
3126 }
3127
3128 vec_mode = TYPE_MODE (vectype);
3129 /* FORNOW. In some cases can vectorize even if data-type not supported
3130 (e.g. - array initialization with 0). */
947131ba 3131 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3132 return false;
3133
3134 if (!STMT_VINFO_DATA_REF (stmt_info))
3135 return false;
3136
3137 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3138 {
3139 strided_store = true;
3140 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3141 if (!vect_strided_store_supported (vectype)
3142 && !PURE_SLP_STMT (stmt_info) && !slp)
3143 return false;
b8698a0f 3144
3145 if (first_stmt == stmt)
3146 {
3147 /* STMT is the leader of the group. Check the operands of all the
3148 stmts of the group. */
3149 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3150 while (next_stmt)
3151 {
3152 gcc_assert (gimple_assign_single_p (next_stmt));
3153 op = gimple_assign_rhs1 (next_stmt);
b8698a0f 3154 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
a70d6342 3155 &def, &dt))
3156 {
3157 if (vect_print_dump_info (REPORT_DETAILS))
3158 fprintf (vect_dump, "use not simple.");
3159 return false;
3160 }
3161 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3162 }
3163 }
3164 }
3165
3166 if (!vec_stmt) /* transformation not required. */
3167 {
3168 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3169 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3170 return true;
3171 }
3172
3173 /** Transform. **/
3174
3175 if (strided_store)
3176 {
3177 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3178 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3179
3180 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3181
3182 /* FORNOW */
a70d6342 3183 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3184
3185 /* We vectorize all the stmts of the interleaving group when we
3186 reach the last stmt in the group. */
b8698a0f 3187 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3188 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3189 && !slp)
3190 {
3191 *vec_stmt = NULL;
3192 return true;
3193 }
3194
3195 if (slp)
3196 {
3197 strided_store = false;
3198 /* VEC_NUM is the number of vect stmts to be created for this
3199 group. */
3200 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3201 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3202 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3203 }
ebfd146a 3204 else
3205 /* VEC_NUM is the number of vect stmts to be created for this
3206 group. */
3207 vec_num = group_size;
3208 }
b8698a0f 3209 else
3210 {
3211 first_stmt = stmt;
3212 first_dr = dr;
3213 group_size = vec_num = 1;
ebfd146a 3214 }
b8698a0f 3215
3216 if (vect_print_dump_info (REPORT_DETAILS))
3217 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3218
3219 dr_chain = VEC_alloc (tree, heap, group_size);
3220 oprnds = VEC_alloc (tree, heap, group_size);
3221
720f5239 3222 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a 3223 gcc_assert (alignment_support_scheme);
3224
3225 /* In case the vectorization factor (VF) is bigger than the number
3226 of elements that we can fit in a vectype (nunits), we have to generate
3227 more than one vector stmt - i.e - we need to "unroll" the
b8698a0f 3228 vector stmt by a factor VF/nunits. For more details see documentation in
3229 vect_get_vec_def_for_copy_stmt. */
3230
3231 /* In case of interleaving (non-unit strided access):
3232
3233 S1: &base + 2 = x2
3234 S2: &base = x0
3235 S3: &base + 1 = x1
3236 S4: &base + 3 = x3
3237
3238 We create vectorized stores starting from base address (the access of the
3239 first stmt in the chain (S2 in the above example), when the last store stmt
3240 of the chain (S4) is reached:
3241
3242 VS1: &base = vx2
3243 VS2: &base + vec_size*1 = vx0
3244 VS3: &base + vec_size*2 = vx1
3245 VS4: &base + vec_size*3 = vx3
3246
3247 Then permutation statements are generated:
3248
3249 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3250 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3251 ...
b8698a0f 3252
ebfd146a
IR
3253 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3254 (the order of the data-refs in the output of vect_permute_store_chain
3255 corresponds to the order of scalar stmts in the interleaving chain - see
3256 the documentation of vect_permute_store_chain()).
3257
3258 In case of both multiple types and interleaving, above vector stores and
3259 permutation stmts are created for every copy. The result vector stmts are
3260 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
b8698a0f 3261 STMT_VINFO_RELATED_STMT for the next copies.
3262 */
3263
3264 prev_stmt_info = NULL;
3265 for (j = 0; j < ncopies; j++)
3266 {
3267 gimple new_stmt;
3268 gimple ptr_incr;
3269
3270 if (j == 0)
3271 {
3272 if (slp)
3273 {
3274 /* Get vectorized arguments for SLP_NODE. */
b5aeb3bb 3275 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
ebfd146a
IR
3276
3277 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3278 }
3279 else
3280 {
b8698a0f
L
3281 /* For interleaved stores we collect vectorized defs for all the
3282 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3283 used as an input to vect_permute_store_chain(), and OPRNDS as
ebfd146a
IR
3284 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3285
3286 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3287 OPRNDS are of size 1. */
b8698a0f 3288 next_stmt = first_stmt;
ebfd146a
IR
3289 for (i = 0; i < group_size; i++)
3290 {
b8698a0f
L
3291 /* Since gaps are not supported for interleaved stores,
3292 GROUP_SIZE is the exact number of stmts in the chain.
3293 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3294 there is no interleaving, GROUP_SIZE is 1, and only one
ebfd146a
IR
3295 iteration of the loop will be executed. */
3296 gcc_assert (next_stmt
3297 && gimple_assign_single_p (next_stmt));
3298 op = gimple_assign_rhs1 (next_stmt);
3299
b8698a0f 3300 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
ebfd146a 3301 NULL);
b8698a0f
L
3302 VEC_quick_push(tree, dr_chain, vec_oprnd);
3303 VEC_quick_push(tree, oprnds, vec_oprnd);
ebfd146a
IR
3304 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3305 }
3306 }
3307
 3308 /* We should have caught mismatched types earlier. */
3309 gcc_assert (useless_type_conversion_p (vectype,
3310 TREE_TYPE (vec_oprnd)));
b8698a0f
L
3311 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3312 &dummy, &ptr_incr, false,
5006671f 3313 &inv_p);
a70d6342 3314 gcc_assert (bb_vinfo || !inv_p);
ebfd146a 3315 }
b8698a0f 3316 else
ebfd146a 3317 {
b8698a0f
L
3318 /* For interleaved stores we created vectorized defs for all the
3319 defs stored in OPRNDS in the previous iteration (previous copy).
3320 DR_CHAIN is then used as an input to vect_permute_store_chain(),
ebfd146a
IR
3321 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3322 next copy.
3323 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3324 OPRNDS are of size 1. */
3325 for (i = 0; i < group_size; i++)
3326 {
3327 op = VEC_index (tree, oprnds, i);
b8698a0f 3328 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
a70d6342 3329 &dt);
b8698a0f 3330 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
ebfd146a
IR
3331 VEC_replace(tree, dr_chain, i, vec_oprnd);
3332 VEC_replace(tree, oprnds, i, vec_oprnd);
3333 }
b8698a0f 3334 dataref_ptr =
ebfd146a
IR
3335 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3336 }
3337
3338 if (strided_store)
3339 {
b8698a0f 3340 result_chain = VEC_alloc (tree, heap, group_size);
ebfd146a
IR
3341 /* Permute. */
3342 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3343 &result_chain))
3344 return false;
3345 }
3346
3347 next_stmt = first_stmt;
3348 for (i = 0; i < vec_num; i++)
3349 {
be1ac4ec
RG
3350 struct ptr_info_def *pi;
3351
ebfd146a
IR
3352 if (i > 0)
3353 /* Bump the vector pointer. */
3354 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3355 NULL_TREE);
3356
3357 if (slp)
3358 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3359 else if (strided_store)
b8698a0f 3360 /* For strided stores vectorized defs are interleaved in
ebfd146a
IR
3361 vect_permute_store_chain(). */
3362 vec_oprnd = VEC_index (tree, result_chain, i);
3363
be1ac4ec
RG
3364 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3365 build_int_cst (reference_alias_ptr_type
3366 (DR_REF (first_dr)), 0));
3367 pi = get_ptr_info (dataref_ptr);
3368 pi->align = TYPE_ALIGN_UNIT (vectype);
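	  /* Three possibilities below: the access is known to be
	     vector-aligned, its misalignment is unknown (so only element
	     alignment can be assumed on the access type), or its
	     misalignment is known but non-zero.  */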
8f439681 3369 if (aligned_access_p (first_dr))
be1ac4ec
RG
3370 pi->misalign = 0;
3371 else if (DR_MISALIGNMENT (first_dr) == -1)
3372 {
3373 TREE_TYPE (data_ref)
3374 = build_aligned_type (TREE_TYPE (data_ref),
3375 TYPE_ALIGN (TREE_TYPE (vectype)));
3376 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3377 pi->misalign = 0;
3378 }
3379 else
3380 {
3381 TREE_TYPE (data_ref)
3382 = build_aligned_type (TREE_TYPE (data_ref),
3383 TYPE_ALIGN (TREE_TYPE (vectype)));
3384 pi->misalign = DR_MISALIGNMENT (first_dr);
3385 }
8f439681 3386
ebfd146a
IR
3387 /* Arguments are ready. Create the new vector stmt. */
3388 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3389 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3390 mark_symbols_for_renaming (new_stmt);
3391
3392 if (slp)
3393 continue;
b8698a0f 3394
ebfd146a
IR
3395 if (j == 0)
3396 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3397 else
3398 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3399
3400 prev_stmt_info = vinfo_for_stmt (new_stmt);
3401 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3402 if (!next_stmt)
3403 break;
3404 }
3405 }
3406
b8698a0f
L
3407 VEC_free (tree, heap, dr_chain);
3408 VEC_free (tree, heap, oprnds);
ebfd146a 3409 if (result_chain)
b8698a0f 3410 VEC_free (tree, heap, result_chain);
ebfd146a
IR
3411
3412 return true;
3413}
3414
3415/* vectorizable_load.
3416
b8698a0f
L
 3417 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3418 can be vectorized.
3419 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
ebfd146a
IR
3420 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3421 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3422
3423static bool
3424vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3425 slp_tree slp_node, slp_instance slp_node_instance)
3426{
3427 tree scalar_dest;
3428 tree vec_dest = NULL;
3429 tree data_ref = NULL;
3430 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
b8698a0f 3431 stmt_vec_info prev_stmt_info;
ebfd146a 3432 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 3433 struct loop *loop = NULL;
ebfd146a 3434 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
a70d6342 3435 bool nested_in_vect_loop = false;
ebfd146a
IR
3436 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3437 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3438 tree new_temp;
947131ba 3439 enum machine_mode mode;
ebfd146a
IR
3440 gimple new_stmt = NULL;
3441 tree dummy;
3442 enum dr_alignment_support alignment_support_scheme;
3443 tree dataref_ptr = NULL_TREE;
3444 gimple ptr_incr;
3445 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3446 int ncopies;
3447 int i, j, group_size;
3448 tree msq = NULL_TREE, lsq;
3449 tree offset = NULL_TREE;
3450 tree realignment_token = NULL_TREE;
3451 gimple phi = NULL;
3452 VEC(tree,heap) *dr_chain = NULL;
3453 bool strided_load = false;
3454 gimple first_stmt;
3455 tree scalar_type;
3456 bool inv_p;
3457 bool compute_in_loop = false;
3458 struct loop *at_loop;
3459 int vec_num;
3460 bool slp = (slp_node != NULL);
3461 bool slp_perm = false;
3462 enum tree_code code;
a70d6342
IR
3463 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3464 int vf;
3465
3466 if (loop_vinfo)
3467 {
3468 loop = LOOP_VINFO_LOOP (loop_vinfo);
3469 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3470 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3471 }
3472 else
3533e503 3473 vf = 1;
ebfd146a
IR
3474
3475 /* Multiple types in SLP are handled by creating the appropriate number of
3476 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3477 case of SLP. */
3478 if (slp)
3479 ncopies = 1;
3480 else
3481 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3482
3483 gcc_assert (ncopies >= 1);
3484
3485 /* FORNOW. This restriction should be relaxed. */
3486 if (nested_in_vect_loop && ncopies > 1)
3487 {
3488 if (vect_print_dump_info (REPORT_DETAILS))
3489 fprintf (vect_dump, "multiple types in nested loop.");
3490 return false;
3491 }
3492
a70d6342 3493 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
ebfd146a
IR
3494 return false;
3495
8644a673 3496 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
ebfd146a
IR
3497 return false;
3498
3499 /* Is vectorizable load? */
3500 if (!is_gimple_assign (stmt))
3501 return false;
3502
3503 scalar_dest = gimple_assign_lhs (stmt);
3504 if (TREE_CODE (scalar_dest) != SSA_NAME)
3505 return false;
3506
3507 code = gimple_assign_rhs_code (stmt);
3508 if (code != ARRAY_REF
3509 && code != INDIRECT_REF
e9dbe7bb
IR
3510 && code != COMPONENT_REF
3511 && code != IMAGPART_EXPR
70f34814
RG
3512 && code != REALPART_EXPR
3513 && code != MEM_REF)
ebfd146a
IR
3514 return false;
3515
3516 if (!STMT_VINFO_DATA_REF (stmt_info))
3517 return false;
3518
3519 scalar_type = TREE_TYPE (DR_REF (dr));
947131ba 3520 mode = TYPE_MODE (vectype);
ebfd146a
IR
3521
3522 /* FORNOW. In some cases can vectorize even if data-type not supported
3523 (e.g. - data copies). */
947131ba 3524 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
ebfd146a
IR
3525 {
3526 if (vect_print_dump_info (REPORT_DETAILS))
3527 fprintf (vect_dump, "Aligned load, but unsupported type.");
3528 return false;
3529 }
3530
3531 /* The vector component type needs to be trivially convertible to the
3532 scalar lhs. This should always be the case. */
3533 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
b8698a0f 3534 {
ebfd146a
IR
3535 if (vect_print_dump_info (REPORT_DETAILS))
3536 fprintf (vect_dump, "??? operands of different types");
3537 return false;
3538 }
3539
3540 /* Check if the load is a part of an interleaving chain. */
3541 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3542 {
3543 strided_load = true;
3544 /* FORNOW */
3545 gcc_assert (! nested_in_vect_loop);
3546
3547 /* Check if interleaving is supported. */
3548 if (!vect_strided_load_supported (vectype)
3549 && !PURE_SLP_STMT (stmt_info) && !slp)
3550 return false;
3551 }
3552
3553 if (!vec_stmt) /* transformation not required. */
3554 {
3555 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3556 vect_model_load_cost (stmt_info, ncopies, NULL);
3557 return true;
3558 }
3559
3560 if (vect_print_dump_info (REPORT_DETAILS))
3561 fprintf (vect_dump, "transform load.");
3562
3563 /** Transform. **/
3564
3565 if (strided_load)
3566 {
3567 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3568 /* Check if the chain of loads is already vectorized. */
3569 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3570 {
3571 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3572 return true;
3573 }
3574 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3575 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3576
3577 /* VEC_NUM is the number of vect stmts to be created for this group. */
3578 if (slp)
3579 {
3580 strided_load = false;
3581 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
a70d6342
IR
3582 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3583 slp_perm = true;
3584 }
ebfd146a
IR
3585 else
3586 vec_num = group_size;
3587
3588 dr_chain = VEC_alloc (tree, heap, vec_num);
3589 }
3590 else
3591 {
3592 first_stmt = stmt;
3593 first_dr = dr;
3594 group_size = vec_num = 1;
3595 }
3596
720f5239 3597 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
ebfd146a
IR
3598 gcc_assert (alignment_support_scheme);
3599
3600 /* In case the vectorization factor (VF) is bigger than the number
3601 of elements that we can fit in a vectype (nunits), we have to generate
3602 more than one vector stmt - i.e - we need to "unroll" the
3603 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3604 from one copy of the vector stmt to the next, in the field
3605 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3606 stages to find the correct vector defs to be used when vectorizing
3607 stmts that use the defs of the current stmt. The example below illustrates
3608 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3609 4 vectorized stmts):
3610
3611 before vectorization:
3612 RELATED_STMT VEC_STMT
3613 S1: x = memref - -
3614 S2: z = x + 1 - -
3615
3616 step 1: vectorize stmt S1:
3617 We first create the vector stmt VS1_0, and, as usual, record a
3618 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3619 Next, we create the vector stmt VS1_1, and record a pointer to
3620 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3621 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3622 stmts and pointers:
3623 RELATED_STMT VEC_STMT
3624 VS1_0: vx0 = memref0 VS1_1 -
3625 VS1_1: vx1 = memref1 VS1_2 -
3626 VS1_2: vx2 = memref2 VS1_3 -
3627 VS1_3: vx3 = memref3 - -
3628 S1: x = load - VS1_0
3629 S2: z = x + 1 - -
3630
b8698a0f
L
3631 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3632 information we recorded in RELATED_STMT field is used to vectorize
ebfd146a
IR
3633 stmt S2. */
3634
3635 /* In case of interleaving (non-unit strided access):
3636
3637 S1: x2 = &base + 2
3638 S2: x0 = &base
3639 S3: x1 = &base + 1
3640 S4: x3 = &base + 3
3641
b8698a0f 3642 Vectorized loads are created in the order of memory accesses
ebfd146a
IR
3643 starting from the access of the first stmt of the chain:
3644
3645 VS1: vx0 = &base
3646 VS2: vx1 = &base + vec_size*1
3647 VS3: vx3 = &base + vec_size*2
3648 VS4: vx4 = &base + vec_size*3
3649
3650 Then permutation statements are generated:
3651
3652 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3653 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3654 ...
3655
3656 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3657 (the order of the data-refs in the output of vect_permute_load_chain
3658 corresponds to the order of scalar stmts in the interleaving chain - see
3659 the documentation of vect_permute_load_chain()).
3660 The generation of permutation stmts and recording them in
3661 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3662
b8698a0f 3663 In case of both multiple types and interleaving, the vector loads and
ebfd146a
IR
3664 permutation stmts above are created for every copy. The result vector stmts
3665 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3666 STMT_VINFO_RELATED_STMT for the next copies. */
3667
3668 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3669 on a target that supports unaligned accesses (dr_unaligned_supported)
3670 we generate the following code:
3671 p = initial_addr;
3672 indx = 0;
3673 loop {
3674 p = p + indx * vectype_size;
3675 vec_dest = *(p);
3676 indx = indx + 1;
3677 }
3678
3679 Otherwise, the data reference is potentially unaligned on a target that
b8698a0f 3680 does not support unaligned accesses (dr_explicit_realign_optimized) -
ebfd146a
IR
3681 then generate the following code, in which the data in each iteration is
3682 obtained by two vector loads, one from the previous iteration, and one
3683 from the current iteration:
3684 p1 = initial_addr;
3685 msq_init = *(floor(p1))
3686 p2 = initial_addr + VS - 1;
3687 realignment_token = call target_builtin;
3688 indx = 0;
3689 loop {
3690 p2 = p2 + indx * vectype_size
3691 lsq = *(floor(p2))
3692 vec_dest = realign_load (msq, lsq, realignment_token)
3693 indx = indx + 1;
3694 msq = lsq;
3695 } */
3696
3697 /* If the misalignment remains the same throughout the execution of the
3698 loop, we can create the init_addr and permutation mask at the loop
3699 preheader. Otherwise, it needs to be created inside the loop.
3700 This can only occur when vectorizing memory accesses in the inner-loop
3701 nested within an outer-loop that is being vectorized. */
3702
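  /* I.e., when the data-ref does not advance by a whole number of vectors,
     the misalignment changes from one outer-loop iteration to the next, so
     the realignment computation cannot be done once in the preheader.  */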
a70d6342 3703 if (loop && nested_in_vect_loop_p (loop, stmt)
ebfd146a
IR
3704 && (TREE_INT_CST_LOW (DR_STEP (dr))
3705 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3706 {
3707 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3708 compute_in_loop = true;
3709 }
3710
3711 if ((alignment_support_scheme == dr_explicit_realign_optimized
3712 || alignment_support_scheme == dr_explicit_realign)
3713 && !compute_in_loop)
3714 {
3715 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3716 alignment_support_scheme, NULL_TREE,
3717 &at_loop);
3718 if (alignment_support_scheme == dr_explicit_realign_optimized)
3719 {
3720 phi = SSA_NAME_DEF_STMT (msq);
3721 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3722 }
3723 }
3724 else
3725 at_loop = loop;
3726
3727 prev_stmt_info = NULL;
3728 for (j = 0; j < ncopies; j++)
b8698a0f 3729 {
ebfd146a
IR
3730 /* 1. Create the vector pointer update chain. */
3731 if (j == 0)
3732 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
b8698a0f
L
3733 at_loop, offset,
3734 &dummy, &ptr_incr, false,
5006671f 3735 &inv_p);
ebfd146a 3736 else
b8698a0f 3737 dataref_ptr =
ebfd146a
IR
3738 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3739
3740 for (i = 0; i < vec_num; i++)
3741 {
3742 if (i > 0)
3743 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3744 NULL_TREE);
3745
3746 /* 2. Create the vector-load in the loop. */
3747 switch (alignment_support_scheme)
3748 {
3749 case dr_aligned:
ebfd146a
IR
3750 case dr_unaligned_supported:
3751 {
be1ac4ec
RG
3752 struct ptr_info_def *pi;
3753 data_ref
3754 = build2 (MEM_REF, vectype, dataref_ptr,
3755 build_int_cst (reference_alias_ptr_type
3756 (DR_REF (first_dr)), 0));
3757 pi = get_ptr_info (dataref_ptr);
3758 pi->align = TYPE_ALIGN_UNIT (vectype);
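	      /* As in the store case: record whether the access is known to
		 be vector-aligned, has unknown misalignment (only element
		 alignment is assumed), or has a known non-zero misalignment.  */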
3759 if (alignment_support_scheme == dr_aligned)
3760 {
3761 gcc_assert (aligned_access_p (first_dr));
3762 pi->misalign = 0;
3763 }
3764 else if (DR_MISALIGNMENT (first_dr) == -1)
3765 {
3766 TREE_TYPE (data_ref)
3767 = build_aligned_type (TREE_TYPE (data_ref),
3768 TYPE_ALIGN (TREE_TYPE (vectype)));
3769 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3770 pi->misalign = 0;
3771 }
3772 else
3773 {
3774 TREE_TYPE (data_ref)
3775 = build_aligned_type (TREE_TYPE (data_ref),
3776 TYPE_ALIGN (TREE_TYPE (vectype)));
3777 pi->misalign = DR_MISALIGNMENT (first_dr);
3778 }
ebfd146a
IR
3779 break;
3780 }
3781 case dr_explicit_realign:
3782 {
3783 tree ptr, bump;
3784 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3785
3786 if (compute_in_loop)
3787 msq = vect_setup_realignment (first_stmt, gsi,
3788 &realignment_token,
b8698a0f 3789 dr_explicit_realign,
ebfd146a
IR
3790 dataref_ptr, NULL);
3791
75421dcd
RG
3792 new_stmt = gimple_build_assign_with_ops
3793 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3794 build_int_cst
3795 (TREE_TYPE (dataref_ptr),
3796 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3797 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3798 gimple_assign_set_lhs (new_stmt, ptr);
3799 vect_finish_stmt_generation (stmt, new_stmt, gsi);
20ede5c6
RG
3800 data_ref
3801 = build2 (MEM_REF, vectype, ptr,
3802 build_int_cst (reference_alias_ptr_type
3803 (DR_REF (first_dr)), 0));
ebfd146a
IR
3804 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3805 new_stmt = gimple_build_assign (vec_dest, data_ref);
3806 new_temp = make_ssa_name (vec_dest, new_stmt);
3807 gimple_assign_set_lhs (new_stmt, new_temp);
5006671f
RG
3808 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3809 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
ebfd146a 3810 vect_finish_stmt_generation (stmt, new_stmt, gsi);
ebfd146a
IR
3811 msq = new_temp;
3812
3813 bump = size_binop (MULT_EXPR, vs_minus_1,
3814 TYPE_SIZE_UNIT (scalar_type));
3815 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
75421dcd
RG
3816 new_stmt = gimple_build_assign_with_ops
3817 (BIT_AND_EXPR, NULL_TREE, ptr,
3818 build_int_cst
3819 (TREE_TYPE (ptr),
3820 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3821 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3822 gimple_assign_set_lhs (new_stmt, ptr);
3823 vect_finish_stmt_generation (stmt, new_stmt, gsi);
20ede5c6
RG
3824 data_ref
3825 = build2 (MEM_REF, vectype, ptr,
3826 build_int_cst (reference_alias_ptr_type
3827 (DR_REF (first_dr)), 0));
ebfd146a
IR
3828 break;
3829 }
3830 case dr_explicit_realign_optimized:
75421dcd
RG
3831 new_stmt = gimple_build_assign_with_ops
3832 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3833 build_int_cst
3834 (TREE_TYPE (dataref_ptr),
3835 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3836 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3837 gimple_assign_set_lhs (new_stmt, new_temp);
3838 vect_finish_stmt_generation (stmt, new_stmt, gsi);
20ede5c6
RG
3839 data_ref
3840 = build2 (MEM_REF, vectype, new_temp,
3841 build_int_cst (reference_alias_ptr_type
3842 (DR_REF (first_dr)), 0));
ebfd146a
IR
3843 break;
3844 default:
3845 gcc_unreachable ();
3846 }
3847 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3848 new_stmt = gimple_build_assign (vec_dest, data_ref);
3849 new_temp = make_ssa_name (vec_dest, new_stmt);
3850 gimple_assign_set_lhs (new_stmt, new_temp);
3851 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3852 mark_symbols_for_renaming (new_stmt);
3853
3854 /* 3. Handle explicit realignment if necessary/supported. Create in
3855 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3856 if (alignment_support_scheme == dr_explicit_realign_optimized
3857 || alignment_support_scheme == dr_explicit_realign)
3858 {
3859 tree tmp;
3860
3861 lsq = gimple_assign_lhs (new_stmt);
3862 if (!realignment_token)
3863 realignment_token = dataref_ptr;
3864 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3865 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3866 realignment_token);
3867 new_stmt = gimple_build_assign (vec_dest, tmp);
3868 new_temp = make_ssa_name (vec_dest, new_stmt);
3869 gimple_assign_set_lhs (new_stmt, new_temp);
3870 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3871
3872 if (alignment_support_scheme == dr_explicit_realign_optimized)
3873 {
3874 gcc_assert (phi);
3875 if (i == vec_num - 1 && j == ncopies - 1)
f5045c96
AM
3876 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3877 UNKNOWN_LOCATION);
ebfd146a
IR
3878 msq = lsq;
3879 }
3880 }
3881
3882 /* 4. Handle invariant-load. */
a70d6342 3883 if (inv_p && !bb_vinfo)
ebfd146a
IR
3884 {
3885 gcc_assert (!strided_load);
3886 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3887 if (j == 0)
3888 {
3889 int k;
3890 tree t = NULL_TREE;
3891 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3892
 3893 /* CHECKME: bitpos depends on endianness? */
3894 bitpos = bitsize_zero_node;
b8698a0f 3895 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
ebfd146a 3896 bitsize, bitpos);
b8698a0f 3897 vec_dest =
ebfd146a
IR
3898 vect_create_destination_var (scalar_dest, NULL_TREE);
3899 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3900 new_temp = make_ssa_name (vec_dest, new_stmt);
3901 gimple_assign_set_lhs (new_stmt, new_temp);
3902 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3903
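		  /* Replicate the invariant scalar NUNITS times to build a
		     vector constructor of the form { new_temp, new_temp, ... }.  */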
3904 for (k = nunits - 1; k >= 0; --k)
3905 t = tree_cons (NULL_TREE, new_temp, t);
3906 /* FIXME: use build_constructor directly. */
3907 vec_inv = build_constructor_from_list (vectype, t);
3908 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3909 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3910 }
3911 else
3912 gcc_unreachable (); /* FORNOW. */
3913 }
3914
3915 /* Collect vector loads and later create their permutation in
3916 vect_transform_strided_load (). */
3917 if (strided_load || slp_perm)
3918 VEC_quick_push (tree, dr_chain, new_temp);
3919
3920 /* Store vector loads in the corresponding SLP_NODE. */
3921 if (slp && !slp_perm)
3922 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3923 }
3924
3925 if (slp && !slp_perm)
3926 continue;
3927
3928 if (slp_perm)
3929 {
a70d6342 3930 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
ebfd146a
IR
3931 slp_node_instance, false))
3932 {
3933 VEC_free (tree, heap, dr_chain);
3934 return false;
3935 }
3936 }
3937 else
3938 {
3939 if (strided_load)
3940 {
3941 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
b8698a0f 3942 return false;
ebfd146a
IR
3943
3944 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3945 VEC_free (tree, heap, dr_chain);
3946 dr_chain = VEC_alloc (tree, heap, group_size);
3947 }
3948 else
3949 {
3950 if (j == 0)
3951 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3952 else
3953 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3954 prev_stmt_info = vinfo_for_stmt (new_stmt);
3955 }
3956 }
3957 }
3958
3959 if (dr_chain)
3960 VEC_free (tree, heap, dr_chain);
3961
3962 return true;
3963}
3964
3965/* Function vect_is_simple_cond.
b8698a0f 3966
ebfd146a
IR
3967 Input:
3968 LOOP - the loop that is being vectorized.
3969 COND - Condition that is checked for simple use.
3970
3971 Returns whether a COND can be vectorized. Checks whether
3972 condition operands are supportable using vec_is_simple_use. */
3973
3974static bool
3975vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3976{
3977 tree lhs, rhs;
3978 tree def;
3979 enum vect_def_type dt;
3980
3981 if (!COMPARISON_CLASS_P (cond))
3982 return false;
3983
3984 lhs = TREE_OPERAND (cond, 0);
3985 rhs = TREE_OPERAND (cond, 1);
3986
3987 if (TREE_CODE (lhs) == SSA_NAME)
3988 {
3989 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
b8698a0f 3990 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
a70d6342 3991 &dt))
ebfd146a
IR
3992 return false;
3993 }
3994 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3995 && TREE_CODE (lhs) != FIXED_CST)
3996 return false;
3997
3998 if (TREE_CODE (rhs) == SSA_NAME)
3999 {
4000 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
b8698a0f 4001 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
a70d6342 4002 &dt))
ebfd146a
IR
4003 return false;
4004 }
4005 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4006 && TREE_CODE (rhs) != FIXED_CST)
4007 return false;
4008
4009 return true;
4010}
4011
4012/* vectorizable_condition.
4013
b8698a0f
L
 4014 Check if STMT is a conditional modify expression that can be vectorized.
4015 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4016 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4bbe8262
IR
4017 at GSI.
4018
 4019 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
 4020 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
 4021 the else clause if it is 2).
ebfd146a
IR
4022
4023 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4024
4bbe8262 4025bool
ebfd146a 4026vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4bbe8262 4027 gimple *vec_stmt, tree reduc_def, int reduc_index)
ebfd146a
IR
4028{
4029 tree scalar_dest = NULL_TREE;
4030 tree vec_dest = NULL_TREE;
4031 tree op = NULL_TREE;
4032 tree cond_expr, then_clause, else_clause;
4033 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4034 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4035 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
4036 tree vec_compare, vec_cond_expr;
4037 tree new_temp;
4038 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4039 enum machine_mode vec_mode;
4040 tree def;
4041 enum vect_def_type dt;
4042 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4043 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4044 enum tree_code code;
4045
a70d6342
IR
4046 /* FORNOW: unsupported in basic block SLP. */
4047 gcc_assert (loop_vinfo);
b8698a0f 4048
ebfd146a
IR
4049 gcc_assert (ncopies >= 1);
4050 if (ncopies > 1)
4051 return false; /* FORNOW */
4052
4053 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4054 return false;
4055
4bbe8262
IR
4056 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4057 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4058 && reduc_def))
ebfd146a
IR
4059 return false;
4060
4061 /* FORNOW: SLP not supported. */
4062 if (STMT_SLP_TYPE (stmt_info))
4063 return false;
4064
4065 /* FORNOW: not yet supported. */
b8698a0f 4066 if (STMT_VINFO_LIVE_P (stmt_info))
ebfd146a
IR
4067 {
4068 if (vect_print_dump_info (REPORT_DETAILS))
4069 fprintf (vect_dump, "value used after loop.");
4070 return false;
4071 }
4072
4073 /* Is vectorizable conditional operation? */
4074 if (!is_gimple_assign (stmt))
4075 return false;
4076
4077 code = gimple_assign_rhs_code (stmt);
4078
4079 if (code != COND_EXPR)
4080 return false;
4081
4082 gcc_assert (gimple_assign_single_p (stmt));
4083 op = gimple_assign_rhs1 (stmt);
4084 cond_expr = TREE_OPERAND (op, 0);
4085 then_clause = TREE_OPERAND (op, 1);
4086 else_clause = TREE_OPERAND (op, 2);
4087
4088 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4089 return false;
4090
4091 /* We do not handle two different vector types for the condition
4092 and the values. */
8533c9d8
SP
4093 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4094 TREE_TYPE (vectype)))
ebfd146a
IR
4095 return false;
4096
4097 if (TREE_CODE (then_clause) == SSA_NAME)
4098 {
4099 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
b8698a0f 4100 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
ebfd146a
IR
4101 &then_def_stmt, &def, &dt))
4102 return false;
4103 }
b8698a0f 4104 else if (TREE_CODE (then_clause) != INTEGER_CST
ebfd146a
IR
4105 && TREE_CODE (then_clause) != REAL_CST
4106 && TREE_CODE (then_clause) != FIXED_CST)
4107 return false;
4108
4109 if (TREE_CODE (else_clause) == SSA_NAME)
4110 {
4111 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
a70d6342 4112 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
ebfd146a
IR
4113 &else_def_stmt, &def, &dt))
4114 return false;
4115 }
b8698a0f 4116 else if (TREE_CODE (else_clause) != INTEGER_CST
ebfd146a
IR
4117 && TREE_CODE (else_clause) != REAL_CST
4118 && TREE_CODE (else_clause) != FIXED_CST)
4119 return false;
4120
4121
4122 vec_mode = TYPE_MODE (vectype);
4123
b8698a0f 4124 if (!vec_stmt)
ebfd146a
IR
4125 {
4126 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8e7aa1f9 4127 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
ebfd146a
IR
4128 }
4129
4130 /* Transform */
4131
4132 /* Handle def. */
4133 scalar_dest = gimple_assign_lhs (stmt);
4134 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4135
4136 /* Handle cond expr. */
b8698a0f 4137 vec_cond_lhs =
ebfd146a 4138 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
b8698a0f 4139 vec_cond_rhs =
ebfd146a 4140 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
4bbe8262
IR
4141 if (reduc_index == 1)
4142 vec_then_clause = reduc_def;
4143 else
4144 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
4145 if (reduc_index == 2)
4146 vec_else_clause = reduc_def;
4147 else
4148 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
ebfd146a
IR
4149
4150 /* Arguments are ready. Create the new vector stmt. */
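  /* The generated stmt has the form
       lhs = VEC_COND_EXPR <vec_cond_lhs OP vec_cond_rhs, then, else>
     (OP being the scalar comparison code) and selects between the two
     value vectors element-wise.  */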
b8698a0f 4151 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
ebfd146a 4152 vec_cond_lhs, vec_cond_rhs);
b8698a0f 4153 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
ebfd146a
IR
4154 vec_compare, vec_then_clause, vec_else_clause);
4155
4156 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4157 new_temp = make_ssa_name (vec_dest, *vec_stmt);
4158 gimple_assign_set_lhs (*vec_stmt, new_temp);
4159 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
b8698a0f 4160
ebfd146a
IR
4161 return true;
4162}
4163
4164
8644a673 4165/* Make sure the statement is vectorizable. */
ebfd146a
IR
4166
4167bool
a70d6342 4168vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
ebfd146a 4169{
8644a673 4170 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
a70d6342 4171 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 4172 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
ebfd146a 4173 bool ok;
a70d6342 4174 tree scalar_type, vectype;
ebfd146a
IR
4175
4176 if (vect_print_dump_info (REPORT_DETAILS))
ebfd146a 4177 {
8644a673
IR
4178 fprintf (vect_dump, "==> examining statement: ");
4179 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4180 }
ebfd146a 4181
1825a1f3 4182 if (gimple_has_volatile_ops (stmt))
b8698a0f 4183 {
1825a1f3
IR
4184 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4185 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4186
4187 return false;
4188 }
b8698a0f
L
4189
4190 /* Skip stmts that do not need to be vectorized. In loops this is expected
8644a673
IR
4191 to include:
4192 - the COND_EXPR which is the loop exit condition
4193 - any LABEL_EXPRs in the loop
b8698a0f 4194 - computations that are used only for array indexing or loop control.
8644a673
IR
4195 In basic blocks we only analyze statements that are a part of some SLP
4196 instance, therefore, all the statements are relevant. */
ebfd146a 4197
b8698a0f 4198 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8644a673 4199 && !STMT_VINFO_LIVE_P (stmt_info))
ebfd146a
IR
4200 {
4201 if (vect_print_dump_info (REPORT_DETAILS))
8644a673 4202 fprintf (vect_dump, "irrelevant.");
ebfd146a 4203
8644a673
IR
4204 return true;
4205 }
ebfd146a 4206
8644a673
IR
4207 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4208 {
4209 case vect_internal_def:
4210 break;
ebfd146a 4211
8644a673 4212 case vect_reduction_def:
7c5222ff 4213 case vect_nested_cycle:
a70d6342 4214 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
8644a673 4215 || relevance == vect_used_in_outer_by_reduction
a70d6342 4216 || relevance == vect_unused_in_scope));
8644a673
IR
4217 break;
4218
4219 case vect_induction_def:
4220 case vect_constant_def:
4221 case vect_external_def:
4222 case vect_unknown_def_type:
4223 default:
4224 gcc_unreachable ();
4225 }
ebfd146a 4226
a70d6342
IR
4227 if (bb_vinfo)
4228 {
4229 gcc_assert (PURE_SLP_STMT (stmt_info));
4230
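      /* For basic-block SLP, determine the vector type from the type of the
	 scalar destination and record it in the stmt_info.  */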
b690cc0f 4231 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
a70d6342
IR
4232 if (vect_print_dump_info (REPORT_DETAILS))
4233 {
4234 fprintf (vect_dump, "get vectype for scalar type: ");
4235 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4236 }
4237
4238 vectype = get_vectype_for_scalar_type (scalar_type);
4239 if (!vectype)
4240 {
4241 if (vect_print_dump_info (REPORT_DETAILS))
4242 {
4243 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4244 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4245 }
4246 return false;
4247 }
4248
4249 if (vect_print_dump_info (REPORT_DETAILS))
4250 {
4251 fprintf (vect_dump, "vectype: ");
4252 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4253 }
4254
4255 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4256 }
4257
8644a673 4258 if (STMT_VINFO_RELEVANT_P (stmt_info))
ebfd146a 4259 {
8644a673
IR
4260 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4261 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4262 *need_to_vectorize = true;
ebfd146a
IR
4263 }
4264
8644a673 4265 ok = true;
b8698a0f 4266 if (!bb_vinfo
a70d6342
IR
4267 && (STMT_VINFO_RELEVANT_P (stmt_info)
4268 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8644a673
IR
4269 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4270 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4271 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4272 || vectorizable_operation (stmt, NULL, NULL, NULL)
4273 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4274 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4275 || vectorizable_call (stmt, NULL, NULL)
4276 || vectorizable_store (stmt, NULL, NULL, NULL)
b5aeb3bb 4277 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4bbe8262 4278 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
a70d6342
IR
4279 else
4280 {
4281 if (bb_vinfo)
4282 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4283 || vectorizable_assignment (stmt, NULL, NULL, node)
4284 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4285 || vectorizable_store (stmt, NULL, NULL, node));
b8698a0f 4286 }
8644a673
IR
4287
4288 if (!ok)
ebfd146a 4289 {
8644a673
IR
4290 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4291 {
4292 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4293 fprintf (vect_dump, "supported: ");
4294 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4295 }
b8698a0f 4296
ebfd146a
IR
4297 return false;
4298 }
4299
a70d6342
IR
4300 if (bb_vinfo)
4301 return true;
4302
8644a673
IR
4303 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4304 need extra handling, except for vectorizable reductions. */
4305 if (STMT_VINFO_LIVE_P (stmt_info)
4306 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4307 ok = vectorizable_live_operation (stmt, NULL, NULL);
ebfd146a 4308
8644a673 4309 if (!ok)
ebfd146a 4310 {
8644a673
IR
4311 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4312 {
4313 fprintf (vect_dump, "not vectorized: live stmt not ");
4314 fprintf (vect_dump, "supported: ");
4315 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4316 }
b8698a0f 4317
8644a673 4318 return false;
ebfd146a
IR
4319 }
4320
8644a673 4321 if (!PURE_SLP_STMT (stmt_info))
ebfd146a 4322 {
b8698a0f
L
4323 /* Groups of strided accesses whose size is not a power of 2 are not
4324 vectorizable yet using loop-vectorization. Therefore, if this stmt
4325 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
a70d6342 4326 loop-based vectorized), the loop cannot be vectorized. */
8644a673
IR
4327 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4328 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4329 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
ebfd146a 4330 {
8644a673
IR
4331 if (vect_print_dump_info (REPORT_DETAILS))
4332 {
4333 fprintf (vect_dump, "not vectorized: the size of group "
4334 "of strided accesses is not a power of 2");
4335 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4336 }
4337
ebfd146a
IR
4338 return false;
4339 }
4340 }
b8698a0f 4341
ebfd146a
IR
4342 return true;
4343}
4344
4345
4346/* Function vect_transform_stmt.
4347
4348 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4349
4350bool
4351vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
b8698a0f 4352 bool *strided_store, slp_tree slp_node,
ebfd146a
IR
4353 slp_instance slp_node_instance)
4354{
4355 bool is_store = false;
4356 gimple vec_stmt = NULL;
4357 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4358 gimple orig_stmt_in_pattern;
4359 bool done;
ebfd146a
IR
4360
4361 switch (STMT_VINFO_TYPE (stmt_info))
4362 {
4363 case type_demotion_vec_info_type:
4364 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4365 gcc_assert (done);
4366 break;
4367
4368 case type_promotion_vec_info_type:
4369 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4370 gcc_assert (done);
4371 break;
4372
4373 case type_conversion_vec_info_type:
4374 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4375 gcc_assert (done);
4376 break;
4377
4378 case induc_vec_info_type:
4379 gcc_assert (!slp_node);
4380 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4381 gcc_assert (done);
4382 break;
4383
4384 case op_vec_info_type:
4385 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4386 gcc_assert (done);
4387 break;
4388
4389 case assignment_vec_info_type:
4390 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4391 gcc_assert (done);
4392 break;
4393
4394 case load_vec_info_type:
b8698a0f 4395 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
ebfd146a
IR
4396 slp_node_instance);
4397 gcc_assert (done);
4398 break;
4399
4400 case store_vec_info_type:
4401 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4402 gcc_assert (done);
4403 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4404 {
4405 /* In case of interleaving, the whole chain is vectorized when the
4406 last store in the chain is reached. Store stmts before the last
 4407 one are skipped, and their vec_stmt_info shouldn't be freed
4408 meanwhile. */
4409 *strided_store = true;
4410 if (STMT_VINFO_VEC_STMT (stmt_info))
4411 is_store = true;
4412 }
4413 else
4414 is_store = true;
4415 break;
4416
4417 case condition_vec_info_type:
4418 gcc_assert (!slp_node);
4bbe8262 4419 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
ebfd146a
IR
4420 gcc_assert (done);
4421 break;
4422
4423 case call_vec_info_type:
4424 gcc_assert (!slp_node);
4425 done = vectorizable_call (stmt, gsi, &vec_stmt);
4426 break;
4427
4428 case reduc_vec_info_type:
b5aeb3bb 4429 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
ebfd146a
IR
4430 gcc_assert (done);
4431 break;
4432
4433 default:
4434 if (!STMT_VINFO_LIVE_P (stmt_info))
4435 {
4436 if (vect_print_dump_info (REPORT_DETAILS))
4437 fprintf (vect_dump, "stmt not supported.");
4438 gcc_unreachable ();
4439 }
4440 }
4441
4442 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4443 is being vectorized, but outside the immediately enclosing loop. */
4444 if (vec_stmt
a70d6342
IR
4445 && STMT_VINFO_LOOP_VINFO (stmt_info)
4446 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4447 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
ebfd146a
IR
4448 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4449 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
b8698a0f 4450 || STMT_VINFO_RELEVANT (stmt_info) ==
a70d6342 4451 vect_used_in_outer_by_reduction))
ebfd146a 4452 {
a70d6342
IR
4453 struct loop *innerloop = LOOP_VINFO_LOOP (
4454 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
ebfd146a
IR
4455 imm_use_iterator imm_iter;
4456 use_operand_p use_p;
4457 tree scalar_dest;
4458 gimple exit_phi;
4459
4460 if (vect_print_dump_info (REPORT_DETAILS))
a70d6342 4461 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
ebfd146a
IR
4462
 4463 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4464 (to be used when vectorizing outer-loop stmts that use the DEF of
4465 STMT). */
4466 if (gimple_code (stmt) == GIMPLE_PHI)
4467 scalar_dest = PHI_RESULT (stmt);
4468 else
4469 scalar_dest = gimple_assign_lhs (stmt);
4470
4471 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4472 {
4473 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4474 {
4475 exit_phi = USE_STMT (use_p);
4476 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4477 }
4478 }
4479 }
4480
4481 /* Handle stmts whose DEF is used outside the loop-nest that is
4482 being vectorized. */
4483 if (STMT_VINFO_LIVE_P (stmt_info)
4484 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4485 {
4486 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4487 gcc_assert (done);
4488 }
4489
4490 if (vec_stmt)
4491 {
4492 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4493 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4494 if (orig_stmt_in_pattern)
4495 {
4496 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4497 /* STMT was inserted by the vectorizer to replace a computation idiom.
b8698a0f
L
4498 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4499 computed this idiom. We need to record a pointer to VEC_STMT in
4500 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
ebfd146a
IR
4501 documentation of vect_pattern_recog. */
4502 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4503 {
4504 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4505 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4506 }
4507 }
4508 }
4509
b8698a0f 4510 return is_store;
ebfd146a
IR
4511}
4512
4513
b8698a0f 4514/* Remove a group of stores (for SLP or interleaving), free their
ebfd146a
IR
4515 stmt_vec_info. */
4516
4517void
4518vect_remove_stores (gimple first_stmt)
4519{
4520 gimple next = first_stmt;
4521 gimple tmp;
4522 gimple_stmt_iterator next_si;
4523
4524 while (next)
4525 {
4526 /* Free the attached stmt_vec_info and remove the stmt. */
4527 next_si = gsi_for_stmt (next);
4528 gsi_remove (&next_si, true);
4529 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4530 free_stmt_vec_info (next);
4531 next = tmp;
4532 }
4533}
4534
4535
4536/* Function new_stmt_vec_info.
4537
4538 Create and initialize a new stmt_vec_info struct for STMT. */
4539
4540stmt_vec_info
b8698a0f 4541new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
a70d6342 4542 bb_vec_info bb_vinfo)
ebfd146a
IR
4543{
4544 stmt_vec_info res;
4545 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4546
4547 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4548 STMT_VINFO_STMT (res) = stmt;
4549 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
a70d6342 4550 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
8644a673 4551 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
ebfd146a
IR
4552 STMT_VINFO_LIVE_P (res) = false;
4553 STMT_VINFO_VECTYPE (res) = NULL;
4554 STMT_VINFO_VEC_STMT (res) = NULL;
4b5caab7 4555 STMT_VINFO_VECTORIZABLE (res) = true;
ebfd146a
IR
4556 STMT_VINFO_IN_PATTERN_P (res) = false;
4557 STMT_VINFO_RELATED_STMT (res) = NULL;
4558 STMT_VINFO_DATA_REF (res) = NULL;
4559
4560 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4561 STMT_VINFO_DR_OFFSET (res) = NULL;
4562 STMT_VINFO_DR_INIT (res) = NULL;
4563 STMT_VINFO_DR_STEP (res) = NULL;
4564 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4565
4566 if (gimple_code (stmt) == GIMPLE_PHI
4567 && is_loop_header_bb_p (gimple_bb (stmt)))
4568 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4569 else
8644a673
IR
4570 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4571
ebfd146a
IR
4572 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4573 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4574 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
32e8bb8e 4575 STMT_SLP_TYPE (res) = loop_vect;
ebfd146a
IR
4576 DR_GROUP_FIRST_DR (res) = NULL;
4577 DR_GROUP_NEXT_DR (res) = NULL;
4578 DR_GROUP_SIZE (res) = 0;
4579 DR_GROUP_STORE_COUNT (res) = 0;
4580 DR_GROUP_GAP (res) = 0;
4581 DR_GROUP_SAME_DR_STMT (res) = NULL;
4582 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4583
4584 return res;
4585}
4586
4587
4588/* Create a hash table for stmt_vec_info. */
4589
4590void
4591init_stmt_vec_info_vec (void)
4592{
4593 gcc_assert (!stmt_vec_info_vec);
4594 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4595}
4596
4597
4598/* Free hash table for stmt_vec_info. */
4599
4600void
4601free_stmt_vec_info_vec (void)
4602{
4603 gcc_assert (stmt_vec_info_vec);
4604 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4605}
4606
4607
4608/* Free stmt vectorization related info. */
4609
4610void
4611free_stmt_vec_info (gimple stmt)
4612{
4613 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4614
4615 if (!stmt_info)
4616 return;
4617
4618 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4619 set_vinfo_for_stmt (stmt, NULL);
4620 free (stmt_info);
4621}
4622
4623
4624/* Function get_vectype_for_scalar_type.
4625
4626 Returns the vector type corresponding to SCALAR_TYPE as supported
4627 by the target. */
4628
4629tree
4630get_vectype_for_scalar_type (tree scalar_type)
4631{
4632 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
2f816591 4633 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
ebfd146a
IR
4634 int nunits;
4635 tree vectype;
4636
4637 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4638 return NULL_TREE;
4639
2f816591
RG
4640 /* We can't build a vector type of elements with alignment bigger than
4641 their size. */
4642 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4643 return NULL_TREE;
4644
6d7971b8
RG
4645 /* If we'd build a vector type of elements whose mode precision doesn't
 4646 match their type's precision, we'll get mismatched types on vector
4647 extracts via BIT_FIELD_REFs. This effectively means we disable
4648 vectorization of bool and/or enum types in some languages. */
4649 if (INTEGRAL_TYPE_P (scalar_type)
4650 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4651 return NULL_TREE;
4652
ebfd146a
IR
4653 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4654 is expected. */
4655 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
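  /* For example, with a 16-byte SIMD word and a 4-byte scalar type,
     NUNITS is 4 and the call below builds a 4-element vector type.  */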
4656
4657 vectype = build_vector_type (scalar_type, nunits);
4658 if (vect_print_dump_info (REPORT_DETAILS))
4659 {
4660 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4661 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4662 }
4663
4664 if (!vectype)
4665 return NULL_TREE;
4666
4667 if (vect_print_dump_info (REPORT_DETAILS))
4668 {
4669 fprintf (vect_dump, "vectype: ");
4670 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4671 }
4672
4673 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4674 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4675 {
4676 if (vect_print_dump_info (REPORT_DETAILS))
4677 fprintf (vect_dump, "mode not supported by target.");
4678 return NULL_TREE;
4679 }
4680
4681 return vectype;
4682}
4683
b690cc0f
RG
4684/* Function get_same_sized_vectype
4685
4686 Returns a vector type corresponding to SCALAR_TYPE of size
4687 VECTOR_TYPE if supported by the target. */
4688
4689tree
4690get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4691{
4692 return get_vectype_for_scalar_type (scalar_type);
4693}
4694
ebfd146a
IR
4695/* Function vect_is_simple_use.
4696
4697 Input:
a70d6342
IR
4698 LOOP_VINFO - the vect info of the loop that is being vectorized.
4699 BB_VINFO - the vect info of the basic block that is being vectorized.
4700 OPERAND - operand of a stmt in the loop or bb.
ebfd146a
IR
4701 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4702
4703 Returns whether a stmt with OPERAND can be vectorized.
b8698a0f
L
4704 For loops, supportable operands are constants, loop invariants, and operands
4705 that are defined by the current iteration of the loop. Unsupportable
4706 operands are those that are defined by a previous iteration of the loop (as
a70d6342
IR
4707 is the case in reduction/induction computations).
4708 For basic blocks, supportable operands are constants and bb invariants.
4709 For now, operands defined outside the basic block are not supported. */
ebfd146a
IR
4710
4711bool
b8698a0f 4712vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
a70d6342 4713 bb_vec_info bb_vinfo, gimple *def_stmt,
ebfd146a 4714 tree *def, enum vect_def_type *dt)
b8698a0f 4715{
ebfd146a
IR
4716 basic_block bb;
4717 stmt_vec_info stmt_vinfo;
a70d6342 4718 struct loop *loop = NULL;
b8698a0f 4719
a70d6342
IR
4720 if (loop_vinfo)
4721 loop = LOOP_VINFO_LOOP (loop_vinfo);
ebfd146a
IR
4722
4723 *def_stmt = NULL;
4724 *def = NULL_TREE;
b8698a0f 4725
ebfd146a
IR
4726 if (vect_print_dump_info (REPORT_DETAILS))
4727 {
4728 fprintf (vect_dump, "vect_is_simple_use: operand ");
4729 print_generic_expr (vect_dump, operand, TDF_SLIM);
4730 }
b8698a0f 4731
ebfd146a
IR
4732 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4733 {
4734 *dt = vect_constant_def;
4735 return true;
4736 }
b8698a0f 4737
ebfd146a
IR
4738 if (is_gimple_min_invariant (operand))
4739 {
4740 *def = operand;
8644a673 4741 *dt = vect_external_def;
ebfd146a
IR
4742 return true;
4743 }
4744
4745 if (TREE_CODE (operand) == PAREN_EXPR)
4746 {
4747 if (vect_print_dump_info (REPORT_DETAILS))
4748 fprintf (vect_dump, "non-associatable copy.");
4749 operand = TREE_OPERAND (operand, 0);
4750 }
b8698a0f 4751
ebfd146a
IR
4752 if (TREE_CODE (operand) != SSA_NAME)
4753 {
4754 if (vect_print_dump_info (REPORT_DETAILS))
4755 fprintf (vect_dump, "not ssa-name.");
4756 return false;
4757 }
b8698a0f 4758
ebfd146a
IR
4759 *def_stmt = SSA_NAME_DEF_STMT (operand);
4760 if (*def_stmt == NULL)
4761 {
4762 if (vect_print_dump_info (REPORT_DETAILS))
4763 fprintf (vect_dump, "no def_stmt.");
4764 return false;
4765 }
4766
4767 if (vect_print_dump_info (REPORT_DETAILS))
4768 {
4769 fprintf (vect_dump, "def_stmt: ");
4770 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4771 }
4772
8644a673 4773 /* Empty stmt is expected only in case of a function argument.
ebfd146a
IR
4774 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4775 if (gimple_nop_p (*def_stmt))
4776 {
4777 *def = operand;
8644a673 4778 *dt = vect_external_def;
ebfd146a
IR
4779 return true;
4780 }
4781
4782 bb = gimple_bb (*def_stmt);
a70d6342
IR
4783
4784 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4785 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
b8698a0f 4786 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
8644a673 4787 *dt = vect_external_def;
ebfd146a
IR
4788 else
4789 {
4790 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4791 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4792 }
4793
4794 if (*dt == vect_unknown_def_type)
4795 {
4796 if (vect_print_dump_info (REPORT_DETAILS))
4797 fprintf (vect_dump, "Unsupported pattern.");
4798 return false;
4799 }
4800
4801 if (vect_print_dump_info (REPORT_DETAILS))
4802 fprintf (vect_dump, "type of def: %d.",*dt);
4803
4804 switch (gimple_code (*def_stmt))
4805 {
4806 case GIMPLE_PHI:
4807 *def = gimple_phi_result (*def_stmt);
4808 break;
4809
4810 case GIMPLE_ASSIGN:
4811 *def = gimple_assign_lhs (*def_stmt);
4812 break;
4813
4814 case GIMPLE_CALL:
4815 *def = gimple_call_lhs (*def_stmt);
4816 if (*def != NULL)
4817 break;
4818 /* FALLTHRU */
4819 default:
4820 if (vect_print_dump_info (REPORT_DETAILS))
4821 fprintf (vect_dump, "unsupported defining stmt: ");
4822 return false;
4823 }
4824
4825 return true;
4826}
4827
b690cc0f
RG
4828/* Function vect_is_simple_use_1.
4829
 4830 Same as vect_is_simple_use, but also determines the vector operand
4831 type of OPERAND and stores it to *VECTYPE. If the definition of
4832 OPERAND is vect_uninitialized_def, vect_constant_def or
4833 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
4834 is responsible to compute the best suited vector type for the
4835 scalar operand. */
4836
4837bool
4838vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4839 bb_vec_info bb_vinfo, gimple *def_stmt,
4840 tree *def, enum vect_def_type *dt, tree *vectype)
4841{
4842 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4843 return false;
4844
4845 /* Now get a vector type if the def is internal, otherwise supply
4846 NULL_TREE and leave it up to the caller to figure out a proper
4847 type for the use stmt. */
4848 if (*dt == vect_internal_def
4849 || *dt == vect_induction_def
4850 || *dt == vect_reduction_def
4851 || *dt == vect_double_reduction_def
4852 || *dt == vect_nested_cycle)
4853 {
4854 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4855 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4856 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4857 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4858 gcc_assert (*vectype != NULL_TREE);
4859 }
4860 else if (*dt == vect_uninitialized_def
4861 || *dt == vect_constant_def
4862 || *dt == vect_external_def)
4863 *vectype = NULL_TREE;
4864 else
4865 gcc_unreachable ();
4866
4867 return true;
4868}
4869
ebfd146a
IR
4870
4871/* Function supportable_widening_operation
4872
b8698a0f
L
4873 Check whether an operation represented by the code CODE is a
4874 widening operation that is supported by the target platform in
b690cc0f
RG
4875 vector form (i.e., when operating on arguments of type VECTYPE_IN
4876 producing a result of type VECTYPE_OUT).
b8698a0f 4877
ebfd146a
IR
4878 Widening operations we currently support are NOP (CONVERT), FLOAT
4879 and WIDEN_MULT. This function checks if these operations are supported
4880 by the target platform either directly (via vector tree-codes), or via
4881 target builtins.
4882
4883 Output:
b8698a0f
L
4884 - CODE1 and CODE2 are codes of vector operations to be used when
4885 vectorizing the operation, if available.
ebfd146a
IR
4886 - DECL1 and DECL2 are decls of target builtin functions to be used
4887 when vectorizing the operation, if available. In this case,
b8698a0f 4888 CODE1 and CODE2 are CALL_EXPR.
ebfd146a
IR
4889 - MULTI_STEP_CVT determines the number of required intermediate steps in
4890 case of multi-step conversion (like char->short->int - in that case
4891 MULTI_STEP_CVT will be 1).
b8698a0f
L
4892 - INTERM_TYPES contains the intermediate type required to perform the
4893 widening operation (short in the above example). */
ebfd146a
IR
4894
4895bool
b690cc0f
RG
4896supportable_widening_operation (enum tree_code code, gimple stmt,
4897 tree vectype_out, tree vectype_in,
ebfd146a
IR
4898 tree *decl1, tree *decl2,
4899 enum tree_code *code1, enum tree_code *code2,
4900 int *multi_step_cvt,
4901 VEC (tree, heap) **interm_types)
4902{
4903 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4904 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4905 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4906 bool ordered_p;
4907 enum machine_mode vec_mode;
81f40b79 4908 enum insn_code icode1, icode2;
ebfd146a 4909 optab optab1, optab2;
b690cc0f
RG
4910 tree vectype = vectype_in;
4911 tree wide_vectype = vectype_out;
ebfd146a
IR
4912 enum tree_code c1, c2;
4913
4914 /* The result of a vectorized widening operation usually requires two vectors
b8698a0f
L
 4915 (because the widened results do not fit in one vector). The generated
4916 vector results would normally be expected to be generated in the same
ebfd146a
IR
4917 order as in the original scalar computation, i.e. if 8 results are
4918 generated in each vector iteration, they are to be organized as follows:
b8698a0f 4919 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
ebfd146a 4920
b8698a0f 4921 However, in the special case that the result of the widening operation is
ebfd146a 4922 used in a reduction computation only, the order doesn't matter (because
b8698a0f 4923 when vectorizing a reduction we change the order of the computation).
4924 Some targets can take advantage of this and generate more efficient code.
4925 For example, targets like Altivec, that support widen_mult using a sequence
4926 of {mult_even,mult_odd} generate the following vectors:
4927 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4928
4929 When vectorizing outer-loops, we execute the inner-loop sequentially
4930 (each vectorized inner-loop iteration contributes to VF outer-loop
4931 iterations in parallel). We therefore don't allow changing the order
4932 of the computation in the inner-loop during outer-loop vectorization. */
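  /* Illustrative example, not part of the original sources: in a loop like

         for (i = 0; i < n; i++)
           sum += (int) a[i] * (int) b[i];

     the widened products only feed a reduction, so the unordered (even/odd)
     variant checked below is acceptable.  If the widened products were
     stored or used by other order-sensitive computations, only the hi/lo
     variant would preserve the scalar order.  */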
4933
4934 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4935 && !nested_in_vect_loop_p (vect_loop, stmt))
4936 ordered_p = false;
4937 else
4938 ordered_p = true;
4939
4940 if (!ordered_p
4941 && code == WIDEN_MULT_EXPR
4942 && targetm.vectorize.builtin_mul_widen_even
4943 && targetm.vectorize.builtin_mul_widen_even (vectype)
4944 && targetm.vectorize.builtin_mul_widen_odd
4945 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4946 {
4947 if (vect_print_dump_info (REPORT_DETAILS))
4948 fprintf (vect_dump, "Unordered widening operation detected.");
4949
4950 *code1 = *code2 = CALL_EXPR;
4951 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4952 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4953 return true;
4954 }
4955
4956 switch (code)
4957 {
4958 case WIDEN_MULT_EXPR:
4959 if (BYTES_BIG_ENDIAN)
4960 {
4961 c1 = VEC_WIDEN_MULT_HI_EXPR;
4962 c2 = VEC_WIDEN_MULT_LO_EXPR;
4963 }
4964 else
4965 {
4966 c2 = VEC_WIDEN_MULT_HI_EXPR;
4967 c1 = VEC_WIDEN_MULT_LO_EXPR;
4968 }
4969 break;
4970
4971 CASE_CONVERT:
4972 if (BYTES_BIG_ENDIAN)
4973 {
4974 c1 = VEC_UNPACK_HI_EXPR;
4975 c2 = VEC_UNPACK_LO_EXPR;
4976 }
4977 else
4978 {
4979 c2 = VEC_UNPACK_HI_EXPR;
4980 c1 = VEC_UNPACK_LO_EXPR;
4981 }
4982 break;
4983
4984 case FLOAT_EXPR:
4985 if (BYTES_BIG_ENDIAN)
4986 {
4987 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4988 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4989 }
4990 else
4991 {
4992 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4993 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4994 }
4995 break;
4996
4997 case FIX_TRUNC_EXPR:
4998 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4999 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5000 computing the operation. */
5001 return false;
5002
5003 default:
5004 gcc_unreachable ();
5005 }
5006
5007 if (code == FIX_TRUNC_EXPR)
5008 {
5009 /* The signedness is determined from the output operand. */
5010 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5011 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5012 }
5013 else
5014 {
5015 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5016 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5017 }
5018
5019 if (!optab1 || !optab2)
5020 return false;
5021
5022 vec_mode = TYPE_MODE (vectype);
5023 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5024 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5025 return false;
5026
b8698a0f 5027 /* Check if it's a multi-step conversion that can be done using intermediate
5028 types. */
5029 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5030 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5031 {
5032 int i;
5033 tree prev_type = vectype, intermediate_type;
5034 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5035 optab optab3, optab4;
5036
5037 if (!CONVERT_EXPR_CODE_P (code))
5038 return false;
b8698a0f 5039
5040 *code1 = c1;
5041 *code2 = c2;
b8698a0f 5042
5043 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5044 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
5045 to get to WIDE_VECTYPE, and fail if we do not. */
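      /* Illustrative example, not part of the original sources: on a target
         with 128-bit vectors, widening V16QI to V4SI goes through V8HI.
         The loop below then pushes the V8HI vector type onto INTERM_TYPES,
         sets *MULTI_STEP_CVT to 1, and succeeds once the unpack optab on
         V8HI produces V4SI.  */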
5046 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5047 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5048 {
5049 intermediate_mode = insn_data[icode1].operand[0].mode;
5050 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5051 TYPE_UNSIGNED (prev_type));
5052 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5053 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5054
5055 if (!optab3 || !optab4
5056 || ((icode1 = optab_handler (optab1, prev_mode))
5057 == CODE_FOR_nothing)
ebfd146a 5058 || insn_data[icode1].operand[0].mode != intermediate_mode
5059 || ((icode2 = optab_handler (optab2, prev_mode))
5060 == CODE_FOR_nothing)
ebfd146a 5061 || insn_data[icode2].operand[0].mode != intermediate_mode
5062 || ((icode1 = optab_handler (optab3, intermediate_mode))
5063 == CODE_FOR_nothing)
5064 || ((icode2 = optab_handler (optab4, intermediate_mode))
5065 == CODE_FOR_nothing))
5066 return false;
5067
5068 VEC_quick_push (tree, *interm_types, intermediate_type);
5069 (*multi_step_cvt)++;
5070
5071 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5072 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5073 return true;
5074
5075 prev_type = intermediate_type;
5076 prev_mode = intermediate_mode;
5077 }
5078
5079 return false;
5080 }
5081
5082 *code1 = c1;
5083 *code2 = c2;
5084 return true;
5085}
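/* Illustrative sketch, not part of the original sources: how a caller such
   as vectorizable_type_promotion might query supportable_widening_operation
   for a widening conversion.  STMT is assumed to be a scalar statement in a
   loop being vectorized; the wrapper name is hypothetical.  */

static bool
example_check_widening (gimple stmt, tree vectype_out, tree vectype_in)
{
  enum tree_code code1, code2;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  int multi_step_cvt = 0;
  VEC (tree, heap) *interm_types = NULL;

  /* For a char -> int conversion this is expected to return the
     VEC_UNPACK_{LO,HI}_EXPR codes together with one intermediate
     step (short).  */
  return supportable_widening_operation (NOP_EXPR, stmt, vectype_out,
                                         vectype_in, &decl1, &decl2,
                                         &code1, &code2, &multi_step_cvt,
                                         &interm_types);
}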
5086
5087
5088/* Function supportable_narrowing_operation
5089
5090 Check whether an operation represented by the code CODE is a
5091 narrowing operation that is supported by the target platform in
5092 vector form (i.e., when operating on arguments of type VECTYPE_IN
5093 and producing a result of type VECTYPE_OUT).
b8698a0f 5094
5095 Narrowing operations we currently support are NOP (CONVERT) and
5096 FIX_TRUNC. This function checks if these operations are supported by
5097 the target platform directly via vector tree-codes.
5098
5099 Output:
5100 - CODE1 is the code of a vector operation to be used when
5101 vectorizing the operation, if available.
5102 - MULTI_STEP_CVT determines the number of required intermediate steps in
5103 case of multi-step conversion (like int->short->char - in that case
5104 MULTI_STEP_CVT will be 1).
5105 - INTERM_TYPES contains the intermediate type required to perform the
b8698a0f 5106 narrowing operation (short in the above example). */
5107
5108bool
5109supportable_narrowing_operation (enum tree_code code,
b690cc0f 5110 tree vectype_out, tree vectype_in,
5111 enum tree_code *code1, int *multi_step_cvt,
5112 VEC (tree, heap) **interm_types)
5113{
5114 enum machine_mode vec_mode;
5115 enum insn_code icode1;
5116 optab optab1, interm_optab;
5117 tree vectype = vectype_in;
5118 tree narrow_vectype = vectype_out;
5119 enum tree_code c1;
5120 tree intermediate_type, prev_type;
5121 int i;
5122
5123 switch (code)
5124 {
5125 CASE_CONVERT:
5126 c1 = VEC_PACK_TRUNC_EXPR;
5127 break;
5128
5129 case FIX_TRUNC_EXPR:
5130 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5131 break;
5132
5133 case FLOAT_EXPR:
5134 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5135 tree code and optabs used for computing the operation. */
5136 return false;
5137
5138 default:
5139 gcc_unreachable ();
5140 }
5141
5142 if (code == FIX_TRUNC_EXPR)
5143 /* The signedness is determined from the output operand. */
b690cc0f 5144 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5145 else
5146 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5147
5148 if (!optab1)
5149 return false;
5150
5151 vec_mode = TYPE_MODE (vectype);
947131ba 5152 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5153 return false;
5154
5155 /* Check if it's a multi-step conversion that can be done using intermediate
5156 types. */
5157 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5158 {
5159 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5160
5161 *code1 = c1;
5162 prev_type = vectype;
5163 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5164 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
5165 to get to NARROW_VECTYPE, and fail if we do not. */
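      /* Illustrative example, not part of the original sources: on a target
         with 128-bit vectors, narrowing V4SI to V16QI goes through V8HI,
         i.e. two VEC_PACK_TRUNC steps.  The loop below records the V8HI
         vector type in INTERM_TYPES and sets *MULTI_STEP_CVT to 1.  */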
5166 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5167 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5168 {
5169 intermediate_mode = insn_data[icode1].operand[0].mode;
5170 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5171 TYPE_UNSIGNED (prev_type));
b8698a0f 5172 interm_optab = optab_for_tree_code (c1, intermediate_type,
ebfd146a 5173 optab_default);
b8698a0f 5174 if (!interm_optab
5175 || ((icode1 = optab_handler (optab1, prev_mode))
5176 == CODE_FOR_nothing)
ebfd146a 5177 || insn_data[icode1].operand[0].mode != intermediate_mode
5178 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5179 == CODE_FOR_nothing))
5180 return false;
5181
5182 VEC_quick_push (tree, *interm_types, intermediate_type);
5183 (*multi_step_cvt)++;
5184
5185 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5186 return true;
5187
5188 prev_type = intermediate_type;
5189 prev_mode = intermediate_mode;
5190 }
5191
5192 return false;
5193 }
5194
5195 *code1 = c1;
5196 return true;
5197}