/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.
    These are used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities,
    used by drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and
    transformation, used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used
    by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used
    by all).

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/
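
/* As a concrete illustration of driver (1) (a hypothetical example,
   assuming a target with 128-bit vector registers; nothing below is
   code from this file):

     void f (int *restrict a, int *restrict b, int n)
     {
       for (int i = 0; i < n; i++)
         a[i] = b[i] + 1;
     }

   would be transformed so that each vector iteration loads four ints
   from B, adds 1 to all four lanes at once and stores four ints to A,
   with a scalar epilogue handling the n % 4 leftover iterations.
   Driver (3) instead looks for the same kind of parallelism between
   independent statements of a single basic block, no loop needed.  */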

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"

/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* Dump a cost entry according to args to F.  */

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, int misalign, unsigned cost,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
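
/* For illustration, a single line printed by dump_stmt_cost might look
   like this (hypothetical pointer and statement):

     0x12345678 _5 + _6 2 times vector_stmt costs 1 in body

   i.e. the cost-model DATA pointer, the statement, COUNT, the kind
   string, the per-statement COST and the cost-model location.  */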

/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it, like this:

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   That is, it maps from the OMP simd array (D.1737[]) to the DECL_UID
   of simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}

/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
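
/* For example (hypothetical GIMPLE, assuming HTAB maps simduid.0 to a
   vectorization factor of 4):

     _5 = GOMP_SIMD_VF (simduid.0);               -> uses of _5 become 4
     _6 = GOMP_SIMD_LANE (simduid.0);             -> uses of _6 become 0
     _7 = GOMP_SIMD_LAST_LANE (simduid.0, _2);    -> uses of _7 become _2

   after which the calls themselves are removed.  */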

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  tree vdef = gimple_vdef (stmt);
		  gimple_set_vdef (g, vdef);
		  SSA_NAME_DEF_STMT (vdef) = g;
		  gimple_set_vuse (g, gimple_vuse (stmt));
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify the loop safelen field since its value is not
	     valid after the transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}

/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}

/* Find "omp simd array" temporaries and map them to the corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
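
/* Continuing the D.1737 example from the comment above: when the walk
   visits a non-debug use such as D.1737[_7] = stuff; of the
   GOMP_SIMD_LANE result _7, the callback records the mapping
   D.1737 -> DECL_UID (simduid.0); if the same array were later used
   under a different simduid, the entry would be invalidated to -1U and
   subsequently ignored by shrink_simd_arrays.  */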

/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
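
/* For instance (hypothetical sizes): if the loop tagged with simduid.0
   was vectorized with a vectorization factor of 4, an "omp simd array"
   temporary declared as int D.1737[16] is shrunk to int D.1737[4] and
   re-laid out via relayout_decl.  */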

/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}

vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}

/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}

/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
	return res;
    }
  return NULL;
}

/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}

/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}

/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}

/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}

/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}

/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
			gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  set_vinfo_for_stmt (old_stmt, NULL);
  set_vinfo_for_stmt (new_stmt, stmt_info);
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}

/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (struct _stmt_vec_info);
  res->vinfo = this;
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}

/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}
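
/* A note on the uid scheme implemented by set_vinfo_for_stmt and
   consumed by lookup_stmt: a gimple_uid of 0 means "no stmt_vec_info
   recorded", while a nonzero uid N indexes stmt_vec_infos[N - 1].
   Schematically:

     gimple_uid (stmt) == 0      -> lookup_stmt returns NULL
     gimple_uid (stmt) == N > 0  -> lookup_stmt returns
                                    stmt_vec_infos[N - 1], after checking
                                    the entry still refers to STMT.  */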

/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}

/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  free (stmt_info);
}

/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (struct loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}

/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */

static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}
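
/* Schematically, the CFG shape this function matches (produced by loop
   versioning during if-conversion, not by code in this file) is:

     if (LOOP_VECTORIZED (2, 3))       <- GIMPLE_COND, preceded by the
       loop2  (if-converted copy)         IFN_LOOP_VECTORIZED call
     else
       loop3  (scalar copy)

   Walking up the single-predecessor chain from LOOP's preheader finds
   the GIMPLE_COND; the statement just before it is the internal call,
   and either of its two arguments may name LOOP's number.  */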

/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

static gimple *
vect_loop_dist_alias_call (struct loop *loop)
{
  basic_block bb;
  basic_block entry;
  struct loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }
  return NULL;
}

/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if-converted.  */
static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize the outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is wasted work, or it is used only for
     a few iterations.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops,
		      loop_p loop, loop_vec_info orig_loop_vinfo,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
  opt_loop_vec_info loop_vinfo
    = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
  loop->aux = loop_vinfo;

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "couldn't vectorize loop\n");
	  problem->emit_and_clear ();
	}

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ??? Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly; the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool require_loop_vectorize = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      gcall *call = dyn_cast <gcall *> (stmt);
	      if (call && gimple_call_internal_p (call))
		{
		  internal_fn ifn = gimple_call_internal_fn (call);
		  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
		      /* Don't keep the if-converted parts when the ifn with
			 specific type is not supported by the backend.  */
		      || (direct_internal_fn_p (ifn)
			  && !direct_internal_fn_supported_p
					(call, OPTIMIZE_FOR_SPEED)))
		    {
		      require_loop_vectorize = true;
		      break;
		    }
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (!require_loop_vectorize && vect_slp_bb (bb))
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (current_vector_size.is_constant (&bytes))
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using %wu byte vectors\n", bytes);
  else
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using variable length vectors\n");

  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
				 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
			       loop, NULL,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}

/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /* ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets a bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops, skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		loop2
	      }
	    else
	      loop3 (copy of loop1)
	    if (LOOP_VECTORIZED (4, 5))
	      loop4 (copy of loop2)
	    else
	      loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4;
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		struct loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ??? Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}

/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}

/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}

/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return the alignment of the vector type corresponding to the array's
   scalar type, or 0 if no such vector type exists or the array is
   smaller than one vector.  */
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}
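
/* An illustration (target-dependent, so only a sketch): for a global
   'double a[8]', if the target provides a 16-byte vector of doubles,
   the function returns that vector type's alignment; for 'double a[1]'
   it returns 0, because the whole array is smaller than one vector.  */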

/* Return the maximum alignment over all fields of the vector type
   corresponding to each field's scalar type.  For now, we only consider
   fields whose offset is a multiple of their vector alignment.
   Return 0 if no suitable field is found.  */
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is a multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}

/* Return the alignment of the vector type corresponding to TYPE's scalar
   type, or 0 if no such vector type exists or its alignment is less than
   TYPE's own alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}

/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
	}
    }

  delete type_align_map;
  return 0;
}


namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}