1 /* Vectorizer
2 Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Loop and basic block vectorizer.
22
23 This file contains drivers for the three vectorizers:
24 (1) loop vectorizer (inter-iteration parallelism),
25 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26 vectorizer)
27 (3) BB vectorizer (out-of-loops), aka SLP
28
29 The rest of the vectorizer's code is organized as follows:
30 - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
31 used by drivers (1) and (2).
32 - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
33 drivers (1) and (2).
34 - tree-vect-slp.c - BB vectorization specific analysis and transformation,
35 used by drivers (2) and (3).
36 - tree-vect-stmts.c - statement analysis and transformation (used by all).
37 - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
38 manipulations (used by all).
39 - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
40
41 Here's a poor attempt at illustrating that:
42
43 tree-vectorizer.c:
44 loop_vect() loop_aware_slp() slp_vect()
45 | / \ /
46 | / \ /
47 tree-vect-loop.c tree-vect-slp.c
48 | \ \ / / |
49 | \ \/ / |
50 | \ /\ / |
51 | \ / \ / |
52 tree-vect-stmts.c tree-vect-data-refs.c
53 \ /
54 tree-vect-patterns.c
55 */
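/* As a rough illustration (hypothetical user code, not taken from any
   testcase): the loop vectorizer exploits inter-iteration parallelism in

       for (i = 0; i < n; i++)
         a[i] = b[i] + c[i];

   by turning groups of consecutive iterations into single vector
   operations, whereas the BB/SLP vectorizer exploits intra-iteration
   parallelism in straight-line code such as

       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];

   which can be merged into one vector statement without any loop.  */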
56
57 #include "config.h"
58 #include "system.h"
59 #include "coretypes.h"
60 #include "backend.h"
61 #include "tree.h"
62 #include "gimple.h"
63 #include "predict.h"
64 #include "tree-pass.h"
65 #include "ssa.h"
66 #include "cgraph.h"
67 #include "fold-const.h"
68 #include "stor-layout.h"
69 #include "gimple-iterator.h"
70 #include "gimple-walk.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-ssa-loop-niter.h"
73 #include "tree-cfg.h"
74 #include "cfgloop.h"
75 #include "tree-vectorizer.h"
76 #include "tree-ssa-propagate.h"
77 #include "dbgcnt.h"
78 #include "tree-scalar-evolution.h"
79 #include "stringpool.h"
80 #include "attribs.h"
81 #include "gimple-pretty-print.h"
82 #include "opt-problem.h"
83 #include "internal-fn.h"
84
85
86 /* Loop or bb location, with hotness information. */
87 dump_user_location_t vect_location;
88
89 /* auto_purge_vect_location's dtor: reset the vect_location
90 global, to avoid stale location_t values that could reference
91 GC-ed blocks. */
92
93 auto_purge_vect_location::~auto_purge_vect_location ()
94 {
95 vect_location = dump_user_location_t ();
96 }
97
98 /* Dump to F a cost entry described by the remaining arguments. */
99
100 void
101 dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
102 stmt_vec_info stmt_info, int misalign, unsigned cost,
103 enum vect_cost_model_location where)
104 {
105 fprintf (f, "%p ", data);
106 if (stmt_info)
107 {
108 print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
109 fprintf (f, " ");
110 }
111 else
112 fprintf (f, "<unknown> ");
113 fprintf (f, "%d times ", count);
114 const char *ks = "unknown";
115 switch (kind)
116 {
117 case scalar_stmt:
118 ks = "scalar_stmt";
119 break;
120 case scalar_load:
121 ks = "scalar_load";
122 break;
123 case scalar_store:
124 ks = "scalar_store";
125 break;
126 case vector_stmt:
127 ks = "vector_stmt";
128 break;
129 case vector_load:
130 ks = "vector_load";
131 break;
132 case vector_gather_load:
133 ks = "vector_gather_load";
134 break;
135 case unaligned_load:
136 ks = "unaligned_load";
137 break;
138 case unaligned_store:
139 ks = "unaligned_store";
140 break;
141 case vector_store:
142 ks = "vector_store";
143 break;
144 case vector_scatter_store:
145 ks = "vector_scatter_store";
146 break;
147 case vec_to_scalar:
148 ks = "vec_to_scalar";
149 break;
150 case scalar_to_vec:
151 ks = "scalar_to_vec";
152 break;
153 case cond_branch_not_taken:
154 ks = "cond_branch_not_taken";
155 break;
156 case cond_branch_taken:
157 ks = "cond_branch_taken";
158 break;
159 case vec_perm:
160 ks = "vec_perm";
161 break;
162 case vec_promote_demote:
163 ks = "vec_promote_demote";
164 break;
165 case vec_construct:
166 ks = "vec_construct";
167 break;
168 }
169 fprintf (f, "%s ", ks);
170 if (kind == unaligned_load || kind == unaligned_store)
171 fprintf (f, "(misalign %d) ", misalign);
172 fprintf (f, "costs %u ", cost);
173 const char *ws = "unknown";
174 switch (where)
175 {
176 case vect_prologue:
177 ws = "prologue";
178 break;
179 case vect_body:
180 ws = "body";
181 break;
182 case vect_epilogue:
183 ws = "epilogue";
184 break;
185 }
186 fprintf (f, "in %s\n", ws);
187 }
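/* With hypothetical values, a line produced by the function above would
   look like

       0x4f1c2a0 _5 = _3 + _4 1 times vector_stmt costs 1 in body

   i.e. the cost-data pointer, the statement (or "<unknown>"), the count,
   the cost kind, the cost and the part of the loop it is accounted to.  */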
188 \f
189 /* For mapping simduid to vectorization factor. */
190
191 class simduid_to_vf : public free_ptr_hash<simduid_to_vf>
192 {
193 public:
194 unsigned int simduid;
195 poly_uint64 vf;
196
197 /* hash_table support. */
198 static inline hashval_t hash (const simduid_to_vf *);
199 static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
200 };
201
202 inline hashval_t
203 simduid_to_vf::hash (const simduid_to_vf *p)
204 {
205 return p->simduid;
206 }
207
208 inline int
209 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
210 {
211 return p1->simduid == p2->simduid;
212 }
213
214 /* This hash maps the OMP simd array to the corresponding simduid used
215 to index into it. For example:
216
217 _7 = GOMP_SIMD_LANE (simduid.0)
218 ...
219 ...
220 D.1737[_7] = stuff;
221
222
223 This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
224 simduid.0. */
225
226 struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
227 {
228 tree decl;
229 unsigned int simduid;
230
231 /* hash_table support. */
232 static inline hashval_t hash (const simd_array_to_simduid *);
233 static inline int equal (const simd_array_to_simduid *,
234 const simd_array_to_simduid *);
235 };
236
237 inline hashval_t
238 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
239 {
240 return DECL_UID (p->decl);
241 }
242
243 inline int
244 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
245 const simd_array_to_simduid *p2)
246 {
247 return p1->decl == p2->decl;
248 }
249
250 /* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
251 into their corresponding constants and remove
252 IFN_GOMP_SIMD_ORDERED_{START,END}. */
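/* For instance (hypothetical GIMPLE, assuming a vectorization factor of 8
   was recorded for simduid.0 in HTAB):

       _7 = GOMP_SIMD_LANE (simduid.0)
       _8 = GOMP_SIMD_VF (simduid.0)

   is folded so that uses of _7 are replaced by the constant 0 and uses
   of _8 by the constant 8, after which both calls are removed.  */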
253
254 static void
255 adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
256 {
257 basic_block bb;
258
259 FOR_EACH_BB_FN (bb, cfun)
260 {
261 gimple_stmt_iterator i;
262
263 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
264 {
265 poly_uint64 vf = 1;
266 enum internal_fn ifn;
267 gimple *stmt = gsi_stmt (i);
268 tree t;
269 if (!is_gimple_call (stmt)
270 || !gimple_call_internal_p (stmt))
271 {
272 gsi_next (&i);
273 continue;
274 }
275 ifn = gimple_call_internal_fn (stmt);
276 switch (ifn)
277 {
278 case IFN_GOMP_SIMD_LANE:
279 case IFN_GOMP_SIMD_VF:
280 case IFN_GOMP_SIMD_LAST_LANE:
281 break;
282 case IFN_GOMP_SIMD_ORDERED_START:
283 case IFN_GOMP_SIMD_ORDERED_END:
284 if (integer_onep (gimple_call_arg (stmt, 0)))
285 {
286 enum built_in_function bcode
287 = (ifn == IFN_GOMP_SIMD_ORDERED_START
288 ? BUILT_IN_GOMP_ORDERED_START
289 : BUILT_IN_GOMP_ORDERED_END);
290 gimple *g
291 = gimple_build_call (builtin_decl_explicit (bcode), 0);
292 gimple_move_vops (g, stmt);
293 gsi_replace (&i, g, true);
294 continue;
295 }
296 gsi_remove (&i, true);
297 unlink_stmt_vdef (stmt);
298 continue;
299 default:
300 gsi_next (&i);
301 continue;
302 }
303 tree arg = gimple_call_arg (stmt, 0);
304 gcc_assert (arg != NULL_TREE);
305 gcc_assert (TREE_CODE (arg) == SSA_NAME);
306 simduid_to_vf *p = NULL, data;
307 data.simduid = DECL_UID (SSA_NAME_VAR (arg));
308 /* Need to nullify the loop safelen field since its value is not
309 valid after the transformation. */
310 if (bb->loop_father && bb->loop_father->safelen > 0)
311 bb->loop_father->safelen = 0;
312 if (htab)
313 {
314 p = htab->find (&data);
315 if (p)
316 vf = p->vf;
317 }
318 switch (ifn)
319 {
320 case IFN_GOMP_SIMD_VF:
321 t = build_int_cst (unsigned_type_node, vf);
322 break;
323 case IFN_GOMP_SIMD_LANE:
324 t = build_int_cst (unsigned_type_node, 0);
325 break;
326 case IFN_GOMP_SIMD_LAST_LANE:
327 t = gimple_call_arg (stmt, 1);
328 break;
329 default:
330 gcc_unreachable ();
331 }
332 tree lhs = gimple_call_lhs (stmt);
333 if (lhs)
334 replace_uses_by (lhs, t);
335 release_defs (stmt);
336 gsi_remove (&i, true);
337 }
338 }
339 }
340
341 /* Helper structure for note_simd_array_uses. */
342
343 struct note_simd_array_uses_struct
344 {
345 hash_table<simd_array_to_simduid> **htab;
346 unsigned int simduid;
347 };
348
349 /* Callback for note_simd_array_uses, called through walk_gimple_op. */
350
351 static tree
352 note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
353 {
354 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
355 struct note_simd_array_uses_struct *ns
356 = (struct note_simd_array_uses_struct *) wi->info;
357
358 if (TYPE_P (*tp))
359 *walk_subtrees = 0;
360 else if (VAR_P (*tp)
361 && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
362 && DECL_CONTEXT (*tp) == current_function_decl)
363 {
364 simd_array_to_simduid data;
365 if (!*ns->htab)
366 *ns->htab = new hash_table<simd_array_to_simduid> (15);
367 data.decl = *tp;
368 data.simduid = ns->simduid;
369 simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
370 if (*slot == NULL)
371 {
372 simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
373 *p = data;
374 *slot = p;
375 }
376 else if ((*slot)->simduid != ns->simduid)
377 (*slot)->simduid = -1U;
378 *walk_subtrees = 0;
379 }
380 return NULL_TREE;
381 }
382
383 /* Find "omp simd array" temporaries and map them to corresponding
384 simduid. */
385
386 static void
387 note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
388 {
389 basic_block bb;
390 gimple_stmt_iterator gsi;
391 struct walk_stmt_info wi;
392 struct note_simd_array_uses_struct ns;
393
394 memset (&wi, 0, sizeof (wi));
395 wi.info = &ns;
396 ns.htab = htab;
397
398 FOR_EACH_BB_FN (bb, cfun)
399 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
400 {
401 gimple *stmt = gsi_stmt (gsi);
402 if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
403 continue;
404 switch (gimple_call_internal_fn (stmt))
405 {
406 case IFN_GOMP_SIMD_LANE:
407 case IFN_GOMP_SIMD_VF:
408 case IFN_GOMP_SIMD_LAST_LANE:
409 break;
410 default:
411 continue;
412 }
413 tree lhs = gimple_call_lhs (stmt);
414 if (lhs == NULL_TREE)
415 continue;
416 imm_use_iterator use_iter;
417 gimple *use_stmt;
418 ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
419 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
420 if (!is_gimple_debug (use_stmt))
421 walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
422 }
423 }
424
425 /* Shrink arrays with "omp simd array" attribute to the corresponding
426 vectorization factor. */
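/* A minimal sketch (the declaration and sizes are made up): an
   "omp simd array" temporary created conservatively as

       int D.1737[64];

   for a loop that ended up being vectorized with a vectorization factor
   of 8 is re-laid out below as

       int D.1737[8];

   using build_array_type_nelts and relayout_decl.  */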
427
428 static void
429 shrink_simd_arrays
430 (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
431 hash_table<simduid_to_vf> *simduid_to_vf_htab)
432 {
433 for (hash_table<simd_array_to_simduid>::iterator iter
434 = simd_array_to_simduid_htab->begin ();
435 iter != simd_array_to_simduid_htab->end (); ++iter)
436 if ((*iter)->simduid != -1U)
437 {
438 tree decl = (*iter)->decl;
439 poly_uint64 vf = 1;
440 if (simduid_to_vf_htab)
441 {
442 simduid_to_vf *p = NULL, data;
443 data.simduid = (*iter)->simduid;
444 p = simduid_to_vf_htab->find (&data);
445 if (p)
446 vf = p->vf;
447 }
448 tree atype
449 = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
450 TREE_TYPE (decl) = atype;
451 relayout_decl (decl);
452 }
453
454 delete simd_array_to_simduid_htab;
455 }
456 \f
457 /* Initialize the vec_info with kind KIND_IN and target cost data
458 TARGET_COST_DATA_IN. */
459
460 vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
461 vec_info_shared *shared_)
462 : kind (kind_in),
463 shared (shared_),
464 target_cost_data (target_cost_data_in)
465 {
466 stmt_vec_infos.create (50);
467 }
468
469 vec_info::~vec_info ()
470 {
471 slp_instance instance;
472 unsigned int i;
473
474 FOR_EACH_VEC_ELT (slp_instances, i, instance)
475 vect_free_slp_instance (instance, true);
476
477 destroy_cost_data (target_cost_data);
478 free_stmt_vec_infos ();
479 }
480
481 vec_info_shared::vec_info_shared ()
482 : datarefs (vNULL),
483 datarefs_copy (vNULL),
484 ddrs (vNULL)
485 {
486 }
487
488 vec_info_shared::~vec_info_shared ()
489 {
490 free_data_refs (datarefs);
491 free_dependence_relations (ddrs);
492 datarefs_copy.release ();
493 }
494
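/* Snapshot the current data references so that check_datarefs can verify,
   under --enable-checking, that later analysis did not modify them.  */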
495 void
496 vec_info_shared::save_datarefs ()
497 {
498 if (!flag_checking)
499 return;
500 datarefs_copy.reserve_exact (datarefs.length ());
501 for (unsigned i = 0; i < datarefs.length (); ++i)
502 datarefs_copy.quick_push (*datarefs[i]);
503 }
504
505 void
506 vec_info_shared::check_datarefs ()
507 {
508 if (!flag_checking)
509 return;
510 gcc_assert (datarefs.length () == datarefs_copy.length ());
511 for (unsigned i = 0; i < datarefs.length (); ++i)
512 if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
513 gcc_unreachable ();
514 }
515
516 /* Record that STMT belongs to the vectorizable region. Create and return
517 an associated stmt_vec_info. */
518
519 stmt_vec_info
520 vec_info::add_stmt (gimple *stmt)
521 {
522 stmt_vec_info res = new_stmt_vec_info (stmt);
523 set_vinfo_for_stmt (stmt, res);
524 return res;
525 }
526
527 /* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
528 return null. It is safe to call this function on any statement, even if
529 it might not be part of the vectorizable region. */
530
531 stmt_vec_info
532 vec_info::lookup_stmt (gimple *stmt)
533 {
534 unsigned int uid = gimple_uid (stmt);
535 if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
536 {
537 stmt_vec_info res = stmt_vec_infos[uid - 1];
538 if (res && res->stmt == stmt)
539 return res;
540 }
541 return NULL;
542 }
543
544 /* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
545 return that stmt_vec_info, otherwise return null. It is safe to call
546 this on arbitrary operands. */
547
548 stmt_vec_info
549 vec_info::lookup_def (tree name)
550 {
551 if (TREE_CODE (name) == SSA_NAME
552 && !SSA_NAME_IS_DEFAULT_DEF (name))
553 return lookup_stmt (SSA_NAME_DEF_STMT (name));
554 return NULL;
555 }
556
557 /* See whether there is a single non-debug statement that uses LHS and
558 whether that statement has an associated stmt_vec_info. Return the
559 stmt_vec_info if so, otherwise return null. */
560
561 stmt_vec_info
562 vec_info::lookup_single_use (tree lhs)
563 {
564 use_operand_p dummy;
565 gimple *use_stmt;
566 if (single_imm_use (lhs, &dummy, &use_stmt))
567 return lookup_stmt (use_stmt);
568 return NULL;
569 }
570
571 /* Return vectorization information about DR. */
572
573 dr_vec_info *
574 vec_info::lookup_dr (data_reference *dr)
575 {
576 stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
577 /* DR_STMT should never refer to a stmt in a pattern replacement. */
578 gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
579 return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
580 }
581
582 /* Record that NEW_STMT_INFO now implements the same data reference
583 as OLD_STMT_INFO. */
584
585 void
586 vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
587 {
588 gcc_assert (!is_pattern_stmt_p (old_stmt_info));
589 STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
590 new_stmt_info->dr_aux = old_stmt_info->dr_aux;
591 STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
592 = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
593 STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
594 = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
595 }
596
597 /* Permanently remove the statement described by STMT_INFO from the
598 function. */
599
600 void
601 vec_info::remove_stmt (stmt_vec_info stmt_info)
602 {
603 gcc_assert (!stmt_info->pattern_stmt_p);
604 set_vinfo_for_stmt (stmt_info->stmt, NULL);
605 gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
606 unlink_stmt_vdef (stmt_info->stmt);
607 gsi_remove (&si, true);
608 release_defs (stmt_info->stmt);
609 free_stmt_vec_info (stmt_info);
610 }
611
612 /* Replace the statement at GSI by NEW_STMT, both the vectorization
613 information and the function itself. STMT_INFO describes the statement
614 at GSI. */
615
616 void
617 vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
618 gimple *new_stmt)
619 {
620 gimple *old_stmt = stmt_info->stmt;
621 gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
622 set_vinfo_for_stmt (old_stmt, NULL);
623 set_vinfo_for_stmt (new_stmt, stmt_info);
624 stmt_info->stmt = new_stmt;
625 gsi_replace (gsi, new_stmt, true);
626 }
627
628 /* Create and initialize a new stmt_vec_info struct for STMT. */
629
630 stmt_vec_info
631 vec_info::new_stmt_vec_info (gimple *stmt)
632 {
633 stmt_vec_info res = XCNEW (class _stmt_vec_info);
634 res->vinfo = this;
635 res->stmt = stmt;
636
637 STMT_VINFO_TYPE (res) = undef_vec_info_type;
638 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
639 STMT_VINFO_VECTORIZABLE (res) = true;
640 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
641 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
642 STMT_VINFO_SLP_VECT_ONLY (res) = false;
643
644 if (gimple_code (stmt) == GIMPLE_PHI
645 && is_loop_header_bb_p (gimple_bb (stmt)))
646 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
647 else
648 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
649
650 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
651 STMT_SLP_TYPE (res) = loop_vect;
652
653 /* This is really "uninitialized" until vect_compute_data_ref_alignment. */
654 res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;
655
656 return res;
657 }
658
659 /* Associate STMT with INFO. */
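/* The uid encoding used here and in lookup_stmt: a gimple uid of zero
   means the statement has no stmt_vec_info in this vec_info, while a
   nonzero uid U means stmt_vec_infos[U - 1] holds its entry.  */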
660
661 void
662 vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
663 {
664 unsigned int uid = gimple_uid (stmt);
665 if (uid == 0)
666 {
667 gcc_checking_assert (info);
668 uid = stmt_vec_infos.length () + 1;
669 gimple_set_uid (stmt, uid);
670 stmt_vec_infos.safe_push (info);
671 }
672 else
673 {
674 gcc_checking_assert (info == NULL);
675 stmt_vec_infos[uid - 1] = info;
676 }
677 }
678
679 /* Free the contents of stmt_vec_infos. */
680
681 void
682 vec_info::free_stmt_vec_infos (void)
683 {
684 unsigned int i;
685 stmt_vec_info info;
686 FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
687 if (info != NULL)
688 free_stmt_vec_info (info);
689 stmt_vec_infos.release ();
690 }
691
692 /* Free STMT_INFO. */
693
694 void
695 vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
696 {
697 if (stmt_info->pattern_stmt_p)
698 {
699 gimple_set_bb (stmt_info->stmt, NULL);
700 tree lhs = gimple_get_lhs (stmt_info->stmt);
701 if (lhs && TREE_CODE (lhs) == SSA_NAME)
702 release_ssa_name (lhs);
703 }
704
705 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
706 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
707 free (stmt_info);
708 }
709
710 /* A helper function to free scev and LOOP niter information, as well as
711 clear loop constraint LOOP_C_FINITE. */
712
713 void
714 vect_free_loop_info_assumptions (class loop *loop)
715 {
716 scev_reset_htab ();
717 /* We need to explicitly reset upper bound information since it is
718 used even after free_numbers_of_iterations_estimates. */
719 loop->any_upper_bound = false;
720 loop->any_likely_upper_bound = false;
721 free_numbers_of_iterations_estimates (loop);
722 loop_constraint_clear (loop, LOOP_C_FINITE);
723 }
724
725 /* If LOOP has been versioned during ifcvt, return the internal call
726 guarding it. */
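/* A sketch of the shape this typically matches, as created by
   if-conversion (loop numbers below are made up):

       _1 = LOOP_VECTORIZED (1, 2)
       if (_1 != 0)
         goto <if-converted loop 1>;
       else
         goto <scalar loop 2>;

   i.e. the internal call immediately precedes the GIMPLE_COND at the end
   of the block found by walking up single predecessors from LOOP's
   preheader.  */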
727
728 gimple *
729 vect_loop_vectorized_call (class loop *loop, gcond **cond)
730 {
731 basic_block bb = loop_preheader_edge (loop)->src;
732 gimple *g;
733 do
734 {
735 g = last_stmt (bb);
736 if (g)
737 break;
738 if (!single_pred_p (bb))
739 break;
740 bb = single_pred (bb);
741 }
742 while (1);
743 if (g && gimple_code (g) == GIMPLE_COND)
744 {
745 if (cond)
746 *cond = as_a <gcond *> (g);
747 gimple_stmt_iterator gsi = gsi_for_stmt (g);
748 gsi_prev (&gsi);
749 if (!gsi_end_p (gsi))
750 {
751 g = gsi_stmt (gsi);
752 if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
753 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
754 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
755 return g;
756 }
757 }
758 return NULL;
759 }
760
761 /* If LOOP has been versioned during loop distribution, return the guarding
762 internal call. */
763
764 static gimple *
765 vect_loop_dist_alias_call (class loop *loop)
766 {
767 basic_block bb;
768 basic_block entry;
769 class loop *outer, *orig;
770 gimple_stmt_iterator gsi;
771 gimple *g;
772
773 if (loop->orig_loop_num == 0)
774 return NULL;
775
776 orig = get_loop (cfun, loop->orig_loop_num);
777 if (orig == NULL)
778 {
779 /* The original loop has somehow been destroyed. Clear the information. */
780 loop->orig_loop_num = 0;
781 return NULL;
782 }
783
784 if (loop != orig)
785 bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
786 else
787 bb = loop_preheader_edge (loop)->src;
788
789 outer = bb->loop_father;
790 entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
791
792 /* Look upward in dominance tree. */
793 for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
794 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
795 {
796 g = last_stmt (bb);
797 if (g == NULL || gimple_code (g) != GIMPLE_COND)
798 continue;
799
800 gsi = gsi_for_stmt (g);
801 gsi_prev (&gsi);
802 if (gsi_end_p (gsi))
803 continue;
804
805 g = gsi_stmt (gsi);
806 /* The guarding internal function call must have the same distribution
807 alias id. */
808 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
809 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
810 return g;
811 }
812 return NULL;
813 }
814
815 /* Set the uids of all the statements in the basic blocks inside the loop
816 represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
817 call guarding the loop which has been if-converted. */
818 static void
819 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
820 {
821 tree arg = gimple_call_arg (loop_vectorized_call, 1);
822 basic_block *bbs;
823 unsigned int i;
824 class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
825
826 LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
827 gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
828 == loop_vectorized_call);
829 /* If we are going to vectorize the outer loop, prevent vectorization
830 of the inner loop in the scalar loop - either the scalar loop is
831 thrown away, so vectorizing it would be wasted work, or it is used
832 only for a few iterations. */
833 if (scalar_loop->inner)
834 {
835 gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
836 if (g)
837 {
838 arg = gimple_call_arg (g, 0);
839 get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
840 fold_loop_internal_call (g, boolean_false_node);
841 }
842 }
843 bbs = get_loop_body (scalar_loop);
844 for (i = 0; i < scalar_loop->num_nodes; i++)
845 {
846 basic_block bb = bbs[i];
847 gimple_stmt_iterator gsi;
848 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
849 {
850 gimple *phi = gsi_stmt (gsi);
851 gimple_set_uid (phi, 0);
852 }
853 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
854 {
855 gimple *stmt = gsi_stmt (gsi);
856 gimple_set_uid (stmt, 0);
857 }
858 }
859 free (bbs);
860 }
861
862 /* Try to vectorize LOOP. */
863
864 static unsigned
865 try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
866 unsigned *num_vectorized_loops,
867 loop_p loop, loop_vec_info orig_loop_vinfo,
868 gimple *loop_vectorized_call,
869 gimple *loop_dist_alias_call)
870 {
871 unsigned ret = 0;
872 vec_info_shared shared;
873 auto_purge_vect_location sentinel;
874 vect_location = find_loop_location (loop);
875 if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
876 && dump_enabled_p ())
877 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
878 "\nAnalyzing loop at %s:%d\n",
879 LOCATION_FILE (vect_location.get_location_t ()),
880 LOCATION_LINE (vect_location.get_location_t ()));
881
882 /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p. */
883 opt_loop_vec_info loop_vinfo
884 = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
885 loop->aux = loop_vinfo;
886
887 if (!loop_vinfo)
888 if (dump_enabled_p ())
889 if (opt_problem *problem = loop_vinfo.get_problem ())
890 {
891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
892 "couldn't vectorize loop\n");
893 problem->emit_and_clear ();
894 }
895
896 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
897 {
898 /* Free existing information if the loop was analyzed under some
899 assumptions. */
900 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
901 vect_free_loop_info_assumptions (loop);
902
903 /* If we applied if-conversion then try to vectorize the
904 BB of innermost loops.
905 ??? Ideally BB vectorization would learn to vectorize
906 control flow by applying if-conversion on the fly; the
907 following retains the if-converted loop body even when
908 only non-if-converted parts took part in BB vectorization. */
909 if (flag_tree_slp_vectorize != 0
910 && loop_vectorized_call
911 && ! loop->inner)
912 {
913 basic_block bb = loop->header;
914 bool require_loop_vectorize = false;
915 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
916 !gsi_end_p (gsi); gsi_next (&gsi))
917 {
918 gimple *stmt = gsi_stmt (gsi);
919 gcall *call = dyn_cast <gcall *> (stmt);
920 if (call && gimple_call_internal_p (call))
921 {
922 internal_fn ifn = gimple_call_internal_fn (call);
923 if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
924 /* Don't keep the if-converted parts when the ifn with
925 specific type is not supported by the backend. */
926 || (direct_internal_fn_p (ifn)
927 && !direct_internal_fn_supported_p
928 (call, OPTIMIZE_FOR_SPEED)))
929 {
930 require_loop_vectorize = true;
931 break;
932 }
933 }
934 gimple_set_uid (stmt, -1);
935 gimple_set_visited (stmt, false);
936 }
937 if (!require_loop_vectorize && vect_slp_bb (bb))
938 {
939 if (dump_enabled_p ())
940 dump_printf_loc (MSG_NOTE, vect_location,
941 "basic block vectorized\n");
942 fold_loop_internal_call (loop_vectorized_call,
943 boolean_true_node);
944 loop_vectorized_call = NULL;
945 ret |= TODO_cleanup_cfg;
946 }
947 }
948 /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
949 loop, don't vectorize its inner loop; we'll attempt to
950 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
951 loop version. */
952 if (loop_vectorized_call && loop->inner)
953 loop->inner->dont_vectorize = true;
954 return ret;
955 }
956
957 if (!dbg_cnt (vect_loop))
958 {
959 /* Free existing information if the loop was analyzed under some
960 assumptions. */
961 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
962 vect_free_loop_info_assumptions (loop);
963 return ret;
964 }
965
966 if (loop_vectorized_call)
967 set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
968
969 unsigned HOST_WIDE_INT bytes;
970 if (dump_enabled_p ())
971 {
972 if (current_vector_size.is_constant (&bytes))
973 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
974 "loop vectorized using %wu byte vectors\n", bytes);
975 else
976 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
977 "loop vectorized using variable length vectors\n");
978 }
979
980 loop_p new_loop = vect_transform_loop (loop_vinfo);
981 (*num_vectorized_loops)++;
982 /* Now that the loop has been vectorized, allow it to be unrolled
983 etc. */
984 loop->force_vectorize = false;
985
986 if (loop->simduid)
987 {
988 simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
989 if (!simduid_to_vf_htab)
990 simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
991 simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
992 simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
993 *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
994 = simduid_to_vf_data;
995 }
996
997 if (loop_vectorized_call)
998 {
999 fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
1000 loop_vectorized_call = NULL;
1001 ret |= TODO_cleanup_cfg;
1002 }
1003 if (loop_dist_alias_call)
1004 {
1005 tree value = gimple_call_arg (loop_dist_alias_call, 1);
1006 fold_loop_internal_call (loop_dist_alias_call, value);
1007 loop_dist_alias_call = NULL;
1008 ret |= TODO_cleanup_cfg;
1009 }
1010
1011 /* Epilogue of vectorized loop must be vectorized too. */
1012 if (new_loop)
1013 ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
1014 new_loop, loop_vinfo, NULL, NULL);
1015
1016 return ret;
1017 }
1018
1019 /* Try to vectorize LOOP. */
1020
1021 static unsigned
1022 try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
1023 unsigned *num_vectorized_loops, loop_p loop)
1024 {
1025 if (!((flag_tree_loop_vectorize
1026 && optimize_loop_nest_for_speed_p (loop))
1027 || loop->force_vectorize))
1028 return 0;
1029
1030 return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
1031 loop, NULL,
1032 vect_loop_vectorized_call (loop),
1033 vect_loop_dist_alias_call (loop));
1034 }
1035
1036
1037 /* Function vectorize_loops.
1038
1039 Entry point to loop vectorization phase. */
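/* In outline: walk every loop that existed when the pass started and try
   to vectorize it (try_vectorize_loop), then fold away the remaining
   LOOP_VECTORIZED / LOOP_DIST_ALIAS guards of loops that were not
   vectorized, release the per-loop vec_info data, fold the GOMP_SIMD_*
   builtins and shrink the "omp simd array" temporaries.  */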
1040
1041 unsigned
1042 vectorize_loops (void)
1043 {
1044 unsigned int i;
1045 unsigned int num_vectorized_loops = 0;
1046 unsigned int vect_loops_num;
1047 class loop *loop;
1048 hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
1049 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1050 bool any_ifcvt_loops = false;
1051 unsigned ret = 0;
1052
1053 vect_loops_num = number_of_loops (cfun);
1054
1055 /* Bail out if there are no loops. */
1056 if (vect_loops_num <= 1)
1057 return 0;
1058
1059 if (cfun->has_simduid_loops)
1060 note_simd_array_uses (&simd_array_to_simduid_htab);
1061
1062 /* ----------- Analyze loops. ----------- */
1063
1064 /* If some loop was duplicated, it gets a bigger number
1065 than all previously defined loops. This fact allows us to run
1066 only over the initial loops, skipping newly generated ones. */
1067 FOR_EACH_LOOP (loop, 0)
1068 if (loop->dont_vectorize)
1069 {
1070 any_ifcvt_loops = true;
1071 /* If-conversion sometimes versions both the outer loop
1072 (for the case when outer loop vectorization might be
1073 desirable) as well as the inner loop in the scalar version
1074 of the loop. So we have:
1075 if (LOOP_VECTORIZED (1, 3))
1076 {
1077 loop1
1078 loop2
1079 }
1080 else
1081 loop3 (copy of loop1)
1082 if (LOOP_VECTORIZED (4, 5))
1083 loop4 (copy of loop2)
1084 else
1085 loop5 (copy of loop4)
1086 If FOR_EACH_LOOP gives us loop3 first (which has
1087 dont_vectorize set), make sure to process loop1 before loop4;
1088 so that we can prevent vectorization of loop4 if loop1
1089 is successfully vectorized. */
1090 if (loop->inner)
1091 {
1092 gimple *loop_vectorized_call
1093 = vect_loop_vectorized_call (loop);
1094 if (loop_vectorized_call
1095 && vect_loop_vectorized_call (loop->inner))
1096 {
1097 tree arg = gimple_call_arg (loop_vectorized_call, 0);
1098 class loop *vector_loop
1099 = get_loop (cfun, tree_to_shwi (arg));
1100 if (vector_loop && vector_loop != loop)
1101 {
1102 /* Make sure we don't vectorize it twice. */
1103 vector_loop->dont_vectorize = true;
1104 ret |= try_vectorize_loop (simduid_to_vf_htab,
1105 &num_vectorized_loops,
1106 vector_loop);
1107 }
1108 }
1109 }
1110 }
1111 else
1112 ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
1113 loop);
1114
1115 vect_location = dump_user_location_t ();
1116
1117 statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
1118 if (dump_enabled_p ()
1119 || (num_vectorized_loops > 0 && dump_enabled_p ()))
1120 dump_printf_loc (MSG_NOTE, vect_location,
1121 "vectorized %u loops in function.\n",
1122 num_vectorized_loops);
1123
1124 /* ----------- Finalize. ----------- */
1125
1126 if (any_ifcvt_loops)
1127 for (i = 1; i < number_of_loops (cfun); i++)
1128 {
1129 loop = get_loop (cfun, i);
1130 if (loop && loop->dont_vectorize)
1131 {
1132 gimple *g = vect_loop_vectorized_call (loop);
1133 if (g)
1134 {
1135 fold_loop_internal_call (g, boolean_false_node);
1136 ret |= TODO_cleanup_cfg;
1137 g = NULL;
1138 }
1139 else
1140 g = vect_loop_dist_alias_call (loop);
1141
1142 if (g)
1143 {
1144 fold_loop_internal_call (g, boolean_false_node);
1145 ret |= TODO_cleanup_cfg;
1146 }
1147 }
1148 }
1149
1150 for (i = 1; i < number_of_loops (cfun); i++)
1151 {
1152 loop_vec_info loop_vinfo;
1153 bool has_mask_store;
1154
1155 loop = get_loop (cfun, i);
1156 if (!loop || !loop->aux)
1157 continue;
1158 loop_vinfo = (loop_vec_info) loop->aux;
1159 has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
1160 delete loop_vinfo;
1161 if (has_mask_store
1162 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
1163 optimize_mask_stores (loop);
1164 loop->aux = NULL;
1165 }
1166
1167 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1168 if (cfun->has_simduid_loops)
1169 adjust_simduid_builtins (simduid_to_vf_htab);
1170
1171 /* Shrink any "omp simd array" temporary arrays to the
1172 actual vectorization factors. */
1173 if (simd_array_to_simduid_htab)
1174 shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
1175 delete simduid_to_vf_htab;
1176 cfun->has_simduid_loops = false;
1177
1178 if (num_vectorized_loops > 0)
1179 {
1180 /* If we vectorized any loop, only virtual SSA form needs to be updated.
1181 ??? Also while we try hard to update loop-closed SSA form we fail
1182 to properly do this in some corner-cases (see PR56286). */
1183 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
1184 return TODO_cleanup_cfg;
1185 }
1186
1187 return ret;
1188 }
1189
1190
1191 /* Entry point to the simduid cleanup pass. */
1192
1193 namespace {
1194
1195 const pass_data pass_data_simduid_cleanup =
1196 {
1197 GIMPLE_PASS, /* type */
1198 "simduid", /* name */
1199 OPTGROUP_NONE, /* optinfo_flags */
1200 TV_NONE, /* tv_id */
1201 ( PROP_ssa | PROP_cfg ), /* properties_required */
1202 0, /* properties_provided */
1203 0, /* properties_destroyed */
1204 0, /* todo_flags_start */
1205 0, /* todo_flags_finish */
1206 };
1207
1208 class pass_simduid_cleanup : public gimple_opt_pass
1209 {
1210 public:
1211 pass_simduid_cleanup (gcc::context *ctxt)
1212 : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
1213 {}
1214
1215 /* opt_pass methods: */
1216 opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
1217 virtual bool gate (function *fun) { return fun->has_simduid_loops; }
1218 virtual unsigned int execute (function *);
1219
1220 }; // class pass_simduid_cleanup
1221
1222 unsigned int
1223 pass_simduid_cleanup::execute (function *fun)
1224 {
1225 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1226
1227 note_simd_array_uses (&simd_array_to_simduid_htab);
1228
1229 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1230 adjust_simduid_builtins (NULL);
1231
1232 /* Shrink any "omp simd array" temporary arrays to the
1233 actual vectorization factors. */
1234 if (simd_array_to_simduid_htab)
1235 shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
1236 fun->has_simduid_loops = false;
1237 return 0;
1238 }
1239
1240 } // anon namespace
1241
1242 gimple_opt_pass *
1243 make_pass_simduid_cleanup (gcc::context *ctxt)
1244 {
1245 return new pass_simduid_cleanup (ctxt);
1246 }
1247
1248
1249 /* Entry point to basic block SLP phase. */
1250
1251 namespace {
1252
1253 const pass_data pass_data_slp_vectorize =
1254 {
1255 GIMPLE_PASS, /* type */
1256 "slp", /* name */
1257 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1258 TV_TREE_SLP_VECTORIZATION, /* tv_id */
1259 ( PROP_ssa | PROP_cfg ), /* properties_required */
1260 0, /* properties_provided */
1261 0, /* properties_destroyed */
1262 0, /* todo_flags_start */
1263 TODO_update_ssa, /* todo_flags_finish */
1264 };
1265
1266 class pass_slp_vectorize : public gimple_opt_pass
1267 {
1268 public:
1269 pass_slp_vectorize (gcc::context *ctxt)
1270 : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
1271 {}
1272
1273 /* opt_pass methods: */
1274 opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
1275 virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
1276 virtual unsigned int execute (function *);
1277
1278 }; // class pass_slp_vectorize
1279
1280 unsigned int
1281 pass_slp_vectorize::execute (function *fun)
1282 {
1283 auto_purge_vect_location sentinel;
1284 basic_block bb;
1285
1286 bool in_loop_pipeline = scev_initialized_p ();
1287 if (!in_loop_pipeline)
1288 {
1289 loop_optimizer_init (LOOPS_NORMAL);
1290 scev_initialize ();
1291 }
1292
1293 /* Mark all stmts as not belonging to the current region and unvisited. */
1294 FOR_EACH_BB_FN (bb, fun)
1295 {
1296 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1297 gsi_next (&gsi))
1298 {
1299 gimple *stmt = gsi_stmt (gsi);
1300 gimple_set_uid (stmt, -1);
1301 gimple_set_visited (stmt, false);
1302 }
1303 }
1304
1305 FOR_EACH_BB_FN (bb, fun)
1306 {
1307 if (vect_slp_bb (bb))
1308 if (dump_enabled_p ())
1309 dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
1310 }
1311
1312 if (!in_loop_pipeline)
1313 {
1314 scev_finalize ();
1315 loop_optimizer_finalize ();
1316 }
1317
1318 return 0;
1319 }
1320
1321 } // anon namespace
1322
1323 gimple_opt_pass *
1324 make_pass_slp_vectorize (gcc::context *ctxt)
1325 {
1326 return new pass_slp_vectorize (ctxt);
1327 }
1328
1329
1330 /* Increase alignment of global arrays to improve vectorization potential.
1331 TODO:
1332 - Consider also structs that have an array field.
1333 - Use ipa analysis to prune arrays that can't be vectorized?
1334 This should involve global alignment analysis and in the future also
1335 array padding. */
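/* For example (illustrative only; the resulting alignment is target
   dependent): a file-scope array such as

       static float f[1024];

   normally only gets the alignment of its element type, but if the target
   provides a vector type for float (say a 16-byte vector), this pass can
   raise the array's alignment to that of the vector type so that
   vectorized accesses to it are known to be aligned.  */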
1336
1337 static unsigned get_vec_alignment_for_type (tree);
1338 static hash_map<tree, unsigned> *type_align_map;
1339
1340 /* Return the alignment of the vector type corresponding to the array's element
1341 type, or 0 if no such vector type exists or the array is smaller than it. */
1342 static unsigned
1343 get_vec_alignment_for_array_type (tree type)
1344 {
1345 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1346 poly_uint64 array_size, vector_size;
1347
1348 tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
1349 if (!vectype
1350 || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
1351 || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
1352 || maybe_lt (array_size, vector_size))
1353 return 0;
1354
1355 return TYPE_ALIGN (vectype);
1356 }
1357
1358 /* Return the maximum, over TYPE's fields, of the alignment of the vector
1359 type corresponding to the field's scalar type. For now, we only consider
1360 fields whose offset is a multiple of their vector alignment.
1361 0 if no suitable field is found. */
1362 static unsigned
1363 get_vec_alignment_for_record_type (tree type)
1364 {
1365 gcc_assert (TREE_CODE (type) == RECORD_TYPE);
1366
1367 unsigned max_align = 0, alignment;
1368 HOST_WIDE_INT offset;
1369 tree offset_tree;
1370
1371 if (TYPE_PACKED (type))
1372 return 0;
1373
1374 unsigned *slot = type_align_map->get (type);
1375 if (slot)
1376 return *slot;
1377
1378 for (tree field = first_field (type);
1379 field != NULL_TREE;
1380 field = DECL_CHAIN (field))
1381 {
1382 /* Skip if not FIELD_DECL or if alignment is set by user. */
1383 if (TREE_CODE (field) != FIELD_DECL
1384 || DECL_USER_ALIGN (field)
1385 || DECL_ARTIFICIAL (field))
1386 continue;
1387
1388 /* We don't need to process the type further if offset is variable,
1389 since the offsets of remaining members will also be variable. */
1390 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
1391 || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
1392 break;
1393
1394 /* Similarly stop processing the type if offset_tree
1395 does not fit in unsigned HOST_WIDE_INT. */
1396 offset_tree = bit_position (field);
1397 if (!tree_fits_uhwi_p (offset_tree))
1398 break;
1399
1400 offset = tree_to_uhwi (offset_tree);
1401 alignment = get_vec_alignment_for_type (TREE_TYPE (field));
1402
1403 /* Get maximum alignment of vectorized field/array among those members
1404 whose offset is a multiple of the vector alignment. */
1405 if (alignment
1406 && (offset % alignment == 0)
1407 && (alignment > max_align))
1408 max_align = alignment;
1409 }
1410
1411 type_align_map->put (type, max_align);
1412 return max_align;
1413 }
1414
1415 /* Return alignment of vector type corresponding to decl's scalar type
1416 or 0 if it doesn't exist or the vector alignment is less than
1417 decl's alignment. */
1418 static unsigned
1419 get_vec_alignment_for_type (tree type)
1420 {
1421 if (type == NULL_TREE)
1422 return 0;
1423
1424 gcc_assert (TYPE_P (type));
1425
1426 static unsigned alignment = 0;
1427 switch (TREE_CODE (type))
1428 {
1429 case ARRAY_TYPE:
1430 alignment = get_vec_alignment_for_array_type (type);
1431 break;
1432 case RECORD_TYPE:
1433 alignment = get_vec_alignment_for_record_type (type);
1434 break;
1435 default:
1436 alignment = 0;
1437 break;
1438 }
1439
1440 return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
1441 }
1442
1443 /* Entry point to increase_alignment pass. */
1444 static unsigned int
1445 increase_alignment (void)
1446 {
1447 varpool_node *vnode;
1448
1449 vect_location = dump_user_location_t ();
1450 type_align_map = new hash_map<tree, unsigned>;
1451
1452 /* Increase the alignment of all global arrays for vectorization. */
1453 FOR_EACH_DEFINED_VARIABLE (vnode)
1454 {
1455 tree decl = vnode->decl;
1456 unsigned int alignment;
1457
1458 if ((decl_in_symtab_p (decl)
1459 && !symtab_node::get (decl)->can_increase_alignment_p ())
1460 || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
1461 continue;
1462
1463 alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
1464 if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
1465 {
1466 vnode->increase_alignment (alignment);
1467 if (dump_enabled_p ())
1468 dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
1469 }
1470 }
1471
1472 delete type_align_map;
1473 return 0;
1474 }
1475
1476
1477 namespace {
1478
1479 const pass_data pass_data_ipa_increase_alignment =
1480 {
1481 SIMPLE_IPA_PASS, /* type */
1482 "increase_alignment", /* name */
1483 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1484 TV_IPA_OPT, /* tv_id */
1485 0, /* properties_required */
1486 0, /* properties_provided */
1487 0, /* properties_destroyed */
1488 0, /* todo_flags_start */
1489 0, /* todo_flags_finish */
1490 };
1491
1492 class pass_ipa_increase_alignment : public simple_ipa_opt_pass
1493 {
1494 public:
1495 pass_ipa_increase_alignment (gcc::context *ctxt)
1496 : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
1497 {}
1498
1499 /* opt_pass methods: */
1500 virtual bool gate (function *)
1501 {
1502 return flag_section_anchors && flag_tree_loop_vectorize;
1503 }
1504
1505 virtual unsigned int execute (function *) { return increase_alignment (); }
1506
1507 }; // class pass_ipa_increase_alignment
1508
1509 } // anon namespace
1510
1511 simple_ipa_opt_pass *
1512 make_pass_ipa_increase_alignment (gcc::context *ctxt)
1513 {
1514 return new pass_ipa_increase_alignment (ctxt);
1515 }