1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "hashtab.h"
26 #include "hash-set.h"
27 #include "vec.h"
28 #include "machmode.h"
29 #include "hard-reg-set.h"
30 #include "input.h"
31 #include "function.h"
32 #include "predict.h"
33 #include "dominance.h"
34 #include "cfg.h"
35 #include "cfganal.h"
36 #include "basic-block.h"
37 #include "cfgloop.h"
38 #include "diagnostic-core.h"
39 #include "flags.h"
40 #include "tree.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimple-iterator.h"
47 #include "gimple-ssa.h"
48 #include "dumpfile.h"
49
50 static void flow_loops_cfg_dump (FILE *);
51 \f
52 /* Dump loop related CFG information. */
53
54 static void
55 flow_loops_cfg_dump (FILE *file)
56 {
57 basic_block bb;
58
59 if (!file)
60 return;
61
62 FOR_EACH_BB_FN (bb, cfun)
63 {
64 edge succ;
65 edge_iterator ei;
66
67 fprintf (file, ";; %d succs { ", bb->index);
68 FOR_EACH_EDGE (succ, ei, bb->succs)
69 fprintf (file, "%d ", succ->dest->index);
70 fprintf (file, "}\n");
71 }
72 }
73
74 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
75
76 bool
77 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
78 {
79 unsigned odepth = loop_depth (outer);
80
81 return (loop_depth (loop) > odepth
82 && (*loop->superloops)[odepth] == outer);
83 }
84
  85 /* Returns the superloop of LOOP at depth DEPTH in the loop hierarchy,
  86    where depth zero corresponds to the root of the loop tree.  */
87
88 struct loop *
89 superloop_at_depth (struct loop *loop, unsigned depth)
90 {
91 unsigned ldepth = loop_depth (loop);
92
93 gcc_assert (depth <= ldepth);
94
95 if (depth == ldepth)
96 return loop;
97
98 return (*loop->superloops)[depth];
99 }
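
/* For illustration (hypothetical loops; not used by this file): for a loop L
   at depth 3 the superloop vector built by establish_preds below is
   { tree_root, A, B }, with A and B the enclosing loops at depths 1 and 2.
   Then superloop_at_depth (L, 0) is the tree root, superloop_at_depth (L, 2)
   is B, superloop_at_depth (L, 3) is L itself, and flow_loop_nested_p (A, L)
   holds because loop_depth (L) > 1 and (*L->superloops)[1] == A.  */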
100
101 /* Returns the list of the latch edges of LOOP. */
102
103 static vec<edge>
104 get_loop_latch_edges (const struct loop *loop)
105 {
106 edge_iterator ei;
107 edge e;
108 vec<edge> ret = vNULL;
109
110 FOR_EACH_EDGE (e, ei, loop->header->preds)
111 {
112 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
113 ret.safe_push (e);
114 }
115
116 return ret;
117 }
118
119 /* Dump the loop information specified by LOOP to the stream FILE
120 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
121
122 void
123 flow_loop_dump (const struct loop *loop, FILE *file,
124 void (*loop_dump_aux) (const struct loop *, FILE *, int),
125 int verbose)
126 {
127 basic_block *bbs;
128 unsigned i;
129 vec<edge> latches;
130 edge e;
131
132 if (! loop || ! loop->header)
133 return;
134
135 fprintf (file, ";;\n;; Loop %d\n", loop->num);
136
137 fprintf (file, ";; header %d, ", loop->header->index);
138 if (loop->latch)
139 fprintf (file, "latch %d\n", loop->latch->index);
140 else
141 {
142 fprintf (file, "multiple latches:");
143 latches = get_loop_latch_edges (loop);
144 FOR_EACH_VEC_ELT (latches, i, e)
145 fprintf (file, " %d", e->src->index);
146 latches.release ();
147 fprintf (file, "\n");
148 }
149
150 fprintf (file, ";; depth %d, outer %ld\n",
151 loop_depth (loop), (long) (loop_outer (loop)
152 ? loop_outer (loop)->num : -1));
153
154 fprintf (file, ";; nodes:");
155 bbs = get_loop_body (loop);
156 for (i = 0; i < loop->num_nodes; i++)
157 fprintf (file, " %d", bbs[i]->index);
158 free (bbs);
159 fprintf (file, "\n");
160
161 if (loop_dump_aux)
162 loop_dump_aux (loop, file, verbose);
163 }
164
 165 /* Dump information about all loops to the stream FILE,
 166    using the auxiliary dump callback function LOOP_DUMP_AUX if non-null.  */
167
168 void
169 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
170 {
171 struct loop *loop;
172
173 if (!current_loops || ! file)
174 return;
175
176 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
177
178 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
179 {
180 flow_loop_dump (loop, file, loop_dump_aux, verbose);
181 }
182
183 if (verbose)
184 flow_loops_cfg_dump (file);
185 }
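
/* A typical debugging use (a sketch; stderr is just an example stream):

     flow_loops_dump (stderr, NULL, 1);

   dumps every loop including the tree root and, with the verbose flag set,
   also the successor lists of all basic blocks.  */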
186
187 /* Free data allocated for LOOP. */
188
189 void
190 flow_loop_free (struct loop *loop)
191 {
192 struct loop_exit *exit, *next;
193
194 vec_free (loop->superloops);
195
196 /* Break the list of the loop exit records. They will be freed when the
197 corresponding edge is rescanned or removed, and this avoids
198 accessing the (already released) head of the list stored in the
199 loop structure. */
200 for (exit = loop->exits->next; exit != loop->exits; exit = next)
201 {
202 next = exit->next;
203 exit->next = exit;
204 exit->prev = exit;
205 }
206
207 ggc_free (loop->exits);
208 ggc_free (loop);
209 }
210
211 /* Free all the memory allocated for LOOPS. */
212
213 void
214 flow_loops_free (struct loops *loops)
215 {
216 if (loops->larray)
217 {
218 unsigned i;
219 loop_p loop;
220
221 /* Free the loop descriptors. */
222 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
223 {
224 if (!loop)
225 continue;
226
227 flow_loop_free (loop);
228 }
229
230 vec_free (loops->larray);
231 }
232 }
233
 234 /* Find the nodes contained within the LOOP with header HEADER by walking
 235    backwards over predecessor edges from its latches.  Return the number of
        nodes within the loop.  */
236
237 int
238 flow_loop_nodes_find (basic_block header, struct loop *loop)
239 {
240 vec<basic_block> stack = vNULL;
241 int num_nodes = 1;
242 edge latch;
243 edge_iterator latch_ei;
244
245 header->loop_father = loop;
246
247 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
248 {
249 if (latch->src->loop_father == loop
250 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
251 continue;
252
253 num_nodes++;
254 stack.safe_push (latch->src);
255 latch->src->loop_father = loop;
256
257 while (!stack.is_empty ())
258 {
259 basic_block node;
260 edge e;
261 edge_iterator ei;
262
263 node = stack.pop ();
264
265 FOR_EACH_EDGE (e, ei, node->preds)
266 {
267 basic_block ancestor = e->src;
268
269 if (ancestor->loop_father != loop)
270 {
271 ancestor->loop_father = loop;
272 num_nodes++;
273 stack.safe_push (ancestor);
274 }
275 }
276 }
277 }
278 stack.release ();
279
280 return num_nodes;
281 }
282
283 /* Records the vector of superloops of the loop LOOP, whose immediate
284 superloop is FATHER. */
285
286 static void
287 establish_preds (struct loop *loop, struct loop *father)
288 {
289 loop_p ploop;
290 unsigned depth = loop_depth (father) + 1;
291 unsigned i;
292
293 loop->superloops = 0;
294 vec_alloc (loop->superloops, depth);
295 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
296 loop->superloops->quick_push (ploop);
297 loop->superloops->quick_push (father);
298
299 for (ploop = loop->inner; ploop; ploop = ploop->next)
300 establish_preds (ploop, loop);
301 }
302
 303 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
 304    added loop.  If LOOP has children, make sure that their
 305    superloop vectors are initialized correctly.  */
306
307 void
308 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
309 {
310 loop->next = father->inner;
311 father->inner = loop;
312
313 establish_preds (loop, father);
314 }
315
316 /* Remove LOOP from the loop hierarchy tree. */
317
318 void
319 flow_loop_tree_node_remove (struct loop *loop)
320 {
321 struct loop *prev, *father;
322
323 father = loop_outer (loop);
324
325 /* Remove loop from the list of sons. */
326 if (father->inner == loop)
327 father->inner = loop->next;
328 else
329 {
330 for (prev = father->inner; prev->next != loop; prev = prev->next)
331 continue;
332 prev->next = loop->next;
333 }
334
335 loop->superloops = NULL;
336 }
337
 338 /* Allocates and returns a new loop structure.  */
339
340 struct loop *
341 alloc_loop (void)
342 {
343 struct loop *loop = ggc_cleared_alloc<struct loop> ();
344
345 loop->exits = ggc_cleared_alloc<loop_exit> ();
346 loop->exits->next = loop->exits->prev = loop->exits;
347 loop->can_be_parallel = false;
348 loop->nb_iterations_upper_bound = 0;
349 loop->nb_iterations_estimate = 0;
350 return loop;
351 }
352
 353 /* Initializes the loops structure LOOPS, reserving space for NUM_LOOPS loops
354 (including the root of the loop tree). */
355
356 void
357 init_loops_structure (struct function *fn,
358 struct loops *loops, unsigned num_loops)
359 {
360 struct loop *root;
361
362 memset (loops, 0, sizeof *loops);
363 vec_alloc (loops->larray, num_loops);
364
365 /* Dummy loop containing whole function. */
366 root = alloc_loop ();
367 root->num_nodes = n_basic_blocks_for_fn (fn);
368 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
369 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
370 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
371 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
372
373 loops->larray->quick_push (root);
374 loops->tree_root = root;
375 }
376
377 /* Returns whether HEADER is a loop header. */
378
379 bool
380 bb_loop_header_p (basic_block header)
381 {
382 edge_iterator ei;
383 edge e;
384
385 /* If we have an abnormal predecessor, do not consider the
386 loop (not worth the problems). */
387 if (bb_has_abnormal_pred (header))
388 return false;
389
390 /* Look for back edges where a predecessor is dominated
391 by this block. A natural loop has a single entry
392 node (header) that dominates all the nodes in the
 393      loop.  It also has a single back edge to the header
394 from a latch node. */
395 FOR_EACH_EDGE (e, ei, header->preds)
396 {
397 basic_block latch = e->src;
398 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
399 && dominated_by_p (CDI_DOMINATORS, latch, header))
400 return true;
401 }
402
403 return false;
404 }
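
/* A small illustrative CFG (hypothetical block numbers):

     ENTRY -> 2 -> 3 -> 4 -> EXIT
                   ^    |
                   +----+

   Block 3 dominates block 4 and receives the back edge 4 -> 3, so
   bb_loop_header_p is true for block 3 and false for blocks 2 and 4.  */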
405
 406 /* Find all the natural loops in the function, save them in the LOOPS
 407    structure, and recalculate loop_father information in the basic block
 408    structures.  If LOOPS is non-NULL then the loop structures for already
 409    recorded loops will be re-used and their number will not change.  We
 410    assume that no stale loops exist in LOOPS.
 411    When LOOPS is NULL, a new structure is allocated and built from scratch.
 412    Return the built LOOPS structure.  */
413
414 struct loops *
415 flow_loops_find (struct loops *loops)
416 {
417 bool from_scratch = (loops == NULL);
418 int *rc_order;
419 int b;
420 unsigned i;
421
422 /* Ensure that the dominators are computed. */
423 calculate_dominance_info (CDI_DOMINATORS);
424
425 if (!loops)
426 {
427 loops = ggc_cleared_alloc<struct loops> ();
428 init_loops_structure (cfun, loops, 1);
429 }
430
431 /* Ensure that loop exits were released. */
432 gcc_assert (loops->exits == NULL);
433
434 /* Taking care of this degenerate case makes the rest of
435 this code simpler. */
436 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
437 return loops;
438
439 /* The root loop node contains all basic-blocks. */
440 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
441
442 /* Compute depth first search order of the CFG so that outer
443 natural loops will be found before inner natural loops. */
444 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
445 pre_and_rev_post_order_compute (NULL, rc_order, false);
446
447 /* Gather all loop headers in reverse completion order and allocate
448 loop structures for loops that are not already present. */
449 auto_vec<loop_p> larray (loops->larray->length ());
450 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
451 {
452 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
453 if (bb_loop_header_p (header))
454 {
455 struct loop *loop;
456
457 /* The current active loop tree has valid loop-fathers for
458 header blocks. */
459 if (!from_scratch
460 && header->loop_father->header == header)
461 {
462 loop = header->loop_father;
463 /* If we found an existing loop remove it from the
464 loop tree. It is going to be inserted again
465 below. */
466 flow_loop_tree_node_remove (loop);
467 }
468 else
469 {
470 /* Otherwise allocate a new loop structure for the loop. */
471 loop = alloc_loop ();
472 /* ??? We could re-use unused loop slots here. */
473 loop->num = loops->larray->length ();
474 vec_safe_push (loops->larray, loop);
475 loop->header = header;
476
477 if (!from_scratch
478 && dump_file && (dump_flags & TDF_DETAILS))
479 fprintf (dump_file, "flow_loops_find: discovered new "
480 "loop %d with header %d\n",
481 loop->num, header->index);
482 }
483 /* Reset latch, we recompute it below. */
484 loop->latch = NULL;
485 larray.safe_push (loop);
486 }
487
488 /* Make blocks part of the loop root node at start. */
489 header->loop_father = loops->tree_root;
490 }
491
492 free (rc_order);
493
494 /* Now iterate over the loops found, insert them into the loop tree
495 and assign basic-block ownership. */
496 for (i = 0; i < larray.length (); ++i)
497 {
498 struct loop *loop = larray[i];
499 basic_block header = loop->header;
500 edge_iterator ei;
501 edge e;
502
503 flow_loop_tree_node_add (header->loop_father, loop);
504 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
505
506 /* Look for the latch for this header block, if it has just a
507 single one. */
508 FOR_EACH_EDGE (e, ei, header->preds)
509 {
510 basic_block latch = e->src;
511
512 if (flow_bb_inside_loop_p (loop, latch))
513 {
514 if (loop->latch != NULL)
515 {
516 /* More than one latch edge. */
517 loop->latch = NULL;
518 break;
519 }
520 loop->latch = latch;
521 }
522 }
523 }
524
525 return loops;
526 }
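
/* A sketch of building the loop tree from scratch; the surrounding
   bookkeeping (installing the result as current_loops, setting the loop
   state flags) is assumed and not shown here:

     struct loops *loops = flow_loops_find (NULL);

   Passing an existing structure instead re-uses the already recorded loops
   and keeps their numbers stable, as described above.  */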
527
 528 /* Ratio of edge frequencies at which one of multiple latch edges is
 529    considered to belong to an inner loop with the same header.  */
530 #define HEAVY_EDGE_RATIO 8
531
 532 /* Minimum number of samples for which we apply the
 533    find_subloop_latch_edge_by_profile heuristic.  */
534 #define HEAVY_EDGE_MIN_SAMPLES 10
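
/* With these values the profile heuristic below accepts an edge only when
   the latch counts sum to at least 10 samples and the edge carries all but
   at most 1/8 of that sum.  For example, with a total count of 100 the
   candidate edge needs a count of at least 88: (100 - 88) * 8 = 96 <= 100,
   whereas (100 - 87) * 8 = 104 > 100.  */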
535
 536 /* If the profile info is available, finds an edge in LATCHES that is much
 537    more frequent than the remaining edges.  Returns such an edge, or NULL if we do
538 not find one.
539
540 We do not use guessed profile here, only the measured one. The guessed
541 profile is usually too flat and unreliable for this (and it is mostly based
542 on the loop structure of the program, so it does not make much sense to
543 derive the loop structure from it). */
544
545 static edge
546 find_subloop_latch_edge_by_profile (vec<edge> latches)
547 {
548 unsigned i;
549 edge e, me = NULL;
550 gcov_type mcount = 0, tcount = 0;
551
552 FOR_EACH_VEC_ELT (latches, i, e)
553 {
554 if (e->count > mcount)
555 {
556 me = e;
557 mcount = e->count;
558 }
559 tcount += e->count;
560 }
561
562 if (tcount < HEAVY_EDGE_MIN_SAMPLES
563 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
564 return NULL;
565
566 if (dump_file)
567 fprintf (dump_file,
568 "Found latch edge %d -> %d using profile information.\n",
569 me->src->index, me->dest->index);
570 return me;
571 }
572
 573 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
574 on the structure of induction variables. Returns this edge, or NULL if we
575 do not find any.
576
577 We are quite conservative, and look just for an obvious simple innermost
578 loop (which is the case where we would lose the most performance by not
579 disambiguating the loop). More precisely, we look for the following
580 situation: The source of the chosen latch edge dominates sources of all
581 the other latch edges. Additionally, the header does not contain a phi node
582 such that the argument from the chosen edge is equal to the argument from
583 another edge. */
584
585 static edge
586 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
587 {
588 edge e, latch = latches[0];
589 unsigned i;
590 gimple phi;
591 gimple_stmt_iterator psi;
592 tree lop;
593 basic_block bb;
594
595 /* Find the candidate for the latch edge. */
596 for (i = 1; latches.iterate (i, &e); i++)
597 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
598 latch = e;
599
600 /* Verify that it dominates all the latch edges. */
601 FOR_EACH_VEC_ELT (latches, i, e)
602 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
603 return NULL;
604
605 /* Check for a phi node that would deny that this is a latch edge of
606 a subloop. */
607 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
608 {
609 phi = gsi_stmt (psi);
610 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
611
612 /* Ignore the values that are not changed inside the subloop. */
613 if (TREE_CODE (lop) != SSA_NAME
614 || SSA_NAME_DEF_STMT (lop) == phi)
615 continue;
616 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
617 if (!bb || !flow_bb_inside_loop_p (loop, bb))
618 continue;
619
620 FOR_EACH_VEC_ELT (latches, i, e)
621 if (e != latch
622 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
623 return NULL;
624 }
625
626 if (dump_file)
627 fprintf (dump_file,
628 "Found latch edge %d -> %d using iv structure.\n",
629 latch->src->index, latch->dest->index);
630 return latch;
631 }
632
633 /* If we can determine that one of the several latch edges of LOOP behaves
634 as a latch edge of a separate subloop, returns this edge. Otherwise
635 returns NULL. */
636
637 static edge
638 find_subloop_latch_edge (struct loop *loop)
639 {
640 vec<edge> latches = get_loop_latch_edges (loop);
641 edge latch = NULL;
642
643 if (latches.length () > 1)
644 {
645 latch = find_subloop_latch_edge_by_profile (latches);
646
647 if (!latch
648 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
649 should use cfghook for this, but it is hard to imagine it would
650 be useful elsewhere. */
651 && current_ir_type () == IR_GIMPLE)
652 latch = find_subloop_latch_edge_by_ivs (loop, latches);
653 }
654
655 latches.release ();
656 return latch;
657 }
658
659 /* Callback for make_forwarder_block. Returns true if the edge E is marked
660 in the set MFB_REIS_SET. */
661
662 static hash_set<edge> *mfb_reis_set;
663 static bool
664 mfb_redirect_edges_in_set (edge e)
665 {
666 return mfb_reis_set->contains (e);
667 }
668
669 /* Creates a subloop of LOOP with latch edge LATCH. */
670
671 static void
672 form_subloop (struct loop *loop, edge latch)
673 {
674 edge_iterator ei;
675 edge e, new_entry;
676 struct loop *new_loop;
677
678 mfb_reis_set = new hash_set<edge>;
679 FOR_EACH_EDGE (e, ei, loop->header->preds)
680 {
681 if (e != latch)
682 mfb_reis_set->add (e);
683 }
684 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
685 NULL);
686 delete mfb_reis_set;
687
688 loop->header = new_entry->src;
689
690 /* Find the blocks and subloops that belong to the new loop, and add it to
691 the appropriate place in the loop tree. */
692 new_loop = alloc_loop ();
693 new_loop->header = new_entry->dest;
694 new_loop->latch = latch->src;
695 add_loop (new_loop, loop);
696 }
697
 698 /* Make all the latch edges of LOOP go to a single forwarder block --
699 a new latch of LOOP. */
700
701 static void
702 merge_latch_edges (struct loop *loop)
703 {
704 vec<edge> latches = get_loop_latch_edges (loop);
705 edge latch, e;
706 unsigned i;
707
708 gcc_assert (latches.length () > 0);
709
710 if (latches.length () == 1)
711 loop->latch = latches[0]->src;
712 else
713 {
714 if (dump_file)
715 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
716
717 mfb_reis_set = new hash_set<edge>;
718 FOR_EACH_VEC_ELT (latches, i, e)
719 mfb_reis_set->add (e);
720 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
721 NULL);
722 delete mfb_reis_set;
723
724 loop->header = latch->dest;
725 loop->latch = latch->src;
726 }
727
728 latches.release ();
729 }
730
731 /* LOOP may have several latch edges. Transform it into (possibly several)
 732    loops with a single latch edge.  */
733
734 static void
735 disambiguate_multiple_latches (struct loop *loop)
736 {
737 edge e;
738
 739   /* We eliminate the multiple latches by splitting the header into a forwarder
740 block F and the rest R, and redirecting the edges. There are two cases:
741
742 1) If there is a latch edge E that corresponds to a subloop (we guess
743 that based on profile -- if it is taken much more often than the
744 remaining edges; and on trees, using the information about induction
745 variables of the loops), we redirect E to R, all the remaining edges to
746 F, then rescan the loops and try again for the outer loop.
747 2) If there is no such edge, we redirect all latch edges to F, and the
748 entry edges to R, thus making F the single latch of the loop. */
749
750 if (dump_file)
751 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
752 loop->num);
753
754 /* During latch merging, we may need to redirect the entry edges to a new
755 block. This would cause problems if the entry edge was the one from the
756 entry block. To avoid having to handle this case specially, split
757 such entry edge. */
758 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
759 if (e)
760 split_edge (e);
761
762 while (1)
763 {
764 e = find_subloop_latch_edge (loop);
765 if (!e)
766 break;
767
768 form_subloop (loop, e);
769 }
770
771 merge_latch_edges (loop);
772 }
773
774 /* Split loops with multiple latch edges. */
775
776 void
777 disambiguate_loops_with_multiple_latches (void)
778 {
779 struct loop *loop;
780
781 FOR_EACH_LOOP (loop, 0)
782 {
783 if (!loop->latch)
784 disambiguate_multiple_latches (loop);
785 }
786 }
787
788 /* Return nonzero if basic block BB belongs to LOOP. */
789 bool
790 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
791 {
792 struct loop *source_loop;
793
794 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
795 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
796 return 0;
797
798 source_loop = bb->loop_father;
799 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
800 }
801
802 /* Enumeration predicate for get_loop_body_with_size. */
803 static bool
804 glb_enum_p (const_basic_block bb, const void *glb_loop)
805 {
806 const struct loop *const loop = (const struct loop *) glb_loop;
807 return (bb != loop->header
808 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
809 }
810
 811 /* Gets the basic blocks of a LOOP.  The header is the 0-th block, the rest
 812    are in DFS order against the direction of edges from the latch.  In
 813    particular, if header != latch, the latch is the 1-st block.  LOOP cannot
 814    be the fake loop tree root, and its size must be at most MAX_SIZE.  The
 815    blocks in the LOOP body are stored into BODY, and the size of the LOOP is
 816    returned.  */
817
818 unsigned
819 get_loop_body_with_size (const struct loop *loop, basic_block *body,
820 unsigned max_size)
821 {
822 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
823 body, max_size, loop);
824 }
825
 826 /* Gets the basic blocks of a LOOP.  The header is the 0-th block, the rest
 827    are in DFS order against the direction of edges from the latch.  In
 828    particular, if header != latch, the latch is the 1-st block.  */
829
830 basic_block *
831 get_loop_body (const struct loop *loop)
832 {
833 basic_block *body, bb;
834 unsigned tv = 0;
835
836 gcc_assert (loop->num_nodes);
837
838 body = XNEWVEC (basic_block, loop->num_nodes);
839
840 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
841 {
842 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
843 special-case the fake loop that contains the whole function. */
844 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
845 body[tv++] = loop->header;
846 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
847 FOR_EACH_BB_FN (bb, cfun)
848 body[tv++] = bb;
849 }
850 else
851 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
852
853 gcc_assert (tv == loop->num_nodes);
854 return body;
855 }
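
/* Callers own the array returned by get_loop_body and must free it; a
   minimal usage sketch, with process_bb standing in for caller code:

     basic_block *body = get_loop_body (loop);
     for (unsigned i = 0; i < loop->num_nodes; i++)
       process_bb (body[i]);
     free (body);  */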
856
 857 /* Fills the dominance descendants of basic block BB that lie inside LOOP
 858    into the array TOVISIT, starting at index *TV.  */
859
860 static void
861 fill_sons_in_loop (const struct loop *loop, basic_block bb,
862 basic_block *tovisit, int *tv)
863 {
864 basic_block son, postpone = NULL;
865
866 tovisit[(*tv)++] = bb;
867 for (son = first_dom_son (CDI_DOMINATORS, bb);
868 son;
869 son = next_dom_son (CDI_DOMINATORS, son))
870 {
871 if (!flow_bb_inside_loop_p (loop, son))
872 continue;
873
874 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
875 {
876 postpone = son;
877 continue;
878 }
879 fill_sons_in_loop (loop, son, tovisit, tv);
880 }
881
882 if (postpone)
883 fill_sons_in_loop (loop, postpone, tovisit, tv);
884 }
885
886 /* Gets body of a LOOP (that must be different from the outermost loop)
887 sorted by dominance relation. Additionally, if a basic block s dominates
 888    the latch, then only blocks dominated by s are placed after it.  */
889
890 basic_block *
891 get_loop_body_in_dom_order (const struct loop *loop)
892 {
893 basic_block *tovisit;
894 int tv;
895
896 gcc_assert (loop->num_nodes);
897
898 tovisit = XNEWVEC (basic_block, loop->num_nodes);
899
900 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
901
902 tv = 0;
903 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
904
905 gcc_assert (tv == (int) loop->num_nodes);
906
907 return tovisit;
908 }
909
910 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
911
912 basic_block *
913 get_loop_body_in_custom_order (const struct loop *loop,
914 int (*bb_comparator) (const void *, const void *))
915 {
916 basic_block *bbs = get_loop_body (loop);
917
918 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
919
920 return bbs;
921 }
922
923 /* Get body of a LOOP in breadth first sort order. */
924
925 basic_block *
926 get_loop_body_in_bfs_order (const struct loop *loop)
927 {
928 basic_block *blocks;
929 basic_block bb;
930 bitmap visited;
931 unsigned int i = 0;
932 unsigned int vc = 1;
933
934 gcc_assert (loop->num_nodes);
935 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
936
937 blocks = XNEWVEC (basic_block, loop->num_nodes);
938 visited = BITMAP_ALLOC (NULL);
939
940 bb = loop->header;
941 while (i < loop->num_nodes)
942 {
943 edge e;
944 edge_iterator ei;
945
946 if (bitmap_set_bit (visited, bb->index))
947 /* This basic block is now visited */
948 blocks[i++] = bb;
949
950 FOR_EACH_EDGE (e, ei, bb->succs)
951 {
952 if (flow_bb_inside_loop_p (loop, e->dest))
953 {
954 if (bitmap_set_bit (visited, e->dest->index))
955 blocks[i++] = e->dest;
956 }
957 }
958
959 gcc_assert (i >= vc);
960
961 bb = blocks[vc++];
962 }
963
964 BITMAP_FREE (visited);
965 return blocks;
966 }
967
968 /* Hash function for struct loop_exit. */
969
970 hashval_t
971 loop_exit_hasher::hash (loop_exit *exit)
972 {
973 return htab_hash_pointer (exit->e);
974 }
975
976 /* Equality function for struct loop_exit. Compares with edge. */
977
978 bool
979 loop_exit_hasher::equal (loop_exit *exit, edge e)
980 {
981 return exit->e == e;
982 }
983
 984 /* Frees the list of loop exit descriptions starting at EXIT.  */
985
986 void
987 loop_exit_hasher::remove (loop_exit *exit)
988 {
989 loop_exit *next;
990 for (; exit; exit = next)
991 {
992 next = exit->next_e;
993
994 exit->next->prev = exit->prev;
995 exit->prev->next = exit->next;
996
997 ggc_free (exit);
998 }
999 }
1000
1001 /* Returns the list of records for E as an exit of a loop. */
1002
1003 static struct loop_exit *
1004 get_exit_descriptions (edge e)
1005 {
1006 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
1007 }
1008
1009 /* Updates the lists of loop exits in which E appears.
1010 If REMOVED is true, E is being removed, and we
1011 just remove it from the lists of exits.
1012 If NEW_EDGE is true and E is not a loop exit, we
1013 do not try to remove it from loop exit lists. */
1014
1015 void
1016 rescan_loop_exit (edge e, bool new_edge, bool removed)
1017 {
1018 struct loop_exit *exits = NULL, *exit;
1019 struct loop *aloop, *cloop;
1020
1021 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1022 return;
1023
1024 if (!removed
1025 && e->src->loop_father != NULL
1026 && e->dest->loop_father != NULL
1027 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1028 {
1029 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1030 for (aloop = e->src->loop_father;
1031 aloop != cloop;
1032 aloop = loop_outer (aloop))
1033 {
1034 exit = ggc_alloc<loop_exit> ();
1035 exit->e = e;
1036
1037 exit->next = aloop->exits->next;
1038 exit->prev = aloop->exits;
1039 exit->next->prev = exit;
1040 exit->prev->next = exit;
1041
1042 exit->next_e = exits;
1043 exits = exit;
1044 }
1045 }
1046
1047 if (!exits && new_edge)
1048 return;
1049
1050 loop_exit **slot
1051 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1052 exits ? INSERT : NO_INSERT);
1053 if (!slot)
1054 return;
1055
1056 if (exits)
1057 {
1058 if (*slot)
1059 loop_exit_hasher::remove (*slot);
1060 *slot = exits;
1061 }
1062 else
1063 current_loops->exits->clear_slot (slot);
1064 }
1065
1066 /* For each loop, record list of exit edges, and start maintaining these
1067 lists. */
1068
1069 void
1070 record_loop_exits (void)
1071 {
1072 basic_block bb;
1073 edge_iterator ei;
1074 edge e;
1075
1076 if (!current_loops)
1077 return;
1078
1079 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1080 return;
1081 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1082
1083 gcc_assert (current_loops->exits == NULL);
1084 current_loops->exits
1085 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1086
1087 FOR_EACH_BB_FN (bb, cfun)
1088 {
1089 FOR_EACH_EDGE (e, ei, bb->succs)
1090 {
1091 rescan_loop_exit (e, true, false);
1092 }
1093 }
1094 }
1095
1096 /* Dumps information about the exit in *SLOT to FILE.
1097 Callback for htab_traverse. */
1098
1099 int
1100 dump_recorded_exit (loop_exit **slot, FILE *file)
1101 {
1102 struct loop_exit *exit = *slot;
1103 unsigned n = 0;
1104 edge e = exit->e;
1105
1106 for (; exit != NULL; exit = exit->next_e)
1107 n++;
1108
1109 fprintf (file, "Edge %d->%d exits %u loops\n",
1110 e->src->index, e->dest->index, n);
1111
1112 return 1;
1113 }
1114
1115 /* Dumps the recorded exits of loops to FILE. */
1116
1117 extern void dump_recorded_exits (FILE *);
1118 void
1119 dump_recorded_exits (FILE *file)
1120 {
1121 if (!current_loops->exits)
1122 return;
1123 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1124 }
1125
1126 /* Releases lists of loop exits. */
1127
1128 void
1129 release_recorded_exits (void)
1130 {
1131 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
1132 current_loops->exits->empty ();
1133 current_loops->exits = NULL;
1134 loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
1135 }
1136
1137 /* Returns the list of the exit edges of a LOOP. */
1138
1139 vec<edge>
1140 get_loop_exit_edges (const struct loop *loop)
1141 {
1142 vec<edge> edges = vNULL;
1143 edge e;
1144 unsigned i;
1145 basic_block *body;
1146 edge_iterator ei;
1147 struct loop_exit *exit;
1148
1149 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1150
1151 /* If we maintain the lists of exits, use them. Otherwise we must
1152 scan the body of the loop. */
1153 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1154 {
1155 for (exit = loop->exits->next; exit->e; exit = exit->next)
1156 edges.safe_push (exit->e);
1157 }
1158 else
1159 {
1160 body = get_loop_body (loop);
1161 for (i = 0; i < loop->num_nodes; i++)
1162 FOR_EACH_EDGE (e, ei, body[i]->succs)
1163 {
1164 if (!flow_bb_inside_loop_p (loop, e->dest))
1165 edges.safe_push (e);
1166 }
1167 free (body);
1168 }
1169
1170 return edges;
1171 }
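
/* The vector returned by get_loop_exit_edges must be released by the
   caller; a usage sketch:

     vec<edge> exits = get_loop_exit_edges (loop);
     edge ex;
     unsigned ix;
     FOR_EACH_VEC_ELT (exits, ix, ex)
       if (dump_file)
         fprintf (dump_file, " %d->%d", ex->src->index, ex->dest->index);
     exits.release ();  */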
1172
1173 /* Counts the number of conditional branches inside LOOP. */
1174
1175 unsigned
1176 num_loop_branches (const struct loop *loop)
1177 {
1178 unsigned i, n;
1179 basic_block * body;
1180
1181 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1182
1183 body = get_loop_body (loop);
1184 n = 0;
1185 for (i = 0; i < loop->num_nodes; i++)
1186 if (EDGE_COUNT (body[i]->succs) >= 2)
1187 n++;
1188 free (body);
1189
1190 return n;
1191 }
1192
1193 /* Adds basic block BB to LOOP. */
1194 void
1195 add_bb_to_loop (basic_block bb, struct loop *loop)
1196 {
1197 unsigned i;
1198 loop_p ploop;
1199 edge_iterator ei;
1200 edge e;
1201
1202 gcc_assert (bb->loop_father == NULL);
1203 bb->loop_father = loop;
1204 loop->num_nodes++;
1205 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1206 ploop->num_nodes++;
1207
1208 FOR_EACH_EDGE (e, ei, bb->succs)
1209 {
1210 rescan_loop_exit (e, true, false);
1211 }
1212 FOR_EACH_EDGE (e, ei, bb->preds)
1213 {
1214 rescan_loop_exit (e, true, false);
1215 }
1216 }
1217
1218 /* Remove basic block BB from loops. */
1219 void
1220 remove_bb_from_loops (basic_block bb)
1221 {
1222 unsigned i;
1223 struct loop *loop = bb->loop_father;
1224 loop_p ploop;
1225 edge_iterator ei;
1226 edge e;
1227
1228 gcc_assert (loop != NULL);
1229 loop->num_nodes--;
1230 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1231 ploop->num_nodes--;
1232 bb->loop_father = NULL;
1233
1234 FOR_EACH_EDGE (e, ei, bb->succs)
1235 {
1236 rescan_loop_exit (e, false, true);
1237 }
1238 FOR_EACH_EDGE (e, ei, bb->preds)
1239 {
1240 rescan_loop_exit (e, false, true);
1241 }
1242 }
1243
1244 /* Finds nearest common ancestor in loop tree for given loops. */
1245 struct loop *
1246 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1247 {
1248 unsigned sdepth, ddepth;
1249
1250 if (!loop_s) return loop_d;
1251 if (!loop_d) return loop_s;
1252
1253 sdepth = loop_depth (loop_s);
1254 ddepth = loop_depth (loop_d);
1255
1256 if (sdepth < ddepth)
1257 loop_d = (*loop_d->superloops)[sdepth];
1258 else if (sdepth > ddepth)
1259 loop_s = (*loop_s->superloops)[ddepth];
1260
1261 while (loop_s != loop_d)
1262 {
1263 loop_s = loop_outer (loop_s);
1264 loop_d = loop_outer (loop_d);
1265 }
1266 return loop_s;
1267 }
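
/* For example (hypothetical loops): if LOOP_S is at depth 4 and LOOP_D at
   depth 2, LOOP_S is first replaced by its superloop at depth 2, and the
   two loops are then walked outwards in lock step until they meet,
   possibly only at the root of the loop tree.  */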
1268
1269 /* Removes LOOP from structures and frees its data. */
1270
1271 void
1272 delete_loop (struct loop *loop)
1273 {
1274 /* Remove the loop from structure. */
1275 flow_loop_tree_node_remove (loop);
1276
1277 /* Remove loop from loops array. */
1278 (*current_loops->larray)[loop->num] = NULL;
1279
1280 /* Free loop data. */
1281 flow_loop_free (loop);
1282 }
1283
1284 /* Cancels the LOOP; it must be an innermost one.  */
1285
1286 static void
1287 cancel_loop (struct loop *loop)
1288 {
1289 basic_block *bbs;
1290 unsigned i;
1291 struct loop *outer = loop_outer (loop);
1292
1293 gcc_assert (!loop->inner);
1294
1295 /* Move blocks up one level (they should be removed as soon as possible). */
1296 bbs = get_loop_body (loop);
1297 for (i = 0; i < loop->num_nodes; i++)
1298 bbs[i]->loop_father = outer;
1299
1300 free (bbs);
1301 delete_loop (loop);
1302 }
1303
1304 /* Cancels LOOP and all its subloops. */
1305 void
1306 cancel_loop_tree (struct loop *loop)
1307 {
1308 while (loop->inner)
1309 cancel_loop_tree (loop->inner);
1310 cancel_loop (loop);
1311 }
1312
1313 /* Checks that information about loops is correct
1314 -- sizes of loops are all right
1315 -- results of get_loop_body really belong to the loop
1316      -- loop headers have just a single entry edge and a single latch edge
1317      -- loop latches have only a single successor that is the header of their loop
1318      -- irreducible loops are correctly marked
1319      -- the cached loop depth and loop father of each bb are correct
1320 */
1321 DEBUG_FUNCTION void
1322 verify_loop_structure (void)
1323 {
1324 unsigned *sizes, i, j;
1325 sbitmap irreds;
1326 basic_block bb, *bbs;
1327 struct loop *loop;
1328 int err = 0;
1329 edge e;
1330 unsigned num = number_of_loops (cfun);
1331 struct loop_exit *exit, *mexit;
1332 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1333 sbitmap visited;
1334
1335 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1336 {
1337 error ("loop verification on loop tree that needs fixup");
1338 err = 1;
1339 }
1340
1341 /* We need up-to-date dominators, compute or verify them. */
1342 if (!dom_available)
1343 calculate_dominance_info (CDI_DOMINATORS);
1344 else
1345 verify_dominators (CDI_DOMINATORS);
1346
1347 /* Check the headers. */
1348 FOR_EACH_BB_FN (bb, cfun)
1349 if (bb_loop_header_p (bb))
1350 {
1351 if (bb->loop_father->header == NULL)
1352 {
1353 error ("loop with header %d marked for removal", bb->index);
1354 err = 1;
1355 }
1356 else if (bb->loop_father->header != bb)
1357 {
1358 error ("loop with header %d not in loop tree", bb->index);
1359 err = 1;
1360 }
1361 }
1362 else if (bb->loop_father->header == bb)
1363 {
1364 error ("non-loop with header %d not marked for removal", bb->index);
1365 err = 1;
1366 }
1367
1368 /* Check the recorded loop father and sizes of loops. */
1369 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1370 bitmap_clear (visited);
1371 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1372 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1373 {
1374 unsigned n;
1375
1376 if (loop->header == NULL)
1377 {
1378 error ("removed loop %d in loop tree", loop->num);
1379 err = 1;
1380 continue;
1381 }
1382
1383 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1384 if (loop->num_nodes != n)
1385 {
1386 error ("size of loop %d should be %d, not %d",
1387 loop->num, n, loop->num_nodes);
1388 err = 1;
1389 }
1390
1391 for (j = 0; j < n; j++)
1392 {
1393 bb = bbs[j];
1394
1395 if (!flow_bb_inside_loop_p (loop, bb))
1396 {
1397 error ("bb %d does not belong to loop %d",
1398 bb->index, loop->num);
1399 err = 1;
1400 }
1401
1402 /* Ignore this block if it is in an inner loop. */
1403 if (bitmap_bit_p (visited, bb->index))
1404 continue;
1405 bitmap_set_bit (visited, bb->index);
1406
1407 if (bb->loop_father != loop)
1408 {
1409 error ("bb %d has father loop %d, should be loop %d",
1410 bb->index, bb->loop_father->num, loop->num);
1411 err = 1;
1412 }
1413 }
1414 }
1415 free (bbs);
1416 sbitmap_free (visited);
1417
1418 /* Check headers and latches. */
1419 FOR_EACH_LOOP (loop, 0)
1420 {
1421 i = loop->num;
1422 if (loop->header == NULL)
1423 continue;
1424 if (!bb_loop_header_p (loop->header))
1425 {
1426 error ("loop %d%'s header is not a loop header", i);
1427 err = 1;
1428 }
1429 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1430 && EDGE_COUNT (loop->header->preds) != 2)
1431 {
1432 error ("loop %d%'s header does not have exactly 2 entries", i);
1433 err = 1;
1434 }
1435 if (loop->latch)
1436 {
1437 if (!find_edge (loop->latch, loop->header))
1438 {
1439 error ("loop %d%'s latch does not have an edge to its header", i);
1440 err = 1;
1441 }
1442 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1443 {
1444 error ("loop %d%'s latch is not dominated by its header", i);
1445 err = 1;
1446 }
1447 }
1448 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1449 {
1450 if (!single_succ_p (loop->latch))
1451 {
1452 error ("loop %d%'s latch does not have exactly 1 successor", i);
1453 err = 1;
1454 }
1455 if (single_succ (loop->latch) != loop->header)
1456 {
1457 error ("loop %d%'s latch does not have header as successor", i);
1458 err = 1;
1459 }
1460 if (loop->latch->loop_father != loop)
1461 {
1462 error ("loop %d%'s latch does not belong directly to it", i);
1463 err = 1;
1464 }
1465 }
1466 if (loop->header->loop_father != loop)
1467 {
1468 error ("loop %d%'s header does not belong directly to it", i);
1469 err = 1;
1470 }
1471 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1472 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1473 {
1474 error ("loop %d%'s latch is marked as part of irreducible region", i);
1475 err = 1;
1476 }
1477 }
1478
1479 /* Check irreducible loops. */
1480 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1481 {
1482 /* Record old info. */
1483 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1484 FOR_EACH_BB_FN (bb, cfun)
1485 {
1486 edge_iterator ei;
1487 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1488 bitmap_set_bit (irreds, bb->index);
1489 else
1490 bitmap_clear_bit (irreds, bb->index);
1491 FOR_EACH_EDGE (e, ei, bb->succs)
1492 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1493 e->flags |= EDGE_ALL_FLAGS + 1;
1494 }
1495
1496 /* Recount it. */
1497 mark_irreducible_loops ();
1498
1499 /* Compare. */
1500 FOR_EACH_BB_FN (bb, cfun)
1501 {
1502 edge_iterator ei;
1503
1504 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1505 && !bitmap_bit_p (irreds, bb->index))
1506 {
1507 error ("basic block %d should be marked irreducible", bb->index);
1508 err = 1;
1509 }
1510 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1511 && bitmap_bit_p (irreds, bb->index))
1512 {
1513 error ("basic block %d should not be marked irreducible", bb->index);
1514 err = 1;
1515 }
1516 FOR_EACH_EDGE (e, ei, bb->succs)
1517 {
1518 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1519 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1520 {
1521 error ("edge from %d to %d should be marked irreducible",
1522 e->src->index, e->dest->index);
1523 err = 1;
1524 }
1525 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1526 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1527 {
1528 error ("edge from %d to %d should not be marked irreducible",
1529 e->src->index, e->dest->index);
1530 err = 1;
1531 }
1532 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1533 }
1534 }
1535 free (irreds);
1536 }
1537
1538 /* Check the recorded loop exits. */
1539 FOR_EACH_LOOP (loop, 0)
1540 {
1541 if (!loop->exits || loop->exits->e != NULL)
1542 {
1543 error ("corrupted head of the exits list of loop %d",
1544 loop->num);
1545 err = 1;
1546 }
1547 else
1548 {
1549 /* Check that the list forms a cycle, and all elements except
1550 for the head are nonnull. */
1551 for (mexit = loop->exits, exit = mexit->next, i = 0;
1552 exit->e && exit != mexit;
1553 exit = exit->next)
1554 {
1555 if (i++ & 1)
1556 mexit = mexit->next;
1557 }
1558
1559 if (exit != loop->exits)
1560 {
1561 error ("corrupted exits list of loop %d", loop->num);
1562 err = 1;
1563 }
1564 }
1565
1566 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1567 {
1568 if (loop->exits->next != loop->exits)
1569 {
1570 error ("nonempty exits list of loop %d, but exits are not recorded",
1571 loop->num);
1572 err = 1;
1573 }
1574 }
1575 }
1576
1577 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1578 {
1579 unsigned n_exits = 0, eloops;
1580
1581 sizes = XCNEWVEC (unsigned, num);
1582 memset (sizes, 0, sizeof (unsigned) * num);
1583 FOR_EACH_BB_FN (bb, cfun)
1584 {
1585 edge_iterator ei;
1586 if (bb->loop_father == current_loops->tree_root)
1587 continue;
1588 FOR_EACH_EDGE (e, ei, bb->succs)
1589 {
1590 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1591 continue;
1592
1593 n_exits++;
1594 exit = get_exit_descriptions (e);
1595 if (!exit)
1596 {
1597 error ("exit %d->%d not recorded",
1598 e->src->index, e->dest->index);
1599 err = 1;
1600 }
1601 eloops = 0;
1602 for (; exit; exit = exit->next_e)
1603 eloops++;
1604
1605 for (loop = bb->loop_father;
1606 loop != e->dest->loop_father
1607                  /* When a loop exit is also an entry edge, which
1608                     can happen when avoiding CFG manipulations,
1609                     then the last loop exited is the outer loop
1610                     of the loop entered.  */
1611 && loop != loop_outer (e->dest->loop_father);
1612 loop = loop_outer (loop))
1613 {
1614 eloops--;
1615 sizes[loop->num]++;
1616 }
1617
1618 if (eloops != 0)
1619 {
1620 error ("wrong list of exited loops for edge %d->%d",
1621 e->src->index, e->dest->index);
1622 err = 1;
1623 }
1624 }
1625 }
1626
1627 if (n_exits != current_loops->exits->elements ())
1628 {
1629 error ("too many loop exits recorded");
1630 err = 1;
1631 }
1632
1633 FOR_EACH_LOOP (loop, 0)
1634 {
1635 eloops = 0;
1636 for (exit = loop->exits->next; exit->e; exit = exit->next)
1637 eloops++;
1638 if (eloops != sizes[loop->num])
1639 {
1640 error ("%d exits recorded for loop %d (having %d exits)",
1641 eloops, loop->num, sizes[loop->num]);
1642 err = 1;
1643 }
1644 }
1645
1646 free (sizes);
1647 }
1648
1649 gcc_assert (!err);
1650
1651 if (!dom_available)
1652 free_dominance_info (CDI_DOMINATORS);
1653 }
1654
1655 /* Returns latch edge of LOOP. */
1656 edge
1657 loop_latch_edge (const struct loop *loop)
1658 {
1659 return find_edge (loop->latch, loop->header);
1660 }
1661
1662 /* Returns preheader edge of LOOP. */
1663 edge
1664 loop_preheader_edge (const struct loop *loop)
1665 {
1666 edge e;
1667 edge_iterator ei;
1668
1669 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1670
1671 FOR_EACH_EDGE (e, ei, loop->header->preds)
1672 if (e->src != loop->latch)
1673 break;
1674
1675 return e;
1676 }
1677
1678 /* Returns true if E is an exit of LOOP. */
1679
1680 bool
1681 loop_exit_edge_p (const struct loop *loop, const_edge e)
1682 {
1683 return (flow_bb_inside_loop_p (loop, e->src)
1684 && !flow_bb_inside_loop_p (loop, e->dest));
1685 }
1686
1687 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1688 or more than one exit. If loops do not have the exits recorded, NULL
1689    is always returned.  */
1690
1691 edge
1692 single_exit (const struct loop *loop)
1693 {
1694 struct loop_exit *exit = loop->exits->next;
1695
1696 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1697 return NULL;
1698
1699 if (exit->e && exit->next == loop->exits)
1700 return exit->e;
1701 else
1702 return NULL;
1703 }
1704
1705 /* Returns true when BB has an incoming edge exiting LOOP. */
1706
1707 bool
1708 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1709 {
1710 edge e;
1711 edge_iterator ei;
1712
1713 FOR_EACH_EDGE (e, ei, bb->preds)
1714 if (loop_exit_edge_p (loop, e))
1715 return true;
1716
1717 return false;
1718 }
1719
1720 /* Returns true when BB has an outgoing edge exiting LOOP. */
1721
1722 bool
1723 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1724 {
1725 edge e;
1726 edge_iterator ei;
1727
1728 FOR_EACH_EDGE (e, ei, bb->succs)
1729 if (loop_exit_edge_p (loop, e))
1730 return true;
1731
1732 return false;
1733 }
1734
1735 /* Return location corresponding to the loop control condition if possible. */
1736
1737 location_t
1738 get_loop_location (struct loop *loop)
1739 {
1740 rtx_insn *insn = NULL;
1741 struct niter_desc *desc = NULL;
1742 edge exit;
1743
1744 /* For a for or while loop, we would like to return the location
1745 of the for or while statement, if possible. To do this, look
1746 for the branch guarding the loop back-edge. */
1747
1748 /* If this is a simple loop with an in_edge, then the loop control
1749 branch is typically at the end of its source. */
1750 desc = get_simple_loop_desc (loop);
1751 if (desc->in_edge)
1752 {
1753 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1754 {
1755 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1756 return INSN_LOCATION (insn);
1757 }
1758 }
1759 /* If loop has a single exit, then the loop control branch
1760 must be at the end of its source. */
1761 if ((exit = single_exit (loop)))
1762 {
1763 FOR_BB_INSNS_REVERSE (exit->src, insn)
1764 {
1765 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1766 return INSN_LOCATION (insn);
1767 }
1768 }
1769 /* Next check the latch, to see if it is non-empty. */
1770 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1771 {
1772 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1773 return INSN_LOCATION (insn);
1774 }
1775 /* Finally, if none of the above identifies the loop control branch,
1776 return the first location in the loop header. */
1777 FOR_BB_INSNS (loop->header, insn)
1778 {
1779 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1780 return INSN_LOCATION (insn);
1781 }
1782 /* If all else fails, simply return the current function location. */
1783 return DECL_SOURCE_LOCATION (current_function_decl);
1784 }
1785
1786 /* Records that every statement in LOOP is executed I_BOUND times.
1787 REALISTIC is true if I_BOUND is expected to be close to the real number
1788 of iterations. UPPER is true if we are sure the loop iterates at most
1789 I_BOUND times. */
1790
1791 void
1792 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1793 bool realistic, bool upper)
1794 {
1795 /* Update the bounds only when there is no previous estimation, or when the
1796 current estimation is smaller. */
1797 if (upper
1798 && (!loop->any_upper_bound
1799 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1800 {
1801 loop->any_upper_bound = true;
1802 loop->nb_iterations_upper_bound = i_bound;
1803 }
1804 if (realistic
1805 && (!loop->any_estimate
1806 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1807 {
1808 loop->any_estimate = true;
1809 loop->nb_iterations_estimate = i_bound;
1810 }
1811
1812 /* If an upper bound is smaller than the realistic estimate of the
1813 number of iterations, use the upper bound instead. */
1814 if (loop->any_upper_bound
1815 && loop->any_estimate
1816 && wi::ltu_p (loop->nb_iterations_upper_bound,
1817 loop->nb_iterations_estimate))
1818 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1819 }
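
/* A worked example with hypothetical values: recording an upper bound of
   100 and later a realistic estimate of 200 leaves nb_iterations_estimate
   at 100, because the final adjustment above never lets the estimate
   exceed a known upper bound.  */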
1820
1821 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1822    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1823 on the number of iterations of LOOP could not be derived, returns -1. */
1824
1825 HOST_WIDE_INT
1826 get_estimated_loop_iterations_int (struct loop *loop)
1827 {
1828 widest_int nit;
1829 HOST_WIDE_INT hwi_nit;
1830
1831 if (!get_estimated_loop_iterations (loop, &nit))
1832 return -1;
1833
1834 if (!wi::fits_shwi_p (nit))
1835 return -1;
1836 hwi_nit = nit.to_shwi ();
1837
1838 return hwi_nit < 0 ? -1 : hwi_nit;
1839 }
1840
1841 /* Returns an upper bound on the number of executions of statements
1842 in the LOOP. For statements before the loop exit, this exceeds
1843    the number of executions of the latch by one.  */
1844
1845 HOST_WIDE_INT
1846 max_stmt_executions_int (struct loop *loop)
1847 {
1848 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1849 HOST_WIDE_INT snit;
1850
1851 if (nit == -1)
1852 return -1;
1853
1854 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1855
1856 /* If the computation overflows, return -1. */
1857 return snit < 0 ? -1 : snit;
1858 }
1859
1860 /* Sets NIT to the estimated number of executions of the latch of the
1861 LOOP. If we have no reliable estimate, the function returns false, otherwise
1862 returns true. */
1863
1864 bool
1865 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1866 {
1867   /* Even if the bound is not recorded, we may be able to derive one from
1868      the profile.  */
1869 if (!loop->any_estimate)
1870 {
1871 if (loop->header->count)
1872 {
1873 *nit = gcov_type_to_wide_int
1874 (expected_loop_iterations_unbounded (loop) + 1);
1875 return true;
1876 }
1877 return false;
1878 }
1879
1880 *nit = loop->nb_iterations_estimate;
1881 return true;
1882 }
1883
1884 /* Sets NIT to an upper bound for the maximum number of executions of the
1885 latch of the LOOP. If we have no reliable estimate, the function returns
1886 false, otherwise returns true. */
1887
1888 bool
1889 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1890 {
1891 if (!loop->any_upper_bound)
1892 return false;
1893
1894 *nit = loop->nb_iterations_upper_bound;
1895 return true;
1896 }
1897
1898 /* Similar to get_max_loop_iterations, but returns the estimate only
1899    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1900 on the number of iterations of LOOP could not be derived, returns -1. */
1901
1902 HOST_WIDE_INT
1903 get_max_loop_iterations_int (struct loop *loop)
1904 {
1905 widest_int nit;
1906 HOST_WIDE_INT hwi_nit;
1907
1908 if (!get_max_loop_iterations (loop, &nit))
1909 return -1;
1910
1911 if (!wi::fits_shwi_p (nit))
1912 return -1;
1913 hwi_nit = nit.to_shwi ();
1914
1915 return hwi_nit < 0 ? -1 : hwi_nit;
1916 }
1917
1918 /* Returns the loop depth of the loop BB belongs to. */
1919
1920 int
1921 bb_loop_depth (const_basic_block bb)
1922 {
1923 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1924 }
1925
1926 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1927
1928 void
1929 mark_loop_for_removal (loop_p loop)
1930 {
1931 loop->former_header = loop->header;
1932 loop->header = NULL;
1933 loop->latch = NULL;
1934 loops_state_set (LOOPS_NEED_FIXUP);
1935 }
1936