/* File: gcc/cfgloop.c (from thirdparty/gcc.git, trunk merge snapshot).  */
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "cfgloop.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "gimple-iterator.h"
33 #include "gimple-ssa.h"
34 #include "pointer-set.h"
35 #include "ggc.h"
36 #include "dumpfile.h"
37
38 static void flow_loops_cfg_dump (FILE *);
39 \f
40 /* Dump loop related CFG information. */
41
42 static void
43 flow_loops_cfg_dump (FILE *file)
44 {
45 basic_block bb;
46
47 if (!file)
48 return;
49
50 FOR_EACH_BB (bb)
51 {
52 edge succ;
53 edge_iterator ei;
54
55 fprintf (file, ";; %d succs { ", bb->index);
56 FOR_EACH_EDGE (succ, ei, bb->succs)
57 fprintf (file, "%d ", succ->dest->index);
58 fprintf (file, "}\n");
59 }
60 }
61
62 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
63
64 bool
65 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
66 {
67 unsigned odepth = loop_depth (outer);
68
69 return (loop_depth (loop) > odepth
70 && (*loop->superloops)[odepth] == outer);
71 }
72
73 /* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
74 loops within LOOP. */
75
76 struct loop *
77 superloop_at_depth (struct loop *loop, unsigned depth)
78 {
79 unsigned ldepth = loop_depth (loop);
80
81 gcc_assert (depth <= ldepth);
82
83 if (depth == ldepth)
84 return loop;
85
86 return (*loop->superloops)[depth];
87 }
88
89 /* Returns the list of the latch edges of LOOP. */
90
91 static vec<edge>
92 get_loop_latch_edges (const struct loop *loop)
93 {
94 edge_iterator ei;
95 edge e;
96 vec<edge> ret = vNULL;
97
98 FOR_EACH_EDGE (e, ei, loop->header->preds)
99 {
100 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
101 ret.safe_push (e);
102 }
103
104 return ret;
105 }
106
/* Dump the loop information specified by LOOP to the stream FILE
   using auxiliary dump callback function LOOP_DUMP_AUX if non null.
   VERBOSE is only forwarded to LOOP_DUMP_AUX.  */

void
flow_loop_dump (const struct loop *loop, FILE *file,
		void (*loop_dump_aux) (const struct loop *, FILE *, int),
		int verbose)
{
  basic_block *bbs;
  unsigned i;
  vec<edge> latches;
  edge e;

  /* Nothing to print for a missing or headerless loop.  */
  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";; header %d, ", loop->header->index);
  if (loop->latch)
    fprintf (file, "latch %d\n", loop->latch->index);
  else
    {
      /* No single latch recorded: list the source blocks of all
	 latch edges instead.  */
      fprintf (file, "multiple latches:");
      latches = get_loop_latch_edges (loop);
      FOR_EACH_VEC_ELT (latches, i, e)
	fprintf (file, " %d", e->src->index);
      latches.release ();
      fprintf (file, "\n");
    }

  /* -1 marks a loop with no parent (the root of the loop tree).  */
  fprintf (file, ";; depth %d, outer %ld\n",
	   loop_depth (loop), (long) (loop_outer (loop)
				      ? loop_outer (loop)->num : -1));

  fprintf (file, ";; nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}
152
153 /* Dump the loop information about loops to the stream FILE,
154 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
155
156 void
157 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
158 {
159 loop_iterator li;
160 struct loop *loop;
161
162 if (!current_loops || ! file)
163 return;
164
165 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
166
167 FOR_EACH_LOOP (li, loop, LI_INCLUDE_ROOT)
168 {
169 flow_loop_dump (loop, file, loop_dump_aux, verbose);
170 }
171
172 if (verbose)
173 flow_loops_cfg_dump (file);
174 }
175
/* Free data allocated for LOOP.  Does not unlink LOOP from the loop
   tree; callers are expected to have done that already.  */

void
flow_loop_free (struct loop *loop)
{
  struct loop_exit *exit, *next;

  vec_free (loop->superloops);

  /* Break the list of the loop exit records.  They will be freed when the
     corresponding edge is rescanned or removed, and this avoids
     accessing the (already released) head of the list stored in the
     loop structure.  */
  for (exit = loop->exits->next; exit != loop->exits; exit = next)
    {
      next = exit->next;
      /* Turn each record into a singleton circular list so later
	 unlinking in loop_exit_free stays well-defined.  */
      exit->next = exit;
      exit->prev = exit;
    }

  /* Release the dummy list head and the loop itself.  */
  ggc_free (loop->exits);
  ggc_free (loop);
}
199
200 /* Free all the memory allocated for LOOPS. */
201
202 void
203 flow_loops_free (struct loops *loops)
204 {
205 if (loops->larray)
206 {
207 unsigned i;
208 loop_p loop;
209
210 /* Free the loop descriptors. */
211 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
212 {
213 if (!loop)
214 continue;
215
216 flow_loop_free (loop);
217 }
218
219 vec_free (loops->larray);
220 }
221 }
222
/* Find the nodes contained within the LOOP with header HEADER.
   Return the number of nodes within the loop.  Also sets the
   loop_father field of every block found to LOOP.  */

int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
  vec<basic_block> stack = vNULL;
  int num_nodes = 1;	/* The header itself always belongs.  */
  edge latch;
  edge_iterator latch_ei;

  header->loop_father = loop;

  /* Walk backwards from each latch: every block that reaches a latch
     without passing through the header is inside the natural loop.  */
  FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
    {
      /* Skip edges that are not back edges (source not dominated by
	 the header) and sources already placed into LOOP.  */
      if (latch->src->loop_father == loop
	  || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
	continue;

      num_nodes++;
      stack.safe_push (latch->src);
      latch->src->loop_father = loop;

      /* Reverse DFS over predecessors; marking loop_father doubles as
	 the visited set, and naturally stops at the header, which was
	 marked above.  */
      while (!stack.is_empty ())
	{
	  basic_block node;
	  edge e;
	  edge_iterator ei;

	  node = stack.pop ();

	  FOR_EACH_EDGE (e, ei, node->preds)
	    {
	      basic_block ancestor = e->src;

	      if (ancestor->loop_father != loop)
		{
		  ancestor->loop_father = loop;
		  num_nodes++;
		  stack.safe_push (ancestor);
		}
	    }
	}
    }
  stack.release ();

  return num_nodes;
}
271
/* Records the vector of superloops of the loop LOOP, whose immediate
   superloop is FATHER.  The vector is FATHER's superloop chain with
   FATHER appended; the whole subtree rooted at LOOP is updated
   recursively.  */

static void
establish_preds (struct loop *loop, struct loop *father)
{
  loop_p ploop;
  unsigned depth = loop_depth (father) + 1;
  unsigned i;

  /* Discard any stale vector and allocate one of the exact depth.  */
  loop->superloops = 0;
  vec_alloc (loop->superloops, depth);
  FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
    loop->superloops->quick_push (ploop);
  loop->superloops->quick_push (father);

  /* Children's superloop vectors depend on ours; rebuild them too.  */
  for (ploop = loop->inner; ploop; ploop = ploop->next)
    establish_preds (ploop, loop);
}
291
292 /* Add LOOP to the loop hierarchy tree where FATHER is father of the
293 added loop. If LOOP has some children, take care of that their
294 pred field will be initialized correctly. */
295
296 void
297 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
298 {
299 loop->next = father->inner;
300 father->inner = loop;
301
302 establish_preds (loop, father);
303 }
304
/* Remove LOOP from the loop hierarchy tree.  LOOP must have a parent
   (loop_outer is dereferenced unconditionally).  The superloops vector
   is dropped, not freed here.  */

void
flow_loop_tree_node_remove (struct loop *loop)
{
  struct loop *prev, *father;

  father = loop_outer (loop);

  /* Remove loop from the list of sons.  */
  if (father->inner == loop)
    father->inner = loop->next;
  else
    {
      /* Find the predecessor of LOOP in the sibling list and splice
	 LOOP out.  */
      for (prev = father->inner; prev->next != loop; prev = prev->next)
	continue;
      prev->next = loop->next;
    }

  loop->superloops = NULL;
}
326
327 /* Allocates and returns new loop structure. */
328
329 struct loop *
330 alloc_loop (void)
331 {
332 struct loop *loop = ggc_alloc_cleared_loop ();
333
334 loop->exits = ggc_alloc_cleared_loop_exit ();
335 loop->exits->next = loop->exits->prev = loop->exits;
336 loop->can_be_parallel = false;
337 loop->nb_iterations_upper_bound = 0;
338 loop->nb_iterations_estimate = 0;
339 return loop;
340 }
341
/* Initializes loops structure LOOPS of function FN, reserving place
   for NUM_LOOPS loops (including the root of the loop tree).  The
   root is a dummy loop containing the whole function.  */

void
init_loops_structure (struct function *fn,
		      struct loops *loops, unsigned num_loops)
{
  struct loop *root;

  memset (loops, 0, sizeof *loops);
  vec_alloc (loops->larray, num_loops);

  /* Dummy loop containing whole function.  */
  root = alloc_loop ();
  root->num_nodes = n_basic_blocks_for_function (fn);
  /* The fake root uses the exit block as latch and the entry block as
     header, so all real blocks lie "inside" it.  */
  root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
  root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
  ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
  EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;

  loops->larray->quick_push (root);
  loops->tree_root = root;
}
365
366 /* Returns whether HEADER is a loop header. */
367
368 bool
369 bb_loop_header_p (basic_block header)
370 {
371 edge_iterator ei;
372 edge e;
373
374 /* If we have an abnormal predecessor, do not consider the
375 loop (not worth the problems). */
376 if (bb_has_abnormal_pred (header))
377 return false;
378
379 /* Look for back edges where a predecessor is dominated
380 by this block. A natural loop has a single entry
381 node (header) that dominates all the nodes in the
382 loop. It also has single back edge to the header
383 from a latch node. */
384 FOR_EACH_EDGE (e, ei, header->preds)
385 {
386 basic_block latch = e->src;
387 if (latch != ENTRY_BLOCK_PTR
388 && dominated_by_p (CDI_DOMINATORS, latch, header))
389 return true;
390 }
391
392 return false;
393 }
394
/* Find all the natural loops in the function and save in LOOPS structure and
   recalculate loop_father information in basic block structures.
   If LOOPS is non-NULL then the loop structures for already recorded loops
   will be re-used and their number will not change.  We assume that no
   stale loops exist in LOOPS.
   When LOOPS is NULL it is allocated and re-built from scratch.
   Return the built LOOPS structure.  */

struct loops *
flow_loops_find (struct loops *loops)
{
  bool from_scratch = (loops == NULL);
  int *rc_order;
  int b;
  unsigned i;
  vec<loop_p> larray;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (!loops)
    {
      loops = ggc_alloc_cleared_loops ();
      init_loops_structure (cfun, loops, 1);
    }

  /* Ensure that loop exits were released.  */
  gcc_assert (loops->exits == NULL);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return loops;

  /* The root loop node contains all basic-blocks.  */
  loops->tree_root->num_nodes = n_basic_blocks;

  /* Compute depth first search order of the CFG so that outer
     natural loops will be found before inner natural loops.  */
  rc_order = XNEWVEC (int, n_basic_blocks);
  pre_and_rev_post_order_compute (NULL, rc_order, false);

  /* Gather all loop headers in reverse completion order and allocate
     loop structures for loops that are not already present.  */
  larray.create (loops->larray->length ());
  for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
    {
      basic_block header = BASIC_BLOCK (rc_order[b]);
      if (bb_loop_header_p (header))
	{
	  struct loop *loop;

	  /* The current active loop tree has valid loop-fathers for
	     header blocks.  */
	  if (!from_scratch
	      && header->loop_father->header == header)
	    {
	      loop = header->loop_father;
	      /* If we found an existing loop remove it from the
		 loop tree.  It is going to be inserted again
		 below.  */
	      flow_loop_tree_node_remove (loop);
	    }
	  else
	    {
	      /* Otherwise allocate a new loop structure for the loop.  */
	      loop = alloc_loop ();
	      /* ???  We could re-use unused loop slots here.  */
	      loop->num = loops->larray->length ();
	      vec_safe_push (loops->larray, loop);
	      loop->header = header;

	      if (!from_scratch
		  && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "flow_loops_find: discovered new "
			 "loop %d with header %d\n",
			 loop->num, header->index);
	    }
	  /* Reset latch, we recompute it below.  */
	  loop->latch = NULL;
	  larray.safe_push (loop);
	}

      /* Make blocks part of the loop root node at start.  */
      header->loop_father = loops->tree_root;
    }

  free (rc_order);

  /* Now iterate over the loops found, insert them into the loop tree
     and assign basic-block ownership.  Reverse completion order means
     outer loops are processed first, so header->loop_father is already
     the correct parent when an inner loop is inserted.  */
  for (i = 0; i < larray.length (); ++i)
    {
      struct loop *loop = larray[i];
      basic_block header = loop->header;
      edge_iterator ei;
      edge e;

      flow_loop_tree_node_add (header->loop_father, loop);
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

      /* Look for the latch for this header block, if it has just a
	 single one.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  basic_block latch = e->src;

	  if (flow_bb_inside_loop_p (loop, latch))
	    {
	      if (loop->latch != NULL)
		{
		  /* More than one latch edge.  */
		  loop->latch = NULL;
		  break;
		}
	      loop->latch = latch;
	    }
	}
    }

  larray.release ();

  return loops;
}
519
520 /* Ratio of frequencies of edges so that one of more latch edges is
521 considered to belong to inner loop with same header. */
522 #define HEAVY_EDGE_RATIO 8
523
524 /* Minimum number of samples for that we apply
525 find_subloop_latch_edge_by_profile heuristics. */
526 #define HEAVY_EDGE_MIN_SAMPLES 10
527
/* If the profile info is available, finds an edge in LATCHES that much more
   frequent than the remaining edges.  Returns such an edge, or NULL if we do
   not find one.

   We do not use guessed profile here, only the measured one.  The guessed
   profile is usually too flat and unreliable for this (and it is mostly based
   on the loop structure of the program, so it does not make much sense to
   derive the loop structure from it).  */

static edge
find_subloop_latch_edge_by_profile (vec<edge> latches)
{
  unsigned i;
  edge e, me = NULL;
  gcov_type mcount = 0, tcount = 0;

  /* Find the most frequent edge (ME/MCOUNT) and the total count.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    {
      if (e->count > mcount)
	{
	  me = e;
	  mcount = e->count;
	}
      tcount += e->count;
    }

  /* Reject too-small samples, and require the remaining edges together
     to be at most 1/HEAVY_EDGE_RATIO of the total.  */
  if (tcount < HEAVY_EDGE_MIN_SAMPLES
      || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
    return NULL;

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using profile information.\n",
	     me->src->index, me->dest->index);
  return me;
}
564
/* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
   on the structure of induction variables.  Returns this edge, or NULL if we
   do not find any.

   We are quite conservative, and look just for an obvious simple innermost
   loop (which is the case where we would lose the most performance by not
   disambiguating the loop).  More precisely, we look for the following
   situation: The source of the chosen latch edge dominates sources of all
   the other latch edges.  Additionally, the header does not contain a phi node
   such that the argument from the chosen edge is equal to the argument from
   another edge.  */

static edge
find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
  edge e, latch = latches[0];
  unsigned i;
  gimple phi;
  gimple_stmt_iterator psi;
  tree lop;
  basic_block bb;

  /* Find the candidate for the latch edge: the one whose source is
     dominated by no other latch source.  */
  for (i = 1; latches.iterate (i, &e); i++)
    if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
      latch = e;

  /* Verify that it dominates all the latch edges.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
      return NULL;

  /* Check for a phi node that would deny that this is a latch edge of
     a subloop.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);

      /* Ignore the values that are not changed inside the subloop.  */
      if (TREE_CODE (lop) != SSA_NAME
	  || SSA_NAME_DEF_STMT (lop) == phi)
	continue;
      bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
      if (!bb || !flow_bb_inside_loop_p (loop, bb))
	continue;

      /* If another latch edge feeds the same value into this phi,
	 LATCH cannot be the sole latch of a subloop.  */
      FOR_EACH_VEC_ELT (latches, i, e)
	if (e != latch
	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
	  return NULL;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using iv structure.\n",
	     latch->src->index, latch->dest->index);
  return latch;
}
624
/* If we can determine that one of the several latch edges of LOOP behaves
   as a latch edge of a separate subloop, returns this edge.  Otherwise
   returns NULL.  */

static edge
find_subloop_latch_edge (struct loop *loop)
{
  vec<edge> latches = get_loop_latch_edges (loop);
  edge latch = NULL;

  /* Only ambiguous (multi-latch) headers need the heuristics.  */
  if (latches.length () > 1)
    {
      /* Try the profile-based heuristic first, then the IV-based one.  */
      latch = find_subloop_latch_edge_by_profile (latches);

      if (!latch
	  /* We consider ivs to guess the latch edge only in SSA.  Perhaps we
	     should use cfghook for this, but it is hard to imagine it would
	     be useful elsewhere.  */
	  && current_ir_type () == IR_GIMPLE)
	latch = find_subloop_latch_edge_by_ivs (loop, latches);
    }

  latches.release ();
  return latch;
}
650
/* Callback for make_forwarder_block.  Returns true if the edge E is marked
   in the set MFB_REIS_SET.  The set is populated by the caller just before
   invoking make_forwarder_block and destroyed right after.  */

static struct pointer_set_t *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
  return pointer_set_contains (mfb_reis_set, e);
}
660
/* Creates a subloop of LOOP with latch edge LATCH.  The header of LOOP
   is split; the non-latch predecessor edges are redirected to the new
   forwarder block, which becomes the new header of LOOP, while the old
   header becomes the header of the new subloop.  */

static void
form_subloop (struct loop *loop, edge latch)
{
  edge_iterator ei;
  edge e, new_entry;
  struct loop *new_loop;

  /* Mark every header predecessor except LATCH for redirection.  */
  mfb_reis_set = pointer_set_create ();
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e != latch)
	pointer_set_insert (mfb_reis_set, e);
    }
  new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
  pointer_set_destroy (mfb_reis_set);

  loop->header = new_entry->src;

  /* Find the blocks and subloops that belong to the new loop, and add it to
     the appropriate place in the loop tree.  */
  new_loop = alloc_loop ();
  new_loop->header = new_entry->dest;
  new_loop->latch = latch->src;
  add_loop (new_loop, loop);
}
689
/* Make all the latch edges of LOOP to go to a single forwarder block --
   a new latch of LOOP.  A no-op rewrite when the loop already has a
   single latch edge: its source is simply recorded.  */

static void
merge_latch_edges (struct loop *loop)
{
  vec<edge> latches = get_loop_latch_edges (loop);
  edge latch, e;
  unsigned i;

  gcc_assert (latches.length () > 0);

  if (latches.length () == 1)
    loop->latch = latches[0]->src;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);

      /* Redirect all latch edges into a fresh forwarder block that
	 becomes the unique latch.  */
      mfb_reis_set = pointer_set_create ();
      FOR_EACH_VEC_ELT (latches, i, e)
	pointer_set_insert (mfb_reis_set, e);
      latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
      pointer_set_destroy (mfb_reis_set);

      /* The forwarder is the new latch; its destination (the split-off
	 part of the old header) is the new header.  */
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  latches.release ();
}
722
/* LOOP may have several latch edges.  Transform it into (possibly several)
   loops with single latch edge.  */

static void
disambiguate_multiple_latches (struct loop *loop)
{
  edge e;

  /* We eliminate the multiple latches by splitting the header to the forwarder
     block F and the rest R, and redirecting the edges.  There are two cases:

     1) If there is a latch edge E that corresponds to a subloop (we guess
        that based on profile -- if it is taken much more often than the
	remaining edges; and on trees, using the information about induction
	variables of the loops), we redirect E to R, all the remaining edges to
	F, then rescan the loops and try again for the outer loop.
     2) If there is no such edge, we redirect all latch edges to F, and the
	entry edges to R, thus making F the single latch of the loop.  */

  if (dump_file)
    fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
	     loop->num);

  /* During latch merging, we may need to redirect the entry edges to a new
     block.  This would cause problems if the entry edge was the one from the
     entry block.  To avoid having to handle this case specially, split
     such entry edge.  */
  e = find_edge (ENTRY_BLOCK_PTR, loop->header);
  if (e)
    split_edge (e);

  /* Peel off subloops one at a time until no latch edge looks like a
     subloop latch anymore.  */
  while (1)
    {
      e = find_subloop_latch_edge (loop);
      if (!e)
	break;

      form_subloop (loop, e);
    }

  /* Merge whatever latch edges remain into a single one.  */
  merge_latch_edges (loop);
}
765
766 /* Split loops with multiple latch edges. */
767
768 void
769 disambiguate_loops_with_multiple_latches (void)
770 {
771 loop_iterator li;
772 struct loop *loop;
773
774 FOR_EACH_LOOP (li, loop, 0)
775 {
776 if (!loop->latch)
777 disambiguate_multiple_latches (loop);
778 }
779 }
780
781 /* Return nonzero if basic block BB belongs to LOOP. */
782 bool
783 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
784 {
785 struct loop *source_loop;
786
787 if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
788 return 0;
789
790 source_loop = bb->loop_father;
791 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
792 }
793
794 /* Enumeration predicate for get_loop_body_with_size. */
795 static bool
796 glb_enum_p (const_basic_block bb, const void *glb_loop)
797 {
798 const struct loop *const loop = (const struct loop *) glb_loop;
799 return (bb != loop->header
800 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
801 }
802
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  LOOP cannot be the fake
   loop tree root, and its size must be at most MAX_SIZE.  The blocks
   in the LOOP body are stored to BODY, and the size of the LOOP is
   returned.  */

unsigned
get_loop_body_with_size (const struct loop *loop, basic_block *body,
			 unsigned max_size)
{
  /* dfs_enumerate_from seeds with the header and expands via
     glb_enum_p, which restricts to header-dominated blocks.  */
  return dfs_enumerate_from (loop->header, 1, glb_enum_p,
			     body, max_size, loop);
}
817
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  The caller frees the
   returned array.  */

basic_block *
get_loop_body (const struct loop *loop)
{
  basic_block *body, bb;
  unsigned tv = 0;

  gcc_assert (loop->num_nodes);

  body = XNEWVEC (basic_block, loop->num_nodes);

  if (loop->latch == EXIT_BLOCK_PTR)
    {
      /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
	 special-case the fake loop that contains the whole function.  */
      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
      body[tv++] = loop->header;
      body[tv++] = EXIT_BLOCK_PTR;
      FOR_EACH_BB (bb)
	body[tv++] = bb;
    }
  else
    tv = get_loop_body_with_size (loop, body, loop->num_nodes);

  /* The enumeration must account for every node of the loop.  */
  gcc_assert (tv == loop->num_nodes);
  return body;
}
848
/* Fills dominance descendants inside LOOP of the basic block BB into
   array TOVISIT from index *TV.  A son that dominates the loop latch
   is visited last, so every block dominated by it comes after it in
   the output.  */

static void
fill_sons_in_loop (const struct loop *loop, basic_block bb,
		   basic_block *tovisit, int *tv)
{
  basic_block son, postpone = NULL;

  tovisit[(*tv)++] = bb;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
	continue;

      /* Delay the son on the path to the latch until all other
	 sons have been emitted.  */
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
	{
	  postpone = son;
	  continue;
	}
      fill_sons_in_loop (loop, son, tovisit, tv);
    }

  if (postpone)
    fill_sons_in_loop (loop, postpone, tovisit, tv);
}
877
878 /* Gets body of a LOOP (that must be different from the outermost loop)
879 sorted by dominance relation. Additionally, if a basic block s dominates
880 the latch, then only blocks dominated by s are be after it. */
881
882 basic_block *
883 get_loop_body_in_dom_order (const struct loop *loop)
884 {
885 basic_block *tovisit;
886 int tv;
887
888 gcc_assert (loop->num_nodes);
889
890 tovisit = XNEWVEC (basic_block, loop->num_nodes);
891
892 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
893
894 tv = 0;
895 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
896
897 gcc_assert (tv == (int) loop->num_nodes);
898
899 return tovisit;
900 }
901
902 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
903
904 basic_block *
905 get_loop_body_in_custom_order (const struct loop *loop,
906 int (*bb_comparator) (const void *, const void *))
907 {
908 basic_block *bbs = get_loop_body (loop);
909
910 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
911
912 return bbs;
913 }
914
/* Get body of a LOOP in breadth first sort order.  The caller frees
   the returned array.  */

basic_block *
get_loop_body_in_bfs_order (const struct loop *loop)
{
  basic_block *blocks;
  basic_block bb;
  bitmap visited;
  unsigned int i = 0;	/* Next free slot in BLOCKS.  */
  unsigned int vc = 1;	/* Next block whose successors to expand.  */

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  blocks = XNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);

  /* BLOCKS doubles as the BFS queue: blocks are appended when first
     visited and expanded in order via VC.  */
  bb = loop->header;
  while (i < loop->num_nodes)
    {
      edge e;
      edge_iterator ei;

      /* bitmap_set_bit returns true only on the first insertion.  */
      if (bitmap_set_bit (visited, bb->index))
	/* This basic block is now visited */
	blocks[i++] = bb;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (flow_bb_inside_loop_p (loop, e->dest))
	    {
	      if (bitmap_set_bit (visited, e->dest->index))
		blocks[i++] = e->dest;
	    }
	}

      /* The queue must never run dry before all nodes are found.  */
      gcc_assert (i >= vc);

      bb = blocks[vc++];
    }

  BITMAP_FREE (visited);
  return blocks;
}
959
960 /* Hash function for struct loop_exit. */
961
962 static hashval_t
963 loop_exit_hash (const void *ex)
964 {
965 const struct loop_exit *const exit = (const struct loop_exit *) ex;
966
967 return htab_hash_pointer (exit->e);
968 }
969
970 /* Equality function for struct loop_exit. Compares with edge. */
971
972 static int
973 loop_exit_eq (const void *ex, const void *e)
974 {
975 const struct loop_exit *const exit = (const struct loop_exit *) ex;
976
977 return exit->e == e;
978 }
979
/* Frees the list of loop exit descriptions EX.  The records are linked
   through next_e (same edge, different loops); each is also unlinked
   from its per-loop circular exit list before being freed.  */

static void
loop_exit_free (void *ex)
{
  struct loop_exit *exit = (struct loop_exit *) ex, *next;

  for (; exit; exit = next)
    {
      next = exit->next_e;

      /* Unlink from the per-loop doubly-linked circular list.  */
      exit->next->prev = exit->prev;
      exit->prev->next = exit->next;

      ggc_free (exit);
    }
}
997
998 /* Returns the list of records for E as an exit of a loop. */
999
1000 static struct loop_exit *
1001 get_exit_descriptions (edge e)
1002 {
1003 return (struct loop_exit *) htab_find_with_hash (current_loops->exits, e,
1004 htab_hash_pointer (e));
1005 }
1006
/* Updates the lists of loop exits in that E appears.
   If REMOVED is true, E is being removed, and we
   just remove it from the lists of exits.
   If NEW_EDGE is true and E is not a loop exit, we
   do not try to remove it from loop exit lists.  */

void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
  void **slot;
  struct loop_exit *exits = NULL, *exit;
  struct loop *aloop, *cloop;

  /* Exit lists are only maintained in this loop state.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;

  if (!removed
      && e->src->loop_father != NULL
      && e->dest->loop_father != NULL
      && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
    {
      /* E leaves every loop between its source's loop and the common
	 ancestor of source and destination; record one exit per such
	 loop.  */
      cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
      for (aloop = e->src->loop_father;
	   aloop != cloop;
	   aloop = loop_outer (aloop))
	{
	  exit = ggc_alloc_loop_exit ();
	  exit->e = e;

	  /* Link into ALOOP's circular exit list, right after the
	     dummy head.  */
	  exit->next = aloop->exits->next;
	  exit->prev = aloop->exits;
	  exit->next->prev = exit;
	  exit->prev->next = exit;

	  /* Chain the records for this edge together via next_e.  */
	  exit->next_e = exits;
	  exits = exit;
	}
    }

  /* A brand-new non-exit edge has nothing to clean up.  */
  if (!exits && new_edge)
    return;

  slot = htab_find_slot_with_hash (current_loops->exits, e,
				   htab_hash_pointer (e),
				   exits ? INSERT : NO_INSERT);
  if (!slot)
    return;

  if (exits)
    {
      /* Replace any stale records for E with the fresh list.  */
      if (*slot)
	loop_exit_free (*slot);
      *slot = exits;
    }
  else
    htab_clear_slot (current_loops->exits, slot);
}
1064
/* For each loop, record list of exit edges, and start maintaining these
   lists.  Idempotent: does nothing if exits are already recorded.  */

void
record_loop_exits (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  if (!current_loops)
    return;

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;
  /* Set the flag first so rescan_loop_exit below actually records.  */
  loops_state_set (LOOPS_HAVE_RECORDED_EXITS);

  gcc_assert (current_loops->exits == NULL);
  /* Hash table keyed by edge pointer; values are loop_exit chains.  */
  current_loops->exits = htab_create_ggc (2 * number_of_loops (cfun),
					  loop_exit_hash, loop_exit_eq,
					  loop_exit_free);

  /* Scan every edge once and record it where it exits a loop.  */
  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  rescan_loop_exit (e, true, false);
	}
    }
}
1095
1096 /* Dumps information about the exit in *SLOT to FILE.
1097 Callback for htab_traverse. */
1098
1099 static int
1100 dump_recorded_exit (void **slot, void *file)
1101 {
1102 struct loop_exit *exit = (struct loop_exit *) *slot;
1103 unsigned n = 0;
1104 edge e = exit->e;
1105
1106 for (; exit != NULL; exit = exit->next_e)
1107 n++;
1108
1109 fprintf ((FILE*) file, "Edge %d->%d exits %u loops\n",
1110 e->src->index, e->dest->index, n);
1111
1112 return 1;
1113 }
1114
/* Dumps the recorded exits of loops to FILE.  A no-op when exit lists
   are not currently maintained.  */

extern void dump_recorded_exits (FILE *);
void
dump_recorded_exits (FILE *file)
{
  if (!current_loops->exits)
    return;
  htab_traverse (current_loops->exits, dump_recorded_exit, file);
}
1125
/* Releases lists of loop exits.  Must only be called while the exit
   lists are being maintained; clears the corresponding state flag.  */

void
release_recorded_exits (void)
{
  gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
  /* The hash table's free callback (loop_exit_free) releases the
     individual exit records.  */
  htab_delete (current_loops->exits);
  current_loops->exits = NULL;
  loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
}
1136
/* Returns the list of the exit edges of a LOOP.  The caller releases
   the returned vector.  LOOP must not be the fake root loop.  */

vec<edge>
get_loop_exit_edges (const struct loop *loop)
{
  vec<edge> edges = vNULL;
  edge e;
  unsigned i;
  basic_block *body;
  edge_iterator ei;
  struct loop_exit *exit;

  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  /* If we maintain the lists of exits, use them.  Otherwise we must
     scan the body of the loop.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      /* The list is circular with a dummy head whose e field is NULL,
	 which terminates the walk.  */
      for (exit = loop->exits->next; exit->e; exit = exit->next)
	edges.safe_push (exit->e);
    }
  else
    {
      body = get_loop_body (loop);
      for (i = 0; i < loop->num_nodes; i++)
	FOR_EACH_EDGE (e, ei, body[i]->succs)
	  {
	    if (!flow_bb_inside_loop_p (loop, e->dest))
	      edges.safe_push (e);
	  }
      free (body);
    }

  return edges;
}
1172
1173 /* Counts the number of conditional branches inside LOOP. */
1174
1175 unsigned
1176 num_loop_branches (const struct loop *loop)
1177 {
1178 unsigned i, n;
1179 basic_block * body;
1180
1181 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
1182
1183 body = get_loop_body (loop);
1184 n = 0;
1185 for (i = 0; i < loop->num_nodes; i++)
1186 if (EDGE_COUNT (body[i]->succs) >= 2)
1187 n++;
1188 free (body);
1189
1190 return n;
1191 }
1192
1193 /* Adds basic block BB to LOOP. */
1194 void
1195 add_bb_to_loop (basic_block bb, struct loop *loop)
1196 {
1197 unsigned i;
1198 loop_p ploop;
1199 edge_iterator ei;
1200 edge e;
1201
1202 gcc_assert (bb->loop_father == NULL);
1203 bb->loop_father = loop;
1204 loop->num_nodes++;
1205 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1206 ploop->num_nodes++;
1207
1208 FOR_EACH_EDGE (e, ei, bb->succs)
1209 {
1210 rescan_loop_exit (e, true, false);
1211 }
1212 FOR_EACH_EDGE (e, ei, bb->preds)
1213 {
1214 rescan_loop_exit (e, true, false);
1215 }
1216 }
1217
1218 /* Remove basic block BB from loops. */
1219 void
1220 remove_bb_from_loops (basic_block bb)
1221 {
1222 unsigned i;
1223 struct loop *loop = bb->loop_father;
1224 loop_p ploop;
1225 edge_iterator ei;
1226 edge e;
1227
1228 gcc_assert (loop != NULL);
1229 loop->num_nodes--;
1230 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1231 ploop->num_nodes--;
1232 bb->loop_father = NULL;
1233
1234 FOR_EACH_EDGE (e, ei, bb->succs)
1235 {
1236 rescan_loop_exit (e, false, true);
1237 }
1238 FOR_EACH_EDGE (e, ei, bb->preds)
1239 {
1240 rescan_loop_exit (e, false, true);
1241 }
1242 }
1243
1244 /* Finds nearest common ancestor in loop tree for given loops. */
1245 struct loop *
1246 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1247 {
1248 unsigned sdepth, ddepth;
1249
1250 if (!loop_s) return loop_d;
1251 if (!loop_d) return loop_s;
1252
1253 sdepth = loop_depth (loop_s);
1254 ddepth = loop_depth (loop_d);
1255
1256 if (sdepth < ddepth)
1257 loop_d = (*loop_d->superloops)[sdepth];
1258 else if (sdepth > ddepth)
1259 loop_s = (*loop_s->superloops)[ddepth];
1260
1261 while (loop_s != loop_d)
1262 {
1263 loop_s = loop_outer (loop_s);
1264 loop_d = loop_outer (loop_d);
1265 }
1266 return loop_s;
1267 }
1268
/* Removes LOOP from structures and frees its data.  Callers are expected
   to have reparented LOOP's blocks first (see cancel_loop) — this only
   unlinks the tree node, clears the loops-array slot and frees the
   descriptor.  */

void
delete_loop (struct loop *loop)
{
  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  */
  (*current_loops->larray)[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}
1283
1284 /* Cancels the LOOP; it must be innermost one. */
1285
1286 static void
1287 cancel_loop (struct loop *loop)
1288 {
1289 basic_block *bbs;
1290 unsigned i;
1291 struct loop *outer = loop_outer (loop);
1292
1293 gcc_assert (!loop->inner);
1294
1295 /* Move blocks up one level (they should be removed as soon as possible). */
1296 bbs = get_loop_body (loop);
1297 for (i = 0; i < loop->num_nodes; i++)
1298 bbs[i]->loop_father = outer;
1299
1300 free (bbs);
1301 delete_loop (loop);
1302 }
1303
/* Cancels LOOP and all its subloops.  */
void
cancel_loop_tree (struct loop *loop)
{
  /* Cancel the children first so that LOOP is innermost by the time
     cancel_loop (which asserts !loop->inner) runs on it.  */
  while (loop->inner)
    cancel_loop_tree (loop->inner);
  cancel_loop (loop);
}
1312
/* Checks that information about loops is correct
     -- sizes of loops are all right
     -- results of get_loop_body really belong to the loop
     -- loop header have just single entry edge and single latch edge
     -- loop latches have only single successor that is header of their loop
     -- irreducible loops are correctly marked
     -- the cached loop depth and loop father of each bb is correct
   Accumulates problems in ERR and aborts via gcc_assert at the end if any
   were found.  */
DEBUG_FUNCTION void
verify_loop_structure (void)
{
  unsigned *sizes, i, j;
  sbitmap irreds;
  basic_block bb, *bbs;
  struct loop *loop;
  int err = 0;
  edge e;
  unsigned num = number_of_loops (cfun);
  loop_iterator li;
  struct loop_exit *exit, *mexit;
  bool dom_available = dom_info_available_p (CDI_DOMINATORS);
  sbitmap visited;

  if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    {
      error ("loop verification on loop tree that needs fixup");
      err = 1;
    }

  /* We need up-to-date dominators, compute or verify them.  */
  if (!dom_available)
    calculate_dominance_info (CDI_DOMINATORS);
  else
    verify_dominators (CDI_DOMINATORS);

  /* Check the headers: every block that looks like a loop header in the
     CFG must be the header of its loop-tree node, and vice versa.  */
  FOR_EACH_BB (bb)
    if (bb_loop_header_p (bb))
      {
	if (bb->loop_father->header == NULL)
	  {
	    error ("loop with header %d marked for removal", bb->index);
	    err = 1;
	  }
	else if (bb->loop_father->header != bb)
	  {
	    error ("loop with header %d not in loop tree", bb->index);
	    err = 1;
	  }
      }
    else if (bb->loop_father->header == bb)
      {
	error ("non-loop with header %d not marked for removal", bb->index);
	err = 1;
      }

  /* Check the recorded loop father and sizes of loops.  Iterating from
     the innermost loops outward, VISITED marks blocks already claimed by
     an inner loop, so each block's father is checked exactly once against
     the innermost loop containing it.  */
  visited = sbitmap_alloc (last_basic_block);
  bitmap_clear (visited);
  bbs = XNEWVEC (basic_block, n_basic_blocks);
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      unsigned n;

      if (loop->header == NULL)
	{
	  error ("removed loop %d in loop tree", loop->num);
	  err = 1;
	  continue;
	}

      n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
      if (loop->num_nodes != n)
	{
	  error ("size of loop %d should be %d, not %d",
		 loop->num, n, loop->num_nodes);
	  err = 1;
	}

      for (j = 0; j < n; j++)
	{
	  bb = bbs[j];

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      error ("bb %d does not belong to loop %d",
		     bb->index, loop->num);
	      err = 1;
	    }

	  /* Ignore this block if it is in an inner loop.  */
	  if (bitmap_bit_p (visited, bb->index))
	    continue;
	  bitmap_set_bit (visited, bb->index);

	  if (bb->loop_father != loop)
	    {
	      error ("bb %d has father loop %d, should be loop %d",
		     bb->index, bb->loop_father->num, loop->num);
	      err = 1;
	    }
	}
    }
  free (bbs);
  sbitmap_free (visited);

  /* Check headers and latches.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      i = loop->num;
      if (loop->header == NULL)
	continue;
      if (!bb_loop_header_p (loop->header))
	{
	  error ("loop %d%'s header is not a loop header", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
	  && EDGE_COUNT (loop->header->preds) != 2)
	{
	  /* With preheaders the header has exactly the latch edge and
	     the preheader edge.  */
	  error ("loop %d%'s header does not have exactly 2 entries", i);
	  err = 1;
	}
      if (loop->latch)
	{
	  if (!find_edge (loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch does not have an edge to its header", i);
	      err = 1;
	    }
	  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch is not dominated by its header", i);
	      err = 1;
	    }
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
	{
	  /* NOTE(review): this relies on loop->latch being non-NULL
	     whenever LOOPS_HAVE_SIMPLE_LATCHES is set.  */
	  if (!single_succ_p (loop->latch))
	    {
	      error ("loop %d%'s latch does not have exactly 1 successor", i);
	      err = 1;
	    }
	  if (single_succ (loop->latch) != loop->header)
	    {
	      error ("loop %d%'s latch does not have header as successor", i);
	      err = 1;
	    }
	  if (loop->latch->loop_father != loop)
	    {
	      error ("loop %d%'s latch does not belong directly to it", i);
	      err = 1;
	    }
	}
      if (loop->header->loop_father != loop)
	{
	  error ("loop %d%'s header does not belong directly to it", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
	  && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
	{
	  error ("loop %d%'s latch is marked as part of irreducible region", i);
	  err = 1;
	}
    }

  /* Check irreducible loops.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    {
      /* Record old info.  Edge membership is stashed in a scratch flag
	 bit (EDGE_ALL_FLAGS + 1) since there is no spare sbitmap for
	 edges.  */
      irreds = sbitmap_alloc (last_basic_block);
      FOR_EACH_BB (bb)
	{
	  edge_iterator ei;
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    bitmap_set_bit (irreds, bb->index);
	  else
	    bitmap_clear_bit (irreds, bb->index);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	      e->flags |= EDGE_ALL_FLAGS + 1;
	}

      /* Recount it.  */
      mark_irreducible_loops ();

      /* Compare.  */
      FOR_EACH_BB (bb)
	{
	  edge_iterator ei;

	  if ((bb->flags & BB_IRREDUCIBLE_LOOP)
	      && !bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should be marked irreducible", bb->index);
	      err = 1;
	    }
	  else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
	      && bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should not be marked irreducible", bb->index);
	      err = 1;
	    }
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
		  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
		       && (e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should not be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      /* Clear the scratch bit again.  */
	      e->flags &= ~(EDGE_ALL_FLAGS + 1);
	    }
	}
      free (irreds);
    }

  /* Check the recorded loop exits.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      if (!loop->exits || loop->exits->e != NULL)
	{
	  error ("corrupted head of the exits list of loop %d",
		 loop->num);
	  err = 1;
	}
      else
	{
	  /* Check that the list forms a cycle, and all elements except
	     for the head are nonnull.  MEXIT advances at half speed so
	     a non-cyclic (corrupted) list is detected when the fast
	     pointer laps it.  */
	  for (mexit = loop->exits, exit = mexit->next, i = 0;
	       exit->e && exit != mexit;
	       exit = exit->next)
	    {
	      if (i++ & 1)
		mexit = mexit->next;
	    }

	  if (exit != loop->exits)
	    {
	      error ("corrupted exits list of loop %d", loop->num);
	      err = 1;
	    }
	}

      if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
	{
	  /* Without recorded exits each list must be the empty cycle.  */
	  if (loop->exits->next != loop->exits)
	    {
	      error ("nonempty exits list of loop %d, but exits are not recorded",
		     loop->num);
	      err = 1;
	    }
	}
    }

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      unsigned n_exits = 0, eloops;

      /* NOTE(review): XCNEWVEC already zero-allocates, so this memset
	 appears redundant — confirm before removing.  */
      sizes = XCNEWVEC (unsigned, num);
      memset (sizes, 0, sizeof (unsigned) * num);
      FOR_EACH_BB (bb)
	{
	  edge_iterator ei;
	  if (bb->loop_father == current_loops->tree_root)
	    continue;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
		continue;

	      n_exits++;
	      exit = get_exit_descriptions (e);
	      if (!exit)
		{
		  error ("exit %d->%d not recorded",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      /* ELOOPS counts the recorded descriptions; the walk below
	         decrements it once per loop actually exited, so any
	         nonzero remainder signals a mismatch.  */
	      eloops = 0;
	      for (; exit; exit = exit->next_e)
		eloops++;

	      for (loop = bb->loop_father;
		   loop != e->dest->loop_father
		   /* When a loop exit is also an entry edge which
		      can happen when avoiding CFG manipulations
		      then the last loop exited is the outer loop
		      of the loop entered.  */
		   && loop != loop_outer (e->dest->loop_father);
		   loop = loop_outer (loop))
		{
		  eloops--;
		  sizes[loop->num]++;
		}

	      if (eloops != 0)
		{
		  error ("wrong list of exited loops for edge %d->%d",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	    }
	}

      if (n_exits != htab_elements (current_loops->exits))
	{
	  error ("too many loop exits recorded");
	  err = 1;
	}

      /* Cross-check each loop's exit-list length against the per-loop
	 counts gathered above.  */
      FOR_EACH_LOOP (li, loop, 0)
	{
	  eloops = 0;
	  for (exit = loop->exits->next; exit->e; exit = exit->next)
	    eloops++;
	  if (eloops != sizes[loop->num])
	    {
	      error ("%d exits recorded for loop %d (having %d exits)",
		     eloops, loop->num, sizes[loop->num]);
	      err = 1;
	    }
	}

      free (sizes);
    }

  gcc_assert (!err);

  /* Do not leave dominators computed if they were not available on
     entry.  */
  if (!dom_available)
    free_dominance_info (CDI_DOMINATORS);
}
1655
1656 /* Returns latch edge of LOOP. */
1657 edge
1658 loop_latch_edge (const struct loop *loop)
1659 {
1660 return find_edge (loop->latch, loop->header);
1661 }
1662
1663 /* Returns preheader edge of LOOP. */
1664 edge
1665 loop_preheader_edge (const struct loop *loop)
1666 {
1667 edge e;
1668 edge_iterator ei;
1669
1670 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1671
1672 FOR_EACH_EDGE (e, ei, loop->header->preds)
1673 if (e->src != loop->latch)
1674 break;
1675
1676 return e;
1677 }
1678
1679 /* Returns true if E is an exit of LOOP. */
1680
1681 bool
1682 loop_exit_edge_p (const struct loop *loop, const_edge e)
1683 {
1684 return (flow_bb_inside_loop_p (loop, e->src)
1685 && !flow_bb_inside_loop_p (loop, e->dest));
1686 }
1687
1688 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1689 or more than one exit. If loops do not have the exits recorded, NULL
1690 is returned always. */
1691
1692 edge
1693 single_exit (const struct loop *loop)
1694 {
1695 struct loop_exit *exit = loop->exits->next;
1696
1697 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1698 return NULL;
1699
1700 if (exit->e && exit->next == loop->exits)
1701 return exit->e;
1702 else
1703 return NULL;
1704 }
1705
1706 /* Returns true when BB has an incoming edge exiting LOOP. */
1707
1708 bool
1709 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1710 {
1711 edge e;
1712 edge_iterator ei;
1713
1714 FOR_EACH_EDGE (e, ei, bb->preds)
1715 if (loop_exit_edge_p (loop, e))
1716 return true;
1717
1718 return false;
1719 }
1720
1721 /* Returns true when BB has an outgoing edge exiting LOOP. */
1722
1723 bool
1724 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1725 {
1726 edge e;
1727 edge_iterator ei;
1728
1729 FOR_EACH_EDGE (e, ei, bb->succs)
1730 if (loop_exit_edge_p (loop, e))
1731 return true;
1732
1733 return false;
1734 }
1735
1736 /* Return location corresponding to the loop control condition if possible. */
1737
1738 location_t
1739 get_loop_location (struct loop *loop)
1740 {
1741 rtx insn = NULL;
1742 struct niter_desc *desc = NULL;
1743 edge exit;
1744
1745 /* For a for or while loop, we would like to return the location
1746 of the for or while statement, if possible. To do this, look
1747 for the branch guarding the loop back-edge. */
1748
1749 /* If this is a simple loop with an in_edge, then the loop control
1750 branch is typically at the end of its source. */
1751 desc = get_simple_loop_desc (loop);
1752 if (desc->in_edge)
1753 {
1754 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1755 {
1756 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1757 return INSN_LOCATION (insn);
1758 }
1759 }
1760 /* If loop has a single exit, then the loop control branch
1761 must be at the end of its source. */
1762 if ((exit = single_exit (loop)))
1763 {
1764 FOR_BB_INSNS_REVERSE (exit->src, insn)
1765 {
1766 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1767 return INSN_LOCATION (insn);
1768 }
1769 }
1770 /* Next check the latch, to see if it is non-empty. */
1771 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1772 {
1773 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1774 return INSN_LOCATION (insn);
1775 }
1776 /* Finally, if none of the above identifies the loop control branch,
1777 return the first location in the loop header. */
1778 FOR_BB_INSNS (loop->header, insn)
1779 {
1780 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1781 return INSN_LOCATION (insn);
1782 }
1783 /* If all else fails, simply return the current function location. */
1784 return DECL_SOURCE_LOCATION (current_function_decl);
1785 }
1786
/* Records that every statement in LOOP is executed I_BOUND times.
   REALISTIC is true if I_BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   I_BOUND times.  */

void
record_niter_bound (struct loop *loop, const widest_int &i_bound,
		    bool realistic, bool upper)
{
  /* Update the bounds only when there is no previous estimation, or when the
     current estimation is smaller.  Comparisons are unsigned (wi::ltu_p);
     iteration counts are never negative.  */
  if (upper
      && (!loop->any_upper_bound
	  || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = i_bound;
    }
  /* Likewise for the realistic estimate.  */
  if (realistic
      && (!loop->any_estimate
	  || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
    {
      loop->any_estimate = true;
      loop->nb_iterations_estimate = i_bound;
    }

  /* If an upper bound is smaller than the realistic estimate of the
     number of iterations, use the upper bound instead.  */
  if (loop->any_upper_bound
      && loop->any_estimate
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_estimate))
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
1821
1822 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1823 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1824 on the number of iterations of LOOP could not be derived, returns -1. */
1825
1826 HOST_WIDE_INT
1827 get_estimated_loop_iterations_int (struct loop *loop)
1828 {
1829 widest_int nit;
1830 HOST_WIDE_INT hwi_nit;
1831
1832 if (!get_estimated_loop_iterations (loop, &nit))
1833 return -1;
1834
1835 if (!wi::fits_shwi_p (nit))
1836 return -1;
1837 hwi_nit = nit.to_shwi ();
1838
1839 return hwi_nit < 0 ? -1 : hwi_nit;
1840 }
1841
1842 /* Returns an upper bound on the number of executions of statements
1843 in the LOOP. For statements before the loop exit, this exceeds
1844 the number of execution of the latch by one. */
1845
1846 HOST_WIDE_INT
1847 max_stmt_executions_int (struct loop *loop)
1848 {
1849 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1850 HOST_WIDE_INT snit;
1851
1852 if (nit == -1)
1853 return -1;
1854
1855 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1856
1857 /* If the computation overflows, return -1. */
1858 return snit < 0 ? -1 : snit;
1859 }
1860
1861 /* Sets NIT to the estimated number of executions of the latch of the
1862 LOOP. If we have no reliable estimate, the function returns false, otherwise
1863 returns true. */
1864
1865 bool
1866 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1867 {
1868 /* Even if the bound is not recorded, possibly we can derrive one from
1869 profile. */
1870 if (!loop->any_estimate)
1871 {
1872 if (loop->header->count)
1873 {
1874 *nit = gcov_type_to_wide_int
1875 (expected_loop_iterations_unbounded (loop) + 1);
1876 return true;
1877 }
1878 return false;
1879 }
1880
1881 *nit = loop->nb_iterations_estimate;
1882 return true;
1883 }
1884
1885 /* Sets NIT to an upper bound for the maximum number of executions of the
1886 latch of the LOOP. If we have no reliable estimate, the function returns
1887 false, otherwise returns true. */
1888
1889 bool
1890 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1891 {
1892 if (!loop->any_upper_bound)
1893 return false;
1894
1895 *nit = loop->nb_iterations_upper_bound;
1896 return true;
1897 }
1898
1899 /* Similar to get_max_loop_iterations, but returns the estimate only
1900 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1901 on the number of iterations of LOOP could not be derived, returns -1. */
1902
1903 HOST_WIDE_INT
1904 get_max_loop_iterations_int (struct loop *loop)
1905 {
1906 widest_int nit;
1907 HOST_WIDE_INT hwi_nit;
1908
1909 if (!get_max_loop_iterations (loop, &nit))
1910 return -1;
1911
1912 if (!wi::fits_shwi_p (nit))
1913 return -1;
1914 hwi_nit = nit.to_shwi ();
1915
1916 return hwi_nit < 0 ? -1 : hwi_nit;
1917 }
1918
1919 /* Returns the loop depth of the loop BB belongs to. */
1920
1921 int
1922 bb_loop_depth (const_basic_block bb)
1923 {
1924 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1925 }