1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "cfghooks.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "rtl.h"
28 #include "cfganal.h"
29 #include "cfgloop.h"
30 #include "diagnostic-core.h"
31 #include "flags.h"
32 #include "fold-const.h"
33 #include "internal-fn.h"
34 #include "gimple-iterator.h"
35 #include "gimple-ssa.h"
36 #include "dumpfile.h"
37
38 static void flow_loops_cfg_dump (FILE *);
39 \f
40 /* Dump loop related CFG information. */
41
42 static void
43 flow_loops_cfg_dump (FILE *file)
44 {
45 basic_block bb;
46
47 if (!file)
48 return;
49
50 FOR_EACH_BB_FN (bb, cfun)
51 {
52 edge succ;
53 edge_iterator ei;
54
55 fprintf (file, ";; %d succs { ", bb->index);
56 FOR_EACH_EDGE (succ, ei, bb->succs)
57 fprintf (file, "%d ", succ->dest->index);
58 fprintf (file, "}\n");
59 }
60 }
61
62 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
63
64 bool
65 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
66 {
67 unsigned odepth = loop_depth (outer);
68
69 return (loop_depth (loop) > odepth
70 && (*loop->superloops)[odepth] == outer);
71 }
72
    73 /* Returns the superloop of LOOP at depth DEPTH (indexed from zero); if DEPTH
    74    equals the depth of LOOP, LOOP itself is returned.  */
75
76 struct loop *
77 superloop_at_depth (struct loop *loop, unsigned depth)
78 {
79 unsigned ldepth = loop_depth (loop);
80
81 gcc_assert (depth <= ldepth);
82
83 if (depth == ldepth)
84 return loop;
85
86 return (*loop->superloops)[depth];
87 }
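/* For example, superloop_at_depth (loop, 0) yields the root of the loop tree
   (the dummy loop containing the whole function) for any non-root LOOP, and
   superloop_at_depth (loop, loop_depth (loop)) is LOOP itself.  */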
88
89 /* Returns the list of the latch edges of LOOP. */
90
91 static vec<edge>
92 get_loop_latch_edges (const struct loop *loop)
93 {
94 edge_iterator ei;
95 edge e;
96 vec<edge> ret = vNULL;
97
98 FOR_EACH_EDGE (e, ei, loop->header->preds)
99 {
100 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
101 ret.safe_push (e);
102 }
103
104 return ret;
105 }
106
107 /* Dump the loop information specified by LOOP to the stream FILE
   108    using the auxiliary dump callback function LOOP_DUMP_AUX if non-null.  */
109
110 void
111 flow_loop_dump (const struct loop *loop, FILE *file,
112 void (*loop_dump_aux) (const struct loop *, FILE *, int),
113 int verbose)
114 {
115 basic_block *bbs;
116 unsigned i;
117 vec<edge> latches;
118 edge e;
119
120 if (! loop || ! loop->header)
121 return;
122
123 fprintf (file, ";;\n;; Loop %d\n", loop->num);
124
125 fprintf (file, ";; header %d, ", loop->header->index);
126 if (loop->latch)
127 fprintf (file, "latch %d\n", loop->latch->index);
128 else
129 {
130 fprintf (file, "multiple latches:");
131 latches = get_loop_latch_edges (loop);
132 FOR_EACH_VEC_ELT (latches, i, e)
133 fprintf (file, " %d", e->src->index);
134 latches.release ();
135 fprintf (file, "\n");
136 }
137
138 fprintf (file, ";; depth %d, outer %ld\n",
139 loop_depth (loop), (long) (loop_outer (loop)
140 ? loop_outer (loop)->num : -1));
141
142 fprintf (file, ";; nodes:");
143 bbs = get_loop_body (loop);
144 for (i = 0; i < loop->num_nodes; i++)
145 fprintf (file, " %d", bbs[i]->index);
146 free (bbs);
147 fprintf (file, "\n");
148
149 if (loop_dump_aux)
150 loop_dump_aux (loop, file, verbose);
151 }
152
   153 /* Dump information about all loops to the stream FILE,
   154    using the auxiliary dump callback function LOOP_DUMP_AUX if non-null.  */
155
156 void
157 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
158 {
159 struct loop *loop;
160
161 if (!current_loops || ! file)
162 return;
163
164 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
165
166 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
167 {
168 flow_loop_dump (loop, file, loop_dump_aux, verbose);
169 }
170
171 if (verbose)
172 flow_loops_cfg_dump (file);
173 }
174
175 /* Free data allocated for LOOP. */
176
177 void
178 flow_loop_free (struct loop *loop)
179 {
180 struct loop_exit *exit, *next;
181
182 vec_free (loop->superloops);
183
184 /* Break the list of the loop exit records. They will be freed when the
185 corresponding edge is rescanned or removed, and this avoids
186 accessing the (already released) head of the list stored in the
187 loop structure. */
188 for (exit = loop->exits->next; exit != loop->exits; exit = next)
189 {
190 next = exit->next;
191 exit->next = exit;
192 exit->prev = exit;
193 }
194
195 ggc_free (loop->exits);
196 ggc_free (loop);
197 }
198
199 /* Free all the memory allocated for LOOPS. */
200
201 void
202 flow_loops_free (struct loops *loops)
203 {
204 if (loops->larray)
205 {
206 unsigned i;
207 loop_p loop;
208
209 /* Free the loop descriptors. */
210 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
211 {
212 if (!loop)
213 continue;
214
215 flow_loop_free (loop);
216 }
217
218 vec_free (loops->larray);
219 }
220 }
221
222 /* Find the nodes contained within the LOOP with header HEADER.
223 Return the number of nodes within the loop. */
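/* The implementation searches backwards: starting from the latch edges
   (predecessors of HEADER that HEADER dominates), it follows predecessor
   edges until it reaches blocks already assigned to LOOP, setting the
   loop_father of every block it visits along the way.  */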
224
225 int
226 flow_loop_nodes_find (basic_block header, struct loop *loop)
227 {
228 vec<basic_block> stack = vNULL;
229 int num_nodes = 1;
230 edge latch;
231 edge_iterator latch_ei;
232
233 header->loop_father = loop;
234
235 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
236 {
237 if (latch->src->loop_father == loop
238 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
239 continue;
240
241 num_nodes++;
242 stack.safe_push (latch->src);
243 latch->src->loop_father = loop;
244
245 while (!stack.is_empty ())
246 {
247 basic_block node;
248 edge e;
249 edge_iterator ei;
250
251 node = stack.pop ();
252
253 FOR_EACH_EDGE (e, ei, node->preds)
254 {
255 basic_block ancestor = e->src;
256
257 if (ancestor->loop_father != loop)
258 {
259 ancestor->loop_father = loop;
260 num_nodes++;
261 stack.safe_push (ancestor);
262 }
263 }
264 }
265 }
266 stack.release ();
267
268 return num_nodes;
269 }
270
271 /* Records the vector of superloops of the loop LOOP, whose immediate
272 superloop is FATHER. */
273
274 static void
275 establish_preds (struct loop *loop, struct loop *father)
276 {
277 loop_p ploop;
278 unsigned depth = loop_depth (father) + 1;
279 unsigned i;
280
281 loop->superloops = 0;
282 vec_alloc (loop->superloops, depth);
283 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
284 loop->superloops->quick_push (ploop);
285 loop->superloops->quick_push (father);
286
287 for (ploop = loop->inner; ploop; ploop = ploop->next)
288 establish_preds (ploop, loop);
289 }
290
   291 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
   292    added loop.  If LOOP has any children, take care that their superloop
   293    vectors are initialized correctly.  */
294
295 void
296 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
297 {
298 loop->next = father->inner;
299 father->inner = loop;
300
301 establish_preds (loop, father);
302 }
303
304 /* Remove LOOP from the loop hierarchy tree. */
305
306 void
307 flow_loop_tree_node_remove (struct loop *loop)
308 {
309 struct loop *prev, *father;
310
311 father = loop_outer (loop);
312
313 /* Remove loop from the list of sons. */
314 if (father->inner == loop)
315 father->inner = loop->next;
316 else
317 {
318 for (prev = father->inner; prev->next != loop; prev = prev->next)
319 continue;
320 prev->next = loop->next;
321 }
322
323 loop->superloops = NULL;
324 }
325
   326 /* Allocates and returns a new loop structure.  */
327
328 struct loop *
329 alloc_loop (void)
330 {
331 struct loop *loop = ggc_cleared_alloc<struct loop> ();
332
333 loop->exits = ggc_cleared_alloc<loop_exit> ();
334 loop->exits->next = loop->exits->prev = loop->exits;
335 loop->can_be_parallel = false;
336 loop->nb_iterations_upper_bound = 0;
337 loop->nb_iterations_estimate = 0;
338 return loop;
339 }
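/* A minimal sketch of how a new loop is typically wired into the loop tree
   elsewhere (compare form_subloop below); the block and loop names here are
   illustrative only:

     struct loop *nloop = alloc_loop ();
     nloop->header = header_bb;
     nloop->latch = latch_bb;
     add_loop (nloop, outer_loop);  */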
340
   341 /* Initializes the loops structure LOOPS, reserving space for NUM_LOOPS loops
342 (including the root of the loop tree). */
343
344 void
345 init_loops_structure (struct function *fn,
346 struct loops *loops, unsigned num_loops)
347 {
348 struct loop *root;
349
350 memset (loops, 0, sizeof *loops);
351 vec_alloc (loops->larray, num_loops);
352
353 /* Dummy loop containing whole function. */
354 root = alloc_loop ();
355 root->num_nodes = n_basic_blocks_for_fn (fn);
356 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
357 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
358 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
359 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
360
361 loops->larray->quick_push (root);
362 loops->tree_root = root;
363 }
364
365 /* Returns whether HEADER is a loop header. */
366
367 bool
368 bb_loop_header_p (basic_block header)
369 {
370 edge_iterator ei;
371 edge e;
372
373 /* If we have an abnormal predecessor, do not consider the
374 loop (not worth the problems). */
375 if (bb_has_abnormal_pred (header))
376 return false;
377
378 /* Look for back edges where a predecessor is dominated
379 by this block. A natural loop has a single entry
380 node (header) that dominates all the nodes in the
381 loop. It also has single back edge to the header
382 from a latch node. */
383 FOR_EACH_EDGE (e, ei, header->preds)
384 {
385 basic_block latch = e->src;
386 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
387 && dominated_by_p (CDI_DOMINATORS, latch, header))
388 return true;
389 }
390
391 return false;
392 }
393
   394 /* Find all the natural loops in the function, record them in the LOOPS
   395    structure and recompute loop_father information in the basic block structures.
396 If LOOPS is non-NULL then the loop structures for already recorded loops
397 will be re-used and their number will not change. We assume that no
398 stale loops exist in LOOPS.
399 When LOOPS is NULL it is allocated and re-built from scratch.
400 Return the built LOOPS structure. */
401
402 struct loops *
403 flow_loops_find (struct loops *loops)
404 {
405 bool from_scratch = (loops == NULL);
406 int *rc_order;
407 int b;
408 unsigned i;
409
410 /* Ensure that the dominators are computed. */
411 calculate_dominance_info (CDI_DOMINATORS);
412
413 if (!loops)
414 {
415 loops = ggc_cleared_alloc<struct loops> ();
416 init_loops_structure (cfun, loops, 1);
417 }
418
419 /* Ensure that loop exits were released. */
420 gcc_assert (loops->exits == NULL);
421
422 /* Taking care of this degenerate case makes the rest of
423 this code simpler. */
424 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
425 return loops;
426
427 /* The root loop node contains all basic-blocks. */
428 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
429
430 /* Compute depth first search order of the CFG so that outer
431 natural loops will be found before inner natural loops. */
432 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
433 pre_and_rev_post_order_compute (NULL, rc_order, false);
434
435 /* Gather all loop headers in reverse completion order and allocate
436 loop structures for loops that are not already present. */
437 auto_vec<loop_p> larray (loops->larray->length ());
438 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
439 {
440 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
441 if (bb_loop_header_p (header))
442 {
443 struct loop *loop;
444
445 /* The current active loop tree has valid loop-fathers for
446 header blocks. */
447 if (!from_scratch
448 && header->loop_father->header == header)
449 {
450 loop = header->loop_father;
451 /* If we found an existing loop remove it from the
452 loop tree. It is going to be inserted again
453 below. */
454 flow_loop_tree_node_remove (loop);
455 }
456 else
457 {
458 /* Otherwise allocate a new loop structure for the loop. */
459 loop = alloc_loop ();
460 /* ??? We could re-use unused loop slots here. */
461 loop->num = loops->larray->length ();
462 vec_safe_push (loops->larray, loop);
463 loop->header = header;
464
465 if (!from_scratch
466 && dump_file && (dump_flags & TDF_DETAILS))
467 fprintf (dump_file, "flow_loops_find: discovered new "
468 "loop %d with header %d\n",
469 loop->num, header->index);
470 }
471 /* Reset latch, we recompute it below. */
472 loop->latch = NULL;
473 larray.safe_push (loop);
474 }
475
476 /* Make blocks part of the loop root node at start. */
477 header->loop_father = loops->tree_root;
478 }
479
480 free (rc_order);
481
482 /* Now iterate over the loops found, insert them into the loop tree
483 and assign basic-block ownership. */
484 for (i = 0; i < larray.length (); ++i)
485 {
486 struct loop *loop = larray[i];
487 basic_block header = loop->header;
488 edge_iterator ei;
489 edge e;
490
491 flow_loop_tree_node_add (header->loop_father, loop);
492 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
493
494 /* Look for the latch for this header block, if it has just a
495 single one. */
496 FOR_EACH_EDGE (e, ei, header->preds)
497 {
498 basic_block latch = e->src;
499
500 if (flow_bb_inside_loop_p (loop, latch))
501 {
502 if (loop->latch != NULL)
503 {
504 /* More than one latch edge. */
505 loop->latch = NULL;
506 break;
507 }
508 loop->latch = latch;
509 }
510 }
511 }
512
513 return loops;
514 }
515
   516 /* Ratio of edge frequencies above which one of several latch edges is
   517    considered to belong to an inner loop with the same header.  */
518 #define HEAVY_EDGE_RATIO 8
519
   520 /* Minimum number of profile samples for which we apply the
   521    find_subloop_latch_edge_by_profile heuristic.  */
522 #define HEAVY_EDGE_MIN_SAMPLES 10
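/* Illustrative arithmetic for find_subloop_latch_edge_by_profile below: an
   edge with count MCOUNT out of a total TCOUNT is accepted only when
   TCOUNT >= HEAVY_EDGE_MIN_SAMPLES and (TCOUNT - MCOUNT) * HEAVY_EDGE_RATIO
   <= TCOUNT, i.e. the edge carries at least 7/8 of the samples.  E.g.
   MCOUNT == 90 of TCOUNT == 100 is accepted (10 * 8 <= 100), while
   MCOUNT == 80 of TCOUNT == 100 is not (20 * 8 > 100).  */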
523
   524 /* If the profile info is available, finds an edge in LATCHES that is much more
   525    frequent than the remaining edges.  Returns such an edge, or NULL if we do
526 not find one.
527
528 We do not use guessed profile here, only the measured one. The guessed
529 profile is usually too flat and unreliable for this (and it is mostly based
530 on the loop structure of the program, so it does not make much sense to
531 derive the loop structure from it). */
532
533 static edge
534 find_subloop_latch_edge_by_profile (vec<edge> latches)
535 {
536 unsigned i;
537 edge e, me = NULL;
538 gcov_type mcount = 0, tcount = 0;
539
540 FOR_EACH_VEC_ELT (latches, i, e)
541 {
542 if (e->count > mcount)
543 {
544 me = e;
545 mcount = e->count;
546 }
547 tcount += e->count;
548 }
549
550 if (tcount < HEAVY_EDGE_MIN_SAMPLES
551 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
552 return NULL;
553
554 if (dump_file)
555 fprintf (dump_file,
556 "Found latch edge %d -> %d using profile information.\n",
557 me->src->index, me->dest->index);
558 return me;
559 }
560
   561 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
562 on the structure of induction variables. Returns this edge, or NULL if we
563 do not find any.
564
565 We are quite conservative, and look just for an obvious simple innermost
566 loop (which is the case where we would lose the most performance by not
567 disambiguating the loop). More precisely, we look for the following
568 situation: The source of the chosen latch edge dominates sources of all
569 the other latch edges. Additionally, the header does not contain a phi node
570 such that the argument from the chosen edge is equal to the argument from
571 another edge. */
572
573 static edge
574 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
575 {
576 edge e, latch = latches[0];
577 unsigned i;
578 gphi *phi;
579 gphi_iterator psi;
580 tree lop;
581 basic_block bb;
582
583 /* Find the candidate for the latch edge. */
584 for (i = 1; latches.iterate (i, &e); i++)
585 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
586 latch = e;
587
588 /* Verify that it dominates all the latch edges. */
589 FOR_EACH_VEC_ELT (latches, i, e)
590 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
591 return NULL;
592
593 /* Check for a phi node that would deny that this is a latch edge of
594 a subloop. */
595 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
596 {
597 phi = psi.phi ();
598 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
599
600 /* Ignore the values that are not changed inside the subloop. */
601 if (TREE_CODE (lop) != SSA_NAME
602 || SSA_NAME_DEF_STMT (lop) == phi)
603 continue;
604 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
605 if (!bb || !flow_bb_inside_loop_p (loop, bb))
606 continue;
607
608 FOR_EACH_VEC_ELT (latches, i, e)
609 if (e != latch
610 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
611 return NULL;
612 }
613
614 if (dump_file)
615 fprintf (dump_file,
616 "Found latch edge %d -> %d using iv structure.\n",
617 latch->src->index, latch->dest->index);
618 return latch;
619 }
620
621 /* If we can determine that one of the several latch edges of LOOP behaves
622 as a latch edge of a separate subloop, returns this edge. Otherwise
623 returns NULL. */
624
625 static edge
626 find_subloop_latch_edge (struct loop *loop)
627 {
628 vec<edge> latches = get_loop_latch_edges (loop);
629 edge latch = NULL;
630
631 if (latches.length () > 1)
632 {
633 latch = find_subloop_latch_edge_by_profile (latches);
634
635 if (!latch
636 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
637 should use cfghook for this, but it is hard to imagine it would
638 be useful elsewhere. */
639 && current_ir_type () == IR_GIMPLE)
640 latch = find_subloop_latch_edge_by_ivs (loop, latches);
641 }
642
643 latches.release ();
644 return latch;
645 }
646
647 /* Callback for make_forwarder_block. Returns true if the edge E is marked
648 in the set MFB_REIS_SET. */
649
650 static hash_set<edge> *mfb_reis_set;
651 static bool
652 mfb_redirect_edges_in_set (edge e)
653 {
654 return mfb_reis_set->contains (e);
655 }
656
657 /* Creates a subloop of LOOP with latch edge LATCH. */
658
659 static void
660 form_subloop (struct loop *loop, edge latch)
661 {
662 edge_iterator ei;
663 edge e, new_entry;
664 struct loop *new_loop;
665
666 mfb_reis_set = new hash_set<edge>;
667 FOR_EACH_EDGE (e, ei, loop->header->preds)
668 {
669 if (e != latch)
670 mfb_reis_set->add (e);
671 }
672 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
673 NULL);
674 delete mfb_reis_set;
675
676 loop->header = new_entry->src;
677
678 /* Find the blocks and subloops that belong to the new loop, and add it to
679 the appropriate place in the loop tree. */
680 new_loop = alloc_loop ();
681 new_loop->header = new_entry->dest;
682 new_loop->latch = latch->src;
683 add_loop (new_loop, loop);
684 }
685
   686 /* Make all the latch edges of LOOP go to a single forwarder block --
   687    the new latch of LOOP.  */
688
689 static void
690 merge_latch_edges (struct loop *loop)
691 {
692 vec<edge> latches = get_loop_latch_edges (loop);
693 edge latch, e;
694 unsigned i;
695
696 gcc_assert (latches.length () > 0);
697
698 if (latches.length () == 1)
699 loop->latch = latches[0]->src;
700 else
701 {
702 if (dump_file)
703 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
704
705 mfb_reis_set = new hash_set<edge>;
706 FOR_EACH_VEC_ELT (latches, i, e)
707 mfb_reis_set->add (e);
708 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
709 NULL);
710 delete mfb_reis_set;
711
712 loop->header = latch->dest;
713 loop->latch = latch->src;
714 }
715
716 latches.release ();
717 }
718
719 /* LOOP may have several latch edges. Transform it into (possibly several)
   720    loops, each with a single latch edge.  */
721
722 static void
723 disambiguate_multiple_latches (struct loop *loop)
724 {
725 edge e;
726
727 /* We eliminate the multiple latches by splitting the header to the forwarder
728 block F and the rest R, and redirecting the edges. There are two cases:
729
730 1) If there is a latch edge E that corresponds to a subloop (we guess
731 that based on profile -- if it is taken much more often than the
732 remaining edges; and on trees, using the information about induction
733 variables of the loops), we redirect E to R, all the remaining edges to
734 F, then rescan the loops and try again for the outer loop.
735 2) If there is no such edge, we redirect all latch edges to F, and the
736 entry edges to R, thus making F the single latch of the loop. */
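  /* For instance, in case 2) a header H with entry edge E->H and latch edges
     L1->H, L2->H ends up as E->R, L1->F, L2->F and F->R, where F is the new
     empty forwarder block (the single latch) and R keeps the original
     contents of H and becomes the header.  (Block names are illustrative
     only.)  */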
737
738 if (dump_file)
739 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
740 loop->num);
741
742 /* During latch merging, we may need to redirect the entry edges to a new
743 block. This would cause problems if the entry edge was the one from the
744 entry block. To avoid having to handle this case specially, split
   745    such an entry edge.  */
746 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
747 if (e)
748 split_edge (e);
749
750 while (1)
751 {
752 e = find_subloop_latch_edge (loop);
753 if (!e)
754 break;
755
756 form_subloop (loop, e);
757 }
758
759 merge_latch_edges (loop);
760 }
761
762 /* Split loops with multiple latch edges. */
763
764 void
765 disambiguate_loops_with_multiple_latches (void)
766 {
767 struct loop *loop;
768
769 FOR_EACH_LOOP (loop, 0)
770 {
771 if (!loop->latch)
772 disambiguate_multiple_latches (loop);
773 }
774 }
775
776 /* Return nonzero if basic block BB belongs to LOOP. */
777 bool
778 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
779 {
780 struct loop *source_loop;
781
782 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
783 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
784 return 0;
785
786 source_loop = bb->loop_father;
787 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
788 }
789
790 /* Enumeration predicate for get_loop_body_with_size. */
791 static bool
792 glb_enum_p (const_basic_block bb, const void *glb_loop)
793 {
794 const struct loop *const loop = (const struct loop *) glb_loop;
795 return (bb != loop->header
796 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
797 }
798
   799 /* Gets the basic blocks of LOOP.  The header is the 0-th block, the rest are
   800    in DFS order against the direction of edges from the latch.  In particular,
   801    if header != latch, the latch is the 1-st block.  LOOP cannot be the fake
802 loop tree root, and its size must be at most MAX_SIZE. The blocks
803 in the LOOP body are stored to BODY, and the size of the LOOP is
804 returned. */
805
806 unsigned
807 get_loop_body_with_size (const struct loop *loop, basic_block *body,
808 unsigned max_size)
809 {
810 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
811 body, max_size, loop);
812 }
813
   814 /* Gets the basic blocks of LOOP.  The header is the 0-th block, the rest are
   815    in DFS order against the direction of edges from the latch.  In particular,
   816    if header != latch, the latch is the 1-st block.  */
817
818 basic_block *
819 get_loop_body (const struct loop *loop)
820 {
821 basic_block *body, bb;
822 unsigned tv = 0;
823
824 gcc_assert (loop->num_nodes);
825
826 body = XNEWVEC (basic_block, loop->num_nodes);
827
828 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
829 {
830 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
831 special-case the fake loop that contains the whole function. */
832 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
833 body[tv++] = loop->header;
834 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
835 FOR_EACH_BB_FN (bb, cfun)
836 body[tv++] = bb;
837 }
838 else
839 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
840
841 gcc_assert (tv == loop->num_nodes);
842 return body;
843 }
844
   845 /* Fills the dominance descendants of the basic block BB that lie inside LOOP
   846    into the array TOVISIT, starting at index *TV.  */
847
848 static void
849 fill_sons_in_loop (const struct loop *loop, basic_block bb,
850 basic_block *tovisit, int *tv)
851 {
852 basic_block son, postpone = NULL;
853
854 tovisit[(*tv)++] = bb;
855 for (son = first_dom_son (CDI_DOMINATORS, bb);
856 son;
857 son = next_dom_son (CDI_DOMINATORS, son))
858 {
859 if (!flow_bb_inside_loop_p (loop, son))
860 continue;
861
862 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
863 {
864 postpone = son;
865 continue;
866 }
867 fill_sons_in_loop (loop, son, tovisit, tv);
868 }
869
870 if (postpone)
871 fill_sons_in_loop (loop, postpone, tovisit, tv);
872 }
873
874 /* Gets body of a LOOP (that must be different from the outermost loop)
875 sorted by dominance relation. Additionally, if a basic block s dominates
   876    the latch, then only blocks dominated by s are after it.  */
877
878 basic_block *
879 get_loop_body_in_dom_order (const struct loop *loop)
880 {
881 basic_block *tovisit;
882 int tv;
883
884 gcc_assert (loop->num_nodes);
885
886 tovisit = XNEWVEC (basic_block, loop->num_nodes);
887
888 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
889
890 tv = 0;
891 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
892
893 gcc_assert (tv == (int) loop->num_nodes);
894
895 return tovisit;
896 }
897
898 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
899
900 basic_block *
901 get_loop_body_in_custom_order (const struct loop *loop,
902 int (*bb_comparator) (const void *, const void *))
903 {
904 basic_block *bbs = get_loop_body (loop);
905
906 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
907
908 return bbs;
909 }
910
911 /* Get body of a LOOP in breadth first sort order. */
912
913 basic_block *
914 get_loop_body_in_bfs_order (const struct loop *loop)
915 {
916 basic_block *blocks;
917 basic_block bb;
918 bitmap visited;
919 unsigned int i = 0;
920 unsigned int vc = 1;
921
922 gcc_assert (loop->num_nodes);
923 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
924
925 blocks = XNEWVEC (basic_block, loop->num_nodes);
926 visited = BITMAP_ALLOC (NULL);
927
928 bb = loop->header;
929 while (i < loop->num_nodes)
930 {
931 edge e;
932 edge_iterator ei;
933
934 if (bitmap_set_bit (visited, bb->index))
935 /* This basic block is now visited */
936 blocks[i++] = bb;
937
938 FOR_EACH_EDGE (e, ei, bb->succs)
939 {
940 if (flow_bb_inside_loop_p (loop, e->dest))
941 {
942 if (bitmap_set_bit (visited, e->dest->index))
943 blocks[i++] = e->dest;
944 }
945 }
946
947 gcc_assert (i > vc);
948
949 bb = blocks[vc++];
950 }
951
952 BITMAP_FREE (visited);
953 return blocks;
954 }
955
956 /* Hash function for struct loop_exit. */
957
958 hashval_t
959 loop_exit_hasher::hash (loop_exit *exit)
960 {
961 return htab_hash_pointer (exit->e);
962 }
963
964 /* Equality function for struct loop_exit. Compares with edge. */
965
966 bool
967 loop_exit_hasher::equal (loop_exit *exit, edge e)
968 {
969 return exit->e == e;
970 }
971
   972 /* Frees the list of loop exit descriptions starting at EXIT.  */
973
974 void
975 loop_exit_hasher::remove (loop_exit *exit)
976 {
977 loop_exit *next;
978 for (; exit; exit = next)
979 {
980 next = exit->next_e;
981
982 exit->next->prev = exit->prev;
983 exit->prev->next = exit->next;
984
985 ggc_free (exit);
986 }
987 }
988
989 /* Returns the list of records for E as an exit of a loop. */
990
991 static struct loop_exit *
992 get_exit_descriptions (edge e)
993 {
994 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
995 }
996
   997 /* Updates the lists of loop exits in which E appears.
998 If REMOVED is true, E is being removed, and we
999 just remove it from the lists of exits.
1000 If NEW_EDGE is true and E is not a loop exit, we
1001 do not try to remove it from loop exit lists. */
1002
1003 void
1004 rescan_loop_exit (edge e, bool new_edge, bool removed)
1005 {
1006 struct loop_exit *exits = NULL, *exit;
1007 struct loop *aloop, *cloop;
1008
1009 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1010 return;
1011
1012 if (!removed
1013 && e->src->loop_father != NULL
1014 && e->dest->loop_father != NULL
1015 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1016 {
1017 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1018 for (aloop = e->src->loop_father;
1019 aloop != cloop;
1020 aloop = loop_outer (aloop))
1021 {
1022 exit = ggc_alloc<loop_exit> ();
1023 exit->e = e;
1024
1025 exit->next = aloop->exits->next;
1026 exit->prev = aloop->exits;
1027 exit->next->prev = exit;
1028 exit->prev->next = exit;
1029
1030 exit->next_e = exits;
1031 exits = exit;
1032 }
1033 }
1034
1035 if (!exits && new_edge)
1036 return;
1037
1038 loop_exit **slot
1039 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1040 exits ? INSERT : NO_INSERT);
1041 if (!slot)
1042 return;
1043
1044 if (exits)
1045 {
1046 if (*slot)
1047 loop_exit_hasher::remove (*slot);
1048 *slot = exits;
1049 }
1050 else
1051 current_loops->exits->clear_slot (slot);
1052 }
1053
1054 /* For each loop, record list of exit edges, and start maintaining these
1055 lists. */
1056
1057 void
1058 record_loop_exits (void)
1059 {
1060 basic_block bb;
1061 edge_iterator ei;
1062 edge e;
1063
1064 if (!current_loops)
1065 return;
1066
1067 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1068 return;
1069 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1070
1071 gcc_assert (current_loops->exits == NULL);
1072 current_loops->exits
1073 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1074
1075 FOR_EACH_BB_FN (bb, cfun)
1076 {
1077 FOR_EACH_EDGE (e, ei, bb->succs)
1078 {
1079 rescan_loop_exit (e, true, false);
1080 }
1081 }
1082 }
1083
1084 /* Dumps information about the exit in *SLOT to FILE.
1085 Callback for htab_traverse. */
1086
1087 int
1088 dump_recorded_exit (loop_exit **slot, FILE *file)
1089 {
1090 struct loop_exit *exit = *slot;
1091 unsigned n = 0;
1092 edge e = exit->e;
1093
1094 for (; exit != NULL; exit = exit->next_e)
1095 n++;
1096
1097 fprintf (file, "Edge %d->%d exits %u loops\n",
1098 e->src->index, e->dest->index, n);
1099
1100 return 1;
1101 }
1102
1103 /* Dumps the recorded exits of loops to FILE. */
1104
1105 extern void dump_recorded_exits (FILE *);
1106 void
1107 dump_recorded_exits (FILE *file)
1108 {
1109 if (!current_loops->exits)
1110 return;
1111 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1112 }
1113
1114 /* Releases lists of loop exits. */
1115
1116 void
1117 release_recorded_exits (function *fn)
1118 {
1119 gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
1120 loops_for_fn (fn)->exits->empty ();
1121 loops_for_fn (fn)->exits = NULL;
1122 loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
1123 }
1124
1125 /* Returns the list of the exit edges of a LOOP. */
1126
1127 vec<edge>
1128 get_loop_exit_edges (const struct loop *loop)
1129 {
1130 vec<edge> edges = vNULL;
1131 edge e;
1132 unsigned i;
1133 basic_block *body;
1134 edge_iterator ei;
1135 struct loop_exit *exit;
1136
1137 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1138
1139 /* If we maintain the lists of exits, use them. Otherwise we must
1140 scan the body of the loop. */
1141 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1142 {
1143 for (exit = loop->exits->next; exit->e; exit = exit->next)
1144 edges.safe_push (exit->e);
1145 }
1146 else
1147 {
1148 body = get_loop_body (loop);
1149 for (i = 0; i < loop->num_nodes; i++)
1150 FOR_EACH_EDGE (e, ei, body[i]->succs)
1151 {
1152 if (!flow_bb_inside_loop_p (loop, e->dest))
1153 edges.safe_push (e);
1154 }
1155 free (body);
1156 }
1157
1158 return edges;
1159 }
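/* A typical (illustrative) way the result is consumed -- the vector is heap
   allocated and must be released by the caller:

     vec<edge> exits = get_loop_exit_edges (loop);
     unsigned i;
     edge ex;
     FOR_EACH_VEC_ELT (exits, i, ex)
       handle_exit (ex);
     exits.release ();

   handle_exit stands for whatever processing the caller does.  */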
1160
1161 /* Counts the number of conditional branches inside LOOP. */
1162
1163 unsigned
1164 num_loop_branches (const struct loop *loop)
1165 {
1166 unsigned i, n;
1167 basic_block * body;
1168
1169 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1170
1171 body = get_loop_body (loop);
1172 n = 0;
1173 for (i = 0; i < loop->num_nodes; i++)
1174 if (EDGE_COUNT (body[i]->succs) >= 2)
1175 n++;
1176 free (body);
1177
1178 return n;
1179 }
1180
1181 /* Adds basic block BB to LOOP. */
1182 void
1183 add_bb_to_loop (basic_block bb, struct loop *loop)
1184 {
1185 unsigned i;
1186 loop_p ploop;
1187 edge_iterator ei;
1188 edge e;
1189
1190 gcc_assert (bb->loop_father == NULL);
1191 bb->loop_father = loop;
1192 loop->num_nodes++;
1193 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1194 ploop->num_nodes++;
1195
1196 FOR_EACH_EDGE (e, ei, bb->succs)
1197 {
1198 rescan_loop_exit (e, true, false);
1199 }
1200 FOR_EACH_EDGE (e, ei, bb->preds)
1201 {
1202 rescan_loop_exit (e, true, false);
1203 }
1204 }
1205
1206 /* Remove basic block BB from loops. */
1207 void
1208 remove_bb_from_loops (basic_block bb)
1209 {
1210 unsigned i;
1211 struct loop *loop = bb->loop_father;
1212 loop_p ploop;
1213 edge_iterator ei;
1214 edge e;
1215
1216 gcc_assert (loop != NULL);
1217 loop->num_nodes--;
1218 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1219 ploop->num_nodes--;
1220 bb->loop_father = NULL;
1221
1222 FOR_EACH_EDGE (e, ei, bb->succs)
1223 {
1224 rescan_loop_exit (e, false, true);
1225 }
1226 FOR_EACH_EDGE (e, ei, bb->preds)
1227 {
1228 rescan_loop_exit (e, false, true);
1229 }
1230 }
1231
  1232 /* Finds the nearest common ancestor in the loop tree for the given loops.  */
1233 struct loop *
1234 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1235 {
1236 unsigned sdepth, ddepth;
1237
1238 if (!loop_s) return loop_d;
1239 if (!loop_d) return loop_s;
1240
1241 sdepth = loop_depth (loop_s);
1242 ddepth = loop_depth (loop_d);
1243
1244 if (sdepth < ddepth)
1245 loop_d = (*loop_d->superloops)[sdepth];
1246 else if (sdepth > ddepth)
1247 loop_s = (*loop_s->superloops)[ddepth];
1248
1249 while (loop_s != loop_d)
1250 {
1251 loop_s = loop_outer (loop_s);
1252 loop_d = loop_outer (loop_d);
1253 }
1254 return loop_s;
1255 }
1256
1257 /* Removes LOOP from structures and frees its data. */
1258
1259 void
1260 delete_loop (struct loop *loop)
1261 {
1262 /* Remove the loop from structure. */
1263 flow_loop_tree_node_remove (loop);
1264
1265 /* Remove loop from loops array. */
1266 (*current_loops->larray)[loop->num] = NULL;
1267
1268 /* Free loop data. */
1269 flow_loop_free (loop);
1270 }
1271
  1272 /* Cancels the LOOP; it must be an innermost loop.  */
1273
1274 static void
1275 cancel_loop (struct loop *loop)
1276 {
1277 basic_block *bbs;
1278 unsigned i;
1279 struct loop *outer = loop_outer (loop);
1280
1281 gcc_assert (!loop->inner);
1282
1283 /* Move blocks up one level (they should be removed as soon as possible). */
1284 bbs = get_loop_body (loop);
1285 for (i = 0; i < loop->num_nodes; i++)
1286 bbs[i]->loop_father = outer;
1287
1288 free (bbs);
1289 delete_loop (loop);
1290 }
1291
1292 /* Cancels LOOP and all its subloops. */
1293 void
1294 cancel_loop_tree (struct loop *loop)
1295 {
1296 while (loop->inner)
1297 cancel_loop_tree (loop->inner);
1298 cancel_loop (loop);
1299 }
1300
1301 /* Checks that information about loops is correct
1302 -- sizes of loops are all right
1303 -- results of get_loop_body really belong to the loop
  1304    -- loop headers have just a single entry edge and a single latch edge
  1305    -- loop latches have only a single successor that is the header of their loop
1306 -- irreducible loops are correctly marked
1307 -- the cached loop depth and loop father of each bb is correct
1308 */
1309 DEBUG_FUNCTION void
1310 verify_loop_structure (void)
1311 {
1312 unsigned *sizes, i, j;
1313 sbitmap irreds;
1314 basic_block bb, *bbs;
1315 struct loop *loop;
1316 int err = 0;
1317 edge e;
1318 unsigned num = number_of_loops (cfun);
1319 struct loop_exit *exit, *mexit;
1320 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1321 sbitmap visited;
1322
1323 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1324 {
1325 error ("loop verification on loop tree that needs fixup");
1326 err = 1;
1327 }
1328
1329 /* We need up-to-date dominators, compute or verify them. */
1330 if (!dom_available)
1331 calculate_dominance_info (CDI_DOMINATORS);
1332 else
1333 verify_dominators (CDI_DOMINATORS);
1334
1335 /* Check the loop tree root. */
1336 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1337 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1338 || (current_loops->tree_root->num_nodes
1339 != (unsigned) n_basic_blocks_for_fn (cfun)))
1340 {
1341 error ("corrupt loop tree root");
1342 err = 1;
1343 }
1344
1345 /* Check the headers. */
1346 FOR_EACH_BB_FN (bb, cfun)
1347 if (bb_loop_header_p (bb))
1348 {
1349 if (bb->loop_father->header == NULL)
1350 {
1351 error ("loop with header %d marked for removal", bb->index);
1352 err = 1;
1353 }
1354 else if (bb->loop_father->header != bb)
1355 {
1356 error ("loop with header %d not in loop tree", bb->index);
1357 err = 1;
1358 }
1359 }
1360 else if (bb->loop_father->header == bb)
1361 {
1362 error ("non-loop with header %d not marked for removal", bb->index);
1363 err = 1;
1364 }
1365
1366 /* Check the recorded loop father and sizes of loops. */
1367 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1368 bitmap_clear (visited);
1369 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1370 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1371 {
1372 unsigned n;
1373
1374 if (loop->header == NULL)
1375 {
1376 error ("removed loop %d in loop tree", loop->num);
1377 err = 1;
1378 continue;
1379 }
1380
1381 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1382 if (loop->num_nodes != n)
1383 {
1384 error ("size of loop %d should be %d, not %d",
1385 loop->num, n, loop->num_nodes);
1386 err = 1;
1387 }
1388
1389 for (j = 0; j < n; j++)
1390 {
1391 bb = bbs[j];
1392
1393 if (!flow_bb_inside_loop_p (loop, bb))
1394 {
1395 error ("bb %d does not belong to loop %d",
1396 bb->index, loop->num);
1397 err = 1;
1398 }
1399
1400 /* Ignore this block if it is in an inner loop. */
1401 if (bitmap_bit_p (visited, bb->index))
1402 continue;
1403 bitmap_set_bit (visited, bb->index);
1404
1405 if (bb->loop_father != loop)
1406 {
1407 error ("bb %d has father loop %d, should be loop %d",
1408 bb->index, bb->loop_father->num, loop->num);
1409 err = 1;
1410 }
1411 }
1412 }
1413 free (bbs);
1414 sbitmap_free (visited);
1415
1416 /* Check headers and latches. */
1417 FOR_EACH_LOOP (loop, 0)
1418 {
1419 i = loop->num;
1420 if (loop->header == NULL)
1421 continue;
1422 if (!bb_loop_header_p (loop->header))
1423 {
1424 error ("loop %d%'s header is not a loop header", i);
1425 err = 1;
1426 }
1427 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1428 && EDGE_COUNT (loop->header->preds) != 2)
1429 {
1430 error ("loop %d%'s header does not have exactly 2 entries", i);
1431 err = 1;
1432 }
1433 if (loop->latch)
1434 {
1435 if (!find_edge (loop->latch, loop->header))
1436 {
1437 error ("loop %d%'s latch does not have an edge to its header", i);
1438 err = 1;
1439 }
1440 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1441 {
1442 error ("loop %d%'s latch is not dominated by its header", i);
1443 err = 1;
1444 }
1445 }
1446 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1447 {
1448 if (!single_succ_p (loop->latch))
1449 {
1450 error ("loop %d%'s latch does not have exactly 1 successor", i);
1451 err = 1;
1452 }
1453 if (single_succ (loop->latch) != loop->header)
1454 {
1455 error ("loop %d%'s latch does not have header as successor", i);
1456 err = 1;
1457 }
1458 if (loop->latch->loop_father != loop)
1459 {
1460 error ("loop %d%'s latch does not belong directly to it", i);
1461 err = 1;
1462 }
1463 }
1464 if (loop->header->loop_father != loop)
1465 {
1466 error ("loop %d%'s header does not belong directly to it", i);
1467 err = 1;
1468 }
1469 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1470 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1471 {
1472 error ("loop %d%'s latch is marked as part of irreducible region", i);
1473 err = 1;
1474 }
1475 }
1476
1477 /* Check irreducible loops. */
1478 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1479 {
1480 /* Record old info. */
1481 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1482 FOR_EACH_BB_FN (bb, cfun)
1483 {
1484 edge_iterator ei;
1485 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1486 bitmap_set_bit (irreds, bb->index);
1487 else
1488 bitmap_clear_bit (irreds, bb->index);
1489 FOR_EACH_EDGE (e, ei, bb->succs)
1490 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1491 e->flags |= EDGE_ALL_FLAGS + 1;
1492 }
1493
1494 /* Recount it. */
1495 mark_irreducible_loops ();
1496
1497 /* Compare. */
1498 FOR_EACH_BB_FN (bb, cfun)
1499 {
1500 edge_iterator ei;
1501
1502 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1503 && !bitmap_bit_p (irreds, bb->index))
1504 {
1505 error ("basic block %d should be marked irreducible", bb->index);
1506 err = 1;
1507 }
1508 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1509 && bitmap_bit_p (irreds, bb->index))
1510 {
1511 error ("basic block %d should not be marked irreducible", bb->index);
1512 err = 1;
1513 }
1514 FOR_EACH_EDGE (e, ei, bb->succs)
1515 {
1516 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1517 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1518 {
1519 error ("edge from %d to %d should be marked irreducible",
1520 e->src->index, e->dest->index);
1521 err = 1;
1522 }
1523 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1524 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1525 {
1526 error ("edge from %d to %d should not be marked irreducible",
1527 e->src->index, e->dest->index);
1528 err = 1;
1529 }
1530 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1531 }
1532 }
1533 free (irreds);
1534 }
1535
1536 /* Check the recorded loop exits. */
1537 FOR_EACH_LOOP (loop, 0)
1538 {
1539 if (!loop->exits || loop->exits->e != NULL)
1540 {
1541 error ("corrupted head of the exits list of loop %d",
1542 loop->num);
1543 err = 1;
1544 }
1545 else
1546 {
1547 /* Check that the list forms a cycle, and all elements except
1548 for the head are nonnull. */
1549 for (mexit = loop->exits, exit = mexit->next, i = 0;
1550 exit->e && exit != mexit;
1551 exit = exit->next)
1552 {
1553 if (i++ & 1)
1554 mexit = mexit->next;
1555 }
1556
1557 if (exit != loop->exits)
1558 {
1559 error ("corrupted exits list of loop %d", loop->num);
1560 err = 1;
1561 }
1562 }
1563
1564 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1565 {
1566 if (loop->exits->next != loop->exits)
1567 {
1568 error ("nonempty exits list of loop %d, but exits are not recorded",
1569 loop->num);
1570 err = 1;
1571 }
1572 }
1573 }
1574
1575 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1576 {
1577 unsigned n_exits = 0, eloops;
1578
1579 sizes = XCNEWVEC (unsigned, num);
1580 memset (sizes, 0, sizeof (unsigned) * num);
1581 FOR_EACH_BB_FN (bb, cfun)
1582 {
1583 edge_iterator ei;
1584 if (bb->loop_father == current_loops->tree_root)
1585 continue;
1586 FOR_EACH_EDGE (e, ei, bb->succs)
1587 {
1588 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1589 continue;
1590
1591 n_exits++;
1592 exit = get_exit_descriptions (e);
1593 if (!exit)
1594 {
1595 error ("exit %d->%d not recorded",
1596 e->src->index, e->dest->index);
1597 err = 1;
1598 }
1599 eloops = 0;
1600 for (; exit; exit = exit->next_e)
1601 eloops++;
1602
1603 for (loop = bb->loop_father;
1604 loop != e->dest->loop_father
1605 /* When a loop exit is also an entry edge which
1606 can happen when avoiding CFG manipulations
1607 then the last loop exited is the outer loop
1608 of the loop entered. */
1609 && loop != loop_outer (e->dest->loop_father);
1610 loop = loop_outer (loop))
1611 {
1612 eloops--;
1613 sizes[loop->num]++;
1614 }
1615
1616 if (eloops != 0)
1617 {
1618 error ("wrong list of exited loops for edge %d->%d",
1619 e->src->index, e->dest->index);
1620 err = 1;
1621 }
1622 }
1623 }
1624
1625 if (n_exits != current_loops->exits->elements ())
1626 {
1627 error ("too many loop exits recorded");
1628 err = 1;
1629 }
1630
1631 FOR_EACH_LOOP (loop, 0)
1632 {
1633 eloops = 0;
1634 for (exit = loop->exits->next; exit->e; exit = exit->next)
1635 eloops++;
1636 if (eloops != sizes[loop->num])
1637 {
1638 error ("%d exits recorded for loop %d (having %d exits)",
1639 eloops, loop->num, sizes[loop->num]);
1640 err = 1;
1641 }
1642 }
1643
1644 free (sizes);
1645 }
1646
1647 gcc_assert (!err);
1648
1649 if (!dom_available)
1650 free_dominance_info (CDI_DOMINATORS);
1651 }
1652
1653 /* Returns latch edge of LOOP. */
1654 edge
1655 loop_latch_edge (const struct loop *loop)
1656 {
1657 return find_edge (loop->latch, loop->header);
1658 }
1659
1660 /* Returns preheader edge of LOOP. */
1661 edge
1662 loop_preheader_edge (const struct loop *loop)
1663 {
1664 edge e;
1665 edge_iterator ei;
1666
1667 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1668
1669 FOR_EACH_EDGE (e, ei, loop->header->preds)
1670 if (e->src != loop->latch)
1671 break;
1672
1673 return e;
1674 }
1675
1676 /* Returns true if E is an exit of LOOP. */
1677
1678 bool
1679 loop_exit_edge_p (const struct loop *loop, const_edge e)
1680 {
1681 return (flow_bb_inside_loop_p (loop, e->src)
1682 && !flow_bb_inside_loop_p (loop, e->dest));
1683 }
1684
1685 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1686 or more than one exit. If loops do not have the exits recorded, NULL
  1687    is always returned.  */
1688
1689 edge
1690 single_exit (const struct loop *loop)
1691 {
1692 struct loop_exit *exit = loop->exits->next;
1693
1694 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1695 return NULL;
1696
1697 if (exit->e && exit->next == loop->exits)
1698 return exit->e;
1699 else
1700 return NULL;
1701 }
1702
1703 /* Returns true when BB has an incoming edge exiting LOOP. */
1704
1705 bool
1706 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1707 {
1708 edge e;
1709 edge_iterator ei;
1710
1711 FOR_EACH_EDGE (e, ei, bb->preds)
1712 if (loop_exit_edge_p (loop, e))
1713 return true;
1714
1715 return false;
1716 }
1717
1718 /* Returns true when BB has an outgoing edge exiting LOOP. */
1719
1720 bool
1721 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1722 {
1723 edge e;
1724 edge_iterator ei;
1725
1726 FOR_EACH_EDGE (e, ei, bb->succs)
1727 if (loop_exit_edge_p (loop, e))
1728 return true;
1729
1730 return false;
1731 }
1732
1733 /* Return location corresponding to the loop control condition if possible. */
1734
1735 location_t
1736 get_loop_location (struct loop *loop)
1737 {
1738 rtx_insn *insn = NULL;
1739 struct niter_desc *desc = NULL;
1740 edge exit;
1741
1742 /* For a for or while loop, we would like to return the location
1743 of the for or while statement, if possible. To do this, look
1744 for the branch guarding the loop back-edge. */
1745
1746 /* If this is a simple loop with an in_edge, then the loop control
1747 branch is typically at the end of its source. */
1748 desc = get_simple_loop_desc (loop);
1749 if (desc->in_edge)
1750 {
1751 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1752 {
1753 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1754 return INSN_LOCATION (insn);
1755 }
1756 }
1757 /* If loop has a single exit, then the loop control branch
1758 must be at the end of its source. */
1759 if ((exit = single_exit (loop)))
1760 {
1761 FOR_BB_INSNS_REVERSE (exit->src, insn)
1762 {
1763 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1764 return INSN_LOCATION (insn);
1765 }
1766 }
1767 /* Next check the latch, to see if it is non-empty. */
1768 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1769 {
1770 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1771 return INSN_LOCATION (insn);
1772 }
1773 /* Finally, if none of the above identifies the loop control branch,
1774 return the first location in the loop header. */
1775 FOR_BB_INSNS (loop->header, insn)
1776 {
1777 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1778 return INSN_LOCATION (insn);
1779 }
1780 /* If all else fails, simply return the current function location. */
1781 return DECL_SOURCE_LOCATION (current_function_decl);
1782 }
1783
1784 /* Records that every statement in LOOP is executed I_BOUND times.
1785 REALISTIC is true if I_BOUND is expected to be close to the real number
1786 of iterations. UPPER is true if we are sure the loop iterates at most
1787 I_BOUND times. */
1788
1789 void
1790 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1791 bool realistic, bool upper)
1792 {
1793 /* Update the bounds only when there is no previous estimation, or when the
1794 current estimation is smaller. */
1795 if (upper
1796 && (!loop->any_upper_bound
1797 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1798 {
1799 loop->any_upper_bound = true;
1800 loop->nb_iterations_upper_bound = i_bound;
1801 }
1802 if (realistic
1803 && (!loop->any_estimate
1804 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1805 {
1806 loop->any_estimate = true;
1807 loop->nb_iterations_estimate = i_bound;
1808 }
1809
1810 /* If an upper bound is smaller than the realistic estimate of the
1811 number of iterations, use the upper bound instead. */
1812 if (loop->any_upper_bound
1813 && loop->any_estimate
1814 && wi::ltu_p (loop->nb_iterations_upper_bound,
1815 loop->nb_iterations_estimate))
1816 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1817 }
1818
1819 /* Similar to get_estimated_loop_iterations, but returns the estimate only
  1820    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1821 on the number of iterations of LOOP could not be derived, returns -1. */
1822
1823 HOST_WIDE_INT
1824 get_estimated_loop_iterations_int (struct loop *loop)
1825 {
1826 widest_int nit;
1827 HOST_WIDE_INT hwi_nit;
1828
1829 if (!get_estimated_loop_iterations (loop, &nit))
1830 return -1;
1831
1832 if (!wi::fits_shwi_p (nit))
1833 return -1;
1834 hwi_nit = nit.to_shwi ();
1835
1836 return hwi_nit < 0 ? -1 : hwi_nit;
1837 }
1838
1839 /* Returns an upper bound on the number of executions of statements
1840 in the LOOP. For statements before the loop exit, this exceeds
  1841    the number of executions of the latch by one.  */
1842
1843 HOST_WIDE_INT
1844 max_stmt_executions_int (struct loop *loop)
1845 {
1846 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1847 HOST_WIDE_INT snit;
1848
1849 if (nit == -1)
1850 return -1;
1851
1852 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1853
1854 /* If the computation overflows, return -1. */
1855 return snit < 0 ? -1 : snit;
1856 }
1857
1858 /* Sets NIT to the estimated number of executions of the latch of the
1859 LOOP. If we have no reliable estimate, the function returns false, otherwise
1860 returns true. */
1861
1862 bool
1863 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1864 {
  1865   /* Even if the bound is not recorded, possibly we can derive one from
  1866      the profile.  */
1867 if (!loop->any_estimate)
1868 {
1869 if (loop->header->count)
1870 {
1871 *nit = gcov_type_to_wide_int
1872 (expected_loop_iterations_unbounded (loop) + 1);
1873 return true;
1874 }
1875 return false;
1876 }
1877
1878 *nit = loop->nb_iterations_estimate;
1879 return true;
1880 }
1881
1882 /* Sets NIT to an upper bound for the maximum number of executions of the
1883 latch of the LOOP. If we have no reliable estimate, the function returns
1884 false, otherwise returns true. */
1885
1886 bool
1887 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1888 {
1889 if (!loop->any_upper_bound)
1890 return false;
1891
1892 *nit = loop->nb_iterations_upper_bound;
1893 return true;
1894 }
1895
1896 /* Similar to get_max_loop_iterations, but returns the estimate only
  1897    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1898 on the number of iterations of LOOP could not be derived, returns -1. */
1899
1900 HOST_WIDE_INT
1901 get_max_loop_iterations_int (struct loop *loop)
1902 {
1903 widest_int nit;
1904 HOST_WIDE_INT hwi_nit;
1905
1906 if (!get_max_loop_iterations (loop, &nit))
1907 return -1;
1908
1909 if (!wi::fits_shwi_p (nit))
1910 return -1;
1911 hwi_nit = nit.to_shwi ();
1912
1913 return hwi_nit < 0 ? -1 : hwi_nit;
1914 }
1915
1916 /* Returns the loop depth of the loop BB belongs to. */
1917
1918 int
1919 bb_loop_depth (const_basic_block bb)
1920 {
1921 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1922 }
1923
1924 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1925
1926 void
1927 mark_loop_for_removal (loop_p loop)
1928 {
1929 if (loop->header == NULL)
1930 return;
1931 loop->former_header = loop->header;
1932 loop->header = NULL;
1933 loop->latch = NULL;
1934 loops_state_set (LOOPS_NEED_FIXUP);
1935 }