]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/cfgloop.c
Merge in trunk.
[thirdparty/gcc.git] / gcc / cfgloop.c
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "cfgloop.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "tree.h"
31 #include "tree-ssa.h"
32 #include "pointer-set.h"
33 #include "ggc.h"
34 #include "dumpfile.h"
35
36 static void flow_loops_cfg_dump (FILE *);
37 \f
38 /* Dump loop related CFG information. */
39
40 static void
41 flow_loops_cfg_dump (FILE *file)
42 {
43 basic_block bb;
44
45 if (!file)
46 return;
47
48 FOR_EACH_BB (bb)
49 {
50 edge succ;
51 edge_iterator ei;
52
53 fprintf (file, ";; %d succs { ", bb->index);
54 FOR_EACH_EDGE (succ, ei, bb->succs)
55 fprintf (file, "%d ", succ->dest->index);
56 fprintf (file, "}\n");
57 }
58 }
59
60 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
61
62 bool
63 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
64 {
65 unsigned odepth = loop_depth (outer);
66
67 return (loop_depth (loop) > odepth
68 && (*loop->superloops)[odepth] == outer);
69 }
70
71 /* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
72 loops within LOOP. */
73
74 struct loop *
75 superloop_at_depth (struct loop *loop, unsigned depth)
76 {
77 unsigned ldepth = loop_depth (loop);
78
79 gcc_assert (depth <= ldepth);
80
81 if (depth == ldepth)
82 return loop;
83
84 return (*loop->superloops)[depth];
85 }
86
87 /* Returns the list of the latch edges of LOOP. */
88
89 static vec<edge>
90 get_loop_latch_edges (const struct loop *loop)
91 {
92 edge_iterator ei;
93 edge e;
94 vec<edge> ret = vNULL;
95
96 FOR_EACH_EDGE (e, ei, loop->header->preds)
97 {
98 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
99 ret.safe_push (e);
100 }
101
102 return ret;
103 }
104
/* Dump the loop information specified by LOOP to the stream FILE
   using auxiliary dump callback function LOOP_DUMP_AUX if non null.
   VERBOSE is passed through to LOOP_DUMP_AUX.  */

void
flow_loop_dump (const struct loop *loop, FILE *file,
		void (*loop_dump_aux) (const struct loop *, FILE *, int),
		int verbose)
{
  basic_block *bbs;
  unsigned i;
  vec<edge> latches;
  edge e;

  /* Nothing to dump for a missing or uninitialized loop.  */
  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";; header %d, ", loop->header->index);
  if (loop->latch)
    fprintf (file, "latch %d\n", loop->latch->index);
  else
    {
      /* A NULL latch marks a loop with several latch edges; list the
	 source block of each of them instead.  */
      fprintf (file, "multiple latches:");
      latches = get_loop_latch_edges (loop);
      FOR_EACH_VEC_ELT (latches, i, e)
	fprintf (file, " %d", e->src->index);
      latches.release ();
      fprintf (file, "\n");
    }

  /* -1 denotes the loop-tree root, which has no outer loop.  */
  fprintf (file, ";; depth %d, outer %ld\n",
	   loop_depth (loop), (long) (loop_outer (loop)
				      ? loop_outer (loop)->num : -1));

  fprintf (file, ";; nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}
150
151 /* Dump the loop information about loops to the stream FILE,
152 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
153
154 void
155 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
156 {
157 loop_iterator li;
158 struct loop *loop;
159
160 if (!current_loops || ! file)
161 return;
162
163 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
164
165 FOR_EACH_LOOP (li, loop, LI_INCLUDE_ROOT)
166 {
167 flow_loop_dump (loop, file, loop_dump_aux, verbose);
168 }
169
170 if (verbose)
171 flow_loops_cfg_dump (file);
172 }
173
/* Free data allocated for LOOP.  */

void
flow_loop_free (struct loop *loop)
{
  struct loop_exit *exit, *next;

  vec_free (loop->superloops);

  /* Break the list of the loop exit records.  They will be freed when the
     corresponding edge is rescanned or removed, and this avoids
     accessing the (already released) head of the list stored in the
     loop structure.  */
  for (exit = loop->exits->next; exit != loop->exits; exit = next)
    {
      next = exit->next;
      /* Turn each record into a singleton circular list so that a later
	 unlink of it is a harmless no-op.  */
      exit->next = exit;
      exit->prev = exit;
    }

  ggc_free (loop->exits);
  ggc_free (loop);
}
197
198 /* Free all the memory allocated for LOOPS. */
199
200 void
201 flow_loops_free (struct loops *loops)
202 {
203 if (loops->larray)
204 {
205 unsigned i;
206 loop_p loop;
207
208 /* Free the loop descriptors. */
209 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
210 {
211 if (!loop)
212 continue;
213
214 flow_loop_free (loop);
215 }
216
217 vec_free (loops->larray);
218 }
219 }
220
/* Find the nodes contained within the LOOP with header HEADER, setting
   each node's loop_father to LOOP.  Return the number of nodes within
   the loop (including the header).  */

int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
  vec<basic_block> stack = vNULL;
  int num_nodes = 1;
  edge latch;
  edge_iterator latch_ei;

  header->loop_father = loop;

  /* Walk backwards from each latch edge: every block that reaches a
     latch without passing through the header belongs to the loop.  */
  FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
    {
      /* Skip blocks already assigned to LOOP and predecessor edges
	 that are not back edges (source not dominated by the header).  */
      if (latch->src->loop_father == loop
	  || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
	continue;

      num_nodes++;
      stack.safe_push (latch->src);
      latch->src->loop_father = loop;

      /* Depth-first search over predecessors; the header acts as a
	 barrier because its loop_father is already LOOP.  */
      while (!stack.is_empty ())
	{
	  basic_block node;
	  edge e;
	  edge_iterator ei;

	  node = stack.pop ();

	  FOR_EACH_EDGE (e, ei, node->preds)
	    {
	      basic_block ancestor = e->src;

	      if (ancestor->loop_father != loop)
		{
		  ancestor->loop_father = loop;
		  num_nodes++;
		  stack.safe_push (ancestor);
		}
	    }
	}
    }
  stack.release ();

  return num_nodes;
}
269
/* Records the vector of superloops of the loop LOOP, whose immediate
   superloop is FATHER.  Recurses into the subloops of LOOP so the
   whole subtree gets consistent superloop vectors.  */

static void
establish_preds (struct loop *loop, struct loop *father)
{
  loop_p ploop;
  unsigned depth = loop_depth (father) + 1;
  unsigned i;

  /* LOOP's superloop vector is FATHER's vector with FATHER appended;
     its length equals LOOP's new depth.  */
  loop->superloops = 0;
  vec_alloc (loop->superloops, depth);
  FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
    loop->superloops->quick_push (ploop);
  loop->superloops->quick_push (father);

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    establish_preds (ploop, loop);
}
289
/* Add LOOP to the loop hierarchy tree where FATHER is father of the
   added loop.  If LOOP has some children, take care of that their
   pred field will be initialized correctly.  */

void
flow_loop_tree_node_add (struct loop *father, struct loop *loop)
{
  /* Link LOOP at the head of FATHER's list of subloops.  */
  loop->next = father->inner;
  father->inner = loop;

  /* Rebuild the superloop vectors of LOOP and all its descendants.  */
  establish_preds (loop, father);
}
302
/* Remove LOOP from the loop hierarchy tree.  */

void
flow_loop_tree_node_remove (struct loop *loop)
{
  struct loop *prev, *father;

  father = loop_outer (loop);

  /* Remove loop from the list of sons.  */
  if (father->inner == loop)
    father->inner = loop->next;
  else
    {
      /* Find the sibling that precedes LOOP and unlink LOOP after it.  */
      for (prev = father->inner; prev->next != loop; prev = prev->next)
	continue;
      prev->next = loop->next;
    }

  /* The superloop vector is stale now; it is rebuilt by
     establish_preds when the loop is re-inserted into the tree.  */
  loop->superloops = NULL;
}
324
/* Allocates and returns new loop structure.  All fields start zeroed;
   the exit list is set up as an empty circular list with a dummy
   head record.  */

struct loop *
alloc_loop (void)
{
  struct loop *loop = ggc_alloc_cleared_loop ();

  loop->exits = ggc_alloc_cleared_loop_exit ();
  loop->exits->next = loop->exits->prev = loop->exits;
  loop->can_be_parallel = false;
  loop->nb_iterations_upper_bound = 0;
  loop->nb_iterations_estimate = 0;
  return loop;
}
339
/* Initializes loops structure LOOPS of function FN, reserving place
   for NUM_LOOPS loops (including the root of the loop tree).  */

void
init_loops_structure (struct function *fn,
		      struct loops *loops, unsigned num_loops)
{
  struct loop *root;

  memset (loops, 0, sizeof *loops);
  vec_alloc (loops->larray, num_loops);

  /* Dummy loop containing whole function.  Its header/latch are the
     artificial entry/exit blocks of FN.  */
  root = alloc_loop ();
  root->num_nodes = n_basic_blocks_for_function (fn);
  root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
  root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
  ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
  EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;

  /* The root loop always occupies slot 0 of the descriptor array.  */
  loops->larray->quick_push (root);
  loops->tree_root = root;
}
363
364 /* Returns whether HEADER is a loop header. */
365
366 bool
367 bb_loop_header_p (basic_block header)
368 {
369 edge_iterator ei;
370 edge e;
371
372 /* If we have an abnormal predecessor, do not consider the
373 loop (not worth the problems). */
374 if (bb_has_abnormal_pred (header))
375 return false;
376
377 /* Look for back edges where a predecessor is dominated
378 by this block. A natural loop has a single entry
379 node (header) that dominates all the nodes in the
380 loop. It also has single back edge to the header
381 from a latch node. */
382 FOR_EACH_EDGE (e, ei, header->preds)
383 {
384 basic_block latch = e->src;
385 if (latch != ENTRY_BLOCK_PTR
386 && dominated_by_p (CDI_DOMINATORS, latch, header))
387 return true;
388 }
389
390 return false;
391 }
392
/* Find all the natural loops in the function and save in LOOPS structure and
   recalculate loop_father information in basic block structures.
   If LOOPS is non-NULL then the loop structures for already recorded loops
   will be re-used and their number will not change.  We assume that no
   stale loops exist in LOOPS.
   When LOOPS is NULL it is allocated and re-built from scratch.
   Return the built LOOPS structure.  */

struct loops *
flow_loops_find (struct loops *loops)
{
  bool from_scratch = (loops == NULL);
  int *rc_order;
  int b;
  unsigned i;
  vec<loop_p> larray;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (!loops)
    {
      loops = ggc_alloc_cleared_loops ();
      init_loops_structure (cfun, loops, 1);
    }

  /* Ensure that loop exits were released.  */
  gcc_assert (loops->exits == NULL);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return loops;

  /* The root loop node contains all basic-blocks.  */
  loops->tree_root->num_nodes = n_basic_blocks;

  /* Compute depth first search order of the CFG so that outer
     natural loops will be found before inner natural loops.  */
  rc_order = XNEWVEC (int, n_basic_blocks);
  pre_and_rev_post_order_compute (NULL, rc_order, false);

  /* Gather all loop headers in reverse completion order and allocate
     loop structures for loops that are not already present.  */
  larray.create (loops->larray->length ());
  for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
    {
      basic_block header = BASIC_BLOCK (rc_order[b]);
      if (bb_loop_header_p (header))
	{
	  struct loop *loop;

	  /* The current active loop tree has valid loop-fathers for
	     header blocks.  */
	  if (!from_scratch
	      && header->loop_father->header == header)
	    {
	      loop = header->loop_father;
	      /* If we found an existing loop remove it from the
		 loop tree.  It is going to be inserted again
		 below.  */
	      flow_loop_tree_node_remove (loop);
	    }
	  else
	    {
	      /* Otherwise allocate a new loop structure for the loop.  */
	      loop = alloc_loop ();
	      /* ???  We could re-use unused loop slots here.  */
	      loop->num = loops->larray->length ();
	      vec_safe_push (loops->larray, loop);
	      loop->header = header;

	      if (!from_scratch
		  && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "flow_loops_find: discovered new "
			 "loop %d with header %d\n",
			 loop->num, header->index);
	    }
	  /* Reset latch, we recompute it below.  */
	  loop->latch = NULL;
	  larray.safe_push (loop);
	}

      /* Make blocks part of the loop root node at start.  */
      header->loop_father = loops->tree_root;
    }

  free (rc_order);

  /* Now iterate over the loops found, insert them into the loop tree
     and assign basic-block ownership.  Because LARRAY is in reverse
     completion order, outer loops are processed before inner ones, so
     header->loop_father is the correct parent at insertion time.  */
  for (i = 0; i < larray.length (); ++i)
    {
      struct loop *loop = larray[i];
      basic_block header = loop->header;
      edge_iterator ei;
      edge e;

      flow_loop_tree_node_add (header->loop_father, loop);
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

      /* Look for the latch for this header block, if it has just a
	 single one.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  basic_block latch = e->src;

	  if (flow_bb_inside_loop_p (loop, latch))
	    {
	      if (loop->latch != NULL)
		{
		  /* More than one latch edge.  */
		  loop->latch = NULL;
		  break;
		}
	      loop->latch = latch;
	    }
	}
    }

  larray.release ();

  return loops;
}
517
518 /* Ratio of frequencies of edges so that one of more latch edges is
519 considered to belong to inner loop with same header. */
520 #define HEAVY_EDGE_RATIO 8
521
522 /* Minimum number of samples for that we apply
523 find_subloop_latch_edge_by_profile heuristics. */
524 #define HEAVY_EDGE_MIN_SAMPLES 10
525
/* If the profile info is available, finds an edge in LATCHES that much more
   frequent than the remaining edges.  Returns such an edge, or NULL if we do
   not find one.

   We do not use guessed profile here, only the measured one.  The guessed
   profile is usually too flat and unreliable for this (and it is mostly based
   on the loop structure of the program, so it does not make much sense to
   derive the loop structure from it).  */

static edge
find_subloop_latch_edge_by_profile (vec<edge> latches)
{
  unsigned i;
  edge e, me = NULL;
  gcov_type mcount = 0, tcount = 0;

  /* Find the most frequent latch edge (ME) and the total count.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    {
      if (e->count > mcount)
	{
	  me = e;
	  mcount = e->count;
	}
      tcount += e->count;
    }

  /* Require enough samples, and require ME to carry at least
     (HEAVY_EDGE_RATIO - 1) / HEAVY_EDGE_RATIO of the total count.  */
  if (tcount < HEAVY_EDGE_MIN_SAMPLES
      || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
    return NULL;

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using profile information.\n",
	     me->src->index, me->dest->index);
  return me;
}
562
/* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
   on the structure of induction variables.  Returns this edge, or NULL if we
   do not find any.

   We are quite conservative, and look just for an obvious simple innermost
   loop (which is the case where we would lose the most performance by not
   disambiguating the loop).  More precisely, we look for the following
   situation: The source of the chosen latch edge dominates sources of all
   the other latch edges.  Additionally, the header does not contain a phi node
   such that the argument from the chosen edge is equal to the argument from
   another edge.  */

static edge
find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
  edge e, latch = latches[0];
  unsigned i;
  gimple phi;
  gimple_stmt_iterator psi;
  tree lop;
  basic_block bb;

  /* Find the candidate for the latch edge: the one whose source is
     dominated by no other latch source.  */
  for (i = 1; latches.iterate (i, &e); i++)
    if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
      latch = e;

  /* Verify that it dominates all the latch edges.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
      return NULL;

  /* Check for a phi node that would deny that this is a latch edge of
     a subloop.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);

      /* Ignore the values that are not changed inside the subloop.  */
      if (TREE_CODE (lop) != SSA_NAME
	  || SSA_NAME_DEF_STMT (lop) == phi)
	continue;
      bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
      if (!bb || !flow_bb_inside_loop_p (loop, bb))
	continue;

      /* If another latch edge brings the same value, LATCH cannot be
	 the latch of a separate subloop.  */
      FOR_EACH_VEC_ELT (latches, i, e)
	if (e != latch
	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
	  return NULL;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using iv structure.\n",
	     latch->src->index, latch->dest->index);
  return latch;
}
622
623 /* If we can determine that one of the several latch edges of LOOP behaves
624 as a latch edge of a separate subloop, returns this edge. Otherwise
625 returns NULL. */
626
627 static edge
628 find_subloop_latch_edge (struct loop *loop)
629 {
630 vec<edge> latches = get_loop_latch_edges (loop);
631 edge latch = NULL;
632
633 if (latches.length () > 1)
634 {
635 latch = find_subloop_latch_edge_by_profile (latches);
636
637 if (!latch
638 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
639 should use cfghook for this, but it is hard to imagine it would
640 be useful elsewhere. */
641 && current_ir_type () == IR_GIMPLE)
642 latch = find_subloop_latch_edge_by_ivs (loop, latches);
643 }
644
645 latches.release ();
646 return latch;
647 }
648
/* Callback for make_forwarder_block.  Returns true if the edge E is marked
   in the set MFB_REIS_SET.  The set is populated by the caller (see
   form_subloop and merge_latch_edges) before make_forwarder_block runs.  */

static struct pointer_set_t *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
  return pointer_set_contains (mfb_reis_set, e);
}
658
/* Creates a subloop of LOOP with latch edge LATCH.  */

static void
form_subloop (struct loop *loop, edge latch)
{
  edge_iterator ei;
  edge e, new_entry;
  struct loop *new_loop;

  /* Split the header: every predecessor edge except LATCH is
     redirected to a new forwarder block.  The forwarder becomes the
     header of LOOP, while the old header (still targeted by LATCH)
     becomes the header of the new subloop.  */
  mfb_reis_set = pointer_set_create ();
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e != latch)
	pointer_set_insert (mfb_reis_set, e);
    }
  new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
  pointer_set_destroy (mfb_reis_set);

  loop->header = new_entry->src;

  /* Find the blocks and subloops that belong to the new loop, and add it to
     the appropriate place in the loop tree.  */
  new_loop = alloc_loop ();
  new_loop->header = new_entry->dest;
  new_loop->latch = latch->src;
  add_loop (new_loop, loop);
}
687
/* Make all the latch edges of LOOP to go to a single forwarder block --
   a new latch of LOOP.  */

static void
merge_latch_edges (struct loop *loop)
{
  vec<edge> latches = get_loop_latch_edges (loop);
  edge latch, e;
  unsigned i;

  gcc_assert (latches.length () > 0);

  if (latches.length () == 1)
    /* Already a single latch edge; just record its source block.  */
    loop->latch = latches[0]->src;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);

      /* Redirect all latch edges to a new forwarder block; the
	 forwarder becomes the unique latch and its destination the
	 new header.  */
      mfb_reis_set = pointer_set_create ();
      FOR_EACH_VEC_ELT (latches, i, e)
	pointer_set_insert (mfb_reis_set, e);
      latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
      pointer_set_destroy (mfb_reis_set);

      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  latches.release ();
}
720
/* LOOP may have several latch edges.  Transform it into (possibly several)
   loops with single latch edge.  */

static void
disambiguate_multiple_latches (struct loop *loop)
{
  edge e;

  /* We eliminate the multiple latches by splitting the header to the forwarder
     block F and the rest R, and redirecting the edges.  There are two cases:

     1) If there is a latch edge E that corresponds to a subloop (we guess
        that based on profile -- if it is taken much more often than the
	remaining edges; and on trees, using the information about induction
	variables of the loops), we redirect E to R, all the remaining edges to
	F, then rescan the loops and try again for the outer loop.
     2) If there is no such edge, we redirect all latch edges to F, and the
        entry edges to R, thus making F the single latch of the loop.  */

  if (dump_file)
    fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
	     loop->num);

  /* During latch merging, we may need to redirect the entry edges to a new
     block.  This would cause problems if the entry edge was the one from the
     entry block.  To avoid having to handle this case specially, split
     such entry edge.  */
  e = find_edge (ENTRY_BLOCK_PTR, loop->header);
  if (e)
    split_edge (e);

  /* Peel off subloops (case 1) until no more can be identified.  */
  while (1)
    {
      e = find_subloop_latch_edge (loop);
      if (!e)
	break;

      form_subloop (loop, e);
    }

  /* Case 2: merge whatever latch edges remain into a single one.  */
  merge_latch_edges (loop);
}
763
764 /* Split loops with multiple latch edges. */
765
766 void
767 disambiguate_loops_with_multiple_latches (void)
768 {
769 loop_iterator li;
770 struct loop *loop;
771
772 FOR_EACH_LOOP (li, loop, 0)
773 {
774 if (!loop->latch)
775 disambiguate_multiple_latches (loop);
776 }
777 }
778
779 /* Return nonzero if basic block BB belongs to LOOP. */
780 bool
781 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
782 {
783 struct loop *source_loop;
784
785 if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
786 return 0;
787
788 source_loop = bb->loop_father;
789 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
790 }
791
792 /* Enumeration predicate for get_loop_body_with_size. */
793 static bool
794 glb_enum_p (const_basic_block bb, const void *glb_loop)
795 {
796 const struct loop *const loop = (const struct loop *) glb_loop;
797 return (bb != loop->header
798 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
799 }
800
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  LOOP cannot be the fake
   loop tree root, and its size must be at most MAX_SIZE.  The blocks
   in the LOOP body are stored to BODY, and the size of the LOOP is
   returned.  */

unsigned
get_loop_body_with_size (const struct loop *loop, basic_block *body,
			 unsigned max_size)
{
  /* glb_enum_p restricts the enumeration to blocks dominated by the
     header, i.e. to the loop body.  */
  return dfs_enumerate_from (loop->header, 1, glb_enum_p,
			     body, max_size, loop);
}
815
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  The returned array of
   LOOP->num_nodes blocks must be freed by the caller.  */

basic_block *
get_loop_body (const struct loop *loop)
{
  basic_block *body, bb;
  unsigned tv = 0;

  gcc_assert (loop->num_nodes);

  body = XNEWVEC (basic_block, loop->num_nodes);

  if (loop->latch == EXIT_BLOCK_PTR)
    {
      /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
	 special-case the fake loop that contains the whole function.  */
      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
      body[tv++] = loop->header;
      body[tv++] = EXIT_BLOCK_PTR;
      FOR_EACH_BB (bb)
	body[tv++] = bb;
    }
  else
    tv = get_loop_body_with_size (loop, body, loop->num_nodes);

  gcc_assert (tv == loop->num_nodes);
  return body;
}
846
/* Fills dominance descendants inside LOOP of the basic block BB into
   array TOVISIT from index *TV.  A son that dominates the latch is
   postponed and emitted last, so that only blocks it dominates come
   after it (see get_loop_body_in_dom_order).  */

static void
fill_sons_in_loop (const struct loop *loop, basic_block bb,
		   basic_block *tovisit, int *tv)
{
  basic_block son, postpone = NULL;

  tovisit[(*tv)++] = bb;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
	continue;

      /* At most one son can dominate the latch; keep it for last.  */
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
	{
	  postpone = son;
	  continue;
	}
      fill_sons_in_loop (loop, son, tovisit, tv);
    }

  if (postpone)
    fill_sons_in_loop (loop, postpone, tovisit, tv);
}
875
/* Gets body of a LOOP (that must be different from the outermost loop)
   sorted by dominance relation.  Additionally, if a basic block s dominates
   the latch, then only blocks dominated by s are be after it.
   The returned array of LOOP->num_nodes blocks must be freed by the
   caller.  */

basic_block *
get_loop_body_in_dom_order (const struct loop *loop)
{
  basic_block *tovisit;
  int tv;

  gcc_assert (loop->num_nodes);

  tovisit = XNEWVEC (basic_block, loop->num_nodes);

  /* The fake root loop (latch == exit block) is not supported here.  */
  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  tv = 0;
  fill_sons_in_loop (loop, loop->header, tovisit, &tv);

  gcc_assert (tv == (int) loop->num_nodes);

  return tovisit;
}
899
900 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
901
902 basic_block *
903 get_loop_body_in_custom_order (const struct loop *loop,
904 int (*bb_comparator) (const void *, const void *))
905 {
906 basic_block *bbs = get_loop_body (loop);
907
908 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
909
910 return bbs;
911 }
912
/* Get body of a LOOP in breadth first sort order.  The returned array
   of LOOP->num_nodes blocks must be freed by the caller.  */

basic_block *
get_loop_body_in_bfs_order (const struct loop *loop)
{
  basic_block *blocks;
  basic_block bb;
  bitmap visited;
  unsigned int i = 0;   /* Number of blocks collected so far.  */
  unsigned int vc = 1;  /* Index of the next collected block to expand.  */

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  blocks = XNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);

  bb = loop->header;
  while (i < loop->num_nodes)
    {
      edge e;
      edge_iterator ei;

      /* bitmap_set_bit returns true iff the bit was newly set.  */
      if (bitmap_set_bit (visited, bb->index))
	/* This basic block is now visited */
	blocks[i++] = bb;

      /* Enqueue unvisited in-loop successors.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (flow_bb_inside_loop_p (loop, e->dest))
	    {
	      if (bitmap_set_bit (visited, e->dest->index))
		blocks[i++] = e->dest;
	    }
	}

      gcc_assert (i >= vc);

      bb = blocks[vc++];
    }

  BITMAP_FREE (visited);
  return blocks;
}
957
958 /* Hash function for struct loop_exit. */
959
960 static hashval_t
961 loop_exit_hash (const void *ex)
962 {
963 const struct loop_exit *const exit = (const struct loop_exit *) ex;
964
965 return htab_hash_pointer (exit->e);
966 }
967
968 /* Equality function for struct loop_exit. Compares with edge. */
969
970 static int
971 loop_exit_eq (const void *ex, const void *e)
972 {
973 const struct loop_exit *const exit = (const struct loop_exit *) ex;
974
975 return exit->e == e;
976 }
977
/* Frees the list of loop exit descriptions EX.  Walks the chain of
   records for the same edge (linked through next_e), unlinking each
   record from its loop's circular exit list before freeing it.  */

static void
loop_exit_free (void *ex)
{
  struct loop_exit *exit = (struct loop_exit *) ex, *next;

  for (; exit; exit = next)
    {
      next = exit->next_e;

      /* Unlink from the loop's circular exit list.  */
      exit->next->prev = exit->prev;
      exit->prev->next = exit->next;

      ggc_free (exit);
    }
}
995
/* Returns the list of records for E as an exit of a loop, or NULL if
   E is not recorded as an exit.  The table is keyed by the edge
   pointer (see loop_exit_hash / loop_exit_eq).  */

static struct loop_exit *
get_exit_descriptions (edge e)
{
  return (struct loop_exit *) htab_find_with_hash (current_loops->exits, e,
						   htab_hash_pointer (e));
}
1004
/* Updates the lists of loop exits in that E appears.
   If REMOVED is true, E is being removed, and we
   just remove it from the lists of exits.
   If NEW_EDGE is true and E is not a loop exit, we
   do not try to remove it from loop exit lists.  */

void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
  void **slot;
  struct loop_exit *exits = NULL, *exit;
  struct loop *aloop, *cloop;

  /* Only meaningful while exit lists are being maintained.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;

  if (!removed
      && e->src->loop_father != NULL
      && e->dest->loop_father != NULL
      && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
    {
      /* E exits every loop from its source's loop up to (but not
	 including) the common ancestor of source and destination.
	 Build one exit record per exited loop, chained via next_e.  */
      cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
      for (aloop = e->src->loop_father;
	   aloop != cloop;
	   aloop = loop_outer (aloop))
	{
	  exit = ggc_alloc_loop_exit ();
	  exit->e = e;

	  /* Link into ALOOP's circular exit list, after the head.  */
	  exit->next = aloop->exits->next;
	  exit->prev = aloop->exits;
	  exit->next->prev = exit;
	  exit->prev->next = exit;

	  exit->next_e = exits;
	  exits = exit;
	}
    }

  /* A brand-new edge that is not an exit has nothing to clean up.  */
  if (!exits && new_edge)
    return;

  slot = htab_find_slot_with_hash (current_loops->exits, e,
				   htab_hash_pointer (e),
				   exits ? INSERT : NO_INSERT);
  if (!slot)
    return;

  if (exits)
    {
      /* Replace any stale records for E with the fresh chain.  */
      if (*slot)
	loop_exit_free (*slot);
      *slot = exits;
    }
  else
    /* E is no longer an exit; drop its table entry (this frees the
       old records via the table's del function, loop_exit_free).  */
    htab_clear_slot (current_loops->exits, slot);
}
1062
/* For each loop, record list of exit edges, and start maintaining these
   lists.  No-op when there are no current loops or exits are already
   being recorded.  */

void
record_loop_exits (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  if (!current_loops)
    return;

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;
  loops_state_set (LOOPS_HAVE_RECORDED_EXITS);

  gcc_assert (current_loops->exits == NULL);
  /* GC-allocated hash table keyed by edge pointer; loop_exit_free
     releases an entry's record chain on removal.  */
  current_loops->exits = htab_create_ggc (2 * number_of_loops (cfun),
					  loop_exit_hash, loop_exit_eq,
					  loop_exit_free);

  /* Scan every edge once and record those that exit some loop.  */
  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  rescan_loop_exit (e, true, false);
	}
    }
}
1093
1094 /* Dumps information about the exit in *SLOT to FILE.
1095 Callback for htab_traverse. */
1096
1097 static int
1098 dump_recorded_exit (void **slot, void *file)
1099 {
1100 struct loop_exit *exit = (struct loop_exit *) *slot;
1101 unsigned n = 0;
1102 edge e = exit->e;
1103
1104 for (; exit != NULL; exit = exit->next_e)
1105 n++;
1106
1107 fprintf ((FILE*) file, "Edge %d->%d exits %u loops\n",
1108 e->src->index, e->dest->index, n);
1109
1110 return 1;
1111 }
1112
/* Dumps the recorded exits of loops to FILE.  Does nothing if exit
   lists are not currently recorded.  */

extern void dump_recorded_exits (FILE *);
void
dump_recorded_exits (FILE *file)
{
  if (!current_loops->exits)
    return;
  htab_traverse (current_loops->exits, dump_recorded_exit, file);
}
1123
/* Releases lists of loop exits and stops maintaining them.  Must only
   be called while exits are recorded.  */

void
release_recorded_exits (void)
{
  gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
  /* Deleting the table frees all exit records through the table's del
     function (loop_exit_free).  */
  htab_delete (current_loops->exits);
  current_loops->exits = NULL;
  loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
}
1134
/* Returns the list of the exit edges of a LOOP.  The caller must
   release the returned vector.  LOOP must not be the fake root loop.  */

vec<edge>
get_loop_exit_edges (const struct loop *loop)
{
  vec<edge> edges = vNULL;
  edge e;
  unsigned i;
  basic_block *body;
  edge_iterator ei;
  struct loop_exit *exit;

  gcc_assert (loop->latch != EXIT_BLOCK_PTR);

  /* If we maintain the lists of exits, use them.  Otherwise we must
     scan the body of the loop.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      /* The circular list ends at the dummy head, whose edge is NULL.  */
      for (exit = loop->exits->next; exit->e; exit = exit->next)
	edges.safe_push (exit->e);
    }
  else
    {
      /* An exit edge leads from a block inside the loop to one
	 outside it.  */
      body = get_loop_body (loop);
      for (i = 0; i < loop->num_nodes; i++)
	FOR_EACH_EDGE (e, ei, body[i]->succs)
	  {
	    if (!flow_bb_inside_loop_p (loop, e->dest))
	      edges.safe_push (e);
	  }
      free (body);
    }

  return edges;
}
1170
1171 /* Counts the number of conditional branches inside LOOP. */
1172
1173 unsigned
1174 num_loop_branches (const struct loop *loop)
1175 {
1176 unsigned i, n;
1177 basic_block * body;
1178
1179 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
1180
1181 body = get_loop_body (loop);
1182 n = 0;
1183 for (i = 0; i < loop->num_nodes; i++)
1184 if (EDGE_COUNT (body[i]->succs) >= 2)
1185 n++;
1186 free (body);
1187
1188 return n;
1189 }
1190
1191 /* Adds basic block BB to LOOP. */
1192 void
1193 add_bb_to_loop (basic_block bb, struct loop *loop)
1194 {
1195 unsigned i;
1196 loop_p ploop;
1197 edge_iterator ei;
1198 edge e;
1199
1200 gcc_assert (bb->loop_father == NULL);
1201 bb->loop_father = loop;
1202 loop->num_nodes++;
1203 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1204 ploop->num_nodes++;
1205
1206 FOR_EACH_EDGE (e, ei, bb->succs)
1207 {
1208 rescan_loop_exit (e, true, false);
1209 }
1210 FOR_EACH_EDGE (e, ei, bb->preds)
1211 {
1212 rescan_loop_exit (e, true, false);
1213 }
1214 }
1215
1216 /* Remove basic block BB from loops. */
1217 void
1218 remove_bb_from_loops (basic_block bb)
1219 {
1220 unsigned i;
1221 struct loop *loop = bb->loop_father;
1222 loop_p ploop;
1223 edge_iterator ei;
1224 edge e;
1225
1226 gcc_assert (loop != NULL);
1227 loop->num_nodes--;
1228 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1229 ploop->num_nodes--;
1230 bb->loop_father = NULL;
1231
1232 FOR_EACH_EDGE (e, ei, bb->succs)
1233 {
1234 rescan_loop_exit (e, false, true);
1235 }
1236 FOR_EACH_EDGE (e, ei, bb->preds)
1237 {
1238 rescan_loop_exit (e, false, true);
1239 }
1240 }
1241
1242 /* Finds nearest common ancestor in loop tree for given loops. */
1243 struct loop *
1244 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1245 {
1246 unsigned sdepth, ddepth;
1247
1248 if (!loop_s) return loop_d;
1249 if (!loop_d) return loop_s;
1250
1251 sdepth = loop_depth (loop_s);
1252 ddepth = loop_depth (loop_d);
1253
1254 if (sdepth < ddepth)
1255 loop_d = (*loop_d->superloops)[sdepth];
1256 else if (sdepth > ddepth)
1257 loop_s = (*loop_s->superloops)[ddepth];
1258
1259 while (loop_s != loop_d)
1260 {
1261 loop_s = loop_outer (loop_s);
1262 loop_d = loop_outer (loop_d);
1263 }
1264 return loop_s;
1265 }
1266
/* Removes LOOP from structures and frees its data.  */

void
delete_loop (struct loop *loop)
{
  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  The slot is cleared rather than the
     array compacted, so the numbers of the remaining loops stay valid.  */
  (*current_loops->larray)[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}
1281
1282 /* Cancels the LOOP; it must be innermost one. */
1283
1284 static void
1285 cancel_loop (struct loop *loop)
1286 {
1287 basic_block *bbs;
1288 unsigned i;
1289 struct loop *outer = loop_outer (loop);
1290
1291 gcc_assert (!loop->inner);
1292
1293 /* Move blocks up one level (they should be removed as soon as possible). */
1294 bbs = get_loop_body (loop);
1295 for (i = 0; i < loop->num_nodes; i++)
1296 bbs[i]->loop_father = outer;
1297
1298 free (bbs);
1299 delete_loop (loop);
1300 }
1301
1302 /* Cancels LOOP and all its subloops. */
1303 void
1304 cancel_loop_tree (struct loop *loop)
1305 {
1306 while (loop->inner)
1307 cancel_loop_tree (loop->inner);
1308 cancel_loop (loop);
1309 }
1310
/* Checks that information about loops is correct
     -- sizes of loops are all right
     -- results of get_loop_body really belong to the loop
     -- loop header have just single entry edge and single latch edge
     -- loop latches have only single successor that is header of their loop
     -- irreducible loops are correctly marked
     -- the cached loop depth and loop father of each bb is correct

   Any discrepancy is reported through error () and the function aborts
   via gcc_assert at the end if anything was wrong.  */
DEBUG_FUNCTION void
verify_loop_structure (void)
{
  unsigned *sizes, i, j;
  sbitmap irreds;
  basic_block bb, *bbs;
  struct loop *loop;
  int err = 0;                  /* Set to 1 on any failed check.  */
  edge e;
  unsigned num = number_of_loops (cfun);
  loop_iterator li;
  struct loop_exit *exit, *mexit;
  bool dom_available = dom_info_available_p (CDI_DOMINATORS);
  sbitmap visited;

  if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    {
      error ("loop verification on loop tree that needs fixup");
      err = 1;
    }

  /* We need up-to-date dominators, compute or verify them.  */
  if (!dom_available)
    calculate_dominance_info (CDI_DOMINATORS);
  else
    verify_dominators (CDI_DOMINATORS);

  /* Check the headers: every block that looks like a loop header in the
     CFG must be the header of its loop-tree loop, and vice versa.  */
  FOR_EACH_BB (bb)
    if (bb_loop_header_p (bb))
      {
        if (bb->loop_father->header == NULL)
          {
            error ("loop with header %d marked for removal", bb->index);
            err = 1;
          }
        else if (bb->loop_father->header != bb)
          {
            error ("loop with header %d not in loop tree", bb->index);
            err = 1;
          }
      }
    else if (bb->loop_father->header == bb)
      {
        error ("non-loop with header %d not marked for removal", bb->index);
        err = 1;
      }

  /* Check the recorded loop father and sizes of loops.  Iterating from
     the innermost loop outward, VISITED marks blocks already claimed by
     an inner loop so only the immediate loop_father is checked.  */
  visited = sbitmap_alloc (last_basic_block);
  bitmap_clear (visited);
  bbs = XNEWVEC (basic_block, n_basic_blocks);
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      unsigned n;

      if (loop->header == NULL)
        {
          error ("removed loop %d in loop tree", loop->num);
          err = 1;
          continue;
        }

      n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
      if (loop->num_nodes != n)
        {
          /* NOTE(review): N and loop->num_nodes are unsigned; %u would
             match the types more precisely than %d.  */
          error ("size of loop %d should be %d, not %d",
                 loop->num, n, loop->num_nodes);
          err = 1;
        }

      for (j = 0; j < n; j++)
        {
          bb = bbs[j];

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              error ("bb %d does not belong to loop %d",
                     bb->index, loop->num);
              err = 1;
            }

          /* Ignore this block if it is in an inner loop.  */
          if (bitmap_bit_p (visited, bb->index))
            continue;
          bitmap_set_bit (visited, bb->index);

          if (bb->loop_father != loop)
            {
              error ("bb %d has father loop %d, should be loop %d",
                     bb->index, bb->loop_father->num, loop->num);
              err = 1;
            }
        }
    }
  free (bbs);
  sbitmap_free (visited);

  /* Check headers and latches.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      i = loop->num;
      if (loop->header == NULL)
        continue;
      if (!bb_loop_header_p (loop->header))
        {
          error ("loop %d%'s header is not a loop header", i);
          err = 1;
        }
      if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
          && EDGE_COUNT (loop->header->preds) != 2)
        {
          /* With preheaders the header has exactly two entries: the
             preheader edge and the latch edge.  */
          error ("loop %d%'s header does not have exactly 2 entries", i);
          err = 1;
        }
      if (loop->latch)
        {
          if (!find_edge (loop->latch, loop->header))
            {
              error ("loop %d%'s latch does not have an edge to its header", i);
              err = 1;
            }
          if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
            {
              error ("loop %d%'s latch is not dominated by its header", i);
              err = 1;
            }
        }
      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
        {
          if (!single_succ_p (loop->latch))
            {
              error ("loop %d%'s latch does not have exactly 1 successor", i);
              err = 1;
            }
          if (single_succ (loop->latch) != loop->header)
            {
              error ("loop %d%'s latch does not have header as successor", i);
              err = 1;
            }
          if (loop->latch->loop_father != loop)
            {
              error ("loop %d%'s latch does not belong directly to it", i);
              err = 1;
            }
        }
      if (loop->header->loop_father != loop)
        {
          error ("loop %d%'s header does not belong directly to it", i);
          err = 1;
        }
      if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
          && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
        {
          error ("loop %d%'s latch is marked as part of irreducible region", i);
          err = 1;
        }
    }

  /* Check irreducible loops.  Strategy: snapshot the current marks,
     recompute them from scratch, then compare snapshot and fresh marks.
     EDGE_ALL_FLAGS + 1 is used as a scratch bit on edges to save their
     old EDGE_IRREDUCIBLE_LOOP flag across the recomputation.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    {
      /* Record old info.  */
      irreds = sbitmap_alloc (last_basic_block);
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;
          if (bb->flags & BB_IRREDUCIBLE_LOOP)
            bitmap_set_bit (irreds, bb->index);
          else
            bitmap_clear_bit (irreds, bb->index);
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (e->flags & EDGE_IRREDUCIBLE_LOOP)
              e->flags |= EDGE_ALL_FLAGS + 1;
        }

      /* Recount it.  */
      mark_irreducible_loops ();

      /* Compare.  */
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;

          if ((bb->flags & BB_IRREDUCIBLE_LOOP)
              && !bitmap_bit_p (irreds, bb->index))
            {
              error ("basic block %d should be marked irreducible", bb->index);
              err = 1;
            }
          else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
              && bitmap_bit_p (irreds, bb->index))
            {
              error ("basic block %d should not be marked irreducible", bb->index);
              err = 1;
            }
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
                  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
                {
                  error ("edge from %d to %d should be marked irreducible",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
                       && (e->flags & (EDGE_ALL_FLAGS + 1)))
                {
                  error ("edge from %d to %d should not be marked irreducible",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              /* Clear the scratch bit again.  */
              e->flags &= ~(EDGE_ALL_FLAGS + 1);
            }
        }
      free (irreds);
    }

  /* Check the recorded loop exits.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      /* The head of the exits list is a dummy element with a null edge.  */
      if (!loop->exits || loop->exits->e != NULL)
        {
          error ("corrupted head of the exits list of loop %d",
                 loop->num);
          err = 1;
        }
      else
        {
          /* Check that the list forms a cycle, and all elements except
             for the head are nonnull.  MEXIT advances one step for every
             two steps of EXIT (tortoise-and-hare), so the walk terminates
             even on a malformed cyclic list that skips the head.  */
          for (mexit = loop->exits, exit = mexit->next, i = 0;
               exit->e && exit != mexit;
               exit = exit->next)
            {
              if (i++ & 1)
                mexit = mexit->next;
            }

          if (exit != loop->exits)
            {
              error ("corrupted exits list of loop %d", loop->num);
              err = 1;
            }
        }

      /* Without recorded exits the per-loop lists must be empty.  */
      if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
        {
          if (loop->exits->next != loop->exits)
            {
              error ("nonempty exits list of loop %d, but exits are not recorded",
                     loop->num);
              err = 1;
            }
        }
    }

  /* Cross-check the recorded exits against the CFG: every edge leaving a
     loop must be recorded, for exactly the loops it leaves.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      unsigned n_exits = 0, eloops;

      sizes = XCNEWVEC (unsigned, num);
      memset (sizes, 0, sizeof (unsigned) * num);
      FOR_EACH_BB (bb)
        {
          edge_iterator ei;
          if (bb->loop_father == current_loops->tree_root)
            continue;
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
                continue;

              n_exits++;
              exit = get_exit_descriptions (e);
              if (!exit)
                {
                  error ("exit %d->%d not recorded",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              eloops = 0;
              for (; exit; exit = exit->next_e)
                eloops++;

              /* Count the loops this edge really exits; each decrement
                 must be matched by a recorded description, so ELOOPS
                 ends at zero iff the record is exact.  */
              for (loop = bb->loop_father;
                   loop != e->dest->loop_father
                   /* When a loop exit is also an entry edge which
                      can happen when avoiding CFG manipulations
                      then the last loop exited is the outer loop
                      of the loop entered.  */
                   && loop != loop_outer (e->dest->loop_father);
                   loop = loop_outer (loop))
                {
                  eloops--;
                  sizes[loop->num]++;
                }

              if (eloops != 0)
                {
                  error ("wrong list of exited loops for edge %d->%d",
                         e->src->index, e->dest->index);
                  err = 1;
                }
            }
        }

      if (n_exits != htab_elements (current_loops->exits))
        {
          error ("too many loop exits recorded");
          err = 1;
        }

      /* Each loop's exit list length must match the number of exiting
         edges counted from the CFG above.  */
      FOR_EACH_LOOP (li, loop, 0)
        {
          eloops = 0;
          for (exit = loop->exits->next; exit->e; exit = exit->next)
            eloops++;
          if (eloops != sizes[loop->num])
            {
              error ("%d exits recorded for loop %d (having %d exits)",
                     eloops, loop->num, sizes[loop->num]);
              err = 1;
            }
        }

      free (sizes);
    }

  gcc_assert (!err);

  /* Release dominators if we computed them only for this check.  */
  if (!dom_available)
    free_dominance_info (CDI_DOMINATORS);
}
1653
1654 /* Returns latch edge of LOOP. */
1655 edge
1656 loop_latch_edge (const struct loop *loop)
1657 {
1658 return find_edge (loop->latch, loop->header);
1659 }
1660
1661 /* Returns preheader edge of LOOP. */
1662 edge
1663 loop_preheader_edge (const struct loop *loop)
1664 {
1665 edge e;
1666 edge_iterator ei;
1667
1668 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1669
1670 FOR_EACH_EDGE (e, ei, loop->header->preds)
1671 if (e->src != loop->latch)
1672 break;
1673
1674 return e;
1675 }
1676
1677 /* Returns true if E is an exit of LOOP. */
1678
1679 bool
1680 loop_exit_edge_p (const struct loop *loop, const_edge e)
1681 {
1682 return (flow_bb_inside_loop_p (loop, e->src)
1683 && !flow_bb_inside_loop_p (loop, e->dest));
1684 }
1685
1686 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1687 or more than one exit. If loops do not have the exits recorded, NULL
1688 is returned always. */
1689
1690 edge
1691 single_exit (const struct loop *loop)
1692 {
1693 struct loop_exit *exit = loop->exits->next;
1694
1695 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1696 return NULL;
1697
1698 if (exit->e && exit->next == loop->exits)
1699 return exit->e;
1700 else
1701 return NULL;
1702 }
1703
1704 /* Returns true when BB has an incoming edge exiting LOOP. */
1705
1706 bool
1707 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1708 {
1709 edge e;
1710 edge_iterator ei;
1711
1712 FOR_EACH_EDGE (e, ei, bb->preds)
1713 if (loop_exit_edge_p (loop, e))
1714 return true;
1715
1716 return false;
1717 }
1718
1719 /* Returns true when BB has an outgoing edge exiting LOOP. */
1720
1721 bool
1722 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1723 {
1724 edge e;
1725 edge_iterator ei;
1726
1727 FOR_EACH_EDGE (e, ei, bb->succs)
1728 if (loop_exit_edge_p (loop, e))
1729 return true;
1730
1731 return false;
1732 }
1733
1734 /* Return location corresponding to the loop control condition if possible. */
1735
1736 location_t
1737 get_loop_location (struct loop *loop)
1738 {
1739 rtx insn = NULL;
1740 struct niter_desc *desc = NULL;
1741 edge exit;
1742
1743 /* For a for or while loop, we would like to return the location
1744 of the for or while statement, if possible. To do this, look
1745 for the branch guarding the loop back-edge. */
1746
1747 /* If this is a simple loop with an in_edge, then the loop control
1748 branch is typically at the end of its source. */
1749 desc = get_simple_loop_desc (loop);
1750 if (desc->in_edge)
1751 {
1752 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1753 {
1754 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1755 return INSN_LOCATION (insn);
1756 }
1757 }
1758 /* If loop has a single exit, then the loop control branch
1759 must be at the end of its source. */
1760 if ((exit = single_exit (loop)))
1761 {
1762 FOR_BB_INSNS_REVERSE (exit->src, insn)
1763 {
1764 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1765 return INSN_LOCATION (insn);
1766 }
1767 }
1768 /* Next check the latch, to see if it is non-empty. */
1769 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1770 {
1771 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1772 return INSN_LOCATION (insn);
1773 }
1774 /* Finally, if none of the above identifies the loop control branch,
1775 return the first location in the loop header. */
1776 FOR_BB_INSNS (loop->header, insn)
1777 {
1778 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1779 return INSN_LOCATION (insn);
1780 }
1781 /* If all else fails, simply return the current function location. */
1782 return DECL_SOURCE_LOCATION (current_function_decl);
1783 }
1784
1785 /* Records that every statement in LOOP is executed I_BOUND times.
1786 REALISTIC is true if I_BOUND is expected to be close to the real number
1787 of iterations. UPPER is true if we are sure the loop iterates at most
1788 I_BOUND times. */
1789
1790 void
1791 record_niter_bound (struct loop *loop, const max_wide_int &i_bound,
1792 bool realistic, bool upper)
1793 {
1794 /* Update the bounds only when there is no previous estimation, or when the
1795 current estimation is smaller. */
1796 if (upper
1797 && (!loop->any_upper_bound
1798 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1799 {
1800 loop->any_upper_bound = true;
1801 loop->nb_iterations_upper_bound = i_bound;
1802 }
1803 if (realistic
1804 && (!loop->any_estimate
1805 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1806 {
1807 loop->any_estimate = true;
1808 loop->nb_iterations_estimate = i_bound;
1809 }
1810
1811 /* If an upper bound is smaller than the realistic estimate of the
1812 number of iterations, use the upper bound instead. */
1813 if (loop->any_upper_bound
1814 && loop->any_estimate
1815 && wi::ltu_p (loop->nb_iterations_upper_bound,
1816 loop->nb_iterations_estimate))
1817 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1818 }
1819
1820 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1821 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1822 on the number of iterations of LOOP could not be derived, returns -1. */
1823
1824 HOST_WIDE_INT
1825 get_estimated_loop_iterations_int (struct loop *loop)
1826 {
1827 max_wide_int nit;
1828 HOST_WIDE_INT hwi_nit;
1829
1830 if (!get_estimated_loop_iterations (loop, &nit))
1831 return -1;
1832
1833 if (!wi::fits_shwi_p (nit))
1834 return -1;
1835 hwi_nit = nit.to_shwi ();
1836
1837 return hwi_nit < 0 ? -1 : hwi_nit;
1838 }
1839
1840 /* Returns an upper bound on the number of executions of statements
1841 in the LOOP. For statements before the loop exit, this exceeds
1842 the number of execution of the latch by one. */
1843
1844 HOST_WIDE_INT
1845 max_stmt_executions_int (struct loop *loop)
1846 {
1847 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1848 HOST_WIDE_INT snit;
1849
1850 if (nit == -1)
1851 return -1;
1852
1853 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1854
1855 /* If the computation overflows, return -1. */
1856 return snit < 0 ? -1 : snit;
1857 }
1858
1859 /* Sets NIT to the estimated number of executions of the latch of the
1860 LOOP. If we have no reliable estimate, the function returns false, otherwise
1861 returns true. */
1862
1863 bool
1864 get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit)
1865 {
1866 /* Even if the bound is not recorded, possibly we can derrive one from
1867 profile. */
1868 if (!loop->any_estimate)
1869 {
1870 if (loop->header->count)
1871 {
1872 *nit = gcov_type_to_wide_int
1873 (expected_loop_iterations_unbounded (loop) + 1);
1874 return true;
1875 }
1876 return false;
1877 }
1878
1879 *nit = loop->nb_iterations_estimate;
1880 return true;
1881 }
1882
1883 /* Sets NIT to an upper bound for the maximum number of executions of the
1884 latch of the LOOP. If we have no reliable estimate, the function returns
1885 false, otherwise returns true. */
1886
1887 bool
1888 get_max_loop_iterations (struct loop *loop, max_wide_int *nit)
1889 {
1890 if (!loop->any_upper_bound)
1891 return false;
1892
1893 *nit = loop->nb_iterations_upper_bound;
1894 return true;
1895 }
1896
1897 /* Similar to get_max_loop_iterations, but returns the estimate only
1898 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1899 on the number of iterations of LOOP could not be derived, returns -1. */
1900
1901 HOST_WIDE_INT
1902 get_max_loop_iterations_int (struct loop *loop)
1903 {
1904 max_wide_int nit;
1905 HOST_WIDE_INT hwi_nit;
1906
1907 if (!get_max_loop_iterations (loop, &nit))
1908 return -1;
1909
1910 if (!wi::fits_shwi_p (nit))
1911 return -1;
1912 hwi_nit = nit.to_shwi ();
1913
1914 return hwi_nit < 0 ? -1 : hwi_nit;
1915 }
1916
1917 /* Returns the loop depth of the loop BB belongs to. */
1918
1919 int
1920 bb_loop_depth (const_basic_block bb)
1921 {
1922 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1923 }