]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/cfgloop.c
2015-07-07 Andrew MacLeod <amacleod@redhat.com>
[thirdparty/gcc.git] / gcc / cfgloop.c
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "rtl.h"
27 #include "cfganal.h"
28 #include "cfgloop.h"
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "fold-const.h"
32 #include "internal-fn.h"
33 #include "gimple-iterator.h"
34 #include "gimple-ssa.h"
35 #include "dumpfile.h"
36
37 static void flow_loops_cfg_dump (FILE *);
38 \f
39 /* Dump loop related CFG information. */
40
41 static void
42 flow_loops_cfg_dump (FILE *file)
43 {
44 basic_block bb;
45
46 if (!file)
47 return;
48
49 FOR_EACH_BB_FN (bb, cfun)
50 {
51 edge succ;
52 edge_iterator ei;
53
54 fprintf (file, ";; %d succs { ", bb->index);
55 FOR_EACH_EDGE (succ, ei, bb->succs)
56 fprintf (file, "%d ", succ->dest->index);
57 fprintf (file, "}\n");
58 }
59 }
60
61 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
62
63 bool
64 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
65 {
66 unsigned odepth = loop_depth (outer);
67
68 return (loop_depth (loop) > odepth
69 && (*loop->superloops)[odepth] == outer);
70 }
71
72 /* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
73 loops within LOOP. */
74
75 struct loop *
76 superloop_at_depth (struct loop *loop, unsigned depth)
77 {
78 unsigned ldepth = loop_depth (loop);
79
80 gcc_assert (depth <= ldepth);
81
82 if (depth == ldepth)
83 return loop;
84
85 return (*loop->superloops)[depth];
86 }
87
88 /* Returns the list of the latch edges of LOOP. */
89
90 static vec<edge>
91 get_loop_latch_edges (const struct loop *loop)
92 {
93 edge_iterator ei;
94 edge e;
95 vec<edge> ret = vNULL;
96
97 FOR_EACH_EDGE (e, ei, loop->header->preds)
98 {
99 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
100 ret.safe_push (e);
101 }
102
103 return ret;
104 }
105
/* Dump the loop information specified by LOOP to the stream FILE
   using auxiliary dump callback function LOOP_DUMP_AUX if non null.
   VERBOSE is passed through to LOOP_DUMP_AUX.  */

void
flow_loop_dump (const struct loop *loop, FILE *file,
		void (*loop_dump_aux) (const struct loop *, FILE *, int),
		int verbose)
{
  basic_block *bbs;
  unsigned i;
  vec<edge> latches;
  edge e;

  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";; header %d, ", loop->header->index);
  if (loop->latch)
    fprintf (file, "latch %d\n", loop->latch->index);
  else
    {
      /* No single latch recorded; list the sources of all back edges
	 instead.  */
      fprintf (file, "multiple latches:");
      latches = get_loop_latch_edges (loop);
      FOR_EACH_VEC_ELT (latches, i, e)
	fprintf (file, " %d", e->src->index);
      latches.release ();
      fprintf (file, "\n");
    }

  /* -1 denotes the root of the loop tree (no outer loop).  */
  fprintf (file, ";; depth %d, outer %ld\n",
	   loop_depth (loop), (long) (loop_outer (loop)
				      ? loop_outer (loop)->num : -1));

  fprintf (file, ";; nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}
151
152 /* Dump the loop information about loops to the stream FILE,
153 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
154
155 void
156 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
157 {
158 struct loop *loop;
159
160 if (!current_loops || ! file)
161 return;
162
163 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
164
165 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
166 {
167 flow_loop_dump (loop, file, loop_dump_aux, verbose);
168 }
169
170 if (verbose)
171 flow_loops_cfg_dump (file);
172 }
173
/* Free data allocated for LOOP.  */

void
flow_loop_free (struct loop *loop)
{
  struct loop_exit *exit, *next;

  vec_free (loop->superloops);

  /* Break the list of the loop exit records.  They will be freed when the
     corresponding edge is rescanned or removed, and this avoids
     accessing the (already released) head of the list stored in the
     loop structure.  */
  for (exit = loop->exits->next; exit != loop->exits; exit = next)
    {
      next = exit->next;
      /* Turn each record into a singleton cycle so a later unlink of
	 it is harmless.  */
      exit->next = exit;
      exit->prev = exit;
    }

  /* Both the dummy exit-list head and the loop itself live in GC
     memory.  */
  ggc_free (loop->exits);
  ggc_free (loop);
}
197
198 /* Free all the memory allocated for LOOPS. */
199
200 void
201 flow_loops_free (struct loops *loops)
202 {
203 if (loops->larray)
204 {
205 unsigned i;
206 loop_p loop;
207
208 /* Free the loop descriptors. */
209 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
210 {
211 if (!loop)
212 continue;
213
214 flow_loop_free (loop);
215 }
216
217 vec_free (loops->larray);
218 }
219 }
220
/* Find the nodes contained within the LOOP with header HEADER.
   Return the number of nodes within the loop.  */

int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
  vec<basic_block> stack = vNULL;
  int num_nodes = 1;	/* The header itself always belongs to the loop.  */
  edge latch;
  edge_iterator latch_ei;

  header->loop_father = loop;

  /* Walk backwards from each latch edge; every block from which a
     latch is reachable without passing through the header belongs to
     the loop.  */
  FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
    {
      /* Skip predecessors that are not back edges (not dominated by
	 the header) or that were already claimed for LOOP.  */
      if (latch->src->loop_father == loop
	  || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
	continue;

      num_nodes++;
      stack.safe_push (latch->src);
      latch->src->loop_father = loop;

      /* Depth-first search over predecessors, claiming each unvisited
	 block for LOOP; the search never crosses the header because
	 its loop_father was set above.  */
      while (!stack.is_empty ())
	{
	  basic_block node;
	  edge e;
	  edge_iterator ei;

	  node = stack.pop ();

	  FOR_EACH_EDGE (e, ei, node->preds)
	    {
	      basic_block ancestor = e->src;

	      if (ancestor->loop_father != loop)
		{
		  ancestor->loop_father = loop;
		  num_nodes++;
		  stack.safe_push (ancestor);
		}
	    }
	}
    }
  stack.release ();

  return num_nodes;
}
269
/* Records the vector of superloops of the loop LOOP, whose immediate
   superloop is FATHER.  */

static void
establish_preds (struct loop *loop, struct loop *father)
{
  loop_p ploop;
  unsigned depth = loop_depth (father) + 1;
  unsigned i;

  /* LOOP's superloop vector is FATHER's vector with FATHER appended.  */
  loop->superloops = 0;
  vec_alloc (loop->superloops, depth);
  FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
    loop->superloops->quick_push (ploop);
  loop->superloops->quick_push (father);

  /* Rebuild the vectors of all subloops recursively, since their
     ancestry changed along with LOOP's.  */
  for (ploop = loop->inner; ploop; ploop = ploop->next)
    establish_preds (ploop, loop);
}
289
/* Add LOOP to the loop hierarchy tree where FATHER is father of the
   added loop.  If LOOP has some children, take care of that their
   pred field will be initialized correctly.  */

void
flow_loop_tree_node_add (struct loop *father, struct loop *loop)
{
  /* Link LOOP at the head of FATHER's list of children.  */
  loop->next = father->inner;
  father->inner = loop;

  /* Recompute the superloop vectors of LOOP and all its subloops.  */
  establish_preds (loop, father);
}
302
303 /* Remove LOOP from the loop hierarchy tree. */
304
305 void
306 flow_loop_tree_node_remove (struct loop *loop)
307 {
308 struct loop *prev, *father;
309
310 father = loop_outer (loop);
311
312 /* Remove loop from the list of sons. */
313 if (father->inner == loop)
314 father->inner = loop->next;
315 else
316 {
317 for (prev = father->inner; prev->next != loop; prev = prev->next)
318 continue;
319 prev->next = loop->next;
320 }
321
322 loop->superloops = NULL;
323 }
324
/* Allocates and returns new loop structure.  */

struct loop *
alloc_loop (void)
{
  struct loop *loop = ggc_cleared_alloc<struct loop> ();

  /* The exit list is cyclic with a dummy head record.  */
  loop->exits = ggc_cleared_alloc<loop_exit> ();
  loop->exits->next = loop->exits->prev = loop->exits;
  /* The allocation is cleared, so the assignments below only make the
     default values explicit.  */
  loop->can_be_parallel = false;
  loop->nb_iterations_upper_bound = 0;
  loop->nb_iterations_estimate = 0;
  return loop;
}
339
/* Initializes loops structure LOOPS, reserving place for NUM_LOOPS loops
   (including the root of the loop tree).  FN is the function the
   structure describes.  */

void
init_loops_structure (struct function *fn,
		      struct loops *loops, unsigned num_loops)
{
  struct loop *root;

  memset (loops, 0, sizeof *loops);
  vec_alloc (loops->larray, num_loops);

  /* Dummy loop containing whole function.  It spans from the entry to
     the exit block and owns every basic block.  */
  root = alloc_loop ();
  root->num_nodes = n_basic_blocks_for_fn (fn);
  root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
  root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
  ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
  EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;

  /* The root loop always occupies slot 0 of the loop array.  */
  loops->larray->quick_push (root);
  loops->tree_root = root;
}
363
364 /* Returns whether HEADER is a loop header. */
365
366 bool
367 bb_loop_header_p (basic_block header)
368 {
369 edge_iterator ei;
370 edge e;
371
372 /* If we have an abnormal predecessor, do not consider the
373 loop (not worth the problems). */
374 if (bb_has_abnormal_pred (header))
375 return false;
376
377 /* Look for back edges where a predecessor is dominated
378 by this block. A natural loop has a single entry
379 node (header) that dominates all the nodes in the
380 loop. It also has single back edge to the header
381 from a latch node. */
382 FOR_EACH_EDGE (e, ei, header->preds)
383 {
384 basic_block latch = e->src;
385 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
386 && dominated_by_p (CDI_DOMINATORS, latch, header))
387 return true;
388 }
389
390 return false;
391 }
392
/* Find all the natural loops in the function and save in LOOPS structure and
   recalculate loop_father information in basic block structures.
   If LOOPS is non-NULL then the loop structures for already recorded loops
   will be re-used and their number will not change.  We assume that no
   stale loops exist in LOOPS.
   When LOOPS is NULL it is allocated and re-built from scratch.
   Return the built LOOPS structure.  */

struct loops *
flow_loops_find (struct loops *loops)
{
  bool from_scratch = (loops == NULL);
  int *rc_order;
  int b;
  unsigned i;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (!loops)
    {
      loops = ggc_cleared_alloc<struct loops> ();
      init_loops_structure (cfun, loops, 1);
    }

  /* Ensure that loop exits were released.  */
  gcc_assert (loops->exits == NULL);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return loops;

  /* The root loop node contains all basic-blocks.  */
  loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);

  /* Compute depth first search order of the CFG so that outer
     natural loops will be found before inner natural loops.  */
  rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  pre_and_rev_post_order_compute (NULL, rc_order, false);

  /* Phase 1: Gather all loop headers in reverse completion order and allocate
     loop structures for loops that are not already present.  */
  auto_vec<loop_p> larray (loops->larray->length ());
  for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
    {
      basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
      if (bb_loop_header_p (header))
	{
	  struct loop *loop;

	  /* The current active loop tree has valid loop-fathers for
	     header blocks.  */
	  if (!from_scratch
	      && header->loop_father->header == header)
	    {
	      loop = header->loop_father;
	      /* If we found an existing loop remove it from the
		 loop tree.  It is going to be inserted again
		 below.  */
	      flow_loop_tree_node_remove (loop);
	    }
	  else
	    {
	      /* Otherwise allocate a new loop structure for the loop.  */
	      loop = alloc_loop ();
	      /* ???  We could re-use unused loop slots here.  */
	      loop->num = loops->larray->length ();
	      vec_safe_push (loops->larray, loop);
	      loop->header = header;

	      if (!from_scratch
		  && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "flow_loops_find: discovered new "
			 "loop %d with header %d\n",
			 loop->num, header->index);
	    }
	  /* Reset latch, we recompute it below.  */
	  loop->latch = NULL;
	  larray.safe_push (loop);
	}

      /* Make blocks part of the loop root node at start.  */
      header->loop_father = loops->tree_root;
    }

  free (rc_order);

  /* Phase 2: Now iterate over the loops found, insert them into the loop tree
     and assign basic-block ownership.  Outer loops come first in LARRAY,
     so inner loops re-claim their blocks afterwards.  */
  for (i = 0; i < larray.length (); ++i)
    {
      struct loop *loop = larray[i];
      basic_block header = loop->header;
      edge_iterator ei;
      edge e;

      flow_loop_tree_node_add (header->loop_father, loop);
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

      /* Look for the latch for this header block, if it has just a
	 single one.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  basic_block latch = e->src;

	  if (flow_bb_inside_loop_p (loop, latch))
	    {
	      if (loop->latch != NULL)
		{
		  /* More than one latch edge.  */
		  loop->latch = NULL;
		  break;
		}
	      loop->latch = latch;
	    }
	}
    }

  return loops;
}
514
/* Ratio of frequencies of edges so that one or more latch edges is
   considered to belong to inner loop with same header.  */
#define HEAVY_EDGE_RATIO 8

/* Minimum number of samples for that we apply
   find_subloop_latch_edge_by_profile heuristics.  */
#define HEAVY_EDGE_MIN_SAMPLES 10
522
523 /* If the profile info is available, finds an edge in LATCHES that much more
524 frequent than the remaining edges. Returns such an edge, or NULL if we do
525 not find one.
526
527 We do not use guessed profile here, only the measured one. The guessed
528 profile is usually too flat and unreliable for this (and it is mostly based
529 on the loop structure of the program, so it does not make much sense to
530 derive the loop structure from it). */
531
532 static edge
533 find_subloop_latch_edge_by_profile (vec<edge> latches)
534 {
535 unsigned i;
536 edge e, me = NULL;
537 gcov_type mcount = 0, tcount = 0;
538
539 FOR_EACH_VEC_ELT (latches, i, e)
540 {
541 if (e->count > mcount)
542 {
543 me = e;
544 mcount = e->count;
545 }
546 tcount += e->count;
547 }
548
549 if (tcount < HEAVY_EDGE_MIN_SAMPLES
550 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
551 return NULL;
552
553 if (dump_file)
554 fprintf (dump_file,
555 "Found latch edge %d -> %d using profile information.\n",
556 me->src->index, me->dest->index);
557 return me;
558 }
559
/* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
   on the structure of induction variables.  Returns this edge, or NULL if we
   do not find any.

   We are quite conservative, and look just for an obvious simple innermost
   loop (which is the case where we would lose the most performance by not
   disambiguating the loop).  More precisely, we look for the following
   situation: The source of the chosen latch edge dominates sources of all
   the other latch edges.  Additionally, the header does not contain a phi node
   such that the argument from the chosen edge is equal to the argument from
   another edge.  */

static edge
find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
  edge e, latch = latches[0];
  unsigned i;
  gphi *phi;
  gphi_iterator psi;
  tree lop;
  basic_block bb;

  /* Find the candidate for the latch edge.  */
  for (i = 1; latches.iterate (i, &e); i++)
    if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
      latch = e;

  /* Verify that it dominates all the latch edges.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
      return NULL;

  /* Check for a phi node that would deny that this is a latch edge of
     a subloop.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);

      /* Ignore the values that are not changed inside the subloop.  */
      if (TREE_CODE (lop) != SSA_NAME
	  || SSA_NAME_DEF_STMT (lop) == phi)
	continue;
      bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
      if (!bb || !flow_bb_inside_loop_p (loop, bb))
	continue;

      /* LOP is defined inside the loop; if another latch edge carries
	 the same value, LATCH cannot head a separate subloop.  */
      FOR_EACH_VEC_ELT (latches, i, e)
	if (e != latch
	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
	  return NULL;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using iv structure.\n",
	     latch->src->index, latch->dest->index);
  return latch;
}
619
620 /* If we can determine that one of the several latch edges of LOOP behaves
621 as a latch edge of a separate subloop, returns this edge. Otherwise
622 returns NULL. */
623
624 static edge
625 find_subloop_latch_edge (struct loop *loop)
626 {
627 vec<edge> latches = get_loop_latch_edges (loop);
628 edge latch = NULL;
629
630 if (latches.length () > 1)
631 {
632 latch = find_subloop_latch_edge_by_profile (latches);
633
634 if (!latch
635 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
636 should use cfghook for this, but it is hard to imagine it would
637 be useful elsewhere. */
638 && current_ir_type () == IR_GIMPLE)
639 latch = find_subloop_latch_edge_by_ivs (loop, latches);
640 }
641
642 latches.release ();
643 return latch;
644 }
645
/* Callback for make_forwarder_block.  Returns true if the edge E is marked
   in the set MFB_REIS_SET.  */

/* Set of edges that make_forwarder_block should redirect; filled in by
   the caller immediately before each make_forwarder_block call.  */
static hash_set<edge> *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
  return mfb_reis_set->contains (e);
}
655
/* Creates a subloop of LOOP with latch edge LATCH.  */

static void
form_subloop (struct loop *loop, edge latch)
{
  edge_iterator ei;
  edge e, new_entry;
  struct loop *new_loop;

  /* Split the header: redirect every header predecessor except LATCH
     to a new forwarder block, leaving LATCH as the single latch edge
     of the new inner loop.  */
  mfb_reis_set = new hash_set<edge>;
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e != latch)
	mfb_reis_set->add (e);
    }
  new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
  delete mfb_reis_set;

  /* The forwarder block becomes the header of the outer LOOP.  */
  loop->header = new_entry->src;

  /* Find the blocks and subloops that belong to the new loop, and add it to
     the appropriate place in the loop tree.  */
  new_loop = alloc_loop ();
  new_loop->header = new_entry->dest;
  new_loop->latch = latch->src;
  add_loop (new_loop, loop);
}
684
685 /* Make all the latch edges of LOOP to go to a single forwarder block --
686 a new latch of LOOP. */
687
688 static void
689 merge_latch_edges (struct loop *loop)
690 {
691 vec<edge> latches = get_loop_latch_edges (loop);
692 edge latch, e;
693 unsigned i;
694
695 gcc_assert (latches.length () > 0);
696
697 if (latches.length () == 1)
698 loop->latch = latches[0]->src;
699 else
700 {
701 if (dump_file)
702 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
703
704 mfb_reis_set = new hash_set<edge>;
705 FOR_EACH_VEC_ELT (latches, i, e)
706 mfb_reis_set->add (e);
707 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
708 NULL);
709 delete mfb_reis_set;
710
711 loop->header = latch->dest;
712 loop->latch = latch->src;
713 }
714
715 latches.release ();
716 }
717
/* LOOP may have several latch edges.  Transform it into (possibly several)
   loops with single latch edge.  */

static void
disambiguate_multiple_latches (struct loop *loop)
{
  edge e;

  /* We eliminate the multiple latches by splitting the header to the forwarder
     block F and the rest R, and redirecting the edges.  There are two cases:

     1) If there is a latch edge E that corresponds to a subloop (we guess
	that based on profile -- if it is taken much more often than the
	remaining edges; and on trees, using the information about induction
	variables of the loops), we redirect E to R, all the remaining edges to
	F, then rescan the loops and try again for the outer loop.
     2) If there is no such edge, we redirect all latch edges to F, and the
	entry edges to R, thus making F the single latch of the loop.  */

  if (dump_file)
    fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
	     loop->num);

  /* During latch merging, we may need to redirect the entry edges to a new
     block.  This would cause problems if the entry edge was the one from the
     entry block.  To avoid having to handle this case specially, split
     such entry edge.  */
  e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
  if (e)
    split_edge (e);

  /* Case 1: keep peeling off subloops while we can identify their
     latch edges.  */
  while (1)
    {
      e = find_subloop_latch_edge (loop);
      if (!e)
	break;

      form_subloop (loop, e);
    }

  /* Case 2: merge whatever latch edges remain.  */
  merge_latch_edges (loop);
}
760
761 /* Split loops with multiple latch edges. */
762
763 void
764 disambiguate_loops_with_multiple_latches (void)
765 {
766 struct loop *loop;
767
768 FOR_EACH_LOOP (loop, 0)
769 {
770 if (!loop->latch)
771 disambiguate_multiple_latches (loop);
772 }
773 }
774
775 /* Return nonzero if basic block BB belongs to LOOP. */
776 bool
777 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
778 {
779 struct loop *source_loop;
780
781 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
782 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
783 return 0;
784
785 source_loop = bb->loop_father;
786 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
787 }
788
789 /* Enumeration predicate for get_loop_body_with_size. */
790 static bool
791 glb_enum_p (const_basic_block bb, const void *glb_loop)
792 {
793 const struct loop *const loop = (const struct loop *) glb_loop;
794 return (bb != loop->header
795 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
796 }
797
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  LOOP cannot be the fake
   loop tree root, and its size must be at most MAX_SIZE.  The blocks
   in the LOOP body are stored to BODY, and the size of the LOOP is
   returned.  */

unsigned
get_loop_body_with_size (const struct loop *loop, basic_block *body,
			 unsigned max_size)
{
  /* Enumerate from the header all the blocks that pass glb_enum_p,
     i.e. the members of LOOP (blocks dominated by the header).  */
  return dfs_enumerate_from (loop->header, 1, glb_enum_p,
			     body, max_size, loop);
}
812
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1-st block.  The caller is responsible
   for freeing the returned array.  */

basic_block *
get_loop_body (const struct loop *loop)
{
  basic_block *body, bb;
  unsigned tv = 0;

  gcc_assert (loop->num_nodes);

  body = XNEWVEC (basic_block, loop->num_nodes);

  if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
	 special-case the fake loop that contains the whole function.  */
      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
      body[tv++] = loop->header;
      body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
      FOR_EACH_BB_FN (bb, cfun)
	body[tv++] = bb;
    }
  else
    tv = get_loop_body_with_size (loop, body, loop->num_nodes);

  gcc_assert (tv == loop->num_nodes);
  return body;
}
843
/* Fills dominance descendants inside LOOP of the basic block BB into
   array TOVISIT from index *TV.  */

static void
fill_sons_in_loop (const struct loop *loop, basic_block bb,
		   basic_block *tovisit, int *tv)
{
  basic_block son, postpone = NULL;

  tovisit[(*tv)++] = bb;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
	continue;

      /* The son that dominates the latch must be emitted after all its
	 siblings, so that everything following it in the output is
	 dominated by it.  */
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
	{
	  postpone = son;
	  continue;
	}
      fill_sons_in_loop (loop, son, tovisit, tv);
    }

  if (postpone)
    fill_sons_in_loop (loop, postpone, tovisit, tv);
}
872
873 /* Gets body of a LOOP (that must be different from the outermost loop)
874 sorted by dominance relation. Additionally, if a basic block s dominates
875 the latch, then only blocks dominated by s are be after it. */
876
877 basic_block *
878 get_loop_body_in_dom_order (const struct loop *loop)
879 {
880 basic_block *tovisit;
881 int tv;
882
883 gcc_assert (loop->num_nodes);
884
885 tovisit = XNEWVEC (basic_block, loop->num_nodes);
886
887 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
888
889 tv = 0;
890 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
891
892 gcc_assert (tv == (int) loop->num_nodes);
893
894 return tovisit;
895 }
896
897 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
898
899 basic_block *
900 get_loop_body_in_custom_order (const struct loop *loop,
901 int (*bb_comparator) (const void *, const void *))
902 {
903 basic_block *bbs = get_loop_body (loop);
904
905 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
906
907 return bbs;
908 }
909
910 /* Get body of a LOOP in breadth first sort order. */
911
912 basic_block *
913 get_loop_body_in_bfs_order (const struct loop *loop)
914 {
915 basic_block *blocks;
916 basic_block bb;
917 bitmap visited;
918 unsigned int i = 0;
919 unsigned int vc = 1;
920
921 gcc_assert (loop->num_nodes);
922 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
923
924 blocks = XNEWVEC (basic_block, loop->num_nodes);
925 visited = BITMAP_ALLOC (NULL);
926
927 bb = loop->header;
928 while (i < loop->num_nodes)
929 {
930 edge e;
931 edge_iterator ei;
932
933 if (bitmap_set_bit (visited, bb->index))
934 /* This basic block is now visited */
935 blocks[i++] = bb;
936
937 FOR_EACH_EDGE (e, ei, bb->succs)
938 {
939 if (flow_bb_inside_loop_p (loop, e->dest))
940 {
941 if (bitmap_set_bit (visited, e->dest->index))
942 blocks[i++] = e->dest;
943 }
944 }
945
946 gcc_assert (i > vc);
947
948 bb = blocks[vc++];
949 }
950
951 BITMAP_FREE (visited);
952 return blocks;
953 }
954
/* Hash function for struct loop_exit.  */

hashval_t
loop_exit_hasher::hash (loop_exit *exit)
{
  /* Exit records are keyed by their edge pointer.  */
  return htab_hash_pointer (exit->e);
}
962
/* Equality function for struct loop_exit.  Compares with edge.  */

bool
loop_exit_hasher::equal (loop_exit *exit, edge e)
{
  /* Pointer identity of the edge is the key (cf. hash above).  */
  return exit->e == e;
}
970
/* Frees the list of loop exit descriptions EX.  */

void
loop_exit_hasher::remove (loop_exit *exit)
{
  loop_exit *next;
  /* Walk the chain of records for the same edge (linked via next_e),
     one per loop the edge exits.  */
  for (; exit; exit = next)
    {
      next = exit->next_e;

      /* Unlink the record from its per-loop cyclic exit list.  */
      exit->next->prev = exit->prev;
      exit->prev->next = exit->next;

      ggc_free (exit);
    }
}
987
/* Returns the list of records for E as an exit of a loop.  */

static struct loop_exit *
get_exit_descriptions (edge e)
{
  /* The table is keyed by the edge pointer; see loop_exit_hasher.  */
  return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
}
995
/* Updates the lists of loop exits in that E appears.
   If REMOVED is true, E is being removed, and we
   just remove it from the lists of exits.
   If NEW_EDGE is true and E is not a loop exit, we
   do not try to remove it from loop exit lists.  */

void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
  struct loop_exit *exits = NULL, *exit;
  struct loop *aloop, *cloop;

  /* Nothing to maintain unless exit lists are being recorded.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;

  if (!removed
      && e->src->loop_father != NULL
      && e->dest->loop_father != NULL
      && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
    {
      /* E exits every loop from its source's loop up to (but not
	 including) the common ancestor of source and destination;
	 build one record per exited loop, chained through next_e.  */
      cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
      for (aloop = e->src->loop_father;
	   aloop != cloop;
	   aloop = loop_outer (aloop))
	{
	  exit = ggc_alloc<loop_exit> ();
	  exit->e = e;

	  /* Link the record into ALOOP's cyclic list of exits.  */
	  exit->next = aloop->exits->next;
	  exit->prev = aloop->exits;
	  exit->next->prev = exit;
	  exit->prev->next = exit;

	  exit->next_e = exits;
	  exits = exit;
	}
    }

  if (!exits && new_edge)
    return;

  loop_exit **slot
    = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
						 exits ? INSERT : NO_INSERT);
  if (!slot)
    return;

  if (exits)
    {
      /* Replace any stale records for E with the freshly built chain.  */
      if (*slot)
	loop_exit_hasher::remove (*slot);
      *slot = exits;
    }
  else
    current_loops->exits->clear_slot (slot);
}
1052
1053 /* For each loop, record list of exit edges, and start maintaining these
1054 lists. */
1055
1056 void
1057 record_loop_exits (void)
1058 {
1059 basic_block bb;
1060 edge_iterator ei;
1061 edge e;
1062
1063 if (!current_loops)
1064 return;
1065
1066 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1067 return;
1068 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1069
1070 gcc_assert (current_loops->exits == NULL);
1071 current_loops->exits
1072 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1073
1074 FOR_EACH_BB_FN (bb, cfun)
1075 {
1076 FOR_EACH_EDGE (e, ei, bb->succs)
1077 {
1078 rescan_loop_exit (e, true, false);
1079 }
1080 }
1081 }
1082
1083 /* Dumps information about the exit in *SLOT to FILE.
1084 Callback for htab_traverse. */
1085
1086 int
1087 dump_recorded_exit (loop_exit **slot, FILE *file)
1088 {
1089 struct loop_exit *exit = *slot;
1090 unsigned n = 0;
1091 edge e = exit->e;
1092
1093 for (; exit != NULL; exit = exit->next_e)
1094 n++;
1095
1096 fprintf (file, "Edge %d->%d exits %u loops\n",
1097 e->src->index, e->dest->index, n);
1098
1099 return 1;
1100 }
1101
/* Dumps the recorded exits of loops to FILE.  */

extern void dump_recorded_exits (FILE *);
void
dump_recorded_exits (FILE *file)
{
  /* Nothing recorded unless the exit table exists.  */
  if (!current_loops->exits)
    return;
  current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
}
1112
/* Releases lists of loop exits.  */

void
release_recorded_exits (void)
{
  gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
  /* Drop the records; the GC-allocated table itself is reclaimed by
     the collector once unreferenced.  */
  current_loops->exits->empty ();
  current_loops->exits = NULL;
  loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
}
1123
/* Returns the list of the exit edges of a LOOP.  */

vec<edge>
get_loop_exit_edges (const struct loop *loop)
{
  vec<edge> edges = vNULL;
  edge e;
  unsigned i;
  basic_block *body;
  edge_iterator ei;
  struct loop_exit *exit;

  /* LOOP must not be the fake root loop.  */
  gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));

  /* If we maintain the lists of exits, use them.  Otherwise we must
     scan the body of the loop.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      /* The list is cyclic with a dummy head whose edge is NULL, which
	 terminates the walk.  */
      for (exit = loop->exits->next; exit->e; exit = exit->next)
	edges.safe_push (exit->e);
    }
  else
    {
      /* Any edge leaving the loop body is an exit.  */
      body = get_loop_body (loop);
      for (i = 0; i < loop->num_nodes; i++)
	FOR_EACH_EDGE (e, ei, body[i]->succs)
	  {
	    if (!flow_bb_inside_loop_p (loop, e->dest))
	      edges.safe_push (e);
	  }
      free (body);
    }

  return edges;
}
1159
1160 /* Counts the number of conditional branches inside LOOP. */
1161
1162 unsigned
1163 num_loop_branches (const struct loop *loop)
1164 {
1165 unsigned i, n;
1166 basic_block * body;
1167
1168 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1169
1170 body = get_loop_body (loop);
1171 n = 0;
1172 for (i = 0; i < loop->num_nodes; i++)
1173 if (EDGE_COUNT (body[i]->succs) >= 2)
1174 n++;
1175 free (body);
1176
1177 return n;
1178 }
1179
1180 /* Adds basic block BB to LOOP. */
1181 void
1182 add_bb_to_loop (basic_block bb, struct loop *loop)
1183 {
1184 unsigned i;
1185 loop_p ploop;
1186 edge_iterator ei;
1187 edge e;
1188
1189 gcc_assert (bb->loop_father == NULL);
1190 bb->loop_father = loop;
1191 loop->num_nodes++;
1192 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1193 ploop->num_nodes++;
1194
1195 FOR_EACH_EDGE (e, ei, bb->succs)
1196 {
1197 rescan_loop_exit (e, true, false);
1198 }
1199 FOR_EACH_EDGE (e, ei, bb->preds)
1200 {
1201 rescan_loop_exit (e, true, false);
1202 }
1203 }
1204
1205 /* Remove basic block BB from loops. */
1206 void
1207 remove_bb_from_loops (basic_block bb)
1208 {
1209 unsigned i;
1210 struct loop *loop = bb->loop_father;
1211 loop_p ploop;
1212 edge_iterator ei;
1213 edge e;
1214
1215 gcc_assert (loop != NULL);
1216 loop->num_nodes--;
1217 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1218 ploop->num_nodes--;
1219 bb->loop_father = NULL;
1220
1221 FOR_EACH_EDGE (e, ei, bb->succs)
1222 {
1223 rescan_loop_exit (e, false, true);
1224 }
1225 FOR_EACH_EDGE (e, ei, bb->preds)
1226 {
1227 rescan_loop_exit (e, false, true);
1228 }
1229 }
1230
1231 /* Finds nearest common ancestor in loop tree for given loops. */
1232 struct loop *
1233 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1234 {
1235 unsigned sdepth, ddepth;
1236
1237 if (!loop_s) return loop_d;
1238 if (!loop_d) return loop_s;
1239
1240 sdepth = loop_depth (loop_s);
1241 ddepth = loop_depth (loop_d);
1242
1243 if (sdepth < ddepth)
1244 loop_d = (*loop_d->superloops)[sdepth];
1245 else if (sdepth > ddepth)
1246 loop_s = (*loop_s->superloops)[ddepth];
1247
1248 while (loop_s != loop_d)
1249 {
1250 loop_s = loop_outer (loop_s);
1251 loop_d = loop_outer (loop_d);
1252 }
1253 return loop_s;
1254 }
1255
/* Removes LOOP from structures and frees its data.  */

void
delete_loop (struct loop *loop)
{
  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  The slot is NULLed rather than
     compacted, so the numbers of the remaining loops stay stable.  */
  (*current_loops->larray)[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}
1270
1271 /* Cancels the LOOP; it must be innermost one. */
1272
1273 static void
1274 cancel_loop (struct loop *loop)
1275 {
1276 basic_block *bbs;
1277 unsigned i;
1278 struct loop *outer = loop_outer (loop);
1279
1280 gcc_assert (!loop->inner);
1281
1282 /* Move blocks up one level (they should be removed as soon as possible). */
1283 bbs = get_loop_body (loop);
1284 for (i = 0; i < loop->num_nodes; i++)
1285 bbs[i]->loop_father = outer;
1286
1287 free (bbs);
1288 delete_loop (loop);
1289 }
1290
/* Cancels LOOP and all its subloops.  */
void
cancel_loop_tree (struct loop *loop)
{
  /* Cancel subloops first, since cancel_loop requires an innermost
     loop.  */
  while (loop->inner)
    cancel_loop_tree (loop->inner);
  cancel_loop (loop);
}
1299
1300 /* Checks that information about loops is correct
1301 -- sizes of loops are all right
1302 -- results of get_loop_body really belong to the loop
1303 -- loop header have just single entry edge and single latch edge
1304 -- loop latches have only single successor that is header of their loop
1305 -- irreducible loops are correctly marked
1306 -- the cached loop depth and loop father of each bb is correct
1307 */
1308 DEBUG_FUNCTION void
1309 verify_loop_structure (void)
1310 {
1311 unsigned *sizes, i, j;
1312 sbitmap irreds;
1313 basic_block bb, *bbs;
1314 struct loop *loop;
1315 int err = 0;
1316 edge e;
1317 unsigned num = number_of_loops (cfun);
1318 struct loop_exit *exit, *mexit;
1319 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1320 sbitmap visited;
1321
1322 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1323 {
1324 error ("loop verification on loop tree that needs fixup");
1325 err = 1;
1326 }
1327
1328 /* We need up-to-date dominators, compute or verify them. */
1329 if (!dom_available)
1330 calculate_dominance_info (CDI_DOMINATORS);
1331 else
1332 verify_dominators (CDI_DOMINATORS);
1333
1334 /* Check the loop tree root. */
1335 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1336 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1337 || (current_loops->tree_root->num_nodes
1338 != (unsigned) n_basic_blocks_for_fn (cfun)))
1339 {
1340 error ("corrupt loop tree root");
1341 err = 1;
1342 }
1343
1344 /* Check the headers. */
1345 FOR_EACH_BB_FN (bb, cfun)
1346 if (bb_loop_header_p (bb))
1347 {
1348 if (bb->loop_father->header == NULL)
1349 {
1350 error ("loop with header %d marked for removal", bb->index);
1351 err = 1;
1352 }
1353 else if (bb->loop_father->header != bb)
1354 {
1355 error ("loop with header %d not in loop tree", bb->index);
1356 err = 1;
1357 }
1358 }
1359 else if (bb->loop_father->header == bb)
1360 {
1361 error ("non-loop with header %d not marked for removal", bb->index);
1362 err = 1;
1363 }
1364
1365 /* Check the recorded loop father and sizes of loops. */
1366 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1367 bitmap_clear (visited);
1368 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1369 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1370 {
1371 unsigned n;
1372
1373 if (loop->header == NULL)
1374 {
1375 error ("removed loop %d in loop tree", loop->num);
1376 err = 1;
1377 continue;
1378 }
1379
1380 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1381 if (loop->num_nodes != n)
1382 {
1383 error ("size of loop %d should be %d, not %d",
1384 loop->num, n, loop->num_nodes);
1385 err = 1;
1386 }
1387
1388 for (j = 0; j < n; j++)
1389 {
1390 bb = bbs[j];
1391
1392 if (!flow_bb_inside_loop_p (loop, bb))
1393 {
1394 error ("bb %d does not belong to loop %d",
1395 bb->index, loop->num);
1396 err = 1;
1397 }
1398
1399 /* Ignore this block if it is in an inner loop. */
1400 if (bitmap_bit_p (visited, bb->index))
1401 continue;
1402 bitmap_set_bit (visited, bb->index);
1403
1404 if (bb->loop_father != loop)
1405 {
1406 error ("bb %d has father loop %d, should be loop %d",
1407 bb->index, bb->loop_father->num, loop->num);
1408 err = 1;
1409 }
1410 }
1411 }
1412 free (bbs);
1413 sbitmap_free (visited);
1414
1415 /* Check headers and latches. */
1416 FOR_EACH_LOOP (loop, 0)
1417 {
1418 i = loop->num;
1419 if (loop->header == NULL)
1420 continue;
1421 if (!bb_loop_header_p (loop->header))
1422 {
1423 error ("loop %d%'s header is not a loop header", i);
1424 err = 1;
1425 }
1426 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1427 && EDGE_COUNT (loop->header->preds) != 2)
1428 {
1429 error ("loop %d%'s header does not have exactly 2 entries", i);
1430 err = 1;
1431 }
1432 if (loop->latch)
1433 {
1434 if (!find_edge (loop->latch, loop->header))
1435 {
1436 error ("loop %d%'s latch does not have an edge to its header", i);
1437 err = 1;
1438 }
1439 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1440 {
1441 error ("loop %d%'s latch is not dominated by its header", i);
1442 err = 1;
1443 }
1444 }
1445 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1446 {
1447 if (!single_succ_p (loop->latch))
1448 {
1449 error ("loop %d%'s latch does not have exactly 1 successor", i);
1450 err = 1;
1451 }
1452 if (single_succ (loop->latch) != loop->header)
1453 {
1454 error ("loop %d%'s latch does not have header as successor", i);
1455 err = 1;
1456 }
1457 if (loop->latch->loop_father != loop)
1458 {
1459 error ("loop %d%'s latch does not belong directly to it", i);
1460 err = 1;
1461 }
1462 }
1463 if (loop->header->loop_father != loop)
1464 {
1465 error ("loop %d%'s header does not belong directly to it", i);
1466 err = 1;
1467 }
1468 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1469 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1470 {
1471 error ("loop %d%'s latch is marked as part of irreducible region", i);
1472 err = 1;
1473 }
1474 }
1475
1476 /* Check irreducible loops. */
1477 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1478 {
1479 /* Record old info. */
1480 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1481 FOR_EACH_BB_FN (bb, cfun)
1482 {
1483 edge_iterator ei;
1484 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1485 bitmap_set_bit (irreds, bb->index);
1486 else
1487 bitmap_clear_bit (irreds, bb->index);
1488 FOR_EACH_EDGE (e, ei, bb->succs)
1489 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1490 e->flags |= EDGE_ALL_FLAGS + 1;
1491 }
1492
1493 /* Recount it. */
1494 mark_irreducible_loops ();
1495
1496 /* Compare. */
1497 FOR_EACH_BB_FN (bb, cfun)
1498 {
1499 edge_iterator ei;
1500
1501 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1502 && !bitmap_bit_p (irreds, bb->index))
1503 {
1504 error ("basic block %d should be marked irreducible", bb->index);
1505 err = 1;
1506 }
1507 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1508 && bitmap_bit_p (irreds, bb->index))
1509 {
1510 error ("basic block %d should not be marked irreducible", bb->index);
1511 err = 1;
1512 }
1513 FOR_EACH_EDGE (e, ei, bb->succs)
1514 {
1515 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1516 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1517 {
1518 error ("edge from %d to %d should be marked irreducible",
1519 e->src->index, e->dest->index);
1520 err = 1;
1521 }
1522 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1523 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1524 {
1525 error ("edge from %d to %d should not be marked irreducible",
1526 e->src->index, e->dest->index);
1527 err = 1;
1528 }
1529 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1530 }
1531 }
1532 free (irreds);
1533 }
1534
1535 /* Check the recorded loop exits. */
1536 FOR_EACH_LOOP (loop, 0)
1537 {
1538 if (!loop->exits || loop->exits->e != NULL)
1539 {
1540 error ("corrupted head of the exits list of loop %d",
1541 loop->num);
1542 err = 1;
1543 }
1544 else
1545 {
1546 /* Check that the list forms a cycle, and all elements except
1547 for the head are nonnull. */
1548 for (mexit = loop->exits, exit = mexit->next, i = 0;
1549 exit->e && exit != mexit;
1550 exit = exit->next)
1551 {
1552 if (i++ & 1)
1553 mexit = mexit->next;
1554 }
1555
1556 if (exit != loop->exits)
1557 {
1558 error ("corrupted exits list of loop %d", loop->num);
1559 err = 1;
1560 }
1561 }
1562
1563 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1564 {
1565 if (loop->exits->next != loop->exits)
1566 {
1567 error ("nonempty exits list of loop %d, but exits are not recorded",
1568 loop->num);
1569 err = 1;
1570 }
1571 }
1572 }
1573
1574 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1575 {
1576 unsigned n_exits = 0, eloops;
1577
1578 sizes = XCNEWVEC (unsigned, num);
1579 memset (sizes, 0, sizeof (unsigned) * num);
1580 FOR_EACH_BB_FN (bb, cfun)
1581 {
1582 edge_iterator ei;
1583 if (bb->loop_father == current_loops->tree_root)
1584 continue;
1585 FOR_EACH_EDGE (e, ei, bb->succs)
1586 {
1587 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1588 continue;
1589
1590 n_exits++;
1591 exit = get_exit_descriptions (e);
1592 if (!exit)
1593 {
1594 error ("exit %d->%d not recorded",
1595 e->src->index, e->dest->index);
1596 err = 1;
1597 }
1598 eloops = 0;
1599 for (; exit; exit = exit->next_e)
1600 eloops++;
1601
1602 for (loop = bb->loop_father;
1603 loop != e->dest->loop_father
1604 /* When a loop exit is also an entry edge which
1605 can happen when avoiding CFG manipulations
1606 then the last loop exited is the outer loop
1607 of the loop entered. */
1608 && loop != loop_outer (e->dest->loop_father);
1609 loop = loop_outer (loop))
1610 {
1611 eloops--;
1612 sizes[loop->num]++;
1613 }
1614
1615 if (eloops != 0)
1616 {
1617 error ("wrong list of exited loops for edge %d->%d",
1618 e->src->index, e->dest->index);
1619 err = 1;
1620 }
1621 }
1622 }
1623
1624 if (n_exits != current_loops->exits->elements ())
1625 {
1626 error ("too many loop exits recorded");
1627 err = 1;
1628 }
1629
1630 FOR_EACH_LOOP (loop, 0)
1631 {
1632 eloops = 0;
1633 for (exit = loop->exits->next; exit->e; exit = exit->next)
1634 eloops++;
1635 if (eloops != sizes[loop->num])
1636 {
1637 error ("%d exits recorded for loop %d (having %d exits)",
1638 eloops, loop->num, sizes[loop->num]);
1639 err = 1;
1640 }
1641 }
1642
1643 free (sizes);
1644 }
1645
1646 gcc_assert (!err);
1647
1648 if (!dom_available)
1649 free_dominance_info (CDI_DOMINATORS);
1650 }
1651
/* Returns latch edge of LOOP.  */
edge
loop_latch_edge (const struct loop *loop)
{
  /* The latch edge is the back edge from the latch block to the header.  */
  return find_edge (loop->latch, loop->header);
}
1658
1659 /* Returns preheader edge of LOOP. */
1660 edge
1661 loop_preheader_edge (const struct loop *loop)
1662 {
1663 edge e;
1664 edge_iterator ei;
1665
1666 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1667
1668 FOR_EACH_EDGE (e, ei, loop->header->preds)
1669 if (e->src != loop->latch)
1670 break;
1671
1672 return e;
1673 }
1674
/* Returns true if E is an exit of LOOP.  */

bool
loop_exit_edge_p (const struct loop *loop, const_edge e)
{
  /* An exit edge leaves the loop: source inside, destination outside.  */
  return (flow_bb_inside_loop_p (loop, e->src)
	  && !flow_bb_inside_loop_p (loop, e->dest));
}
1683
1684 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1685 or more than one exit. If loops do not have the exits recorded, NULL
1686 is returned always. */
1687
1688 edge
1689 single_exit (const struct loop *loop)
1690 {
1691 struct loop_exit *exit = loop->exits->next;
1692
1693 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1694 return NULL;
1695
1696 if (exit->e && exit->next == loop->exits)
1697 return exit->e;
1698 else
1699 return NULL;
1700 }
1701
1702 /* Returns true when BB has an incoming edge exiting LOOP. */
1703
1704 bool
1705 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1706 {
1707 edge e;
1708 edge_iterator ei;
1709
1710 FOR_EACH_EDGE (e, ei, bb->preds)
1711 if (loop_exit_edge_p (loop, e))
1712 return true;
1713
1714 return false;
1715 }
1716
1717 /* Returns true when BB has an outgoing edge exiting LOOP. */
1718
1719 bool
1720 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1721 {
1722 edge e;
1723 edge_iterator ei;
1724
1725 FOR_EACH_EDGE (e, ei, bb->succs)
1726 if (loop_exit_edge_p (loop, e))
1727 return true;
1728
1729 return false;
1730 }
1731
/* Return location corresponding to the loop control condition if possible.
   NOTE(review): this walks RTL insns, so it is only meaningful once the
   function is in RTL form -- confirm against callers.  */

location_t
get_loop_location (struct loop *loop)
{
  rtx_insn *insn = NULL;
  struct niter_desc *desc = NULL;
  edge exit;

  /* For a for or while loop, we would like to return the location
     of the for or while statement, if possible.  To do this, look
     for the branch guarding the loop back-edge.  */

  /* If this is a simple loop with an in_edge, then the loop control
     branch is typically at the end of its source.  */
  desc = get_simple_loop_desc (loop);
  if (desc->in_edge)
    {
      FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* If loop has a single exit, then the loop control branch
     must be at the end of its source.  */
  if ((exit = single_exit (loop)))
    {
      FOR_BB_INSNS_REVERSE (exit->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* Next check the latch, to see if it is non-empty.  */
  FOR_BB_INSNS_REVERSE (loop->latch, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* Finally, if none of the above identifies the loop control branch,
     return the first location in the loop header.  */
  FOR_BB_INSNS (loop->header, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* If all else fails, simply return the current function location.  */
  return DECL_SOURCE_LOCATION (current_function_decl);
}
1782
/* Records that every statement in LOOP is executed I_BOUND times.
   REALISTIC is true if I_BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   I_BOUND times.  Bounds are compared as unsigned (wi::ltu_p).  */

void
record_niter_bound (struct loop *loop, const widest_int &i_bound,
		    bool realistic, bool upper)
{
  /* Update the bounds only when there is no previous estimation, or when the
     current estimation is smaller.  */
  if (upper
      && (!loop->any_upper_bound
	  || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = i_bound;
    }
  if (realistic
      && (!loop->any_estimate
	  || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
    {
      loop->any_estimate = true;
      loop->nb_iterations_estimate = i_bound;
    }

  /* If an upper bound is smaller than the realistic estimate of the
     number of iterations, use the upper bound instead.  */
  if (loop->any_upper_bound
      && loop->any_estimate
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_estimate))
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
1817
1818 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1819 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1820 on the number of iterations of LOOP could not be derived, returns -1. */
1821
1822 HOST_WIDE_INT
1823 get_estimated_loop_iterations_int (struct loop *loop)
1824 {
1825 widest_int nit;
1826 HOST_WIDE_INT hwi_nit;
1827
1828 if (!get_estimated_loop_iterations (loop, &nit))
1829 return -1;
1830
1831 if (!wi::fits_shwi_p (nit))
1832 return -1;
1833 hwi_nit = nit.to_shwi ();
1834
1835 return hwi_nit < 0 ? -1 : hwi_nit;
1836 }
1837
1838 /* Returns an upper bound on the number of executions of statements
1839 in the LOOP. For statements before the loop exit, this exceeds
1840 the number of execution of the latch by one. */
1841
1842 HOST_WIDE_INT
1843 max_stmt_executions_int (struct loop *loop)
1844 {
1845 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1846 HOST_WIDE_INT snit;
1847
1848 if (nit == -1)
1849 return -1;
1850
1851 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1852
1853 /* If the computation overflows, return -1. */
1854 return snit < 0 ? -1 : snit;
1855 }
1856
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns false, otherwise
   returns true.  */

bool
get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* Even if the bound is not recorded, possibly we can derive one from
     profile.  */
  if (!loop->any_estimate)
    {
      /* A nonzero header count indicates profile data is present.  */
      if (loop->header->count)
	{
	  *nit = gcov_type_to_wide_int
	    (expected_loop_iterations_unbounded (loop) + 1);
	  return true;
	}
      return false;
    }

  *nit = loop->nb_iterations_estimate;
  return true;
}
1880
/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
get_max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* Unlike the estimate, the upper bound is never derived from profile
     data; it must have been recorded explicitly.  */
  if (!loop->any_upper_bound)
    return false;

  *nit = loop->nb_iterations_upper_bound;
  return true;
}
1894
1895 /* Similar to get_max_loop_iterations, but returns the estimate only
1896 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1897 on the number of iterations of LOOP could not be derived, returns -1. */
1898
1899 HOST_WIDE_INT
1900 get_max_loop_iterations_int (struct loop *loop)
1901 {
1902 widest_int nit;
1903 HOST_WIDE_INT hwi_nit;
1904
1905 if (!get_max_loop_iterations (loop, &nit))
1906 return -1;
1907
1908 if (!wi::fits_shwi_p (nit))
1909 return -1;
1910 hwi_nit = nit.to_shwi ();
1911
1912 return hwi_nit < 0 ? -1 : hwi_nit;
1913 }
1914
/* Returns the loop depth of the loop BB belongs to.  */

int
bb_loop_depth (const_basic_block bb)
{
  /* A block not assigned to any loop is treated as depth 0.  */
  return bb->loop_father ? loop_depth (bb->loop_father) : 0;
}
1922
/* Marks LOOP for removal and sets LOOPS_NEED_FIXUP.  */

void
mark_loop_for_removal (loop_p loop)
{
  /* Already marked for removal; nothing to do.  */
  if (loop->header == NULL)
    return;
  /* Remember the former header for the fixup pass, then detach the loop
     from the CFG and request a loop-tree fixup.  */
  loop->former_header = loop->header;
  loop->header = NULL;
  loop->latch = NULL;
  loops_state_set (LOOPS_NEED_FIXUP);
}