1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "gimple-ssa.h"
29 #include "diagnostic-core.h"
30 #include "cfganal.h"
31 #include "cfgloop.h"
32 #include "gimple-iterator.h"
33 #include "dumpfile.h"
34
35 static void flow_loops_cfg_dump (FILE *);
36 \f
37 /* Dump loop related CFG information. */
38
39 static void
40 flow_loops_cfg_dump (FILE *file)
41 {
42 basic_block bb;
43
44 if (!file)
45 return;
46
47 FOR_EACH_BB_FN (bb, cfun)
48 {
49 edge succ;
50 edge_iterator ei;
51
52 fprintf (file, ";; %d succs { ", bb->index);
53 FOR_EACH_EDGE (succ, ei, bb->succs)
54 fprintf (file, "%d ", succ->dest->index);
55 fprintf (file, "}\n");
56 }
57 }
58
59 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
60
61 bool
62 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
63 {
64 unsigned odepth = loop_depth (outer);
65
66 return (loop_depth (loop) > odepth
67 && (*loop->superloops)[odepth] == outer);
68 }
69
70 /* Returns the superloop of LOOP whose depth is DEPTH (indexed from zero);
71 if DEPTH equals the depth of LOOP, LOOP itself is returned. */
72
73 struct loop *
74 superloop_at_depth (struct loop *loop, unsigned depth)
75 {
76 unsigned ldepth = loop_depth (loop);
77
78 gcc_assert (depth <= ldepth);
79
80 if (depth == ldepth)
81 return loop;
82
83 return (*loop->superloops)[depth];
84 }
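
/* Illustrative sketch, not part of the original file: how the superloop
   vector relates to loop_depth, loop_outer and flow_loop_nested_p.  For any
   loop INNER that is not the tree root, the superloop at depth one less than
   INNER's own depth is its immediate parent:

     struct loop *outer = superloop_at_depth (inner, loop_depth (inner) - 1);
     gcc_checking_assert (outer == loop_outer (inner));
     gcc_checking_assert (flow_loop_nested_p (outer, inner));
*/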
85
86 /* Returns the list of the latch edges of LOOP. */
87
88 static vec<edge>
89 get_loop_latch_edges (const struct loop *loop)
90 {
91 edge_iterator ei;
92 edge e;
93 vec<edge> ret = vNULL;
94
95 FOR_EACH_EDGE (e, ei, loop->header->preds)
96 {
97 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
98 ret.safe_push (e);
99 }
100
101 return ret;
102 }
103
104 /* Dump the loop information specified by LOOP to the stream FILE,
105 using the auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
106
107 void
108 flow_loop_dump (const struct loop *loop, FILE *file,
109 void (*loop_dump_aux) (const struct loop *, FILE *, int),
110 int verbose)
111 {
112 basic_block *bbs;
113 unsigned i;
114 vec<edge> latches;
115 edge e;
116
117 if (! loop || ! loop->header)
118 return;
119
120 fprintf (file, ";;\n;; Loop %d\n", loop->num);
121
122 fprintf (file, ";; header %d, ", loop->header->index);
123 if (loop->latch)
124 fprintf (file, "latch %d\n", loop->latch->index);
125 else
126 {
127 fprintf (file, "multiple latches:");
128 latches = get_loop_latch_edges (loop);
129 FOR_EACH_VEC_ELT (latches, i, e)
130 fprintf (file, " %d", e->src->index);
131 latches.release ();
132 fprintf (file, "\n");
133 }
134
135 fprintf (file, ";; depth %d, outer %ld\n",
136 loop_depth (loop), (long) (loop_outer (loop)
137 ? loop_outer (loop)->num : -1));
138
139 fprintf (file, ";; nodes:");
140 bbs = get_loop_body (loop);
141 for (i = 0; i < loop->num_nodes; i++)
142 fprintf (file, " %d", bbs[i]->index);
143 free (bbs);
144 fprintf (file, "\n");
145
146 if (loop_dump_aux)
147 loop_dump_aux (loop, file, verbose);
148 }
149
150 /* Dump the loop information about all loops to the stream FILE,
151 using the auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
152
153 void
154 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
155 {
156 struct loop *loop;
157
158 if (!current_loops || ! file)
159 return;
160
161 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
162
163 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
164 {
165 flow_loop_dump (loop, file, loop_dump_aux, verbose);
166 }
167
168 if (verbose)
169 flow_loops_cfg_dump (file);
170 }
171
172 /* Free data allocated for LOOP. */
173
174 void
175 flow_loop_free (struct loop *loop)
176 {
177 struct loop_exit *exit, *next;
178
179 vec_free (loop->superloops);
180
181 /* Break the list of the loop exit records. They will be freed when the
182 corresponding edge is rescanned or removed, and this avoids
183 accessing the (already released) head of the list stored in the
184 loop structure. */
185 for (exit = loop->exits->next; exit != loop->exits; exit = next)
186 {
187 next = exit->next;
188 exit->next = exit;
189 exit->prev = exit;
190 }
191
192 ggc_free (loop->exits);
193 ggc_free (loop);
194 }
195
196 /* Free all the memory allocated for LOOPS. */
197
198 void
199 flow_loops_free (struct loops *loops)
200 {
201 if (loops->larray)
202 {
203 unsigned i;
204 loop_p loop;
205
206 /* Free the loop descriptors. */
207 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
208 {
209 if (!loop)
210 continue;
211
212 flow_loop_free (loop);
213 }
214
215 vec_free (loops->larray);
216 }
217 }
218
219 /* Find the nodes contained within the LOOP with header HEADER.
220 Return the number of nodes within the loop. */
221
222 int
223 flow_loop_nodes_find (basic_block header, struct loop *loop)
224 {
225 vec<basic_block> stack = vNULL;
226 int num_nodes = 1;
227 edge latch;
228 edge_iterator latch_ei;
229
230 header->loop_father = loop;
231
232 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
233 {
234 if (latch->src->loop_father == loop
235 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
236 continue;
237
238 num_nodes++;
239 stack.safe_push (latch->src);
240 latch->src->loop_father = loop;
241
242 while (!stack.is_empty ())
243 {
244 basic_block node;
245 edge e;
246 edge_iterator ei;
247
248 node = stack.pop ();
249
250 FOR_EACH_EDGE (e, ei, node->preds)
251 {
252 basic_block ancestor = e->src;
253
254 if (ancestor->loop_father != loop)
255 {
256 ancestor->loop_father = loop;
257 num_nodes++;
258 stack.safe_push (ancestor);
259 }
260 }
261 }
262 }
263 stack.release ();
264
265 return num_nodes;
266 }
267
268 /* Records the vector of superloops of the loop LOOP, whose immediate
269 superloop is FATHER. */
270
271 static void
272 establish_preds (struct loop *loop, struct loop *father)
273 {
274 loop_p ploop;
275 unsigned depth = loop_depth (father) + 1;
276 unsigned i;
277
278 loop->superloops = 0;
279 vec_alloc (loop->superloops, depth);
280 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
281 loop->superloops->quick_push (ploop);
282 loop->superloops->quick_push (father);
283
284 for (ploop = loop->inner; ploop; ploop = ploop->next)
285 establish_preds (ploop, loop);
286 }
287
288 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
289 added loop. If LOOP has children, take care that their superloop
290 vectors are initialized correctly. */
291
292 void
293 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
294 {
295 loop->next = father->inner;
296 father->inner = loop;
297
298 establish_preds (loop, father);
299 }
300
301 /* Remove LOOP from the loop hierarchy tree. */
302
303 void
304 flow_loop_tree_node_remove (struct loop *loop)
305 {
306 struct loop *prev, *father;
307
308 father = loop_outer (loop);
309
310 /* Remove loop from the list of sons. */
311 if (father->inner == loop)
312 father->inner = loop->next;
313 else
314 {
315 for (prev = father->inner; prev->next != loop; prev = prev->next)
316 continue;
317 prev->next = loop->next;
318 }
319
320 loop->superloops = NULL;
321 }
322
323 /* Allocates and returns a new loop structure. */
324
325 struct loop *
326 alloc_loop (void)
327 {
328 struct loop *loop = ggc_cleared_alloc<struct loop> ();
329
330 loop->exits = ggc_cleared_alloc<loop_exit> ();
331 loop->exits->next = loop->exits->prev = loop->exits;
332 loop->can_be_parallel = false;
333 loop->nb_iterations_upper_bound = 0;
334 loop->nb_iterations_estimate = 0;
335 return loop;
336 }
337
338 /* Initializes the loops structure LOOPS, reserving room for NUM_LOOPS loops
339 (including the root of the loop tree). */
340
341 void
342 init_loops_structure (struct function *fn,
343 struct loops *loops, unsigned num_loops)
344 {
345 struct loop *root;
346
347 memset (loops, 0, sizeof *loops);
348 vec_alloc (loops->larray, num_loops);
349
350 /* Dummy loop containing the whole function. */
351 root = alloc_loop ();
352 root->num_nodes = n_basic_blocks_for_fn (fn);
353 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
354 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
355 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
356 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
357
358 loops->larray->quick_push (root);
359 loops->tree_root = root;
360 }
361
362 /* Returns whether HEADER is a loop header. */
363
364 bool
365 bb_loop_header_p (basic_block header)
366 {
367 edge_iterator ei;
368 edge e;
369
370 /* If we have an abnormal predecessor, do not consider the
371 loop (not worth the problems). */
372 if (bb_has_abnormal_pred (header))
373 return false;
374
375 /* Look for back edges where a predecessor is dominated
376 by this block. A natural loop has a single entry
377 node (header) that dominates all the nodes in the
378 loop. It also has a single back edge to the header
379 from a latch node. */
380 FOR_EACH_EDGE (e, ei, header->preds)
381 {
382 basic_block latch = e->src;
383 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
384 && dominated_by_p (CDI_DOMINATORS, latch, header))
385 return true;
386 }
387
388 return false;
389 }
390
391 /* Find all the natural loops in the function, store them in the LOOPS
392 structure and recalculate loop_father information in basic block structures.
393 If LOOPS is non-NULL then the loop structures for already recorded loops
394 will be re-used and their number will not change. We assume that no
395 stale loops exist in LOOPS.
396 When LOOPS is NULL it is allocated and re-built from scratch.
397 Return the built LOOPS structure. */
398
399 struct loops *
400 flow_loops_find (struct loops *loops)
401 {
402 bool from_scratch = (loops == NULL);
403 int *rc_order;
404 int b;
405 unsigned i;
406
407 /* Ensure that the dominators are computed. */
408 calculate_dominance_info (CDI_DOMINATORS);
409
410 if (!loops)
411 {
412 loops = ggc_cleared_alloc<struct loops> ();
413 init_loops_structure (cfun, loops, 1);
414 }
415
416 /* Ensure that loop exits were released. */
417 gcc_assert (loops->exits == NULL);
418
419 /* Taking care of this degenerate case makes the rest of
420 this code simpler. */
421 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
422 return loops;
423
424 /* The root loop node contains all basic-blocks. */
425 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
426
427 /* Compute depth first search order of the CFG so that outer
428 natural loops will be found before inner natural loops. */
429 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
430 pre_and_rev_post_order_compute (NULL, rc_order, false);
431
432 /* Gather all loop headers in reverse completion order and allocate
433 loop structures for loops that are not already present. */
434 auto_vec<loop_p> larray (loops->larray->length ());
435 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
436 {
437 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
438 if (bb_loop_header_p (header))
439 {
440 struct loop *loop;
441
442 /* The current active loop tree has valid loop-fathers for
443 header blocks. */
444 if (!from_scratch
445 && header->loop_father->header == header)
446 {
447 loop = header->loop_father;
448 /* If we found an existing loop remove it from the
449 loop tree. It is going to be inserted again
450 below. */
451 flow_loop_tree_node_remove (loop);
452 }
453 else
454 {
455 /* Otherwise allocate a new loop structure for the loop. */
456 loop = alloc_loop ();
457 /* ??? We could re-use unused loop slots here. */
458 loop->num = loops->larray->length ();
459 vec_safe_push (loops->larray, loop);
460 loop->header = header;
461
462 if (!from_scratch
463 && dump_file && (dump_flags & TDF_DETAILS))
464 fprintf (dump_file, "flow_loops_find: discovered new "
465 "loop %d with header %d\n",
466 loop->num, header->index);
467 }
468 /* Reset latch, we recompute it below. */
469 loop->latch = NULL;
470 larray.safe_push (loop);
471 }
472
473 /* Make blocks part of the loop root node at start. */
474 header->loop_father = loops->tree_root;
475 }
476
477 free (rc_order);
478
479 /* Now iterate over the loops found, insert them into the loop tree
480 and assign basic-block ownership. */
481 for (i = 0; i < larray.length (); ++i)
482 {
483 struct loop *loop = larray[i];
484 basic_block header = loop->header;
485 edge_iterator ei;
486 edge e;
487
488 flow_loop_tree_node_add (header->loop_father, loop);
489 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
490
491 /* Look for the latch for this header block, if it has just a
492 single one. */
493 FOR_EACH_EDGE (e, ei, header->preds)
494 {
495 basic_block latch = e->src;
496
497 if (flow_bb_inside_loop_p (loop, latch))
498 {
499 if (loop->latch != NULL)
500 {
501 /* More than one latch edge. */
502 loop->latch = NULL;
503 break;
504 }
505 loop->latch = latch;
506 }
507 }
508 }
509
510 return loops;
511 }
512
513 /* Ratio of frequencies of edges so that one of multiple latch edges is
514 considered to belong to an inner loop with the same header. */
515 #define HEAVY_EDGE_RATIO 8
516
517 /* Minimum number of samples for which we apply the
518 find_subloop_latch_edge_by_profile heuristic. */
519 #define HEAVY_EDGE_MIN_SAMPLES 10
520
521 /* If profile info is available, finds an edge in LATCHES that is much more
522 frequent than the remaining edges. Returns such an edge, or NULL if we do
523 not find one.
524
525 We do not use guessed profile here, only the measured one. The guessed
526 profile is usually too flat and unreliable for this (and it is mostly based
527 on the loop structure of the program, so it does not make much sense to
528 derive the loop structure from it). */
529
530 static edge
531 find_subloop_latch_edge_by_profile (vec<edge> latches)
532 {
533 unsigned i;
534 edge e, me = NULL;
535 gcov_type mcount = 0, tcount = 0;
536
537 FOR_EACH_VEC_ELT (latches, i, e)
538 {
539 if (e->count > mcount)
540 {
541 me = e;
542 mcount = e->count;
543 }
544 tcount += e->count;
545 }
546
547 if (tcount < HEAVY_EDGE_MIN_SAMPLES
548 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
549 return NULL;
550
551 if (dump_file)
552 fprintf (dump_file,
553 "Found latch edge %d -> %d using profile information.\n",
554 me->src->index, me->dest->index);
555 return me;
556 }
557
558 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
559 on the structure of induction variables. Returns this edge, or NULL if we
560 do not find any.
561
562 We are quite conservative, and look just for an obvious simple innermost
563 loop (which is the case where we would lose the most performance by not
564 disambiguating the loop). More precisely, we look for the following
565 situation: The source of the chosen latch edge dominates sources of all
566 the other latch edges. Additionally, the header does not contain a phi node
567 such that the argument from the chosen edge is equal to the argument from
568 another edge. */
569
570 static edge
571 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
572 {
573 edge e, latch = latches[0];
574 unsigned i;
575 gphi *phi;
576 gphi_iterator psi;
577 tree lop;
578 basic_block bb;
579
580 /* Find the candidate for the latch edge. */
581 for (i = 1; latches.iterate (i, &e); i++)
582 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
583 latch = e;
584
585 /* Verify that it dominates all the latch edges. */
586 FOR_EACH_VEC_ELT (latches, i, e)
587 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
588 return NULL;
589
590 /* Check for a phi node that would deny that this is a latch edge of
591 a subloop. */
592 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
593 {
594 phi = psi.phi ();
595 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
596
597 /* Ignore the values that are not changed inside the subloop. */
598 if (TREE_CODE (lop) != SSA_NAME
599 || SSA_NAME_DEF_STMT (lop) == phi)
600 continue;
601 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
602 if (!bb || !flow_bb_inside_loop_p (loop, bb))
603 continue;
604
605 FOR_EACH_VEC_ELT (latches, i, e)
606 if (e != latch
607 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
608 return NULL;
609 }
610
611 if (dump_file)
612 fprintf (dump_file,
613 "Found latch edge %d -> %d using iv structure.\n",
614 latch->src->index, latch->dest->index);
615 return latch;
616 }
617
618 /* If we can determine that one of the several latch edges of LOOP behaves
619 as a latch edge of a separate subloop, returns this edge. Otherwise
620 returns NULL. */
621
622 static edge
623 find_subloop_latch_edge (struct loop *loop)
624 {
625 vec<edge> latches = get_loop_latch_edges (loop);
626 edge latch = NULL;
627
628 if (latches.length () > 1)
629 {
630 latch = find_subloop_latch_edge_by_profile (latches);
631
632 if (!latch
633 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
634 should use cfghook for this, but it is hard to imagine it would
635 be useful elsewhere. */
636 && current_ir_type () == IR_GIMPLE)
637 latch = find_subloop_latch_edge_by_ivs (loop, latches);
638 }
639
640 latches.release ();
641 return latch;
642 }
643
644 /* Callback for make_forwarder_block. Returns true if the edge E is marked
645 in the set MFB_REIS_SET. */
646
647 static hash_set<edge> *mfb_reis_set;
648 static bool
649 mfb_redirect_edges_in_set (edge e)
650 {
651 return mfb_reis_set->contains (e);
652 }
653
654 /* Creates a subloop of LOOP with latch edge LATCH. */
655
656 static void
657 form_subloop (struct loop *loop, edge latch)
658 {
659 edge_iterator ei;
660 edge e, new_entry;
661 struct loop *new_loop;
662
663 mfb_reis_set = new hash_set<edge>;
664 FOR_EACH_EDGE (e, ei, loop->header->preds)
665 {
666 if (e != latch)
667 mfb_reis_set->add (e);
668 }
669 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
670 NULL);
671 delete mfb_reis_set;
672
673 loop->header = new_entry->src;
674
675 /* Find the blocks and subloops that belong to the new loop, and add it to
676 the appropriate place in the loop tree. */
677 new_loop = alloc_loop ();
678 new_loop->header = new_entry->dest;
679 new_loop->latch = latch->src;
680 add_loop (new_loop, loop);
681 }
682
683 /* Make all the latch edges of LOOP go to a single forwarder block --
684 a new latch of LOOP. */
685
686 static void
687 merge_latch_edges (struct loop *loop)
688 {
689 vec<edge> latches = get_loop_latch_edges (loop);
690 edge latch, e;
691 unsigned i;
692
693 gcc_assert (latches.length () > 0);
694
695 if (latches.length () == 1)
696 loop->latch = latches[0]->src;
697 else
698 {
699 if (dump_file)
700 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
701
702 mfb_reis_set = new hash_set<edge>;
703 FOR_EACH_VEC_ELT (latches, i, e)
704 mfb_reis_set->add (e);
705 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
706 NULL);
707 delete mfb_reis_set;
708
709 loop->header = latch->dest;
710 loop->latch = latch->src;
711 }
712
713 latches.release ();
714 }
715
716 /* LOOP may have several latch edges. Transform it into (possibly several)
717 loops with a single latch edge. */
718
719 static void
720 disambiguate_multiple_latches (struct loop *loop)
721 {
722 edge e;
723
724 /* We eliminate the multiple latches by splitting the header to the forwarder
725 block F and the rest R, and redirecting the edges. There are two cases:
726
727 1) If there is a latch edge E that corresponds to a subloop (we guess
728 that based on profile -- if it is taken much more often than the
729 remaining edges; and on trees, using the information about induction
730 variables of the loops), we redirect E to R, all the remaining edges to
731 F, then rescan the loops and try again for the outer loop.
732 2) If there is no such edge, we redirect all latch edges to F, and the
733 entry edges to R, thus making F the single latch of the loop. */
734
735 if (dump_file)
736 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
737 loop->num);
738
739 /* During latch merging, we may need to redirect the entry edges to a new
740 block. This would cause problems if the entry edge was the one from the
741 entry block. To avoid having to handle this case specially, split
742 such entry edge. */
743 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
744 if (e)
745 split_edge (e);
746
747 while (1)
748 {
749 e = find_subloop_latch_edge (loop);
750 if (!e)
751 break;
752
753 form_subloop (loop, e);
754 }
755
756 merge_latch_edges (loop);
757 }
758
759 /* Split loops with multiple latch edges. */
760
761 void
762 disambiguate_loops_with_multiple_latches (void)
763 {
764 struct loop *loop;
765
766 FOR_EACH_LOOP (loop, 0)
767 {
768 if (!loop->latch)
769 disambiguate_multiple_latches (loop);
770 }
771 }
772
773 /* Return nonzero if basic block BB belongs to LOOP. */
774 bool
775 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
776 {
777 struct loop *source_loop;
778
779 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
780 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
781 return 0;
782
783 source_loop = bb->loop_father;
784 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
785 }
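
/* Example, illustrative only: a block inside LOOP is also inside every
   superloop of LOOP, so membership propagates outwards in the loop tree:

     if (flow_bb_inside_loop_p (loop, bb) && loop_outer (loop))
       gcc_checking_assert (flow_bb_inside_loop_p (loop_outer (loop), bb));
*/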
786
787 /* Enumeration predicate for get_loop_body_with_size. */
788 static bool
789 glb_enum_p (const_basic_block bb, const void *glb_loop)
790 {
791 const struct loop *const loop = (const struct loop *) glb_loop;
792 return (bb != loop->header
793 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
794 }
795
796 /* Gets basic blocks of a LOOP. The header is the 0-th block, the rest are
797 in DFS order against the direction of edges from the latch. In particular,
798 if header != latch, the latch is the 1-st block. LOOP cannot be the fake
799 loop tree root, and its size must be at most MAX_SIZE. The blocks
800 in the LOOP body are stored to BODY, and the size of the LOOP is
801 returned. */
802
803 unsigned
804 get_loop_body_with_size (const struct loop *loop, basic_block *body,
805 unsigned max_size)
806 {
807 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
808 body, max_size, loop);
809 }
810
811 /* Gets basic blocks of a LOOP. The header is the 0-th block, the rest are
812 in DFS order against the direction of edges from the latch. In particular,
813 if header != latch, the latch is the 1-st block. */
814
815 basic_block *
816 get_loop_body (const struct loop *loop)
817 {
818 basic_block *body, bb;
819 unsigned tv = 0;
820
821 gcc_assert (loop->num_nodes);
822
823 body = XNEWVEC (basic_block, loop->num_nodes);
824
825 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
826 {
827 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
828 special-case the fake loop that contains the whole function. */
829 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
830 body[tv++] = loop->header;
831 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
832 FOR_EACH_BB_FN (bb, cfun)
833 body[tv++] = bb;
834 }
835 else
836 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
837
838 gcc_assert (tv == loop->num_nodes);
839 return body;
840 }
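
/* Example usage, illustrative only: the returned array is heap-allocated
   and must be freed by the caller, as flow_loop_dump above does:

     basic_block *body = get_loop_body (loop);
     for (unsigned i = 0; i < loop->num_nodes; i++)
       if (dump_file)
         fprintf (dump_file, " %d", body[i]->index);
     free (body);
*/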
841
842 /* Fills dominance descendants inside LOOP of the basic block BB into
843 array TOVISIT from index *TV. */
844
845 static void
846 fill_sons_in_loop (const struct loop *loop, basic_block bb,
847 basic_block *tovisit, int *tv)
848 {
849 basic_block son, postpone = NULL;
850
851 tovisit[(*tv)++] = bb;
852 for (son = first_dom_son (CDI_DOMINATORS, bb);
853 son;
854 son = next_dom_son (CDI_DOMINATORS, son))
855 {
856 if (!flow_bb_inside_loop_p (loop, son))
857 continue;
858
859 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
860 {
861 postpone = son;
862 continue;
863 }
864 fill_sons_in_loop (loop, son, tovisit, tv);
865 }
866
867 if (postpone)
868 fill_sons_in_loop (loop, postpone, tovisit, tv);
869 }
870
871 /* Gets body of a LOOP (that must be different from the outermost loop)
872 sorted by dominance relation. Additionally, if a basic block s dominates
873 the latch, then only blocks dominated by s appear after it. */
874
875 basic_block *
876 get_loop_body_in_dom_order (const struct loop *loop)
877 {
878 basic_block *tovisit;
879 int tv;
880
881 gcc_assert (loop->num_nodes);
882
883 tovisit = XNEWVEC (basic_block, loop->num_nodes);
884
885 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
886
887 tv = 0;
888 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
889
890 gcc_assert (tv == (int) loop->num_nodes);
891
892 return tovisit;
893 }
894
895 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
896
897 basic_block *
898 get_loop_body_in_custom_order (const struct loop *loop,
899 int (*bb_comparator) (const void *, const void *))
900 {
901 basic_block *bbs = get_loop_body (loop);
902
903 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
904
905 return bbs;
906 }
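
/* Illustrative sketch, not part of the original file: a comparator that
   orders the body by basic block index.  qsort passes pointers to the
   basic_block elements of the array, hence the extra dereference:

     static int
     bb_index_cmp (const void *a, const void *b)
     {
       const basic_block bb_a = *(const basic_block *) a;
       const basic_block bb_b = *(const basic_block *) b;
       return bb_a->index - bb_b->index;
     }

     basic_block *bbs = get_loop_body_in_custom_order (loop, bb_index_cmp);
     ...
     free (bbs);
*/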
907
908 /* Gets body of a LOOP in breadth-first search order. */
909
910 basic_block *
911 get_loop_body_in_bfs_order (const struct loop *loop)
912 {
913 basic_block *blocks;
914 basic_block bb;
915 bitmap visited;
916 unsigned int i = 1;
917 unsigned int vc = 0;
918
919 gcc_assert (loop->num_nodes);
920 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
921
922 blocks = XNEWVEC (basic_block, loop->num_nodes);
923 visited = BITMAP_ALLOC (NULL);
924 blocks[0] = loop->header;
925 bitmap_set_bit (visited, loop->header->index);
926 while (i < loop->num_nodes)
927 {
928 edge e;
929 edge_iterator ei;
930 gcc_assert (i > vc);
931 bb = blocks[vc++];
932
933 FOR_EACH_EDGE (e, ei, bb->succs)
934 {
935 if (flow_bb_inside_loop_p (loop, e->dest))
936 {
937 /* This bb is now visited. */
938 if (bitmap_set_bit (visited, e->dest->index))
939 blocks[i++] = e->dest;
940 }
941 }
942 }
943
944 BITMAP_FREE (visited);
945 return blocks;
946 }
947
948 /* Hash function for struct loop_exit. */
949
950 hashval_t
951 loop_exit_hasher::hash (loop_exit *exit)
952 {
953 return htab_hash_pointer (exit->e);
954 }
955
956 /* Equality function for struct loop_exit. Compares with edge. */
957
958 bool
959 loop_exit_hasher::equal (loop_exit *exit, edge e)
960 {
961 return exit->e == e;
962 }
963
964 /* Frees the list of loop exit descriptions starting at EXIT. */
965
966 void
967 loop_exit_hasher::remove (loop_exit *exit)
968 {
969 loop_exit *next;
970 for (; exit; exit = next)
971 {
972 next = exit->next_e;
973
974 exit->next->prev = exit->prev;
975 exit->prev->next = exit->next;
976
977 ggc_free (exit);
978 }
979 }
980
981 /* Returns the list of records for E as an exit of a loop. */
982
983 static struct loop_exit *
984 get_exit_descriptions (edge e)
985 {
986 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
987 }
988
989 /* Updates the lists of loop exits in which E appears.
990 If REMOVED is true, E is being removed, and we
991 just remove it from the lists of exits.
992 If NEW_EDGE is true and E is not a loop exit, we
993 do not try to remove it from loop exit lists. */
994
995 void
996 rescan_loop_exit (edge e, bool new_edge, bool removed)
997 {
998 struct loop_exit *exits = NULL, *exit;
999 struct loop *aloop, *cloop;
1000
1001 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1002 return;
1003
1004 if (!removed
1005 && e->src->loop_father != NULL
1006 && e->dest->loop_father != NULL
1007 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1008 {
1009 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1010 for (aloop = e->src->loop_father;
1011 aloop != cloop;
1012 aloop = loop_outer (aloop))
1013 {
1014 exit = ggc_alloc<loop_exit> ();
1015 exit->e = e;
1016
1017 exit->next = aloop->exits->next;
1018 exit->prev = aloop->exits;
1019 exit->next->prev = exit;
1020 exit->prev->next = exit;
1021
1022 exit->next_e = exits;
1023 exits = exit;
1024 }
1025 }
1026
1027 if (!exits && new_edge)
1028 return;
1029
1030 loop_exit **slot
1031 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1032 exits ? INSERT : NO_INSERT);
1033 if (!slot)
1034 return;
1035
1036 if (exits)
1037 {
1038 if (*slot)
1039 loop_exit_hasher::remove (*slot);
1040 *slot = exits;
1041 }
1042 else
1043 current_loops->exits->clear_slot (slot);
1044 }
1045
1046 /* For each loop, record list of exit edges, and start maintaining these
1047 lists. */
1048
1049 void
1050 record_loop_exits (void)
1051 {
1052 basic_block bb;
1053 edge_iterator ei;
1054 edge e;
1055
1056 if (!current_loops)
1057 return;
1058
1059 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1060 return;
1061 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1062
1063 gcc_assert (current_loops->exits == NULL);
1064 current_loops->exits
1065 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1066
1067 FOR_EACH_BB_FN (bb, cfun)
1068 {
1069 FOR_EACH_EDGE (e, ei, bb->succs)
1070 {
1071 rescan_loop_exit (e, true, false);
1072 }
1073 }
1074 }
1075
1076 /* Dumps information about the exit in *SLOT to FILE.
1077 Callback for htab_traverse. */
1078
1079 int
1080 dump_recorded_exit (loop_exit **slot, FILE *file)
1081 {
1082 struct loop_exit *exit = *slot;
1083 unsigned n = 0;
1084 edge e = exit->e;
1085
1086 for (; exit != NULL; exit = exit->next_e)
1087 n++;
1088
1089 fprintf (file, "Edge %d->%d exits %u loops\n",
1090 e->src->index, e->dest->index, n);
1091
1092 return 1;
1093 }
1094
1095 /* Dumps the recorded exits of loops to FILE. */
1096
1097 extern void dump_recorded_exits (FILE *);
1098 void
1099 dump_recorded_exits (FILE *file)
1100 {
1101 if (!current_loops->exits)
1102 return;
1103 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1104 }
1105
1106 /* Releases lists of loop exits. */
1107
1108 void
1109 release_recorded_exits (function *fn)
1110 {
1111 gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
1112 loops_for_fn (fn)->exits->empty ();
1113 loops_for_fn (fn)->exits = NULL;
1114 loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
1115 }
1116
1117 /* Returns the list of the exit edges of a LOOP. */
1118
1119 vec<edge>
1120 get_loop_exit_edges (const struct loop *loop)
1121 {
1122 vec<edge> edges = vNULL;
1123 edge e;
1124 unsigned i;
1125 basic_block *body;
1126 edge_iterator ei;
1127 struct loop_exit *exit;
1128
1129 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1130
1131 /* If we maintain the lists of exits, use them. Otherwise we must
1132 scan the body of the loop. */
1133 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1134 {
1135 for (exit = loop->exits->next; exit->e; exit = exit->next)
1136 edges.safe_push (exit->e);
1137 }
1138 else
1139 {
1140 body = get_loop_body (loop);
1141 for (i = 0; i < loop->num_nodes; i++)
1142 FOR_EACH_EDGE (e, ei, body[i]->succs)
1143 {
1144 if (!flow_bb_inside_loop_p (loop, e->dest))
1145 edges.safe_push (e);
1146 }
1147 free (body);
1148 }
1149
1150 return edges;
1151 }
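
/* Example usage, illustrative only: the returned vector must be released
   by the caller:

     vec<edge> exits = get_loop_exit_edges (loop);
     unsigned i;
     edge ex;
     FOR_EACH_VEC_ELT (exits, i, ex)
       if (dump_file)
         fprintf (dump_file, "exit %d->%d\n", ex->src->index, ex->dest->index);
     exits.release ();
*/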
1152
1153 /* Counts the number of conditional branches inside LOOP. */
1154
1155 unsigned
1156 num_loop_branches (const struct loop *loop)
1157 {
1158 unsigned i, n;
1159 basic_block * body;
1160
1161 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1162
1163 body = get_loop_body (loop);
1164 n = 0;
1165 for (i = 0; i < loop->num_nodes; i++)
1166 if (EDGE_COUNT (body[i]->succs) >= 2)
1167 n++;
1168 free (body);
1169
1170 return n;
1171 }
1172
1173 /* Adds basic block BB to LOOP. */
1174 void
1175 add_bb_to_loop (basic_block bb, struct loop *loop)
1176 {
1177 unsigned i;
1178 loop_p ploop;
1179 edge_iterator ei;
1180 edge e;
1181
1182 gcc_assert (bb->loop_father == NULL);
1183 bb->loop_father = loop;
1184 loop->num_nodes++;
1185 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1186 ploop->num_nodes++;
1187
1188 FOR_EACH_EDGE (e, ei, bb->succs)
1189 {
1190 rescan_loop_exit (e, true, false);
1191 }
1192 FOR_EACH_EDGE (e, ei, bb->preds)
1193 {
1194 rescan_loop_exit (e, true, false);
1195 }
1196 }
1197
1198 /* Remove basic block BB from loops. */
1199 void
1200 remove_bb_from_loops (basic_block bb)
1201 {
1202 unsigned i;
1203 struct loop *loop = bb->loop_father;
1204 loop_p ploop;
1205 edge_iterator ei;
1206 edge e;
1207
1208 gcc_assert (loop != NULL);
1209 loop->num_nodes--;
1210 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1211 ploop->num_nodes--;
1212 bb->loop_father = NULL;
1213
1214 FOR_EACH_EDGE (e, ei, bb->succs)
1215 {
1216 rescan_loop_exit (e, false, true);
1217 }
1218 FOR_EACH_EDGE (e, ei, bb->preds)
1219 {
1220 rescan_loop_exit (e, false, true);
1221 }
1222 }
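
/* Illustrative sketch, not part of the original file: CFG manipulation code
   that moves a block into a different loop typically pairs the two calls
   above:

     remove_bb_from_loops (bb);
     add_bb_to_loop (bb, target_loop);
*/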
1223
1224 /* Finds nearest common ancestor in loop tree for given loops. */
1225 struct loop *
1226 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1227 {
1228 unsigned sdepth, ddepth;
1229
1230 if (!loop_s) return loop_d;
1231 if (!loop_d) return loop_s;
1232
1233 sdepth = loop_depth (loop_s);
1234 ddepth = loop_depth (loop_d);
1235
1236 if (sdepth < ddepth)
1237 loop_d = (*loop_d->superloops)[sdepth];
1238 else if (sdepth > ddepth)
1239 loop_s = (*loop_s->superloops)[ddepth];
1240
1241 while (loop_s != loop_d)
1242 {
1243 loop_s = loop_outer (loop_s);
1244 loop_d = loop_outer (loop_d);
1245 }
1246 return loop_s;
1247 }
1248
1249 /* Removes LOOP from structures and frees its data. */
1250
1251 void
1252 delete_loop (struct loop *loop)
1253 {
1254 /* Remove the loop from structure. */
1255 flow_loop_tree_node_remove (loop);
1256
1257 /* Remove loop from loops array. */
1258 (*current_loops->larray)[loop->num] = NULL;
1259
1260 /* Free loop data. */
1261 flow_loop_free (loop);
1262 }
1263
1264 /* Cancels the LOOP; it must be an innermost one. */
1265
1266 static void
1267 cancel_loop (struct loop *loop)
1268 {
1269 basic_block *bbs;
1270 unsigned i;
1271 struct loop *outer = loop_outer (loop);
1272
1273 gcc_assert (!loop->inner);
1274
1275 /* Move blocks up one level (they should be removed as soon as possible). */
1276 bbs = get_loop_body (loop);
1277 for (i = 0; i < loop->num_nodes; i++)
1278 bbs[i]->loop_father = outer;
1279
1280 free (bbs);
1281 delete_loop (loop);
1282 }
1283
1284 /* Cancels LOOP and all its subloops. */
1285 void
1286 cancel_loop_tree (struct loop *loop)
1287 {
1288 while (loop->inner)
1289 cancel_loop_tree (loop->inner);
1290 cancel_loop (loop);
1291 }
1292
1293 /* Checks that the information about loops is correct:
1294 -- sizes of loops are all right
1295 -- results of get_loop_body really belong to the loop
1296 -- loop headers have just a single entry edge and a single latch edge
1297 -- loop latches have only a single successor, which is the header of their loop
1298 -- irreducible loops are correctly marked
1299 -- the cached loop depth and loop father of each bb are correct
1300 */
1301 DEBUG_FUNCTION void
1302 verify_loop_structure (void)
1303 {
1304 unsigned *sizes, i, j;
1305 sbitmap irreds;
1306 basic_block bb, *bbs;
1307 struct loop *loop;
1308 int err = 0;
1309 edge e;
1310 unsigned num = number_of_loops (cfun);
1311 struct loop_exit *exit, *mexit;
1312 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1313 sbitmap visited;
1314
1315 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1316 {
1317 error ("loop verification on loop tree that needs fixup");
1318 err = 1;
1319 }
1320
1321 /* We need up-to-date dominators, compute or verify them. */
1322 if (!dom_available)
1323 calculate_dominance_info (CDI_DOMINATORS);
1324 else
1325 verify_dominators (CDI_DOMINATORS);
1326
1327 /* Check the loop tree root. */
1328 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1329 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1330 || (current_loops->tree_root->num_nodes
1331 != (unsigned) n_basic_blocks_for_fn (cfun)))
1332 {
1333 error ("corrupt loop tree root");
1334 err = 1;
1335 }
1336
1337 /* Check the headers. */
1338 FOR_EACH_BB_FN (bb, cfun)
1339 if (bb_loop_header_p (bb))
1340 {
1341 if (bb->loop_father->header == NULL)
1342 {
1343 error ("loop with header %d marked for removal", bb->index);
1344 err = 1;
1345 }
1346 else if (bb->loop_father->header != bb)
1347 {
1348 error ("loop with header %d not in loop tree", bb->index);
1349 err = 1;
1350 }
1351 }
1352 else if (bb->loop_father->header == bb)
1353 {
1354 error ("non-loop with header %d not marked for removal", bb->index);
1355 err = 1;
1356 }
1357
1358 /* Check the recorded loop father and sizes of loops. */
1359 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1360 bitmap_clear (visited);
1361 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1362 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1363 {
1364 unsigned n;
1365
1366 if (loop->header == NULL)
1367 {
1368 error ("removed loop %d in loop tree", loop->num);
1369 err = 1;
1370 continue;
1371 }
1372
1373 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1374 if (loop->num_nodes != n)
1375 {
1376 error ("size of loop %d should be %d, not %d",
1377 loop->num, n, loop->num_nodes);
1378 err = 1;
1379 }
1380
1381 for (j = 0; j < n; j++)
1382 {
1383 bb = bbs[j];
1384
1385 if (!flow_bb_inside_loop_p (loop, bb))
1386 {
1387 error ("bb %d does not belong to loop %d",
1388 bb->index, loop->num);
1389 err = 1;
1390 }
1391
1392 /* Ignore this block if it is in an inner loop. */
1393 if (bitmap_bit_p (visited, bb->index))
1394 continue;
1395 bitmap_set_bit (visited, bb->index);
1396
1397 if (bb->loop_father != loop)
1398 {
1399 error ("bb %d has father loop %d, should be loop %d",
1400 bb->index, bb->loop_father->num, loop->num);
1401 err = 1;
1402 }
1403 }
1404 }
1405 free (bbs);
1406 sbitmap_free (visited);
1407
1408 /* Check headers and latches. */
1409 FOR_EACH_LOOP (loop, 0)
1410 {
1411 i = loop->num;
1412 if (loop->header == NULL)
1413 continue;
1414 if (!bb_loop_header_p (loop->header))
1415 {
1416 error ("loop %d%'s header is not a loop header", i);
1417 err = 1;
1418 }
1419 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1420 && EDGE_COUNT (loop->header->preds) != 2)
1421 {
1422 error ("loop %d%'s header does not have exactly 2 entries", i);
1423 err = 1;
1424 }
1425 if (loop->latch)
1426 {
1427 if (!find_edge (loop->latch, loop->header))
1428 {
1429 error ("loop %d%'s latch does not have an edge to its header", i);
1430 err = 1;
1431 }
1432 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1433 {
1434 error ("loop %d%'s latch is not dominated by its header", i);
1435 err = 1;
1436 }
1437 }
1438 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1439 {
1440 if (!single_succ_p (loop->latch))
1441 {
1442 error ("loop %d%'s latch does not have exactly 1 successor", i);
1443 err = 1;
1444 }
1445 if (single_succ (loop->latch) != loop->header)
1446 {
1447 error ("loop %d%'s latch does not have header as successor", i);
1448 err = 1;
1449 }
1450 if (loop->latch->loop_father != loop)
1451 {
1452 error ("loop %d%'s latch does not belong directly to it", i);
1453 err = 1;
1454 }
1455 }
1456 if (loop->header->loop_father != loop)
1457 {
1458 error ("loop %d%'s header does not belong directly to it", i);
1459 err = 1;
1460 }
1461 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1462 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1463 {
1464 error ("loop %d%'s latch is marked as part of irreducible region", i);
1465 err = 1;
1466 }
1467 }
1468
1469 /* Check irreducible loops. */
1470 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1471 {
1472 /* Record old info. */
1473 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1474 FOR_EACH_BB_FN (bb, cfun)
1475 {
1476 edge_iterator ei;
1477 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1478 bitmap_set_bit (irreds, bb->index);
1479 else
1480 bitmap_clear_bit (irreds, bb->index);
1481 FOR_EACH_EDGE (e, ei, bb->succs)
1482 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1483 e->flags |= EDGE_ALL_FLAGS + 1;
1484 }
1485
1486 /* Recount it. */
1487 mark_irreducible_loops ();
1488
1489 /* Compare. */
1490 FOR_EACH_BB_FN (bb, cfun)
1491 {
1492 edge_iterator ei;
1493
1494 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1495 && !bitmap_bit_p (irreds, bb->index))
1496 {
1497 error ("basic block %d should be marked irreducible", bb->index);
1498 err = 1;
1499 }
1500 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1501 && bitmap_bit_p (irreds, bb->index))
1502 {
1503 error ("basic block %d should not be marked irreducible", bb->index);
1504 err = 1;
1505 }
1506 FOR_EACH_EDGE (e, ei, bb->succs)
1507 {
1508 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1509 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1510 {
1511 error ("edge from %d to %d should be marked irreducible",
1512 e->src->index, e->dest->index);
1513 err = 1;
1514 }
1515 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1516 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1517 {
1518 error ("edge from %d to %d should not be marked irreducible",
1519 e->src->index, e->dest->index);
1520 err = 1;
1521 }
1522 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1523 }
1524 }
1525 free (irreds);
1526 }
1527
1528 /* Check the recorded loop exits. */
1529 FOR_EACH_LOOP (loop, 0)
1530 {
1531 if (!loop->exits || loop->exits->e != NULL)
1532 {
1533 error ("corrupted head of the exits list of loop %d",
1534 loop->num);
1535 err = 1;
1536 }
1537 else
1538 {
1539 /* Check that the list forms a cycle, and all elements except
1540 for the head are nonnull. */
1541 for (mexit = loop->exits, exit = mexit->next, i = 0;
1542 exit->e && exit != mexit;
1543 exit = exit->next)
1544 {
1545 if (i++ & 1)
1546 mexit = mexit->next;
1547 }
1548
1549 if (exit != loop->exits)
1550 {
1551 error ("corrupted exits list of loop %d", loop->num);
1552 err = 1;
1553 }
1554 }
1555
1556 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1557 {
1558 if (loop->exits->next != loop->exits)
1559 {
1560 error ("nonempty exits list of loop %d, but exits are not recorded",
1561 loop->num);
1562 err = 1;
1563 }
1564 }
1565 }
1566
1567 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1568 {
1569 unsigned n_exits = 0, eloops;
1570
1571 sizes = XCNEWVEC (unsigned, num);
1572 memset (sizes, 0, sizeof (unsigned) * num);
1573 FOR_EACH_BB_FN (bb, cfun)
1574 {
1575 edge_iterator ei;
1576 if (bb->loop_father == current_loops->tree_root)
1577 continue;
1578 FOR_EACH_EDGE (e, ei, bb->succs)
1579 {
1580 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1581 continue;
1582
1583 n_exits++;
1584 exit = get_exit_descriptions (e);
1585 if (!exit)
1586 {
1587 error ("exit %d->%d not recorded",
1588 e->src->index, e->dest->index);
1589 err = 1;
1590 }
1591 eloops = 0;
1592 for (; exit; exit = exit->next_e)
1593 eloops++;
1594
1595 for (loop = bb->loop_father;
1596 loop != e->dest->loop_father
1597 /* When a loop exit is also an entry edge, which
1598 can happen when avoiding CFG manipulations,
1599 then the last loop exited is the outer loop
1600 of the loop entered. */
1601 && loop != loop_outer (e->dest->loop_father);
1602 loop = loop_outer (loop))
1603 {
1604 eloops--;
1605 sizes[loop->num]++;
1606 }
1607
1608 if (eloops != 0)
1609 {
1610 error ("wrong list of exited loops for edge %d->%d",
1611 e->src->index, e->dest->index);
1612 err = 1;
1613 }
1614 }
1615 }
1616
1617 if (n_exits != current_loops->exits->elements ())
1618 {
1619 error ("too many loop exits recorded");
1620 err = 1;
1621 }
1622
1623 FOR_EACH_LOOP (loop, 0)
1624 {
1625 eloops = 0;
1626 for (exit = loop->exits->next; exit->e; exit = exit->next)
1627 eloops++;
1628 if (eloops != sizes[loop->num])
1629 {
1630 error ("%d exits recorded for loop %d (having %d exits)",
1631 eloops, loop->num, sizes[loop->num]);
1632 err = 1;
1633 }
1634 }
1635
1636 free (sizes);
1637 }
1638
1639 gcc_assert (!err);
1640
1641 if (!dom_available)
1642 free_dominance_info (CDI_DOMINATORS);
1643 }
1644
1645 /* Returns latch edge of LOOP. */
1646 edge
1647 loop_latch_edge (const struct loop *loop)
1648 {
1649 return find_edge (loop->latch, loop->header);
1650 }
1651
1652 /* Returns preheader edge of LOOP. */
1653 edge
1654 loop_preheader_edge (const struct loop *loop)
1655 {
1656 edge e;
1657 edge_iterator ei;
1658
1659 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1660
1661 FOR_EACH_EDGE (e, ei, loop->header->preds)
1662 if (e->src != loop->latch)
1663 break;
1664
1665 return e;
1666 }
1667
1668 /* Returns true if E is an exit of LOOP. */
1669
1670 bool
1671 loop_exit_edge_p (const struct loop *loop, const_edge e)
1672 {
1673 return (flow_bb_inside_loop_p (loop, e->src)
1674 && !flow_bb_inside_loop_p (loop, e->dest));
1675 }
1676
1677 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1678 or more than one exit. If the loops do not have their exits recorded,
1679 NULL is always returned. */
1680
1681 edge
1682 single_exit (const struct loop *loop)
1683 {
1684 struct loop_exit *exit = loop->exits->next;
1685
1686 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1687 return NULL;
1688
1689 if (exit->e && exit->next == loop->exits)
1690 return exit->e;
1691 else
1692 return NULL;
1693 }
1694
1695 /* Returns true when BB has an incoming edge exiting LOOP. */
1696
1697 bool
1698 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1699 {
1700 edge e;
1701 edge_iterator ei;
1702
1703 FOR_EACH_EDGE (e, ei, bb->preds)
1704 if (loop_exit_edge_p (loop, e))
1705 return true;
1706
1707 return false;
1708 }
1709
1710 /* Returns true when BB has an outgoing edge exiting LOOP. */
1711
1712 bool
1713 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1714 {
1715 edge e;
1716 edge_iterator ei;
1717
1718 FOR_EACH_EDGE (e, ei, bb->succs)
1719 if (loop_exit_edge_p (loop, e))
1720 return true;
1721
1722 return false;
1723 }
1724
1725 /* Return location corresponding to the loop control condition if possible. */
1726
1727 location_t
1728 get_loop_location (struct loop *loop)
1729 {
1730 rtx_insn *insn = NULL;
1731 struct niter_desc *desc = NULL;
1732 edge exit;
1733
1734 /* For a for or while loop, we would like to return the location
1735 of the for or while statement, if possible. To do this, look
1736 for the branch guarding the loop back-edge. */
1737
1738 /* If this is a simple loop with an in_edge, then the loop control
1739 branch is typically at the end of its source. */
1740 desc = get_simple_loop_desc (loop);
1741 if (desc->in_edge)
1742 {
1743 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1744 {
1745 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1746 return INSN_LOCATION (insn);
1747 }
1748 }
1749 /* If loop has a single exit, then the loop control branch
1750 must be at the end of its source. */
1751 if ((exit = single_exit (loop)))
1752 {
1753 FOR_BB_INSNS_REVERSE (exit->src, insn)
1754 {
1755 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1756 return INSN_LOCATION (insn);
1757 }
1758 }
1759 /* Next check the latch, to see if it is non-empty. */
1760 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1761 {
1762 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1763 return INSN_LOCATION (insn);
1764 }
1765 /* Finally, if none of the above identifies the loop control branch,
1766 return the first location in the loop header. */
1767 FOR_BB_INSNS (loop->header, insn)
1768 {
1769 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1770 return INSN_LOCATION (insn);
1771 }
1772 /* If all else fails, simply return the current function location. */
1773 return DECL_SOURCE_LOCATION (current_function_decl);
1774 }
1775
1776 /* Records that every statement in LOOP is executed I_BOUND times.
1777 REALISTIC is true if I_BOUND is expected to be close to the real number
1778 of iterations. UPPER is true if we are sure the loop iterates at most
1779 I_BOUND times. */
1780
1781 void
1782 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1783 bool realistic, bool upper)
1784 {
1785 /* Update the bounds only when there is no previous estimation, or when the
1786 current estimation is smaller. */
1787 if (upper
1788 && (!loop->any_upper_bound
1789 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1790 {
1791 loop->any_upper_bound = true;
1792 loop->nb_iterations_upper_bound = i_bound;
1793 }
1794 if (realistic
1795 && (!loop->any_estimate
1796 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1797 {
1798 loop->any_estimate = true;
1799 loop->nb_iterations_estimate = i_bound;
1800 }
1801
1802 /* If an upper bound is smaller than the realistic estimate of the
1803 number of iterations, use the upper bound instead. */
1804 if (loop->any_upper_bound
1805 && loop->any_estimate
1806 && wi::ltu_p (loop->nb_iterations_upper_bound,
1807 loop->nb_iterations_estimate))
1808 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1809 }
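
/* Illustrative sketch, not part of the original file: a pass that has proven
   the latch of LOOP runs at most 100 times could record that as a hard upper
   bound without claiming it is a realistic estimate:

     widest_int bound = 100;
     record_niter_bound (loop, bound, false, true);
*/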
1810
1811 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1812 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1813 on the number of iterations of LOOP could not be derived, returns -1. */
1814
1815 HOST_WIDE_INT
1816 get_estimated_loop_iterations_int (struct loop *loop)
1817 {
1818 widest_int nit;
1819 HOST_WIDE_INT hwi_nit;
1820
1821 if (!get_estimated_loop_iterations (loop, &nit))
1822 return -1;
1823
1824 if (!wi::fits_shwi_p (nit))
1825 return -1;
1826 hwi_nit = nit.to_shwi ();
1827
1828 return hwi_nit < 0 ? -1 : hwi_nit;
1829 }
1830
1831 /* Returns an upper bound on the number of executions of statements
1832 in the LOOP. For statements before the loop exit, this exceeds
1833 the number of executions of the latch by one. */
1834
1835 HOST_WIDE_INT
1836 max_stmt_executions_int (struct loop *loop)
1837 {
1838 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1839 HOST_WIDE_INT snit;
1840
1841 if (nit == -1)
1842 return -1;
1843
1844 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1845
1846 /* If the computation overflows, return -1. */
1847 return snit < 0 ? -1 : snit;
1848 }
1849
1850 /* Sets NIT to the estimated number of executions of the latch of the
1851 LOOP. If we have no reliable estimate, the function returns false, otherwise
1852 returns true. */
1853
1854 bool
1855 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1856 {
1857 /* Even if the bound is not recorded, we may be able to derive one from
1858 the profile. */
1859 if (!loop->any_estimate)
1860 {
1861 if (loop->header->count)
1862 {
1863 *nit = gcov_type_to_wide_int
1864 (expected_loop_iterations_unbounded (loop) + 1);
1865 return true;
1866 }
1867 return false;
1868 }
1869
1870 *nit = loop->nb_iterations_estimate;
1871 return true;
1872 }
1873
1874 /* Sets NIT to an upper bound for the maximum number of executions of the
1875 latch of the LOOP. If we have no reliable estimate, the function returns
1876 false, otherwise returns true. */
1877
1878 bool
1879 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1880 {
1881 if (!loop->any_upper_bound)
1882 return false;
1883
1884 *nit = loop->nb_iterations_upper_bound;
1885 return true;
1886 }
1887
1888 /* Similar to get_max_loop_iterations, but returns the estimate only
1889 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1890 on the number of iterations of LOOP could not be derived, returns -1. */
1891
1892 HOST_WIDE_INT
1893 get_max_loop_iterations_int (struct loop *loop)
1894 {
1895 widest_int nit;
1896 HOST_WIDE_INT hwi_nit;
1897
1898 if (!get_max_loop_iterations (loop, &nit))
1899 return -1;
1900
1901 if (!wi::fits_shwi_p (nit))
1902 return -1;
1903 hwi_nit = nit.to_shwi ();
1904
1905 return hwi_nit < 0 ? -1 : hwi_nit;
1906 }
1907
1908 /* Returns the loop depth of the loop BB belongs to. */
1909
1910 int
1911 bb_loop_depth (const_basic_block bb)
1912 {
1913 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1914 }
1915
1916 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1917
1918 void
1919 mark_loop_for_removal (loop_p loop)
1920 {
1921 if (loop->header == NULL)
1922 return;
1923 loop->former_header = loop->header;
1924 loop->header = NULL;
1925 loop->latch = NULL;
1926 loops_state_set (LOOPS_NEED_FIXUP);
1927 }