1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000, 2001, 2003, 2004, 2005 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hard-reg-set.h"
27 #include "obstack.h"
28 #include "function.h"
29 #include "basic-block.h"
30 #include "toplev.h"
31 #include "cfgloop.h"
32 #include "flags.h"
33 #include "tree.h"
34 #include "tree-flow.h"
35
36 /* Ratio of frequencies of edges so that one or more latch edges are
37 considered to belong to an inner loop with the same header. */
38 #define HEAVY_EDGE_RATIO 8
39
40 #define HEADER_BLOCK(B) (* (int *) (B)->aux)
41 #define LATCH_EDGE(E) (*(int *) (E)->aux)
42
43 static void flow_loops_cfg_dump (const struct loops *, FILE *);
44 static int flow_loop_level_compute (struct loop *);
45 static void flow_loops_level_compute (struct loops *);
46 static void establish_preds (struct loop *);
47 static void canonicalize_loop_headers (void);
48 static bool glb_enum_p (basic_block, void *);
49 \f
50 /* Dump loop related CFG information. */
51
52 static void
53 flow_loops_cfg_dump (const struct loops *loops, FILE *file)
54 {
55 int i;
56 basic_block bb;
57
58 if (! loops->num || ! file)
59 return;
60
61 FOR_EACH_BB (bb)
62 {
63 edge succ;
64 edge_iterator ei;
65
66 fprintf (file, ";; %d succs { ", bb->index);
67 FOR_EACH_EDGE (succ, ei, bb->succs)
68 fprintf (file, "%d ", succ->dest->index);
69 fprintf (file, "}\n");
70 }
71
72 /* Dump the DFS node order. */
73 if (loops->cfg.dfs_order)
74 {
75 fputs (";; DFS order: ", file);
76 for (i = 0; i < n_basic_blocks; i++)
77 fprintf (file, "%d ", loops->cfg.dfs_order[i]);
78
79 fputs ("\n", file);
80 }
81
82 /* Dump the reverse completion node order. */
83 if (loops->cfg.rc_order)
84 {
85 fputs (";; RC order: ", file);
86 for (i = 0; i < n_basic_blocks; i++)
87 fprintf (file, "%d ", loops->cfg.rc_order[i]);
88
89 fputs ("\n", file);
90 }
91 }
92
93 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
94
95 bool
96 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
97 {
98 return (loop->depth > outer->depth
99 && loop->pred[outer->depth] == outer);
100 }
101
102 /* Returns the superloop of LOOP at depth DEPTH (indexed from zero),
103 i.e. the ancestor of LOOP in the loop tree at that depth. */
104
105 struct loop *
106 superloop_at_depth (struct loop *loop, unsigned depth)
107 {
108 gcc_assert (depth <= (unsigned) loop->depth);
109
110 if (depth == (unsigned) loop->depth)
111 return loop;
112
113 return loop->pred[depth];
114 }
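/* Usage sketch: the pred array ties flow_loop_nested_p and
   superloop_at_depth together.  For hypothetical loops INNER and OUTER,
   with INNER strictly nested in OUTER, both of the following hold,
   because INNER records OUTER as its ancestor at OUTER's depth:

     gcc_assert (flow_loop_nested_p (outer, inner));
     gcc_assert (superloop_at_depth (inner, outer->depth) == outer);  */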
115
116 /* Dump the loop information specified by LOOP to the stream FILE
117 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
118
119 void
120 flow_loop_dump (const struct loop *loop, FILE *file,
121 void (*loop_dump_aux) (const struct loop *, FILE *, int),
122 int verbose)
123 {
124 basic_block *bbs;
125 unsigned i;
126
127 if (! loop || ! loop->header)
128 return;
129
130 fprintf (file, ";;\n;; Loop %d:%s\n", loop->num,
131 loop->invalid ? " invalid" : "");
132
133 fprintf (file, ";; header %d, latch %d\n",
134 loop->header->index, loop->latch->index);
135 fprintf (file, ";; depth %d, level %d, outer %ld\n",
136 loop->depth, loop->level,
137 (long) (loop->outer ? loop->outer->num : -1));
138
139 fprintf (file, ";; nodes:");
140 bbs = get_loop_body (loop);
141 for (i = 0; i < loop->num_nodes; i++)
142 fprintf (file, " %d", bbs[i]->index);
143 free (bbs);
144 fprintf (file, "\n");
145
146 if (loop_dump_aux)
147 loop_dump_aux (loop, file, verbose);
148 }
149
150 /* Dump the loop information specified by LOOPS to the stream FILE,
151 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
152
153 void
154 flow_loops_dump (const struct loops *loops, FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
155 {
156 int i;
157 int num_loops;
158
159 num_loops = loops->num;
160 if (! num_loops || ! file)
161 return;
162
163 fprintf (file, ";; %d loops found\n", num_loops);
164
165 for (i = 0; i < num_loops; i++)
166 {
167 struct loop *loop = loops->parray[i];
168
169 if (!loop)
170 continue;
171
172 flow_loop_dump (loop, file, loop_dump_aux, verbose);
173 }
174
175 if (verbose)
176 flow_loops_cfg_dump (loops, file);
177 }
178
179 /* Free data allocated for LOOP. */
180 void
181 flow_loop_free (struct loop *loop)
182 {
183 if (loop->pred)
184 free (loop->pred);
185 free (loop);
186 }
187
188 /* Free all the memory allocated for LOOPS. */
189
190 void
191 flow_loops_free (struct loops *loops)
192 {
193 if (loops->parray)
194 {
195 unsigned i;
196
197 gcc_assert (loops->num);
198
199 /* Free the loop descriptors. */
200 for (i = 0; i < loops->num; i++)
201 {
202 struct loop *loop = loops->parray[i];
203
204 if (!loop)
205 continue;
206
207 flow_loop_free (loop);
208 }
209
210 free (loops->parray);
211 loops->parray = NULL;
212
213 if (loops->cfg.dfs_order)
214 free (loops->cfg.dfs_order);
215 if (loops->cfg.rc_order)
216 free (loops->cfg.rc_order);
217
218 }
219 }
220
221 /* Find the nodes contained within the LOOP with header HEADER.
222 Return the number of nodes within the loop. */
223
224 int
225 flow_loop_nodes_find (basic_block header, struct loop *loop)
226 {
227 basic_block *stack;
228 int sp;
229 int num_nodes = 1;
230
231 header->loop_father = loop;
232 header->loop_depth = loop->depth;
233
234 if (loop->latch->loop_father != loop)
235 {
236 stack = xmalloc (n_basic_blocks * sizeof (basic_block));
237 sp = 0;
238 num_nodes++;
239 stack[sp++] = loop->latch;
240 loop->latch->loop_father = loop;
241 loop->latch->loop_depth = loop->depth;
242
243 while (sp)
244 {
245 basic_block node;
246 edge e;
247 edge_iterator ei;
248
249 node = stack[--sp];
250
251 FOR_EACH_EDGE (e, ei, node->preds)
252 {
253 basic_block ancestor = e->src;
254
255 if (ancestor != ENTRY_BLOCK_PTR
256 && ancestor->loop_father != loop)
257 {
258 ancestor->loop_father = loop;
259 ancestor->loop_depth = loop->depth;
260 num_nodes++;
261 stack[sp++] = ancestor;
262 }
263 }
264 }
265 free (stack);
266 }
267 return num_nodes;
268 }
269
270 /* For each loop in the LOOPS tree that has just a single exit,
271 record the exit edge. */
272
273 void
274 mark_single_exit_loops (struct loops *loops)
275 {
276 basic_block bb;
277 edge e;
278 struct loop *loop;
279 unsigned i;
280
281 for (i = 1; i < loops->num; i++)
282 {
283 loop = loops->parray[i];
284 if (loop)
285 loop->single_exit = NULL;
286 }
287
288 FOR_EACH_BB (bb)
289 {
290 edge_iterator ei;
291 if (bb->loop_father == loops->tree_root)
292 continue;
293 FOR_EACH_EDGE (e, ei, bb->succs)
294 {
295 if (e->dest == EXIT_BLOCK_PTR)
296 continue;
297
298 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
299 continue;
300
301 for (loop = bb->loop_father;
302 loop != e->dest->loop_father;
303 loop = loop->outer)
304 {
305 /* If we have already seen an exit, mark the loop with an edge
306 that certainly cannot be a real exit. */
307 if (loop->single_exit)
308 loop->single_exit = single_succ_edge (ENTRY_BLOCK_PTR);
309 else
310 loop->single_exit = e;
311 }
312 }
313 }
314
315 for (i = 1; i < loops->num; i++)
316 {
317 loop = loops->parray[i];
318 if (!loop)
319 continue;
320
321 if (loop->single_exit == single_succ_edge (ENTRY_BLOCK_PTR))
322 loop->single_exit = NULL;
323 }
324
325 loops->state |= LOOPS_HAVE_MARKED_SINGLE_EXITS;
326 }
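/* Usage sketch: once single exits have been marked, a pass can query the
   recorded edge; LOOP here is a hypothetical loop taken from LOOPS->parray:

     if ((loops->state & LOOPS_HAVE_MARKED_SINGLE_EXITS)
         && loop->single_exit)
       fprintf (stderr, ";; loop %d exits through %d -> %d\n", loop->num,
                loop->single_exit->src->index,
                loop->single_exit->dest->index);  */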
327
328 static void
329 establish_preds (struct loop *loop)
330 {
331 struct loop *ploop, *father = loop->outer;
332
333 loop->depth = father->depth + 1;
334
335 /* Remember the current loop depth if it is the largest seen so far. */
336 cfun->max_loop_depth = MAX (cfun->max_loop_depth, loop->depth);
337
338 if (loop->pred)
339 free (loop->pred);
340 loop->pred = xmalloc (sizeof (struct loop *) * loop->depth);
341 memcpy (loop->pred, father->pred, sizeof (struct loop *) * father->depth);
342 loop->pred[father->depth] = father;
343
344 for (ploop = loop->inner; ploop; ploop = ploop->next)
345 establish_preds (ploop);
346 }
347
348 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
349 added loop. If LOOP has children, make sure that their pred fields
350 are initialized correctly. */
351
352 void
353 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
354 {
355 loop->next = father->inner;
356 father->inner = loop;
357 loop->outer = father;
358
359 establish_preds (loop);
360 }
361
362 /* Remove LOOP from the loop hierarchy tree. */
363
364 void
365 flow_loop_tree_node_remove (struct loop *loop)
366 {
367 struct loop *prev, *father;
368
369 father = loop->outer;
370 loop->outer = NULL;
371
372 /* Remove loop from the list of sons. */
373 if (father->inner == loop)
374 father->inner = loop->next;
375 else
376 {
377 for (prev = father->inner; prev->next != loop; prev = prev->next);
378 prev->next = loop->next;
379 }
380
381 loop->depth = -1;
382 free (loop->pred);
383 loop->pred = NULL;
384 }
385
386 /* Helper function to compute loop nesting depth and enclosed loop level
387 for the natural loop specified by LOOP. Returns the loop level. */
388
389 static int
390 flow_loop_level_compute (struct loop *loop)
391 {
392 struct loop *inner;
393 int level = 1;
394
395 if (! loop)
396 return 0;
397
398 /* Traverse loop tree assigning depth and computing level as the
399 maximum level of all the inner loops of this loop. The loop
400 level is equivalent to the height of the loop in the loop tree
401 and corresponds to the number of enclosed loop levels (including
402 itself). */
403 for (inner = loop->inner; inner; inner = inner->next)
404 {
405 int ilevel = flow_loop_level_compute (inner) + 1;
406
407 if (ilevel > level)
408 level = ilevel;
409 }
410
411 loop->level = level;
412 return level;
413 }
414
415 /* Compute the loop nesting depth and enclosed loop level for each loop
416 in the loop hierarchy tree specified by LOOPS. */
418
419 static void
420 flow_loops_level_compute (struct loops *loops)
421 {
422 flow_loop_level_compute (loops->tree_root);
423 }
424
425 /* A callback to update latch and header info for basic block JUMP created
426 by redirecting an edge. */
427
428 static void
429 update_latch_info (basic_block jump)
430 {
431 alloc_aux_for_block (jump, sizeof (int));
432 HEADER_BLOCK (jump) = 0;
433 alloc_aux_for_edge (single_pred_edge (jump), sizeof (int));
434 LATCH_EDGE (single_pred_edge (jump)) = 0;
435 set_immediate_dominator (CDI_DOMINATORS, jump, single_pred (jump));
436 }
437
438 /* A callback for make_forwarder_block, to redirect all edges except for
439 MFB_KJ_EDGE to the entry part. E is the edge for which we should decide
440 whether to redirect it. */
441
442 static edge mfb_kj_edge;
443 static bool
444 mfb_keep_just (edge e)
445 {
446 return e != mfb_kj_edge;
447 }
448
449 /* A callback for make_forwarder_block, to redirect the latch edges into the
450 entry part. E is the edge for which we should decide whether to redirect
451 it. */
452
453 static bool
454 mfb_keep_nonlatch (edge e)
455 {
456 return LATCH_EDGE (e);
457 }
458
459 /* Takes care of merging natural loops with shared headers. */
460
461 static void
462 canonicalize_loop_headers (void)
463 {
464 basic_block header;
465 edge e;
466
467 alloc_aux_for_blocks (sizeof (int));
468 alloc_aux_for_edges (sizeof (int));
469
470 /* Split blocks so that each loop has only a single latch. */
471 FOR_EACH_BB (header)
472 {
473 edge_iterator ei;
474 int num_latches = 0;
475 int have_abnormal_edge = 0;
476
477 FOR_EACH_EDGE (e, ei, header->preds)
478 {
479 basic_block latch = e->src;
480
481 if (e->flags & EDGE_ABNORMAL)
482 have_abnormal_edge = 1;
483
484 if (latch != ENTRY_BLOCK_PTR
485 && dominated_by_p (CDI_DOMINATORS, latch, header))
486 {
487 num_latches++;
488 LATCH_EDGE (e) = 1;
489 }
490 }
491 if (have_abnormal_edge)
492 HEADER_BLOCK (header) = 0;
493 else
494 HEADER_BLOCK (header) = num_latches;
495 }
496
497 if (HEADER_BLOCK (single_succ (ENTRY_BLOCK_PTR)))
498 {
499 basic_block bb;
500
501 /* We cannot redirect edges freely here. On the other hand,
502 we can simply split the edge from the entry block. */
503 bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
504
505 alloc_aux_for_edge (single_succ_edge (bb), sizeof (int));
506 LATCH_EDGE (single_succ_edge (bb)) = 0;
507 alloc_aux_for_block (bb, sizeof (int));
508 HEADER_BLOCK (bb) = 0;
509 }
510
511 FOR_EACH_BB (header)
512 {
513 int max_freq, is_heavy;
514 edge heavy, tmp_edge;
515 edge_iterator ei;
516
517 if (HEADER_BLOCK (header) <= 1)
518 continue;
519
520 /* Find a heavy edge. */
521 is_heavy = 1;
522 heavy = NULL;
523 max_freq = 0;
524 FOR_EACH_EDGE (e, ei, header->preds)
525 if (LATCH_EDGE (e)
526 && EDGE_FREQUENCY (e) > max_freq)
527 max_freq = EDGE_FREQUENCY (e);
528 FOR_EACH_EDGE (e, ei, header->preds)
529 if (LATCH_EDGE (e)
530 && EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
531 {
532 if (heavy)
533 {
534 is_heavy = 0;
535 break;
536 }
537 else
538 heavy = e;
539 }
540
541 if (is_heavy)
542 {
543 /* Split out the heavy edge, and create inner loop for it. */
544 mfb_kj_edge = heavy;
545 tmp_edge = make_forwarder_block (header, mfb_keep_just,
546 update_latch_info);
547 alloc_aux_for_block (tmp_edge->dest, sizeof (int));
548 HEADER_BLOCK (tmp_edge->dest) = 1;
549 alloc_aux_for_edge (tmp_edge, sizeof (int));
550 LATCH_EDGE (tmp_edge) = 0;
551 HEADER_BLOCK (header)--;
552 }
553
554 if (HEADER_BLOCK (header) > 1)
555 {
556 /* Create a new latch block. */
557 tmp_edge = make_forwarder_block (header, mfb_keep_nonlatch,
558 update_latch_info);
559 alloc_aux_for_block (tmp_edge->dest, sizeof (int));
560 HEADER_BLOCK (tmp_edge->src) = 0;
561 HEADER_BLOCK (tmp_edge->dest) = 1;
562 alloc_aux_for_edge (tmp_edge, sizeof (int));
563 LATCH_EDGE (tmp_edge) = 1;
564 }
565 }
566
567 free_aux_for_blocks ();
568 free_aux_for_edges ();
569
570 #ifdef ENABLE_CHECKING
571 verify_dominators (CDI_DOMINATORS);
572 #endif
573 }
574
575 /* Initialize all the parallel_p fields of the loops structure to true. */
576
577 static void
578 initialize_loops_parallel_p (struct loops *loops)
579 {
580 unsigned int i;
581
582 for (i = 0; i < loops->num; i++)
583 {
584 struct loop *loop = loops->parray[i];
585 loop->parallel_p = true;
586 }
587 }
588
589 /* Find all the natural loops in the function, save them in the LOOPS
590 structure, and recalculate the loop_depth information in the basic block
591 structures. Return the number of natural loops found. */
592
593 int
594 flow_loops_find (struct loops *loops)
595 {
596 int b;
597 int num_loops;
598 edge e;
599 sbitmap headers;
600 int *dfs_order;
601 int *rc_order;
602 basic_block header;
603 basic_block bb;
604
605 memset (loops, 0, sizeof *loops);
606
607 /* We are going to recount the maximum loop depth,
608 so throw away the last count. */
609 cfun->max_loop_depth = 0;
610
611 /* Taking care of this degenerate case makes the rest of
612 this code simpler. */
613 if (n_basic_blocks == 0)
614 return 0;
615
616 dfs_order = NULL;
617 rc_order = NULL;
618
619 /* Ensure that the dominators are computed. */
620 calculate_dominance_info (CDI_DOMINATORS);
621
622 /* Join loops with shared headers. */
623 canonicalize_loop_headers ();
624
625 /* Count the number of loop headers. This should be the
626 same as the number of natural loops. */
627 headers = sbitmap_alloc (last_basic_block);
628 sbitmap_zero (headers);
629
630 num_loops = 0;
631 FOR_EACH_BB (header)
632 {
633 edge_iterator ei;
634 int more_latches = 0;
635
636 header->loop_depth = 0;
637
638 /* If we have an abnormal predecessor, do not consider the
639 loop (not worth the problems). */
640 FOR_EACH_EDGE (e, ei, header->preds)
641 if (e->flags & EDGE_ABNORMAL)
642 break;
643 if (e)
644 continue;
645
646 FOR_EACH_EDGE (e, ei, header->preds)
647 {
648 basic_block latch = e->src;
649
650 gcc_assert (!(e->flags & EDGE_ABNORMAL));
651
652 /* Look for back edges where a predecessor is dominated
653 by this block. A natural loop has a single entry
654 node (header) that dominates all the nodes in the
655 loop. It also has a single back edge to the header
656 from a latch node. */
657 if (latch != ENTRY_BLOCK_PTR
658 && dominated_by_p (CDI_DOMINATORS, latch, header))
659 {
660 /* Shared headers should be eliminated by now. */
661 gcc_assert (!more_latches);
662 more_latches = 1;
663 SET_BIT (headers, header->index);
664 num_loops++;
665 }
666 }
667 }
668
669 /* Allocate loop structures. */
670 loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *));
671
672 /* Dummy loop containing whole function. */
673 loops->parray[0] = xcalloc (1, sizeof (struct loop));
674 loops->parray[0]->next = NULL;
675 loops->parray[0]->inner = NULL;
676 loops->parray[0]->outer = NULL;
677 loops->parray[0]->depth = 0;
678 loops->parray[0]->pred = NULL;
679 loops->parray[0]->num_nodes = n_basic_blocks + 2;
680 loops->parray[0]->latch = EXIT_BLOCK_PTR;
681 loops->parray[0]->header = ENTRY_BLOCK_PTR;
682 ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
683 EXIT_BLOCK_PTR->loop_father = loops->parray[0];
684
685 loops->tree_root = loops->parray[0];
686
687 /* Find and record information about all the natural loops
688 in the CFG. */
689 loops->num = 1;
690 FOR_EACH_BB (bb)
691 bb->loop_father = loops->tree_root;
692
693 if (num_loops)
694 {
695 /* Compute depth first search order of the CFG so that outer
696 natural loops will be found before inner natural loops. */
697 dfs_order = xmalloc (n_basic_blocks * sizeof (int));
698 rc_order = xmalloc (n_basic_blocks * sizeof (int));
699 flow_depth_first_order_compute (dfs_order, rc_order);
700
701 /* Save CFG derived information to avoid recomputing it. */
702 loops->cfg.dfs_order = dfs_order;
703 loops->cfg.rc_order = rc_order;
704
705 num_loops = 1;
706
707 for (b = 0; b < n_basic_blocks; b++)
708 {
709 struct loop *loop;
710 edge_iterator ei;
711
712 /* Search the nodes of the CFG in reverse completion order
713 so that we can find outer loops first. */
714 if (!TEST_BIT (headers, rc_order[b]))
715 continue;
716
717 header = BASIC_BLOCK (rc_order[b]);
718
719 loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop));
720
721 loop->header = header;
722 loop->num = num_loops;
723 num_loops++;
724
725 /* Look for the latch for this header block. */
726 FOR_EACH_EDGE (e, ei, header->preds)
727 {
728 basic_block latch = e->src;
729
730 if (latch != ENTRY_BLOCK_PTR
731 && dominated_by_p (CDI_DOMINATORS, latch, header))
732 {
733 loop->latch = latch;
734 break;
735 }
736 }
737
738 flow_loop_tree_node_add (header->loop_father, loop);
739 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
740 }
741
742 /* Assign the loop nesting depth and enclosed loop level for each
743 loop. */
744 flow_loops_level_compute (loops);
745
746 loops->num = num_loops;
747 initialize_loops_parallel_p (loops);
748 }
749
750 sbitmap_free (headers);
751
752 loops->state = 0;
753 #ifdef ENABLE_CHECKING
754 verify_flow_info ();
755 verify_loop_structure (loops);
756 #endif
757
758 return loops->num;
759 }
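/* Usage sketch: the typical lifecycle of the structure filled in above
   (the local variable LOOPS is hypothetical):

     struct loops loops;

     if (flow_loops_find (&loops) > 1)
       flow_loops_dump (&loops, stderr, NULL, 1);
     flow_loops_free (&loops);  */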
760
761 /* Return nonzero if basic block BB belongs to LOOP. */
762 bool
763 flow_bb_inside_loop_p (const struct loop *loop, const basic_block bb)
764 {
765 struct loop *source_loop;
766
767 if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
768 return 0;
769
770 source_loop = bb->loop_father;
771 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
772 }
773
774 /* Return nonzero if edge E enters header of LOOP from outside of LOOP. */
775
776 bool
777 flow_loop_outside_edge_p (const struct loop *loop, edge e)
778 {
779 gcc_assert (e->dest == loop->header);
780 return !flow_bb_inside_loop_p (loop, e->src);
781 }
782
783 /* Enumeration predicate for get_loop_body. */
784 static bool
785 glb_enum_p (basic_block bb, void *glb_header)
786 {
787 return bb != (basic_block) glb_header;
788 }
789
790 /* Gets the basic blocks of LOOP. The header is the 0-th block; the rest are
791 in DFS order against the direction of edges, starting from the latch. In
792 particular, if header != latch, the latch is the 1-st block. */
793 basic_block *
794 get_loop_body (const struct loop *loop)
795 {
796 basic_block *tovisit, bb;
797 unsigned tv = 0;
798
799 gcc_assert (loop->num_nodes);
800
801 tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
802 tovisit[tv++] = loop->header;
803
804 if (loop->latch == EXIT_BLOCK_PTR)
805 {
806 /* There may be blocks unreachable from EXIT_BLOCK. */
807 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks + 2);
808 FOR_EACH_BB (bb)
809 tovisit[tv++] = bb;
810 tovisit[tv++] = EXIT_BLOCK_PTR;
811 }
812 else if (loop->latch != loop->header)
813 {
814 tv = dfs_enumerate_from (loop->latch, 1, glb_enum_p,
815 tovisit + 1, loop->num_nodes - 1,
816 loop->header) + 1;
817 }
818
819 gcc_assert (tv == loop->num_nodes);
820 return tovisit;
821 }
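/* Usage sketch: the returned array is heap-allocated and the caller must
   free it, as flow_loop_dump above does:

     basic_block *body = get_loop_body (loop);
     unsigned i;

     for (i = 0; i < loop->num_nodes; i++)
       fprintf (stderr, " %d", body[i]->index);
     free (body);  */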
822
823 /* Fills dominance descendants inside LOOP of the basic block BB into
824 array TOVISIT from index *TV. */
825
826 static void
827 fill_sons_in_loop (const struct loop *loop, basic_block bb,
828 basic_block *tovisit, int *tv)
829 {
830 basic_block son, postpone = NULL;
831
832 tovisit[(*tv)++] = bb;
833 for (son = first_dom_son (CDI_DOMINATORS, bb);
834 son;
835 son = next_dom_son (CDI_DOMINATORS, son))
836 {
837 if (!flow_bb_inside_loop_p (loop, son))
838 continue;
839
840 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
841 {
842 postpone = son;
843 continue;
844 }
845 fill_sons_in_loop (loop, son, tovisit, tv);
846 }
847
848 if (postpone)
849 fill_sons_in_loop (loop, postpone, tovisit, tv);
850 }
851
852 /* Gets the body of LOOP (which must be different from the outermost loop)
853 sorted by the dominance relation. Additionally, if a basic block S dominates
854 the latch, then only blocks dominated by S appear after it. */
855
856 basic_block *
857 get_loop_body_in_dom_order (const struct loop *loop)
858 {
859 basic_block *tovisit;
860 int tv;
861
862 gcc_assert (loop->num_nodes);
863
864 tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
865
866 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
867
868 tv = 0;
869 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
870
871 gcc_assert (tv == (int) loop->num_nodes);
872
873 return tovisit;
874 }
875
876 /* Gets the body of LOOP in breadth-first search order. */
877
878 basic_block *
879 get_loop_body_in_bfs_order (const struct loop *loop)
880 {
881 basic_block *blocks;
882 basic_block bb;
883 bitmap visited;
884 unsigned int i = 0;
885 unsigned int vc = 1;
886
887 gcc_assert (loop->num_nodes);
888 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
889
890 blocks = xcalloc (loop->num_nodes, sizeof (basic_block));
891 visited = BITMAP_ALLOC (NULL);
892
893 bb = loop->header;
894 while (i < loop->num_nodes)
895 {
896 edge e;
897 edge_iterator ei;
898
899 if (!bitmap_bit_p (visited, bb->index))
900 {
901 /* This basic block has now been visited. */
902 bitmap_set_bit (visited, bb->index);
903 blocks[i++] = bb;
904 }
905
906 FOR_EACH_EDGE (e, ei, bb->succs)
907 {
908 if (flow_bb_inside_loop_p (loop, e->dest))
909 {
910 if (!bitmap_bit_p (visited, e->dest->index))
911 {
912 bitmap_set_bit (visited, e->dest->index);
913 blocks[i++] = e->dest;
914 }
915 }
916 }
917
918 gcc_assert (i >= vc);
919
920 bb = blocks[vc++];
921 }
922
923 BITMAP_FREE (visited);
924 return blocks;
925 }
926
927 /* Gets exit edges of a LOOP, returning their number in N_EDGES. */
928 edge *
929 get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges)
930 {
931 edge *edges, e;
932 unsigned i, n;
933 basic_block * body;
934 edge_iterator ei;
935
936 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
937
938 body = get_loop_body (loop);
939 n = 0;
940 for (i = 0; i < loop->num_nodes; i++)
941 FOR_EACH_EDGE (e, ei, body[i]->succs)
942 if (!flow_bb_inside_loop_p (loop, e->dest))
943 n++;
944 edges = xmalloc (n * sizeof (edge));
945 *n_edges = n;
946 n = 0;
947 for (i = 0; i < loop->num_nodes; i++)
948 FOR_EACH_EDGE (e, ei, body[i]->succs)
949 if (!flow_bb_inside_loop_p (loop, e->dest))
950 edges[n++] = e;
951 free (body);
952
953 return edges;
954 }
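/* Usage sketch: the caller owns the returned array; every edge in it
   satisfies loop_exit_edge_p (defined near the end of this file):

     unsigned n_exits, i;
     edge *exits = get_loop_exit_edges (loop, &n_exits);

     for (i = 0; i < n_exits; i++)
       gcc_assert (loop_exit_edge_p (loop, exits[i]));
     free (exits);  */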
955
956 /* Counts the number of conditional branches inside LOOP. */
957
958 unsigned
959 num_loop_branches (const struct loop *loop)
960 {
961 unsigned i, n;
962 basic_block * body;
963
964 gcc_assert (loop->latch != EXIT_BLOCK_PTR);
965
966 body = get_loop_body (loop);
967 n = 0;
968 for (i = 0; i < loop->num_nodes; i++)
969 if (EDGE_COUNT (body[i]->succs) >= 2)
970 n++;
971 free (body);
972
973 return n;
974 }
975
976 /* Adds basic block BB to LOOP. */
977 void
978 add_bb_to_loop (basic_block bb, struct loop *loop)
979 {
980 int i;
981
982 bb->loop_father = loop;
983 bb->loop_depth = loop->depth;
984 loop->num_nodes++;
985 for (i = 0; i < loop->depth; i++)
986 loop->pred[i]->num_nodes++;
987 }
988
989 /* Remove basic block BB from loops. */
990 void
991 remove_bb_from_loops (basic_block bb)
992 {
993 int i;
994 struct loop *loop = bb->loop_father;
995
996 loop->num_nodes--;
997 for (i = 0; i < loop->depth; i++)
998 loop->pred[i]->num_nodes--;
999 bb->loop_father = NULL;
1000 bb->loop_depth = 0;
1001 }
1002
1003 /* Finds nearest common ancestor in loop tree for given loops. */
1004 struct loop *
1005 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1006 {
1007 if (!loop_s) return loop_d;
1008 if (!loop_d) return loop_s;
1009
1010 if (loop_s->depth < loop_d->depth)
1011 loop_d = loop_d->pred[loop_s->depth];
1012 else if (loop_s->depth > loop_d->depth)
1013 loop_s = loop_s->pred[loop_d->depth];
1014
1015 while (loop_s != loop_d)
1016 {
1017 loop_s = loop_s->outer;
1018 loop_d = loop_d->outer;
1019 }
1020 return loop_s;
1021 }
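/* Usage sketch: the innermost loop containing both endpoints of an edge E
   is found from the endpoints' loop_father fields:

     struct loop *common = find_common_loop (e->src->loop_father,
                                             e->dest->loop_father);  */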
1022
1023 /* Cancels LOOP; it must be an innermost one. */
1024 void
1025 cancel_loop (struct loops *loops, struct loop *loop)
1026 {
1027 basic_block *bbs;
1028 unsigned i;
1029
1030 gcc_assert (!loop->inner);
1031
1032 /* Move blocks up one level (they should be removed as soon as possible). */
1033 bbs = get_loop_body (loop);
1034 for (i = 0; i < loop->num_nodes; i++)
1035 bbs[i]->loop_father = loop->outer;
1036
1037 /* Remove the loop from structure. */
1038 flow_loop_tree_node_remove (loop);
1039
1040 /* Remove loop from loops array. */
1041 loops->parray[loop->num] = NULL;
1042
1043 /* Free loop data. */
1044 flow_loop_free (loop);
1045 }
1046
1047 /* Cancels LOOP and all its subloops. */
1048 void
1049 cancel_loop_tree (struct loops *loops, struct loop *loop)
1050 {
1051 while (loop->inner)
1052 cancel_loop_tree (loops, loop->inner);
1053 cancel_loop (loops, loop);
1054 }
1055
1056 /* Checks that LOOPS are all right:
1057 -- sizes of loops are all right
1058 -- results of get_loop_body really belong to the loop
1059 -- loop headers have just a single entry edge and a single latch edge
1060 -- loop latches have only a single successor, which is the header of their loop
1061 -- irreducible loops are correctly marked
1062 */
1063 void
1064 verify_loop_structure (struct loops *loops)
1065 {
1066 unsigned *sizes, i, j;
1067 sbitmap irreds;
1068 basic_block *bbs, bb;
1069 struct loop *loop;
1070 int err = 0;
1071 edge e;
1072
1073 /* Check sizes. */
1074 sizes = xcalloc (loops->num, sizeof (int));
1075 sizes[0] = 2;
1076
1077 FOR_EACH_BB (bb)
1078 for (loop = bb->loop_father; loop; loop = loop->outer)
1079 sizes[loop->num]++;
1080
1081 for (i = 0; i < loops->num; i++)
1082 {
1083 if (!loops->parray[i])
1084 continue;
1085
1086 if (loops->parray[i]->num_nodes != sizes[i])
1087 {
1088 error ("Size of loop %d should be %d, not %d.",
1089 i, sizes[i], loops->parray[i]->num_nodes);
1090 err = 1;
1091 }
1092 }
1093
1094 /* Check get_loop_body. */
1095 for (i = 1; i < loops->num; i++)
1096 {
1097 loop = loops->parray[i];
1098 if (!loop)
1099 continue;
1100 bbs = get_loop_body (loop);
1101
1102 for (j = 0; j < loop->num_nodes; j++)
1103 if (!flow_bb_inside_loop_p (loop, bbs[j]))
1104 {
1105 error ("Bb %d do not belong to loop %d.",
1106 bbs[j]->index, i);
1107 err = 1;
1108 }
1109 free (bbs);
1110 }
1111
1112 /* Check headers and latches. */
1113 for (i = 1; i < loops->num; i++)
1114 {
1115 loop = loops->parray[i];
1116 if (!loop)
1117 continue;
1118
1119 if ((loops->state & LOOPS_HAVE_PREHEADERS)
1120 && EDGE_COUNT (loop->header->preds) != 2)
1121 {
1122 error ("Loop %d's header does not have exactly 2 entries.", i);
1123 err = 1;
1124 }
1125 if (loops->state & LOOPS_HAVE_SIMPLE_LATCHES)
1126 {
1127 if (!single_succ_p (loop->latch))
1128 {
1129 error ("Loop %d's latch does not have exactly 1 successor.", i);
1130 err = 1;
1131 }
1132 if (single_succ (loop->latch) != loop->header)
1133 {
1134 error ("Loop %d's latch does not have header as successor.", i);
1135 err = 1;
1136 }
1137 if (loop->latch->loop_father != loop)
1138 {
1139 error ("Loop %d's latch does not belong directly to it.", i);
1140 err = 1;
1141 }
1142 }
1143 if (loop->header->loop_father != loop)
1144 {
1145 error ("Loop %d's header does not belong directly to it.", i);
1146 err = 1;
1147 }
1148 if ((loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1149 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1150 {
1151 error ("Loop %d's latch is marked as part of irreducible region.", i);
1152 err = 1;
1153 }
1154 }
1155
1156 /* Check irreducible loops. */
1157 if (loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1158 {
1159 /* Record old info. */
1160 irreds = sbitmap_alloc (last_basic_block);
1161 FOR_EACH_BB (bb)
1162 {
1163 edge_iterator ei;
1164 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1165 SET_BIT (irreds, bb->index);
1166 else
1167 RESET_BIT (irreds, bb->index);
1168 FOR_EACH_EDGE (e, ei, bb->succs)
1169 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1170 e->flags |= EDGE_ALL_FLAGS + 1;
1171 }
1172
1173 /* Recount it. */
1174 mark_irreducible_loops (loops);
1175
1176 /* Compare. */
1177 FOR_EACH_BB (bb)
1178 {
1179 edge_iterator ei;
1180
1181 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1182 && !TEST_BIT (irreds, bb->index))
1183 {
1184 error ("Basic block %d should be marked irreducible.", bb->index);
1185 err = 1;
1186 }
1187 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1188 && TEST_BIT (irreds, bb->index))
1189 {
1190 error ("Basic block %d should not be marked irreducible.", bb->index);
1191 err = 1;
1192 }
1193 FOR_EACH_EDGE (e, ei, bb->succs)
1194 {
1195 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1196 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1197 {
1198 error ("Edge from %d to %d should be marked irreducible.",
1199 e->src->index, e->dest->index);
1200 err = 1;
1201 }
1202 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1203 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1204 {
1205 error ("Edge from %d to %d should not be marked irreducible.",
1206 e->src->index, e->dest->index);
1207 err = 1;
1208 }
1209 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1210 }
1211 }
1212 free (irreds);
1213 }
1214
1215 /* Check the single_exit. */
1216 if (loops->state & LOOPS_HAVE_MARKED_SINGLE_EXITS)
1217 {
1218 memset (sizes, 0, sizeof (unsigned) * loops->num);
1219 FOR_EACH_BB (bb)
1220 {
1221 edge_iterator ei;
1222 if (bb->loop_father == loops->tree_root)
1223 continue;
1224 FOR_EACH_EDGE (e, ei, bb->succs)
1225 {
1226 if (e->dest == EXIT_BLOCK_PTR)
1227 continue;
1228
1229 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1230 continue;
1231
1232 for (loop = bb->loop_father;
1233 loop != e->dest->loop_father;
1234 loop = loop->outer)
1235 {
1236 sizes[loop->num]++;
1237 if (loop->single_exit
1238 && loop->single_exit != e)
1239 {
1240 error ("Wrong single exit %d->%d recorded for loop %d.",
1241 loop->single_exit->src->index,
1242 loop->single_exit->dest->index,
1243 loop->num);
1244 error ("Right exit is %d->%d.",
1245 e->src->index, e->dest->index);
1246 err = 1;
1247 }
1248 }
1249 }
1250 }
1251
1252 for (i = 1; i < loops->num; i++)
1253 {
1254 loop = loops->parray[i];
1255 if (!loop)
1256 continue;
1257
1258 if (sizes[i] == 1
1259 && !loop->single_exit)
1260 {
1261 error ("Single exit not recorded for loop %d.", loop->num);
1262 err = 1;
1263 }
1264
1265 if (sizes[i] != 1
1266 && loop->single_exit)
1267 {
1268 error ("Loop %d should not have single exit (%d -> %d).",
1269 loop->num,
1270 loop->single_exit->src->index,
1271 loop->single_exit->dest->index);
1272 err = 1;
1273 }
1274 }
1275 }
1276
1277 gcc_assert (!err);
1278
1279 free (sizes);
1280 }
1281
1282 /* Returns latch edge of LOOP. */
1283 edge
1284 loop_latch_edge (const struct loop *loop)
1285 {
1286 return find_edge (loop->latch, loop->header);
1287 }
1288
1289 /* Returns preheader edge of LOOP. */
1290 edge
1291 loop_preheader_edge (const struct loop *loop)
1292 {
1293 edge e;
1294 edge_iterator ei;
1295
1296 FOR_EACH_EDGE (e, ei, loop->header->preds)
1297 if (e->src != loop->latch)
1298 break;
1299
1300 return e;
1301 }
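/* Usage sketch: for a loop in canonical form (a preheader and a simple
   latch), the header's two incoming edges are exactly these:

     edge latch = loop_latch_edge (loop);
     edge entry = loop_preheader_edge (loop);

     gcc_assert (latch->dest == loop->header && entry->dest == loop->header);  */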
1302
1303 /* Returns true if E is an exit of LOOP. */
1304
1305 bool
1306 loop_exit_edge_p (const struct loop *loop, edge e)
1307 {
1308 return (flow_bb_inside_loop_p (loop, e->src)
1309 && !flow_bb_inside_loop_p (loop, e->dest));
1310 }