/* Extracted from gcc/cfg.c (thirdparty/gcc.git mirror).  */
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and
21 analyze it. All other modules should not transform the data structure
22 directly and use abstraction instead. The file is supposed to be
23 ordered bottom-up and should not contain any code dependent on a
24 particular intermediate language (RTL or trees).
25
26 Available functionality:
27 - Initialization/deallocation
28 init_flow, clear_edges
29 - Low level basic block manipulation
30 alloc_block, expunge_block
31 - Edge manipulation
32 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
33 - Low level edge redirection (without updating instruction chain)
34 redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
35 - Dumping and debugging
36 dump_flow_info, debug_flow_info, dump_edge_info
37 - Allocation of AUX fields for basic blocks
38 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
39 - clear_bb_flags
40 - Consistency checking
41 verify_flow_info
42 - Dumping and debugging
43 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n
44
45 TODO: Document these "Available functionality" functions in the files
46 that implement them.
47 */
48 \f
49 #include "config.h"
50 #include "system.h"
51 #include "coretypes.h"
52 #include "backend.h"
53 #include "hard-reg-set.h"
54 #include "tree.h"
55 #include "cfghooks.h"
56 #include "df.h"
57 #include "cfganal.h"
58 #include "cfgloop.h" /* FIXME: For struct loop. */
59 #include "dumpfile.h"
60
61 \f
62
/* Called once at initialization time.  Allocates THE_FUN's CFG (if not
   already present), zeroes its edge count, and creates the always-present
   ENTRY and EXIT blocks, chained directly to each other.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  the_fun->cfg->count_max = profile_count::uninitialized ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  /* Initially the chain is just ENTRY followed by EXIT; real blocks are
     linked in between later.  */
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
  the_fun->cfg->edge_flags_allocated = EDGE_ALL_FLAGS;
  the_fun->cfg->bb_flags_allocated = BB_ALL_FLAGS;
}
85 \f
/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  Also keeps
   FN's edge count in sync.  */

static void
free_edge (function *fn, edge e)
{
  n_edges_for_fn (fn)--;
  ggc_free (e);
}
95
96 /* Free the memory associated with the edge structures. */
97
98 void
99 clear_edges (struct function *fn)
100 {
101 basic_block bb;
102 edge e;
103 edge_iterator ei;
104
105 FOR_EACH_BB_FN (bb, fn)
106 {
107 FOR_EACH_EDGE (e, ei, bb->succs)
108 free_edge (fn, e);
109 vec_safe_truncate (bb->succs, 0);
110 vec_safe_truncate (bb->preds, 0);
111 }
112
113 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
114 free_edge (fn, e);
115 vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
116 vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);
117
118 gcc_assert (!n_edges_for_fn (fn));
119 }
120 \f
121 /* Allocate memory for basic_block. */
122
123 basic_block
124 alloc_block (void)
125 {
126 basic_block bb;
127 bb = ggc_cleared_alloc<basic_block_def> ();
128 bb->count = profile_count::uninitialized ();
129 return bb;
130 }
131
/* Link block B to chain after AFTER.  Note the statement order matters:
   B->next_bb must be read from AFTER before AFTER->next_bb is
   overwritten.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}
141
/* Unlink block B from chain.  B's neighbors are spliced together and
   B's own links are cleared.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}
151
152 /* Sequentially order blocks and compact the arrays. */
153 void
154 compact_blocks (void)
155 {
156 int i;
157
158 SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
159 SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
160
161 if (df)
162 df_compact_blocks ();
163 else
164 {
165 basic_block bb;
166
167 i = NUM_FIXED_BLOCKS;
168 FOR_EACH_BB_FN (bb, cfun)
169 {
170 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
171 bb->index = i;
172 i++;
173 }
174 gcc_assert (i == n_basic_blocks_for_fn (cfun));
175
176 for (; i < last_basic_block_for_fn (cfun); i++)
177 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
178 }
179 last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
180 }
181
/* Remove block B from the basic block array.  B must already be
   disconnected from the CFG; this only unlinks it from the chain and
   clears its array slot.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     The dead SSA_NAMES are left pointing to dead statements that are pointing
     to dead basic blocks making garbage collector to die.
     We should be able to release all dead SSA_NAMES and at the same time we should
     clear out BB pointer of dead statements consistently.  */
}
196 \f
/* Connect E to E->src by appending it to the successor vector.  Marks
   dataflow solutions dirty since the CFG changed.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}
205
/* Connect E to E->dest by appending it to the predecessor vector and
   recording its position in E->dest_idx.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  /* The edge was pushed last, so its index is count - 1.  */
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}
216
/* Disconnect edge E from E->src.  Linear search of the successor
   vector; E must be present (asserted).  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  /* unordered_remove fills the hole with the last element, so
	     succ order is not preserved — callers must not rely on it.  */
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  /* E was not on SRC's successor list — caller bug.  */
  gcc_unreachable ();
}
240
/* Disconnect edge E from E->dest.  O(1) removal using the cached
   E->dest_idx position.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}
257
258 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly
259 created edge. Use this only if you are sure that this edge can't
260 possibly already exist. */
261
262 edge
263 unchecked_make_edge (basic_block src, basic_block dst, int flags)
264 {
265 edge e;
266 e = ggc_cleared_alloc<edge_def> ();
267 n_edges_for_fn (cfun)++;
268
269 e->probability = profile_probability::uninitialized ();
270 e->src = src;
271 e->dest = dst;
272 e->flags = flags;
273
274 connect_src (e);
275 connect_dest (e);
276
277 execute_on_growing_pred (e);
278 return e;
279 }
280
281 /* Create an edge connecting SRC and DST with FLAGS optionally using
282 edge cache CACHE. Return the new edge, NULL if already exist. */
283
284 edge
285 cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
286 {
287 if (edge_cache == NULL
288 || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
289 || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
290 return make_edge (src, dst, flags);
291
292 /* Does the requested edge already exist? */
293 if (! bitmap_bit_p (edge_cache, dst->index))
294 {
295 /* The edge does not exist. Create one and update the
296 cache. */
297 bitmap_set_bit (edge_cache, dst->index);
298 return unchecked_make_edge (src, dst, flags);
299 }
300
301 /* At this point, we know that the requested edge exists. Adjust
302 flags if necessary. */
303 if (flags)
304 {
305 edge e = find_edge (src, dst);
306 e->flags |= flags;
307 }
308
309 return NULL;
310 }
311
312 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly
313 created edge or NULL if already exist. */
314
315 edge
316 make_edge (basic_block src, basic_block dest, int flags)
317 {
318 edge e = find_edge (src, dest);
319
320 /* Make sure we don't add duplicate edges. */
321 if (e)
322 {
323 e->flags |= flags;
324 return NULL;
325 }
326
327 return unchecked_make_edge (src, dest, flags);
328 }
329
330 /* Create an edge connecting SRC to DEST and set probability by knowing
331 that it is the single edge leaving SRC. */
332
333 edge
334 make_single_succ_edge (basic_block src, basic_block dest, int flags)
335 {
336 edge e = make_edge (src, dest, flags);
337
338 e->probability = profile_probability::always ();
339 return e;
340 }
341
/* This function will remove an edge from the flow graph.  Hooks for
   shrinking predecessors run before the edge is detached; the edge
   memory itself is released last.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (cfun, e);
}
355
/* Redirect an edge's successor from one block to another.  The
   shrinking hook fires for the old destination before detaching, and
   the growing hook for the new one after reattaching.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}
372
/* Redirect an edge's predecessor from one block to another.  No
   pred-shrinking/growing hooks run here since E->dest is unchanged.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}
385
386 /* Clear all basic block flags that do not have to be preserved. */
387 void
388 clear_bb_flags (void)
389 {
390 basic_block bb;
391 int flags_to_preserve = BB_FLAGS_TO_PRESERVE;
392 if (current_loops
393 && loops_state_satisfies_p (cfun, LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
394 flags_to_preserve |= BB_IRREDUCIBLE_LOOP;
395
396 FOR_ALL_BB_FN (bb, cfun)
397 bb->flags &= flags_to_preserve;
398 }
399 \f
/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may get invalid for incompletely
   solved graphs, later eliminating of conditionals or roundoff errors.
   It is still practical to have them reported for debugging of simple
   testcases.  Diagnostics for block BB are written to FILE, indented
   by INDENT spaces.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent)
{
  edge e;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  /* Build an indentation prefix of INDENT spaces.  */
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  /* Nothing to verify without profile data.  */
  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  /* Outgoing probabilities of every non-exit block should sum to 1.  */
  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      bool found = false;
      profile_probability sum = profile_probability::never ();
      int isum = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
	    found = true;
	  sum += e->probability;
	  if (e->probability.initialized_p ())
	    isum += e->probability.to_reg_br_prob_base ();
	}
      /* Only report mismatches for non-EH control flow. If there are only EH
	 edges it means that the BB ends by noreturn call.  Here the control
	 flow may just terminate.  */
      if (found)
	{
	  if (sum.differs_from_p (profile_probability::always ()))
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities ",
		       s_indent);
	      sum.dump (file);
	      fprintf (file, "\n");
	    }
	  /* Probabilities caps to 100% and thus the previous test will never
	     fire if the sum of probabilities is too large.  */
	  else if (isum > REG_BR_PROB_BASE + 100)
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
		       s_indent, isum * 100.0 / REG_BR_PROB_BASE);
	    }
	}
    }
  /* Incoming edge counts of every non-entry block should sum to the
     block's own count.  */
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      profile_count sum = profile_count::zero ();
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += e->count ();
      if (sum.differs_from_p (bb->count))
	{
	  fprintf (file, ";; %sInvalid sum of incoming counts ",
		   s_indent);
	  sum.dump (file);
	  fprintf (file, ", should be ");
	  bb->count.dump (file);
	  fprintf (file, "\n");
	}
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
	 currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
	fprintf (file, ";; %sBlock in cold partition with hot count\n",
		 s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (!probably_never_executed_edge_p (fun, e))
	    fprintf (file,
		     ";; %sBlock in cold partition with incoming hot edge\n",
		     s_indent);
	}
    }
}
486 \f
487 void
488 dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
489 {
490 basic_block side = (do_succ ? e->dest : e->src);
491 bool do_details = false;
492
493 if ((flags & TDF_DETAILS) != 0
494 && (flags & TDF_SLIM) == 0)
495 do_details = true;
496
497 if (side->index == ENTRY_BLOCK)
498 fputs (" ENTRY", file);
499 else if (side->index == EXIT_BLOCK)
500 fputs (" EXIT", file);
501 else
502 fprintf (file, " %d", side->index);
503
504 if (e->probability.initialized_p () && do_details)
505 {
506 fprintf (file, " [");
507 e->probability.dump (file);
508 fprintf (file, "] ");
509 }
510
511 if (e->count ().initialized_p () && do_details)
512 {
513 fputs (" count:", file);
514 e->count ().dump (file);
515 }
516
517 if (e->flags && do_details)
518 {
519 static const char * const bitnames[] =
520 {
521 #define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
522 #include "cfg-flags.def"
523 NULL
524 #undef DEF_EDGE_FLAG
525 };
526 bool comma = false;
527 int i, flags = e->flags;
528
529 gcc_assert (e->flags <= EDGE_ALL_FLAGS);
530 fputs (" (", file);
531 for (i = 0; flags; i++)
532 if (flags & (1 << i))
533 {
534 flags &= ~(1 << i);
535
536 if (comma)
537 fputc (',', file);
538 fputs (bitnames[i], file);
539 comma = true;
540 }
541
542 fputc (')', file);
543 }
544 }
545
/* Print a description of edge REF to stderr, for use from a
   debugger.  */
DEBUG_FUNCTION void
debug (edge_def &ref)
{
  fprintf (stderr, "<edge (%d -> %d)>\n",
	   ref.src->index, ref.dest->index);
  dump_edge_info (stderr, &ref, TDF_DETAILS, false);
  fprintf (stderr, "\n");
}
554
555 DEBUG_FUNCTION void
556 debug (edge_def *ptr)
557 {
558 if (ptr)
559 debug (*ptr);
560 else
561 fprintf (stderr, "<nil>\n");
562 }
563
/* One-line edge dump used by the DEFINE_DEBUG_* instantiations
   below.  */
static void
debug_slim (edge e)
{
  fprintf (stderr, "<edge 0x%p (%d -> %d)>", (void *) e,
	   e->src->index, e->dest->index);
}
570
/* Instantiate debug routines for vectors and hash sets of edges.  */
DEFINE_DEBUG_VEC (edge)
DEFINE_DEBUG_HASH_SET (edge)
573 \f
/* Simple routines to easily allocate AUX fields of basic blocks.  */

/* Obstack holding per-block AUX data, and the first object allocated
   from it (freeing that object releases everything at once).  */
static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
/* Likewise for per-edge AUX data.  */
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;
580
/* Allocate a zeroed memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}
592
593 /* Initialize the block_aux_obstack and if SIZE is nonzero, call
594 alloc_aux_for_block for each basic block. */
595
596 void
597 alloc_aux_for_blocks (int size)
598 {
599 static int initialized;
600
601 if (!initialized)
602 {
603 gcc_obstack_init (&block_aux_obstack);
604 initialized = 1;
605 }
606 else
607 /* Check whether AUX data are still allocated. */
608 gcc_assert (!first_block_aux_obj);
609
610 first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
611 if (size)
612 {
613 basic_block bb;
614
615 FOR_ALL_BB_FN (bb, cfun)
616 alloc_aux_for_block (bb, size);
617 }
618 }
619
620 /* Clear AUX pointers of all blocks. */
621
622 void
623 clear_aux_for_blocks (void)
624 {
625 basic_block bb;
626
627 FOR_ALL_BB_FN (bb, cfun)
628 bb->aux = NULL;
629 }
630
/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  Must be paired with a prior alloc_aux_for_blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  /* Freeing the first object releases everything allocated after it.  */
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}
643
/* Allocate a zeroed memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}
655
656 /* Initialize the edge_aux_obstack and if SIZE is nonzero, call
657 alloc_aux_for_edge for each basic edge. */
658
659 void
660 alloc_aux_for_edges (int size)
661 {
662 static int initialized;
663
664 if (!initialized)
665 {
666 gcc_obstack_init (&edge_aux_obstack);
667 initialized = 1;
668 }
669 else
670 /* Check whether AUX data are still allocated. */
671 gcc_assert (!first_edge_aux_obj);
672
673 first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
674 if (size)
675 {
676 basic_block bb;
677
678 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
679 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
680 {
681 edge e;
682 edge_iterator ei;
683
684 FOR_EACH_EDGE (e, ei, bb->succs)
685 alloc_aux_for_edge (e, size);
686 }
687 }
688 }
689
690 /* Clear AUX pointers of all edges. */
691
692 void
693 clear_aux_for_edges (void)
694 {
695 basic_block bb;
696 edge e;
697
698 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
699 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
700 {
701 edge_iterator ei;
702 FOR_EACH_EDGE (e, ei, bb->succs)
703 e->aux = NULL;
704 }
705 }
706
/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  Must be paired with a prior alloc_aux_for_edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  /* Freeing the first object releases everything allocated after it.  */
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}
719
/* Dump basic block BB to stderr with the current dump flags, for use
   from a debugger.  */
DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}
725
726 DEBUG_FUNCTION basic_block
727 debug_bb_n (int n)
728 {
729 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
730 debug_bb (bb);
731 return bb;
732 }
733
/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  /* BB flag names, generated from cfg-flags.def in bit order.  */
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  /* Build an indentation prefix of INDENT spaces.  */
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  if (bb->count.initialized_p ())
	    {
	      fputs (", count ", outf);
	      bb->count.dump (outf);
	    }
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent);
	  fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  /* Print the set flags as a parenthesized name list.  */
	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      /* One predecessor edge per line.  */
      fputs (";; ", outf);
      fprintf (outf, "%s pred: ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      /* One successor edge per line.  */
      fputs (";; ", outf);
      fprintf (outf, "%s succ: ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}
854
855 /* Dumps a brief description of cfg to FILE. */
856
857 void
858 brief_dump_cfg (FILE *file, dump_flags_t flags)
859 {
860 basic_block bb;
861
862 FOR_EACH_BB_FN (bb, cfun)
863 {
864 dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
865 }
866 }
867
/* An edge originally destinating BB of COUNT has been proved to
   leave the block by TAKEN_EDGE.  Update profile of BB such that edge E can be
   redirected to destination of TAKEN_EDGE.

   This function may leave the profile inconsistent in the case TAKEN_EDGE
   frequency or count is believed to be lower than COUNT
   respectively.  */
void
update_bb_profile_for_threading (basic_block bb,
				 profile_count count, edge taken_edge)
{
  edge c;
  profile_probability prob;
  edge_iterator ei;

  /* The threaded-away count should not exceed the block's count;
     report (but do not abort) when it does.  */
  if (bb->count < count)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
    }
  bb->count -= count;

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->count.nonzero_p ())
    prob = count.probability_in (bb->count);
  else
    prob = profile_probability::never ();
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Jump threading proved probability of edge "
		   "%i->%i too small (it is ",
		   taken_edge->src->index, taken_edge->dest->index);
	  taken_edge->probability.dump (dump_file);
	  fprintf (dump_file, " should be ");
	  prob.dump (dump_file);
	  fprintf (dump_file, ")\n");
	}
      /* Cap the removed probability at 6/8 of the edge's own
	 probability so it never goes negative.  */
      prob = taken_edge->probability.apply_scale (6, 8);
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = prob.invert ();
  if (prob == profile_probability::never ())
    {
      if (dump_file)
	fprintf (dump_file, "Edge probabilities of bb %i has been reset, "
		 "count of block should end up being 0, it is non-zero\n",
		 bb->index);
      /* All remaining probability was threaded away; fall back to a
	 guessed distribution over the successors.  */
      EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = profile_probability::guessed_never ();
    }
  else if (!(prob == profile_probability::always ()))
    {
      /* Renormalize so the outgoing probabilities again sum to 1.  */
      FOR_EACH_EDGE (c, ei, bb->succs)
	c->probability /= prob;
    }

  gcc_assert (bb == taken_edge->src);
}
935
936 /* Multiply all frequencies of basic blocks in array BBS of length NBBS
937 by NUM/DEN, in profile_count arithmetic. More accurate than previous
938 function but considerably slower. */
939 void
940 scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
941 profile_count num, profile_count den)
942 {
943 int i;
944 if (num == profile_count::zero () || den.nonzero_p ())
945 for (i = 0; i < nbbs; i++)
946 bbs[i]->count = bbs[i]->count.apply_scale (num, den);
947 }
948
/* Multiply all counts of basic blocks in array BBS of length NBBS
   by probability P.  (The old comment was a copy-paste of the
   NUM/DEN variant above.)  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;

  for (i = 0; i < nbbs; i++)
    bbs[i]->count = bbs[i]->count.apply_probability (p);
}
961
/* Helper types for hash tables.  */

/* One entry of the block/loop original<->copy maps: keyed by INDEX1,
   mapping to INDEX2.  */
struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable) */
  int index2;
};
971
/* Hash traits for htab_bb_copy_original_entry: hash and compare on
   index1 only; entries are pool-allocated, so no free on removal.  */
struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};
978
979 inline hashval_t
980 bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
981 {
982 return data->index1;
983 }
984
985 inline bool
986 bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
987 const htab_bb_copy_original_entry *data2)
988 {
989 return data->index1 == data2->index1;
990 }
991
/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
/* Allocator backing the entries of all three tables above.  Non-NULL
   exactly while the tables are initialized.  */
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
1000
/* Initialize the data structures to maintain mapping between blocks
   and its copies.  Must be paired with free_original_copy_tables.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy");
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}
1012
/* Reset the data structures to maintain mapping between blocks and
   its copies.  The tables must already be initialized; their entries
   are dropped but the tables and pool stay allocated.  */

void
reset_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  bb_original->empty ();
  bb_copy->empty ();
  loop_copy->empty ();
}
1024
1025 /* Free the data structures to maintain mapping between blocks and
1026 its copies. */
1027 void
1028 free_original_copy_tables (void)
1029 {
1030 gcc_assert (original_copy_bb_pool);
1031 delete bb_copy;
1032 bb_copy = NULL;
1033 delete bb_original;
1034 bb_original = NULL;
1035 delete loop_copy;
1036 loop_copy = NULL;
1037 delete original_copy_bb_pool;
1038 original_copy_bb_pool = NULL;
1039 }
1040
/* Return true iff we have had a call to initialize_original_copy_tables
   without a corresponding call to free_original_copy_tables.  */

bool
original_copy_tables_initialized_p (void)
{
  return original_copy_bb_pool != NULL;
}
1049
1050 /* Removes the value associated with OBJ from table TAB. */
1051
1052 static void
1053 copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
1054 {
1055 htab_bb_copy_original_entry **slot;
1056 struct htab_bb_copy_original_entry key, *elt;
1057
1058 if (!original_copy_bb_pool)
1059 return;
1060
1061 key.index1 = obj;
1062 slot = tab->find_slot (&key, NO_INSERT);
1063 if (!slot)
1064 return;
1065
1066 elt = *slot;
1067 tab->clear_slot (slot);
1068 original_copy_bb_pool->remove (elt);
1069 }
1070
1071 /* Sets the value associated with OBJ in table TAB to VAL.
1072 Do nothing when data structures are not initialized. */
1073
1074 static void
1075 copy_original_table_set (hash_table<bb_copy_hasher> *tab,
1076 unsigned obj, unsigned val)
1077 {
1078 struct htab_bb_copy_original_entry **slot;
1079 struct htab_bb_copy_original_entry key;
1080
1081 if (!original_copy_bb_pool)
1082 return;
1083
1084 key.index1 = obj;
1085 slot = tab->find_slot (&key, INSERT);
1086 if (!*slot)
1087 {
1088 *slot = original_copy_bb_pool->allocate ();
1089 (*slot)->index1 = obj;
1090 }
1091 (*slot)->index2 = val;
1092 }
1093
/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}
1101
1102 /* Get the original basic block. */
1103 basic_block
1104 get_bb_original (basic_block bb)
1105 {
1106 struct htab_bb_copy_original_entry *entry;
1107 struct htab_bb_copy_original_entry key;
1108
1109 gcc_assert (original_copy_bb_pool);
1110
1111 key.index1 = bb->index;
1112 entry = bb_original->find (&key);
1113 if (entry)
1114 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1115 else
1116 return NULL;
1117 }
1118
/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}
1126
1127 /* Get the copy of basic block. */
1128 basic_block
1129 get_bb_copy (basic_block bb)
1130 {
1131 struct htab_bb_copy_original_entry *entry;
1132 struct htab_bb_copy_original_entry key;
1133
1134 gcc_assert (original_copy_bb_pool);
1135
1136 key.index1 = bb->index;
1137 entry = bb_copy->find (&key);
1138 if (entry)
1139 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1140 else
1141 return NULL;
1142 }
1143
1144 /* Set copy for LOOP to COPY. Do nothing when data structures are not
1145 initialized so passes not needing this don't need to care. */
1146
1147 void
1148 set_loop_copy (class loop *loop, class loop *copy)
1149 {
1150 if (!copy)
1151 copy_original_table_clear (loop_copy, loop->num);
1152 else
1153 copy_original_table_set (loop_copy, loop->num, copy->num);
1154 }
1155
1156 /* Get the copy of LOOP. */
1157
1158 class loop *
1159 get_loop_copy (class loop *loop)
1160 {
1161 struct htab_bb_copy_original_entry *entry;
1162 struct htab_bb_copy_original_entry key;
1163
1164 gcc_assert (original_copy_bb_pool);
1165
1166 key.index1 = loop->num;
1167 entry = loop_copy->find (&key);
1168 if (entry)
1169 return get_loop (cfun, entry->index2);
1170 else
1171 return NULL;
1172 }