/* Scrape artifact from a gitweb blob view (git.ipfire.org,
   thirdparty/gcc.git, gcc/profile.c) -- not part of the original
   source; kept only as a comment so the file remains valid C.  */
1 /* Calculate branch probabilities, and basic block execution counts.
2 Copyright (C) 1990-2013 Free Software Foundation, Inc.
3 Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
4 based on some ideas from Dain Samples of UC Berkeley.
5 Further mangling by Bob Manson, Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 /* Generate basic block profile instrumentation and auxiliary files.
24 Profile generation is optimized, so that not all arcs in the basic
25 block graph need instrumenting. First, the BB graph is closed with
26 one entry (function start), and one exit (function exit). Any
27 ABNORMAL_EDGE cannot be instrumented (because there is no control
28 path to place the code). We close the graph by inserting fake
29 EDGE_FAKE edges to the EXIT_BLOCK, from the sources of abnormal
30 edges that do not go to the exit_block. We ignore such abnormal
31 edges. Naturally these fake edges are never directly traversed,
32 and so *cannot* be directly instrumented. Some other graph
33 massaging is done. To optimize the instrumentation we generate the
34 BB minimal span tree, only edges that are not on the span tree
35 (plus the entry point) need instrumenting. From that information
36 all other edge counts can be deduced. By construction all fake
37 edges must be on the spanning tree. We also attempt to place
38 EDGE_CRITICAL edges on the spanning tree.
39
40 The auxiliary files generated are <dumpbase>.gcno (at compile time)
41 and <dumpbase>.gcda (at run time). The format is
42 described in full in gcov-io.h. */
43
44 /* ??? Register allocation should use basic block execution counts to
45 give preference to the most commonly executed blocks. */
46
47 /* ??? Should calculate branch probabilities before instrumenting code, since
48 then we can use arc counts to help decide which arcs to instrument. */
49
50 #include "config.h"
51 #include "system.h"
52 #include "coretypes.h"
53 #include "tm.h"
54 #include "rtl.h"
55 #include "flags.h"
56 #include "regs.h"
57 #include "expr.h"
58 #include "function.h"
59 #include "basic-block.h"
60 #include "diagnostic-core.h"
61 #include "coverage.h"
62 #include "value-prof.h"
63 #include "tree.h"
64 #include "gimple.h"
65 #include "gimple-iterator.h"
66 #include "tree-cfg.h"
67 #include "cfgloop.h"
68 #include "dumpfile.h"
69 #include "cgraph.h"
70
71 #include "profile.h"
72
/* Per-basic-block bookkeeping used while solving the flow graph for
   execution counts; hung off each block's AUX field (see
   alloc_aux_for_blocks in compute_branch_probabilities).  */
struct bb_info {
  /* Nonzero once this block's execution count has been determined.  */
  unsigned int count_valid : 1;

  /* Number of successor and predecessor edges.  */
  /* NOTE(review): per read_profile_edge_counts these are decremented
     as edge counts become known, so they track the number of
     still-unresolved edges during graph solving.  */
  gcov_type succ_count;
  gcov_type pred_count;
};

/* Access the bb_info attached to basic block B's AUX pointer.  */
#define BB_INFO(b) ((struct bb_info *) (b)->aux)
82
83
/* Counter summary from the last set of coverage counts read.  */

const struct gcov_ctr_summary *profile_info;

/* Counter working set information computed from the current counter
   summary.  Not initialized unless profile_info summary is non-NULL.  */
static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];

/* Collect statistics on the performance of this pass for the entire source
   file.  Accumulated across all functions compiled in this run.  */

static int total_num_blocks;
static int total_num_edges;
static int total_num_edges_ignored;
static int total_num_edges_instrumented;
static int total_num_blocks_created;
static int total_num_passes;
static int total_num_times_called;
/* Histogram of branch probabilities, in twenty 5% buckets.  */
static int total_hist_br_prob[20];
static int total_num_branches;

/* Forward declarations.  */
static void find_spanning_tree (struct edge_list *);
107
108 /* Add edge instrumentation code to the entire insn chain.
109
110 F is the first insn of the chain.
111 NUM_BLOCKS is the number of basic blocks found in F. */
112
113 static unsigned
114 instrument_edges (struct edge_list *el)
115 {
116 unsigned num_instr_edges = 0;
117 int num_edges = NUM_EDGES (el);
118 basic_block bb;
119
120 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
121 {
122 edge e;
123 edge_iterator ei;
124
125 FOR_EACH_EDGE (e, ei, bb->succs)
126 {
127 struct edge_info *inf = EDGE_INFO (e);
128
129 if (!inf->ignore && !inf->on_tree)
130 {
131 gcc_assert (!(e->flags & EDGE_ABNORMAL));
132 if (dump_file)
133 fprintf (dump_file, "Edge %d to %d instrumented%s\n",
134 e->src->index, e->dest->index,
135 EDGE_CRITICAL_P (e) ? " (and split)" : "");
136 gimple_gen_edge_profiler (num_instr_edges++, e);
137 }
138 }
139 }
140
141 total_num_blocks_created += num_edges;
142 if (dump_file)
143 fprintf (dump_file, "%d edges instrumented\n", num_instr_edges);
144 return num_instr_edges;
145 }
146
147 /* Add code to measure histograms for values in list VALUES. */
148 static void
149 instrument_values (histogram_values values)
150 {
151 unsigned i;
152
153 /* Emit code to generate the histograms before the insns. */
154
155 for (i = 0; i < values.length (); i++)
156 {
157 histogram_value hist = values[i];
158 unsigned t = COUNTER_FOR_HIST_TYPE (hist->type);
159
160 if (!coverage_counter_alloc (t, hist->n_counters))
161 continue;
162
163 switch (hist->type)
164 {
165 case HIST_TYPE_INTERVAL:
166 gimple_gen_interval_profiler (hist, t, 0);
167 break;
168
169 case HIST_TYPE_POW2:
170 gimple_gen_pow2_profiler (hist, t, 0);
171 break;
172
173 case HIST_TYPE_SINGLE_VALUE:
174 gimple_gen_one_value_profiler (hist, t, 0);
175 break;
176
177 case HIST_TYPE_CONST_DELTA:
178 gimple_gen_const_delta_profiler (hist, t, 0);
179 break;
180
181 case HIST_TYPE_INDIR_CALL:
182 gimple_gen_ic_profiler (hist, t, 0);
183 break;
184
185 case HIST_TYPE_AVERAGE:
186 gimple_gen_average_profiler (hist, t, 0);
187 break;
188
189 case HIST_TYPE_IOR:
190 gimple_gen_ior_profiler (hist, t, 0);
191 break;
192
193 case HIST_TYPE_TIME_PROFILE:
194 {
195 basic_block bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
196 gimple_stmt_iterator gsi = gsi_start_bb (bb);
197
198 gimple_gen_time_profiler (t, 0, gsi);
199 break;
200 }
201
202 default:
203 gcc_unreachable ();
204 }
205 }
206 }
207 \f
208
209 /* Fill the working set information into the profile_info structure. */
210
211 void
212 get_working_sets (void)
213 {
214 unsigned ws_ix, pctinc, pct;
215 gcov_working_set_t *ws_info;
216
217 if (!profile_info)
218 return;
219
220 compute_working_sets (profile_info, gcov_working_sets);
221
222 if (dump_file)
223 {
224 fprintf (dump_file, "Counter working sets:\n");
225 /* Multiply the percentage by 100 to avoid float. */
226 pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
227 for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
228 ws_ix++, pct += pctinc)
229 {
230 if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
231 pct = 9990;
232 ws_info = &gcov_working_sets[ws_ix];
233 /* Print out the percentage using int arithmatic to avoid float. */
234 fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
235 HOST_WIDEST_INT_PRINT_DEC "\n",
236 pct / 100, pct - (pct / 100 * 100),
237 ws_info->num_counters,
238 (HOST_WIDEST_INT)ws_info->min_counter);
239 }
240 }
241 }
242
243 /* Given a the desired percentage of the full profile (sum_all from the
244 summary), multiplied by 10 to avoid float in PCT_TIMES_10, returns
245 the corresponding working set information. If an exact match for
246 the percentage isn't found, the closest value is used. */
247
248 gcov_working_set_t *
249 find_working_set (unsigned pct_times_10)
250 {
251 unsigned i;
252 if (!profile_info)
253 return NULL;
254 gcc_assert (pct_times_10 <= 1000);
255 if (pct_times_10 >= 999)
256 return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
257 i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
258 if (!i)
259 return &gcov_working_sets[0];
260 return &gcov_working_sets[i - 1];
261 }
262
263 /* Computes hybrid profile for all matching entries in da_file.
264
265 CFG_CHECKSUM is the precomputed checksum for the CFG. */
266
267 static gcov_type *
268 get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
269 {
270 unsigned num_edges = 0;
271 basic_block bb;
272 gcov_type *counts;
273
274 /* Count the edges to be (possibly) instrumented. */
275 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
276 {
277 edge e;
278 edge_iterator ei;
279
280 FOR_EACH_EDGE (e, ei, bb->succs)
281 if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
282 num_edges++;
283 }
284
285 counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, cfg_checksum,
286 lineno_checksum, &profile_info);
287 if (!counts)
288 return NULL;
289
290 get_working_sets ();
291
292 if (dump_file && profile_info)
293 fprintf (dump_file, "Merged %u profiles with maximal count %u.\n",
294 profile_info->runs, (unsigned) profile_info->sum_max);
295
296 return counts;
297 }
298
299
300 static bool
301 is_edge_inconsistent (vec<edge, va_gc> *edges)
302 {
303 edge e;
304 edge_iterator ei;
305 FOR_EACH_EDGE (e, ei, edges)
306 {
307 if (!EDGE_INFO (e)->ignore)
308 {
309 if (e->count < 0
310 && (!(e->flags & EDGE_FAKE)
311 || !block_ends_with_call_p (e->src)))
312 {
313 if (dump_file)
314 {
315 fprintf (dump_file,
316 "Edge %i->%i is inconsistent, count"HOST_WIDEST_INT_PRINT_DEC,
317 e->src->index, e->dest->index, e->count);
318 dump_bb (dump_file, e->src, 0, TDF_DETAILS);
319 dump_bb (dump_file, e->dest, 0, TDF_DETAILS);
320 }
321 return true;
322 }
323 }
324 }
325 return false;
326 }
327
328 static void
329 correct_negative_edge_counts (void)
330 {
331 basic_block bb;
332 edge e;
333 edge_iterator ei;
334
335 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
336 {
337 FOR_EACH_EDGE (e, ei, bb->succs)
338 {
339 if (e->count < 0)
340 e->count = 0;
341 }
342 }
343 }
344
345 /* Check consistency.
346 Return true if inconsistency is found. */
347 static bool
348 is_inconsistent (void)
349 {
350 basic_block bb;
351 bool inconsistent = false;
352 FOR_EACH_BB (bb)
353 {
354 inconsistent |= is_edge_inconsistent (bb->preds);
355 if (!dump_file && inconsistent)
356 return true;
357 inconsistent |= is_edge_inconsistent (bb->succs);
358 if (!dump_file && inconsistent)
359 return true;
360 if (bb->count < 0)
361 {
362 if (dump_file)
363 {
364 fprintf (dump_file, "BB %i count is negative "
365 HOST_WIDEST_INT_PRINT_DEC,
366 bb->index,
367 bb->count);
368 dump_bb (dump_file, bb, 0, TDF_DETAILS);
369 }
370 inconsistent = true;
371 }
372 if (bb->count != sum_edge_counts (bb->preds))
373 {
374 if (dump_file)
375 {
376 fprintf (dump_file, "BB %i count does not match sum of incoming edges "
377 HOST_WIDEST_INT_PRINT_DEC" should be " HOST_WIDEST_INT_PRINT_DEC,
378 bb->index,
379 bb->count,
380 sum_edge_counts (bb->preds));
381 dump_bb (dump_file, bb, 0, TDF_DETAILS);
382 }
383 inconsistent = true;
384 }
385 if (bb->count != sum_edge_counts (bb->succs) &&
386 ! (find_edge (bb, EXIT_BLOCK_PTR) != NULL && block_ends_with_call_p (bb)))
387 {
388 if (dump_file)
389 {
390 fprintf (dump_file, "BB %i count does not match sum of outgoing edges "
391 HOST_WIDEST_INT_PRINT_DEC" should be " HOST_WIDEST_INT_PRINT_DEC,
392 bb->index,
393 bb->count,
394 sum_edge_counts (bb->succs));
395 dump_bb (dump_file, bb, 0, TDF_DETAILS);
396 }
397 inconsistent = true;
398 }
399 if (!dump_file && inconsistent)
400 return true;
401 }
402
403 return inconsistent;
404 }
405
406 /* Set each basic block count to the sum of its outgoing edge counts */
407 static void
408 set_bb_counts (void)
409 {
410 basic_block bb;
411 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
412 {
413 bb->count = sum_edge_counts (bb->succs);
414 gcc_assert (bb->count >= 0);
415 }
416 }
417
418 /* Reads profile data and returns total number of edge counts read */
419 static int
420 read_profile_edge_counts (gcov_type *exec_counts)
421 {
422 basic_block bb;
423 int num_edges = 0;
424 int exec_counts_pos = 0;
425 /* For each edge not on the spanning tree, set its execution count from
426 the .da file. */
427 /* The first count in the .da file is the number of times that the function
428 was entered. This is the exec_count for block zero. */
429
430 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
431 {
432 edge e;
433 edge_iterator ei;
434
435 FOR_EACH_EDGE (e, ei, bb->succs)
436 if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
437 {
438 num_edges++;
439 if (exec_counts)
440 {
441 e->count = exec_counts[exec_counts_pos++];
442 if (e->count > profile_info->sum_max)
443 {
444 if (flag_profile_correction)
445 {
446 static bool informed = 0;
447 if (dump_enabled_p () && !informed)
448 dump_printf_loc (MSG_NOTE, input_location,
449 "corrupted profile info: edge count"
450 " exceeds maximal count\n");
451 informed = 1;
452 }
453 else
454 error ("corrupted profile info: edge from %i to %i exceeds maximal count",
455 bb->index, e->dest->index);
456 }
457 }
458 else
459 e->count = 0;
460
461 EDGE_INFO (e)->count_valid = 1;
462 BB_INFO (bb)->succ_count--;
463 BB_INFO (e->dest)->pred_count--;
464 if (dump_file)
465 {
466 fprintf (dump_file, "\nRead edge from %i to %i, count:",
467 bb->index, e->dest->index);
468 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
469 (HOST_WIDEST_INT) e->count);
470 }
471 }
472 }
473
474 return num_edges;
475 }
476
477 #define OVERLAP_BASE 10000
478
479 /* Compare the static estimated profile to the actual profile, and
480 return the "degree of overlap" measure between them.
481
482 Degree of overlap is a number between 0 and OVERLAP_BASE. It is
483 the sum of each basic block's minimum relative weights between
484 two profiles. And overlap of OVERLAP_BASE means two profiles are
485 identical. */
486
487 static int
488 compute_frequency_overlap (void)
489 {
490 gcov_type count_total = 0, freq_total = 0;
491 int overlap = 0;
492 basic_block bb;
493
494 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
495 {
496 count_total += bb->count;
497 freq_total += bb->frequency;
498 }
499
500 if (count_total == 0 || freq_total == 0)
501 return 0;
502
503 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
504 overlap += MIN (bb->count * OVERLAP_BASE / count_total,
505 bb->frequency * OVERLAP_BASE / freq_total);
506
507 return overlap;
508 }
509
/* Compute the branch probabilities for the various branches.
   Annotate them accordingly.

   CFG_CHECKSUM is the precomputed checksum for the CFG.

   Reads the instrumented-edge counts, propagates them over the whole
   CFG by flow conservation, optionally repairs an inconsistent
   profile, and finally converts counts to per-edge probabilities and
   block frequencies.  */

static void
compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
{
  basic_block bb;
  int i;
  int num_edges = 0;
  int changes;
  int passes;
  int hist_br_prob[20];
  int num_branches;
  gcov_type *exec_counts = get_exec_counts (cfg_checksum, lineno_checksum);
  int inconsistent = 0;

  /* Very simple sanity checks so we catch bugs in our profiling code.  */
  if (!profile_info)
    return;
  if (profile_info->run_max * profile_info->runs < profile_info->sum_max)
    {
      error ("corrupted profile info: run_max * runs < sum_max");
      exec_counts = NULL;
    }

  if (profile_info->sum_all < profile_info->sum_max)
    {
      error ("corrupted profile info: sum_all is smaller than sum_max");
      exec_counts = NULL;
    }

  /* Attach extra info block to each bb.  */
  alloc_aux_for_blocks (sizeof (struct bb_info));
  /* Count, per block, the non-ignored edges whose counts are still
     unknown; these counters are decremented as counts are found.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!EDGE_INFO (e)->ignore)
	  BB_INFO (bb)->succ_count++;
      FOR_EACH_EDGE (e, ei, bb->preds)
	if (!EDGE_INFO (e)->ignore)
	  BB_INFO (bb)->pred_count++;
    }

  /* Avoid predicting entry on exit nodes.  */
  BB_INFO (EXIT_BLOCK_PTR)->succ_count = 2;
  BB_INFO (ENTRY_BLOCK_PTR)->pred_count = 2;

  num_edges = read_profile_edge_counts (exec_counts);

  if (dump_file)
    fprintf (dump_file, "\n%d edge counts read\n", num_edges);

  /* For every block in the file,
     - if every exit/entrance edge has a known count, then set the block count
     - if the block count is known, and every exit/entrance edge but one has
     a known execution count, then set the count of the remaining edge

     As edge counts are set, decrement the succ/pred count, but don't delete
     the edge, that way we can easily tell when all edges are known, or only
     one edge is unknown.  */

  /* The order that the basic blocks are iterated through is important.
     Since the code that finds spanning trees starts with block 0, low numbered
     edges are put on the spanning tree in preference to high numbered edges.
     Hence, most instrumented edges are at the end.  Graph solving works much
     faster if we propagate numbers from the end to the start.

     This takes an average of slightly more than 3 passes.  */

  /* Fixed-point iteration: repeat until no new count can be
     deduced.  */
  changes = 1;
  passes = 0;
  while (changes)
    {
      passes++;
      changes = 0;
      FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
	{
	  struct bb_info *bi = BB_INFO (bb);
	  if (! bi->count_valid)
	    {
	      /* All successor (or predecessor) counts known: the
		 block count is their sum.  */
	      if (bi->succ_count == 0)
		{
		  edge e;
		  edge_iterator ei;
		  gcov_type total = 0;

		  FOR_EACH_EDGE (e, ei, bb->succs)
		    total += e->count;
		  bb->count = total;
		  bi->count_valid = 1;
		  changes = 1;
		}
	      else if (bi->pred_count == 0)
		{
		  edge e;
		  edge_iterator ei;
		  gcov_type total = 0;

		  FOR_EACH_EDGE (e, ei, bb->preds)
		    total += e->count;
		  bb->count = total;
		  bi->count_valid = 1;
		  changes = 1;
		}
	    }
	  if (bi->count_valid)
	    {
	      /* Block count known and exactly one outgoing (or
		 incoming) edge unknown: deduce it by conservation.  */
	      if (bi->succ_count == 1)
		{
		  edge e;
		  edge_iterator ei;
		  gcov_type total = 0;

		  /* One of the counts will be invalid, but it is zero,
		     so adding it in also doesn't hurt.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    total += e->count;

		  /* Search for the invalid edge, and set its count.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore)
		      break;

		  /* Calculate count for remaining edge by conservation.  */
		  total = bb->count - total;

		  gcc_assert (e);
		  EDGE_INFO (e)->count_valid = 1;
		  e->count = total;
		  bi->succ_count--;

		  BB_INFO (e->dest)->pred_count--;
		  changes = 1;
		}
	      if (bi->pred_count == 1)
		{
		  edge e;
		  edge_iterator ei;
		  gcov_type total = 0;

		  /* One of the counts will be invalid, but it is zero,
		     so adding it in also doesn't hurt.  */
		  FOR_EACH_EDGE (e, ei, bb->preds)
		    total += e->count;

		  /* Search for the invalid edge, and set its count.  */
		  FOR_EACH_EDGE (e, ei, bb->preds)
		    if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore)
		      break;

		  /* Calculate count for remaining edge by conservation.
		     The invalid edge's own (zero) contribution was
		     added into TOTAL above, hence the "+ e->count".  */
		  total = bb->count - total + e->count;

		  gcc_assert (e);
		  EDGE_INFO (e)->count_valid = 1;
		  e->count = total;
		  bi->pred_count--;

		  BB_INFO (e->src)->succ_count--;
		  changes = 1;
		}
	    }
	}
    }
  if (dump_file)
    {
      int overlap = compute_frequency_overlap ();
      gimple_dump_cfg (dump_file, dump_flags);
      fprintf (dump_file, "Static profile overlap: %d.%d%%\n",
	       overlap / (OVERLAP_BASE / 100),
	       overlap % (OVERLAP_BASE / 100));
    }

  total_num_passes += passes;
  if (dump_file)
    fprintf (dump_file, "Graph solving took %d passes.\n\n", passes);

  /* If the graph has been correctly solved, every block will have a
     succ and pred count of zero.  */
  FOR_EACH_BB (bb)
    {
      gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
    }

  /* Check for inconsistent basic block counts */
  inconsistent = is_inconsistent ();

  if (inconsistent)
    {
      if (flag_profile_correction)
	{
	  /* Inconsistency detected. Make it flow-consistent. */
	  static int informed = 0;
	  if (dump_enabled_p () && informed == 0)
	    {
	      informed = 1;
	      dump_printf_loc (MSG_NOTE, input_location,
			       "correcting inconsistent profile data\n");
	    }
	  correct_negative_edge_counts ();
	  /* Set bb counts to the sum of the outgoing edge counts */
	  set_bb_counts ();
	  if (dump_file)
	    fprintf (dump_file, "\nCalling mcf_smooth_cfg\n");
	  mcf_smooth_cfg ();
	}
      else
	error ("corrupted profile info: profile data is not flow-consistent");
    }

  /* For every edge, calculate its branch probability and add a reg_note
     to the branch insn to indicate this.  */

  for (i = 0; i < 20; i++)
    hist_br_prob[i] = 0;
  num_branches = 0;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      if (bb->count < 0)
	{
	  error ("corrupted profile info: number of iterations for basic block %d thought to be %i",
		 bb->index, (int)bb->count);
	  bb->count = 0;
	}
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  /* Function may return twice in the cased the called function is
	     setjmp or calls fork, but we can't represent this by extra
	     edge from the entry, since extra edge from the exit is
	     already present.  We get negative frequency from the entry
	     point.  */
	  if ((e->count < 0
	       && e->dest == EXIT_BLOCK_PTR)
	      || (e->count > bb->count
		  && e->dest != EXIT_BLOCK_PTR))
	    {
	      if (block_ends_with_call_p (bb))
		e->count = e->count < 0 ? 0 : bb->count;
	    }
	  /* Any remaining out-of-range count is clamped to half the
	     block count and reported.  */
	  if (e->count < 0 || e->count > bb->count)
	    {
	      error ("corrupted profile info: number of executions for edge %d-%d thought to be %i",
		     e->src->index, e->dest->index,
		     (int)e->count);
	      e->count = bb->count / 2;
	    }
	}
      if (bb->count)
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    e->probability = GCOV_COMPUTE_SCALE (e->count, bb->count);
	  if (bb->index >= NUM_FIXED_BLOCKS
	      && block_ends_with_condjump_p (bb)
	      && EDGE_COUNT (bb->succs) >= 2)
	    {
	      int prob;
	      edge e;
	      int index;

	      /* Find the branch edge.  It is possible that we do have fake
		 edges here.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU)))
		  break;

	      prob = e->probability;
	      index = prob * 20 / REG_BR_PROB_BASE;

	      if (index == 20)
		index = 19;
	      hist_br_prob[index]++;

	      num_branches++;
	    }
	}
      /* As a last resort, distribute the probabilities evenly.
	 Use simple heuristics that if there are normal edges,
	 give all abnormals frequency of 0, otherwise distribute the
	 frequency over abnormals (this is the case of noreturn
	 calls).  */
      else if (profile_status == PROFILE_ABSENT)
	{
	  int total = 0;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
	      total ++;
	  if (total)
	    {
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
		  e->probability = REG_BR_PROB_BASE / total;
		else
		  e->probability = 0;
	    }
	  else
	    {
	      total += EDGE_COUNT (bb->succs);
	      FOR_EACH_EDGE (e, ei, bb->succs)
		e->probability = REG_BR_PROB_BASE / total;
	    }
	  if (bb->index >= NUM_FIXED_BLOCKS
	      && block_ends_with_condjump_p (bb)
	      && EDGE_COUNT (bb->succs) >= 2)
	    num_branches++;
	}
    }
  counts_to_freqs ();
  profile_status = PROFILE_READ;
  compute_function_frequency ();

  if (dump_file)
    {
      fprintf (dump_file, "%d branches\n", num_branches);
      if (num_branches)
	for (i = 0; i < 10; i++)
	  fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
		   (hist_br_prob[i] + hist_br_prob[19-i]) * 100 / num_branches,
		   5 * i, 5 * i + 5);

      total_num_branches += num_branches;
      for (i = 0; i < 20; i++)
	total_hist_br_prob[i] += hist_br_prob[i];

      fputc ('\n', dump_file);
      fputc ('\n', dump_file);
    }

  free_aux_for_blocks ();
}
849
/* Load value histograms values whose description is stored in VALUES array
   from .gcda file.

   CFG_CHECKSUM is the precomputed checksum for the CFG.  */

static void
compute_value_histograms (histogram_values values, unsigned cfg_checksum,
			  unsigned lineno_checksum)
{
  unsigned i, j, t, any;
  unsigned n_histogram_counters[GCOV_N_VALUE_COUNTERS];
  gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS];
  gcov_type *act_count[GCOV_N_VALUE_COUNTERS];
  gcov_type *aact_count;
  struct cgraph_node *node;

  /* Total the number of counters needed for each histogram kind...  */
  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    n_histogram_counters[t] = 0;

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      n_histogram_counters[(int) hist->type] += hist->n_counters;
    }

  /* ... then read each kind's counter array from the .gcda data.  */
  any = 0;
  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    {
      if (!n_histogram_counters[t])
	{
	  histogram_counts[t] = NULL;
	  continue;
	}

      histogram_counts[t] =
	get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
			     n_histogram_counters[t], cfg_checksum,
			     lineno_checksum, NULL);
      if (histogram_counts[t])
	any = 1;
      /* ACT_COUNT[t] is the read cursor into kind T's counters.  */
      act_count[t] = histogram_counts[t];
    }
  /* No counters of any kind were read: nothing to attach.  */
  if (!any)
    return;

  /* Walk the histograms in the same order the totals were computed,
     carving each one's slice out of its kind's counter array.  */
  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      gimple stmt = hist->hvalue.stmt;

      t = (int) hist->type;

      aact_count = act_count[t];

      if (act_count[t])
	act_count[t] += hist->n_counters;

      gimple_add_histogram_value (cfun, stmt, hist);
      hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
      /* Counters default to zero when this kind's data was missing.  */
      for (j = 0; j < hist->n_counters; j++)
	if (aact_count)
	  hist->hvalue.counters[j] = aact_count[j];
	else
	  hist->hvalue.counters[j] = 0;

      /* Time profiler counter is not related to any statement,
	 so that we have to read the counter and set the value to
	 the corresponding call graph node.  */
      if (hist->type == HIST_TYPE_TIME_PROFILE)
	{
	  node = cgraph_get_node (hist->fun->decl);

	  node->tp_first_run = hist->hvalue.counters[0];

	  if (dump_file)
	    fprintf (dump_file, "Read tp_first_run: %d\n", node->tp_first_run);
	}
    }

  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    free (histogram_counts[t]);
}
932
933 /* When passed NULL as file_name, initialize.
934 When passed something else, output the necessary commands to change
935 line to LINE and offset to FILE_NAME. */
936 static void
937 output_location (char const *file_name, int line,
938 gcov_position_t *offset, basic_block bb)
939 {
940 static char const *prev_file_name;
941 static int prev_line;
942 bool name_differs, line_differs;
943
944 if (!file_name)
945 {
946 prev_file_name = NULL;
947 prev_line = -1;
948 return;
949 }
950
951 name_differs = !prev_file_name || filename_cmp (file_name, prev_file_name);
952 line_differs = prev_line != line;
953
954 if (name_differs || line_differs)
955 {
956 if (!*offset)
957 {
958 *offset = gcov_write_tag (GCOV_TAG_LINES);
959 gcov_write_unsigned (bb->index);
960 name_differs = line_differs=true;
961 }
962
963 /* If this is a new source file, then output the
964 file's name to the .bb file. */
965 if (name_differs)
966 {
967 prev_file_name = file_name;
968 gcov_write_unsigned (0);
969 gcov_write_string (prev_file_name);
970 }
971 if (line_differs)
972 {
973 gcov_write_unsigned (line);
974 prev_line = line;
975 }
976 }
977 }
978
979 /* Instrument and/or analyze program behavior based on program the CFG.
980
981 This function creates a representation of the control flow graph (of
982 the function being compiled) that is suitable for the instrumentation
983 of edges and/or converting measured edge counts to counts on the
984 complete CFG.
985
986 When FLAG_PROFILE_ARCS is nonzero, this function instruments the edges in
987 the flow graph that are needed to reconstruct the dynamic behavior of the
988 flow graph. This data is written to the gcno file for gcov.
989
990 When FLAG_BRANCH_PROBABILITIES is nonzero, this function reads auxiliary
991 information from the gcda file containing edge count information from
992 previous executions of the function being compiled. In this case, the
993 control flow graph is annotated with actual execution counts by
994 compute_branch_probabilities().
995
996 Main entry point of this file. */
997
998 void
999 branch_prob (void)
1000 {
1001 basic_block bb;
1002 unsigned i;
1003 unsigned num_edges, ignored_edges;
1004 unsigned num_instrumented;
1005 struct edge_list *el;
1006 histogram_values values = histogram_values ();
1007 unsigned cfg_checksum, lineno_checksum;
1008
1009 total_num_times_called++;
1010
1011 flow_call_edges_add (NULL);
1012 add_noreturn_fake_exit_edges ();
1013
1014 /* We can't handle cyclic regions constructed using abnormal edges.
1015 To avoid these we replace every source of abnormal edge by a fake
1016 edge from entry node and every destination by fake edge to exit.
1017 This keeps graph acyclic and our calculation exact for all normal
1018 edges except for exit and entrance ones.
1019
1020 We also add fake exit edges for each call and asm statement in the
1021 basic, since it may not return. */
1022
1023 FOR_EACH_BB (bb)
1024 {
1025 int need_exit_edge = 0, need_entry_edge = 0;
1026 int have_exit_edge = 0, have_entry_edge = 0;
1027 edge e;
1028 edge_iterator ei;
1029
1030 /* Functions returning multiple times are not handled by extra edges.
1031 Instead we simply allow negative counts on edges from exit to the
1032 block past call and corresponding probabilities. We can't go
1033 with the extra edges because that would result in flowgraph that
1034 needs to have fake edges outside the spanning tree. */
1035
1036 FOR_EACH_EDGE (e, ei, bb->succs)
1037 {
1038 gimple_stmt_iterator gsi;
1039 gimple last = NULL;
1040
1041 /* It may happen that there are compiler generated statements
1042 without a locus at all. Go through the basic block from the
1043 last to the first statement looking for a locus. */
1044 for (gsi = gsi_last_nondebug_bb (bb);
1045 !gsi_end_p (gsi);
1046 gsi_prev_nondebug (&gsi))
1047 {
1048 last = gsi_stmt (gsi);
1049 if (gimple_has_location (last))
1050 break;
1051 }
1052
1053 /* Edge with goto locus might get wrong coverage info unless
1054 it is the only edge out of BB.
1055 Don't do that when the locuses match, so
1056 if (blah) goto something;
1057 is not computed twice. */
1058 if (last
1059 && gimple_has_location (last)
1060 && LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
1061 && !single_succ_p (bb)
1062 && (LOCATION_FILE (e->goto_locus)
1063 != LOCATION_FILE (gimple_location (last))
1064 || (LOCATION_LINE (e->goto_locus)
1065 != LOCATION_LINE (gimple_location (last)))))
1066 {
1067 basic_block new_bb = split_edge (e);
1068 edge ne = single_succ_edge (new_bb);
1069 ne->goto_locus = e->goto_locus;
1070 }
1071 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1072 && e->dest != EXIT_BLOCK_PTR)
1073 need_exit_edge = 1;
1074 if (e->dest == EXIT_BLOCK_PTR)
1075 have_exit_edge = 1;
1076 }
1077 FOR_EACH_EDGE (e, ei, bb->preds)
1078 {
1079 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1080 && e->src != ENTRY_BLOCK_PTR)
1081 need_entry_edge = 1;
1082 if (e->src == ENTRY_BLOCK_PTR)
1083 have_entry_edge = 1;
1084 }
1085
1086 if (need_exit_edge && !have_exit_edge)
1087 {
1088 if (dump_file)
1089 fprintf (dump_file, "Adding fake exit edge to bb %i\n",
1090 bb->index);
1091 make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
1092 }
1093 if (need_entry_edge && !have_entry_edge)
1094 {
1095 if (dump_file)
1096 fprintf (dump_file, "Adding fake entry edge to bb %i\n",
1097 bb->index);
1098 make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
1099 /* Avoid bbs that have both fake entry edge and also some
1100 exit edge. One of those edges wouldn't be added to the
1101 spanning tree, but we can't instrument any of them. */
1102 if (have_exit_edge || need_exit_edge)
1103 {
1104 gimple_stmt_iterator gsi;
1105 gimple first;
1106 tree fndecl;
1107
1108 gsi = gsi_after_labels (bb);
1109 gcc_checking_assert (!gsi_end_p (gsi));
1110 first = gsi_stmt (gsi);
1111 if (is_gimple_debug (first))
1112 {
1113 gsi_next_nondebug (&gsi);
1114 gcc_checking_assert (!gsi_end_p (gsi));
1115 first = gsi_stmt (gsi);
1116 }
1117 /* Don't split the bbs containing __builtin_setjmp_receiver
1118 or __builtin_setjmp_dispatcher calls. These are very
1119 special and don't expect anything to be inserted before
1120 them. */
1121 if (is_gimple_call (first)
1122 && (((fndecl = gimple_call_fndecl (first)) != NULL
1123 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1124 && (DECL_FUNCTION_CODE (fndecl)
1125 == BUILT_IN_SETJMP_RECEIVER
1126 || (DECL_FUNCTION_CODE (fndecl)
1127 == BUILT_IN_SETJMP_DISPATCHER)))
1128 || gimple_call_flags (first) & ECF_RETURNS_TWICE))
1129 continue;
1130
1131 if (dump_file)
1132 fprintf (dump_file, "Splitting bb %i after labels\n",
1133 bb->index);
1134 split_block_after_labels (bb);
1135 }
1136 }
1137 }
1138
1139 el = create_edge_list ();
1140 num_edges = NUM_EDGES (el);
1141 alloc_aux_for_edges (sizeof (struct edge_info));
1142
1143 /* The basic blocks are expected to be numbered sequentially. */
1144 compact_blocks ();
1145
1146 ignored_edges = 0;
1147 for (i = 0 ; i < num_edges ; i++)
1148 {
1149 edge e = INDEX_EDGE (el, i);
1150 e->count = 0;
1151
1152 /* Mark edges we've replaced by fake edges above as ignored. */
1153 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1154 && e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR)
1155 {
1156 EDGE_INFO (e)->ignore = 1;
1157 ignored_edges++;
1158 }
1159 }
1160
1161 /* Create spanning tree from basic block graph, mark each edge that is
1162 on the spanning tree. We insert as many abnormal and critical edges
1163 as possible to minimize number of edge splits necessary. */
1164
1165 find_spanning_tree (el);
1166
1167 /* Fake edges that are not on the tree will not be instrumented, so
1168 mark them ignored. */
1169 for (num_instrumented = i = 0; i < num_edges; i++)
1170 {
1171 edge e = INDEX_EDGE (el, i);
1172 struct edge_info *inf = EDGE_INFO (e);
1173
1174 if (inf->ignore || inf->on_tree)
1175 /*NOP*/;
1176 else if (e->flags & EDGE_FAKE)
1177 {
1178 inf->ignore = 1;
1179 ignored_edges++;
1180 }
1181 else
1182 num_instrumented++;
1183 }
1184
1185 total_num_blocks += n_basic_blocks;
1186 if (dump_file)
1187 fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
1188
1189 total_num_edges += num_edges;
1190 if (dump_file)
1191 fprintf (dump_file, "%d edges\n", num_edges);
1192
1193 total_num_edges_ignored += ignored_edges;
1194 if (dump_file)
1195 fprintf (dump_file, "%d ignored edges\n", ignored_edges);
1196
1197 total_num_edges_instrumented += num_instrumented;
1198 if (dump_file)
1199 fprintf (dump_file, "%d instrumentation edges\n", num_instrumented);
1200
1201 /* Compute two different checksums. Note that we want to compute
1202 the checksum in only once place, since it depends on the shape
1203 of the control flow which can change during
1204 various transformations. */
1205 cfg_checksum = coverage_compute_cfg_checksum ();
1206 lineno_checksum = coverage_compute_lineno_checksum ();
1207
1208 /* Write the data from which gcov can reconstruct the basic block
1209 graph and function line numbers (the gcno file). */
1210 if (coverage_begin_function (lineno_checksum, cfg_checksum))
1211 {
1212 gcov_position_t offset;
1213
1214 /* Basic block flags */
1215 offset = gcov_write_tag (GCOV_TAG_BLOCKS);
1216 for (i = 0; i != (unsigned) (n_basic_blocks); i++)
1217 gcov_write_unsigned (0);
1218 gcov_write_length (offset);
1219
1220 /* Arcs */
1221 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
1222 {
1223 edge e;
1224 edge_iterator ei;
1225
1226 offset = gcov_write_tag (GCOV_TAG_ARCS);
1227 gcov_write_unsigned (bb->index);
1228
1229 FOR_EACH_EDGE (e, ei, bb->succs)
1230 {
1231 struct edge_info *i = EDGE_INFO (e);
1232 if (!i->ignore)
1233 {
1234 unsigned flag_bits = 0;
1235
1236 if (i->on_tree)
1237 flag_bits |= GCOV_ARC_ON_TREE;
1238 if (e->flags & EDGE_FAKE)
1239 flag_bits |= GCOV_ARC_FAKE;
1240 if (e->flags & EDGE_FALLTHRU)
1241 flag_bits |= GCOV_ARC_FALLTHROUGH;
1242 /* On trees we don't have fallthru flags, but we can
1243 recompute them from CFG shape. */
1244 if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)
1245 && e->src->next_bb == e->dest)
1246 flag_bits |= GCOV_ARC_FALLTHROUGH;
1247
1248 gcov_write_unsigned (e->dest->index);
1249 gcov_write_unsigned (flag_bits);
1250 }
1251 }
1252
1253 gcov_write_length (offset);
1254 }
1255
1256 /* Line numbers. */
1257 /* Initialize the output. */
1258 output_location (NULL, 0, NULL, NULL);
1259
1260 FOR_EACH_BB (bb)
1261 {
1262 gimple_stmt_iterator gsi;
1263 gcov_position_t offset = 0;
1264
1265 if (bb == ENTRY_BLOCK_PTR->next_bb)
1266 {
1267 expanded_location curr_location =
1268 expand_location (DECL_SOURCE_LOCATION (current_function_decl));
1269 output_location (curr_location.file, curr_location.line,
1270 &offset, bb);
1271 }
1272
1273 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1274 {
1275 gimple stmt = gsi_stmt (gsi);
1276 if (gimple_has_location (stmt))
1277 output_location (gimple_filename (stmt), gimple_lineno (stmt),
1278 &offset, bb);
1279 }
1280
1281 /* Notice GOTO expressions eliminated while constructing the CFG. */
1282 if (single_succ_p (bb)
1283 && LOCATION_LOCUS (single_succ_edge (bb)->goto_locus)
1284 != UNKNOWN_LOCATION)
1285 {
1286 expanded_location curr_location
1287 = expand_location (single_succ_edge (bb)->goto_locus);
1288 output_location (curr_location.file, curr_location.line,
1289 &offset, bb);
1290 }
1291
1292 if (offset)
1293 {
1294 /* A file of NULL indicates the end of run. */
1295 gcov_write_unsigned (0);
1296 gcov_write_string (NULL);
1297 gcov_write_length (offset);
1298 }
1299 }
1300 }
1301
1302 if (flag_profile_values)
1303 gimple_find_values_to_profile (&values);
1304
1305 if (flag_branch_probabilities)
1306 {
1307 compute_branch_probabilities (cfg_checksum, lineno_checksum);
1308 if (flag_profile_values)
1309 compute_value_histograms (values, cfg_checksum, lineno_checksum);
1310 }
1311
1312 remove_fake_edges ();
1313
1314 /* For each edge not on the spanning tree, add counting code. */
1315 if (profile_arc_flag
1316 && coverage_counter_alloc (GCOV_COUNTER_ARCS, num_instrumented))
1317 {
1318 unsigned n_instrumented;
1319
1320 gimple_init_edge_profiler ();
1321
1322 n_instrumented = instrument_edges (el);
1323
1324 gcc_assert (n_instrumented == num_instrumented);
1325
1326 if (flag_profile_values)
1327 instrument_values (values);
1328
1329 /* Commit changes done by instrumentation. */
1330 gsi_commit_edge_inserts ();
1331 }
1332
1333 free_aux_for_edges ();
1334
1335 values.release ();
1336 free_edge_list (el);
1337 coverage_end_function (lineno_checksum, cfg_checksum);
1338 }
1339 \f
1340 /* Union find algorithm implementation for the basic blocks using
1341 aux fields. */
1342
1343 static basic_block
1344 find_group (basic_block bb)
1345 {
1346 basic_block group = bb, bb1;
1347
1348 while ((basic_block) group->aux != group)
1349 group = (basic_block) group->aux;
1350
1351 /* Compress path. */
1352 while ((basic_block) bb->aux != group)
1353 {
1354 bb1 = (basic_block) bb->aux;
1355 bb->aux = (void *) group;
1356 bb = bb1;
1357 }
1358 return group;
1359 }
1360
1361 static void
1362 union_groups (basic_block bb1, basic_block bb2)
1363 {
1364 basic_block bb1g = find_group (bb1);
1365 basic_block bb2g = find_group (bb2);
1366
1367 /* ??? I don't have a place for the rank field. OK. Lets go w/o it,
1368 this code is unlikely going to be performance problem anyway. */
1369 gcc_assert (bb1g != bb2g);
1370
1371 bb1g->aux = bb2g;
1372 }
1373 \f
1374 /* This function searches all of the edges in the program flow graph, and puts
1375 as many bad edges as possible onto the spanning tree. Bad edges include
1376 abnormals edges, which can't be instrumented at the moment. Since it is
1377 possible for fake edges to form a cycle, we will have to develop some
1378 better way in the future. Also put critical edges to the tree, since they
1379 are more expensive to instrument. */
1380
1381 static void
1382 find_spanning_tree (struct edge_list *el)
1383 {
1384 int i;
1385 int num_edges = NUM_EDGES (el);
1386 basic_block bb;
1387
1388 /* We use aux field for standard union-find algorithm. */
1389 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
1390 bb->aux = bb;
1391
1392 /* Add fake edge exit to entry we can't instrument. */
1393 union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
1394
1395 /* First add all abnormal edges to the tree unless they form a cycle. Also
1396 add all edges to EXIT_BLOCK_PTR to avoid inserting profiling code behind
1397 setting return value from function. */
1398 for (i = 0; i < num_edges; i++)
1399 {
1400 edge e = INDEX_EDGE (el, i);
1401 if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
1402 || e->dest == EXIT_BLOCK_PTR)
1403 && !EDGE_INFO (e)->ignore
1404 && (find_group (e->src) != find_group (e->dest)))
1405 {
1406 if (dump_file)
1407 fprintf (dump_file, "Abnormal edge %d to %d put to tree\n",
1408 e->src->index, e->dest->index);
1409 EDGE_INFO (e)->on_tree = 1;
1410 union_groups (e->src, e->dest);
1411 }
1412 }
1413
1414 /* Now insert all critical edges to the tree unless they form a cycle. */
1415 for (i = 0; i < num_edges; i++)
1416 {
1417 edge e = INDEX_EDGE (el, i);
1418 if (EDGE_CRITICAL_P (e) && !EDGE_INFO (e)->ignore
1419 && find_group (e->src) != find_group (e->dest))
1420 {
1421 if (dump_file)
1422 fprintf (dump_file, "Critical edge %d to %d put to tree\n",
1423 e->src->index, e->dest->index);
1424 EDGE_INFO (e)->on_tree = 1;
1425 union_groups (e->src, e->dest);
1426 }
1427 }
1428
1429 /* And now the rest. */
1430 for (i = 0; i < num_edges; i++)
1431 {
1432 edge e = INDEX_EDGE (el, i);
1433 if (!EDGE_INFO (e)->ignore
1434 && find_group (e->src) != find_group (e->dest))
1435 {
1436 if (dump_file)
1437 fprintf (dump_file, "Normal edge %d to %d put to tree\n",
1438 e->src->index, e->dest->index);
1439 EDGE_INFO (e)->on_tree = 1;
1440 union_groups (e->src, e->dest);
1441 }
1442 }
1443
1444 clear_aux_for_blocks ();
1445 }
1446 \f
1447 /* Perform file-level initialization for branch-prob processing. */
1448
1449 void
1450 init_branch_prob (void)
1451 {
1452 int i;
1453
1454 total_num_blocks = 0;
1455 total_num_edges = 0;
1456 total_num_edges_ignored = 0;
1457 total_num_edges_instrumented = 0;
1458 total_num_blocks_created = 0;
1459 total_num_passes = 0;
1460 total_num_times_called = 0;
1461 total_num_branches = 0;
1462 for (i = 0; i < 20; i++)
1463 total_hist_br_prob[i] = 0;
1464 }
1465
1466 /* Performs file-level cleanup after branch-prob processing
1467 is completed. */
1468
1469 void
1470 end_branch_prob (void)
1471 {
1472 if (dump_file)
1473 {
1474 fprintf (dump_file, "\n");
1475 fprintf (dump_file, "Total number of blocks: %d\n",
1476 total_num_blocks);
1477 fprintf (dump_file, "Total number of edges: %d\n", total_num_edges);
1478 fprintf (dump_file, "Total number of ignored edges: %d\n",
1479 total_num_edges_ignored);
1480 fprintf (dump_file, "Total number of instrumented edges: %d\n",
1481 total_num_edges_instrumented);
1482 fprintf (dump_file, "Total number of blocks created: %d\n",
1483 total_num_blocks_created);
1484 fprintf (dump_file, "Total number of graph solution passes: %d\n",
1485 total_num_passes);
1486 if (total_num_times_called != 0)
1487 fprintf (dump_file, "Average number of graph solution passes: %d\n",
1488 (total_num_passes + (total_num_times_called >> 1))
1489 / total_num_times_called);
1490 fprintf (dump_file, "Total number of branches: %d\n",
1491 total_num_branches);
1492 if (total_num_branches)
1493 {
1494 int i;
1495
1496 for (i = 0; i < 10; i++)
1497 fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
1498 (total_hist_br_prob[i] + total_hist_br_prob[19-i]) * 100
1499 / total_num_branches, 5*i, 5*i+5);
1500 }
1501 }
1502 }