1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* References:
21
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
28
29
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "backend.h"
34 #include "rtl.h"
35 #include "tree.h"
36 #include "gimple.h"
37 #include "cfghooks.h"
38 #include "tree-pass.h"
39 #include "ssa.h"
40 #include "memmodel.h"
41 #include "emit-rtl.h"
42 #include "cgraph.h"
43 #include "coverage.h"
44 #include "diagnostic-core.h"
45 #include "gimple-predict.h"
46 #include "fold-const.h"
47 #include "calls.h"
48 #include "cfganal.h"
49 #include "profile.h"
50 #include "sreal.h"
51 #include "params.h"
52 #include "cfgloop.h"
53 #include "gimple-iterator.h"
54 #include "tree-cfg.h"
55 #include "tree-ssa-loop-niter.h"
56 #include "tree-ssa-loop.h"
57 #include "tree-scalar-evolution.h"
58 #include "ipa-utils.h"
59 #include "gimple-pretty-print.h"
60 #include "selftest.h"
61 #include "cfgrtl.h"
62 #include "stringpool.h"
63 #include "attribs.h"
64
65 /* Enum with reasons why a predictor is ignored. */
66
67 enum predictor_reason
68 {
69 REASON_NONE,
70 REASON_IGNORED,
71 REASON_SINGLE_EDGE_DUPLICATE,
72 REASON_EDGE_PAIR_DUPLICATE
73 };
74
75 /* String messages for the aforementioned enum. */
76
77 static const char *reason_messages[] = {"", " (ignored)",
78 " (single edge duplicate)", " (edge pair duplicate)"};
79
 80 /* Real constants: 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
 81    1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX.  */
82 static sreal real_almost_one, real_br_prob_base,
83 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
84
85 static void combine_predictions_for_insn (rtx_insn *, basic_block);
86 static void dump_prediction (FILE *, enum br_predictor, int, basic_block,
87 enum predictor_reason, edge);
88 static void predict_paths_leading_to (basic_block, enum br_predictor,
89 enum prediction,
90 class loop *in_loop = NULL);
91 static void predict_paths_leading_to_edge (edge, enum br_predictor,
92 enum prediction,
93 class loop *in_loop = NULL);
94 static bool can_predict_insn_p (const rtx_insn *);
95 static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
96 static void determine_unlikely_bbs ();
97
98 /* Information we hold about each branch predictor.
99 Filled using information from predict.def. */
100
101 struct predictor_info
102 {
103 const char *const name; /* Name used in the debugging dumps. */
104 const int hitrate; /* Expected hitrate used by
105 predict_insn_def call. */
106 const int flags;
107 };
108
 109 /* Use the given predictor without Dempster-Shafer theory if it matches
 110    using first_match heuristics.  */
111 #define PRED_FLAG_FIRST_MATCH 1
112
 113 /* Convert a hitrate given in percent to our internal representation.  */
114
115 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
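 /* For example, with REG_BR_PROB_BASE == 10000, HITRATE (50) is
    (50 * 10000 + 50) / 100 == 5000; the "+ 50" term rounds to the
    nearest unit of the fixed-point scale.  */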
116
117 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
118 static const struct predictor_info predictor_info[]= {
119 #include "predict.def"
120
121 /* Upper bound on predictors. */
122 {NULL, 0, 0}
123 };
124 #undef DEF_PREDICTOR
125
126 static gcov_type min_count = -1;
127
128 /* Determine the threshold for hot BB counts. */
129
130 gcov_type
131 get_hot_bb_threshold ()
132 {
133 if (min_count == -1)
134 {
135 gcov_type t = profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION);
136 set_hot_bb_threshold (t);
137 if (dump_file)
138 fprintf (dump_file, "Setting hotness threshold to %" PRId64 ".\n",
139 min_count);
140 }
141 return min_count;
142 }
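 /* Illustration (hypothetical numbers): if the training run recorded
    profile_info->sum_max == 1000000 and HOT_BB_COUNT_FRACTION has its
    default value of 10000, the first call sets the threshold to
    1000000 / 10000 == 100, so counts of 100 and above are hot.  */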
143
144 /* Set the threshold for hot BB counts. */
145
146 void
147 set_hot_bb_threshold (gcov_type min)
148 {
149 min_count = min;
150 }
151
152 /* Return TRUE if COUNT is considered to be hot in function FUN. */
153
154 bool
155 maybe_hot_count_p (struct function *fun, profile_count count)
156 {
157 if (!count.initialized_p ())
158 return true;
159 if (count.ipa () == profile_count::zero ())
160 return false;
161 if (!count.ipa_p ())
162 {
163 struct cgraph_node *node = cgraph_node::get (fun->decl);
164 if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
165 {
166 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
167 return false;
168 if (node->frequency == NODE_FREQUENCY_HOT)
169 return true;
170 }
171 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
172 return true;
173 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
174 && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
175 return false;
176 if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
177 < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
178 return false;
179 return true;
180 }
181 /* Code executed at most once is not hot. */
182 if (count <= MAX (profile_info ? profile_info->runs : 1, 1))
183 return false;
184 return (count.to_gcov_type () >= get_hot_bb_threshold ());
185 }
186
187 /* Return true if basic block BB of function FUN can be CPU intensive
188 and should thus be optimized for maximum performance. */
189
190 bool
191 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
192 {
193 gcc_checking_assert (fun);
194 return maybe_hot_count_p (fun, bb->count);
195 }
196
197 /* Return true if edge E can be CPU intensive and should thus be optimized
198 for maximum performance. */
199
200 bool
201 maybe_hot_edge_p (edge e)
202 {
203 return maybe_hot_count_p (cfun, e->count ());
204 }
205
206 /* Return true if COUNT is considered to be never executed in function FUN
207 or if function FUN is considered so in the static profile. */
208
209 static bool
210 probably_never_executed (struct function *fun, profile_count count)
211 {
212 gcc_checking_assert (fun);
213 if (count.ipa () == profile_count::zero ())
214 return true;
 215   /* Do not trust adjusted counts.  Otherwise we would drop code with a low
 216      execution count into the cold section as a result of inlining.  Such low
 217      counts are not safe even with a read profile and may cause code that
 218      actually gets executed to be placed in the cold section of the binary,
 219      which is not desirable.  */
220 if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
221 {
222 const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
223 if (count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
224 return false;
225 return true;
226 }
227 if ((!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
228 && (cgraph_node::get (fun->decl)->frequency
229 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
230 return true;
231 return false;
232 }
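 /* Worked instance of the precise-count test above, assuming the default
    UNLIKELY_BB_COUNT_FRACTION of 20 and profile_info->runs == 100: a count
    of 4 gives 4 * 20 == 80 < 100, so the block is reported never executed,
    while a count of 5 gives 100 >= 100 and the test returns false.  */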
233
234 /* Return true if basic block BB of function FUN is probably never executed. */
235
236 bool
237 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
238 {
239 return probably_never_executed (fun, bb->count);
240 }
241
242 /* Return true if edge E is unlikely executed for obvious reasons. */
243
244 static bool
245 unlikely_executed_edge_p (edge e)
246 {
247 return (e->count () == profile_count::zero ()
248 || e->probability == profile_probability::never ())
249 || (e->flags & (EDGE_EH | EDGE_FAKE));
250 }
251
252 /* Return true if edge E of function FUN is probably never executed. */
253
254 bool
255 probably_never_executed_edge_p (struct function *fun, edge e)
256 {
257 if (unlikely_executed_edge_p (e))
258 return true;
259 return probably_never_executed (fun, e->count ());
260 }
261
262 /* Return true if function FUN should always be optimized for size. */
263
264 bool
265 optimize_function_for_size_p (struct function *fun)
266 {
267 if (!fun || !fun->decl)
268 return optimize_size;
269 cgraph_node *n = cgraph_node::get (fun->decl);
270 return n && n->optimize_for_size_p ();
271 }
272
273 /* Return true if function FUN should always be optimized for speed. */
274
275 bool
276 optimize_function_for_speed_p (struct function *fun)
277 {
278 return !optimize_function_for_size_p (fun);
279 }
280
281 /* Return the optimization type that should be used for function FUN. */
282
283 optimization_type
284 function_optimization_type (struct function *fun)
285 {
286 return (optimize_function_for_speed_p (fun)
287 ? OPTIMIZE_FOR_SPEED
288 : OPTIMIZE_FOR_SIZE);
289 }
290
291 /* Return TRUE if basic block BB should be optimized for size. */
292
293 bool
294 optimize_bb_for_size_p (const_basic_block bb)
295 {
296 return (optimize_function_for_size_p (cfun)
297 || (bb && !maybe_hot_bb_p (cfun, bb)));
298 }
299
300 /* Return TRUE if basic block BB should be optimized for speed. */
301
302 bool
303 optimize_bb_for_speed_p (const_basic_block bb)
304 {
305 return !optimize_bb_for_size_p (bb);
306 }
307
308 /* Return the optimization type that should be used for basic block BB. */
309
310 optimization_type
311 bb_optimization_type (const_basic_block bb)
312 {
313 return (optimize_bb_for_speed_p (bb)
314 ? OPTIMIZE_FOR_SPEED
315 : OPTIMIZE_FOR_SIZE);
316 }
317
318 /* Return TRUE if edge E should be optimized for size. */
319
320 bool
321 optimize_edge_for_size_p (edge e)
322 {
323 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
324 }
325
326 /* Return TRUE if edge E should be optimized for speed. */
327
328 bool
329 optimize_edge_for_speed_p (edge e)
330 {
331 return !optimize_edge_for_size_p (e);
332 }
333
334 /* Return TRUE if the current function is optimized for size. */
335
336 bool
337 optimize_insn_for_size_p (void)
338 {
339 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
340 }
341
342 /* Return TRUE if the current function is optimized for speed. */
343
344 bool
345 optimize_insn_for_speed_p (void)
346 {
347 return !optimize_insn_for_size_p ();
348 }
349
350 /* Return TRUE if LOOP should be optimized for size. */
351
352 bool
353 optimize_loop_for_size_p (class loop *loop)
354 {
355 return optimize_bb_for_size_p (loop->header);
356 }
357
358 /* Return TRUE if LOOP should be optimized for speed. */
359
360 bool
361 optimize_loop_for_speed_p (class loop *loop)
362 {
363 return optimize_bb_for_speed_p (loop->header);
364 }
365
366 /* Return TRUE if nest rooted at LOOP should be optimized for speed. */
367
368 bool
369 optimize_loop_nest_for_speed_p (class loop *loop)
370 {
371 class loop *l = loop;
372 if (optimize_loop_for_speed_p (loop))
373 return true;
374 l = loop->inner;
375 while (l && l != loop)
376 {
377 if (optimize_loop_for_speed_p (l))
378 return true;
379 if (l->inner)
380 l = l->inner;
381 else if (l->next)
382 l = l->next;
383 else
384 {
385 while (l != loop && !l->next)
386 l = loop_outer (l);
387 if (l != loop)
388 l = l->next;
389 }
390 }
391 return false;
392 }
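 /* Illustration of the walk above (hypothetical nest): for
    L { A { B }, C } the headers are tested in the order L, A, B, C; when a
    subtree is exhausted the walk climbs back through loop_outer until it
    finds a sibling via l->next.  */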
393
394 /* Return TRUE if nest rooted at LOOP should be optimized for size. */
395
396 bool
397 optimize_loop_nest_for_size_p (class loop *loop)
398 {
399 return !optimize_loop_nest_for_speed_p (loop);
400 }
401
 402 /* Return true if edge E is likely to be well predictable by the branch
 403    predictor.  */
404
405 bool
406 predictable_edge_p (edge e)
407 {
408 if (!e->probability.initialized_p ())
409 return false;
410 if ((e->probability.to_reg_br_prob_base ()
411 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
412 || (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
413 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
414 return true;
415 return false;
416 }
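 /* Illustration, assuming the default --param predictable-branch-outcome=2:
    the cut-off is 2 * REG_BR_PROB_BASE / 100 == 200, so an edge counts as
    predictable when its probability is at most 2% or at least 98%.  */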
417
418
419 /* Set RTL expansion for BB profile. */
420
421 void
422 rtl_profile_for_bb (basic_block bb)
423 {
424 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
425 }
426
427 /* Set RTL expansion for edge profile. */
428
429 void
430 rtl_profile_for_edge (edge e)
431 {
432 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
433 }
434
435 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
436 void
437 default_rtl_profile (void)
438 {
439 crtl->maybe_hot_insn_p = true;
440 }
441
 442 /* Return true if one of the outgoing edges is already predicted by
 443    PREDICTOR.  */
444
445 bool
446 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
447 {
448 rtx note;
449 if (!INSN_P (BB_END (bb)))
450 return false;
451 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
452 if (REG_NOTE_KIND (note) == REG_BR_PRED
453 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
454 return true;
455 return false;
456 }
457
 458 /* Structure representing predictions at tree level.  */
459
460 struct edge_prediction {
461 struct edge_prediction *ep_next;
462 edge ep_edge;
463 enum br_predictor ep_predictor;
464 int ep_probability;
465 };
466
467 /* This map contains for a basic block the list of predictions for the
468 outgoing edges. */
469
470 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
471
 472 /* Return true if one of the outgoing edges is already predicted by
 473    PREDICTOR.  */
474
475 bool
476 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
477 {
478 struct edge_prediction *i;
479 edge_prediction **preds = bb_predictions->get (bb);
480
481 if (!preds)
482 return false;
483
484 for (i = *preds; i; i = i->ep_next)
485 if (i->ep_predictor == predictor)
486 return true;
487 return false;
488 }
489
 490 /* Return true if edge E is already predicted by PREDICTOR with the
 491    probability corresponding to TAKEN.  */
492
493 bool
494 edge_predicted_by_p (edge e, enum br_predictor predictor, bool taken)
495 {
496 struct edge_prediction *i;
497 basic_block bb = e->src;
498 edge_prediction **preds = bb_predictions->get (bb);
499 if (!preds)
500 return false;
501
502 int probability = predictor_info[(int) predictor].hitrate;
503
504 if (taken != TAKEN)
505 probability = REG_BR_PROB_BASE - probability;
506
507 for (i = *preds; i; i = i->ep_next)
508 if (i->ep_predictor == predictor
509 && i->ep_edge == e
510 && i->ep_probability == probability)
511 return true;
512 return false;
513 }
514
 515 /* Return true if the probability of edge E is reliable.  */
516 bool
517 edge_probability_reliable_p (const_edge e)
518 {
519 return e->probability.probably_reliable_p ();
520 }
521
522 /* Same predicate as edge_probability_reliable_p, working on notes. */
523 bool
524 br_prob_note_reliable_p (const_rtx note)
525 {
526 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
527 return profile_probability::from_reg_br_prob_note
528 (XINT (note, 0)).probably_reliable_p ();
529 }
530
531 static void
532 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
533 {
534 gcc_assert (any_condjump_p (insn));
535 if (!flag_guess_branch_prob)
536 return;
537
538 add_reg_note (insn, REG_BR_PRED,
539 gen_rtx_CONCAT (VOIDmode,
540 GEN_INT ((int) predictor),
541 GEN_INT ((int) probability)));
542 }
543
544 /* Predict insn by given predictor. */
545
546 void
547 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
548 enum prediction taken)
549 {
550 int probability = predictor_info[(int) predictor].hitrate;
551 gcc_assert (probability != PROB_UNINITIALIZED);
552
553 if (taken != TAKEN)
554 probability = REG_BR_PROB_BASE - probability;
555
556 predict_insn (insn, predictor, probability);
557 }
558
559 /* Predict edge E with given probability if possible. */
560
561 void
562 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
563 {
564 rtx_insn *last_insn;
565 last_insn = BB_END (e->src);
566
567 /* We can store the branch prediction information only about
568 conditional jumps. */
569 if (!any_condjump_p (last_insn))
570 return;
571
572 /* We always store probability of branching. */
573 if (e->flags & EDGE_FALLTHRU)
574 probability = REG_BR_PROB_BASE - probability;
575
576 predict_insn (last_insn, predictor, probability);
577 }
578
579 /* Predict edge E with the given PROBABILITY. */
580 void
581 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
582 {
583 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
584 && EDGE_COUNT (e->src->succs) > 1
585 && flag_guess_branch_prob
586 && optimize)
587 {
588 struct edge_prediction *i = XNEW (struct edge_prediction);
589 edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
590
591 i->ep_next = preds;
592 preds = i;
593 i->ep_probability = probability;
594 i->ep_predictor = predictor;
595 i->ep_edge = e;
596 }
597 }
598
 599 /* Filter edge predictions PREDS by a function FILTER.  DATA is passed
 600    to the filter function.  */
601
602 void
603 filter_predictions (edge_prediction **preds,
604 bool (*filter) (edge_prediction *, void *), void *data)
605 {
606 if (!bb_predictions)
607 return;
608
609 if (preds)
610 {
611 struct edge_prediction **prediction = preds;
612 struct edge_prediction *next;
613
614 while (*prediction)
615 {
616 if ((*filter) (*prediction, data))
617 prediction = &((*prediction)->ep_next);
618 else
619 {
620 next = (*prediction)->ep_next;
621 free (*prediction);
622 *prediction = next;
623 }
624 }
625 }
626 }
627
 628 /* Filter function predicate that returns true for an edge prediction P
 629    if its edge is equal to DATA.  */
630
631 bool
632 equal_edge_p (edge_prediction *p, void *data)
633 {
634 return p->ep_edge == (edge)data;
635 }
636
637 /* Remove all predictions on given basic block that are attached
638 to edge E. */
639 void
640 remove_predictions_associated_with_edge (edge e)
641 {
642 if (!bb_predictions)
643 return;
644
645 edge_prediction **preds = bb_predictions->get (e->src);
646 filter_predictions (preds, equal_edge_p, e);
647 }
648
649 /* Clears the list of predictions stored for BB. */
650
651 static void
652 clear_bb_predictions (basic_block bb)
653 {
654 edge_prediction **preds = bb_predictions->get (bb);
655 struct edge_prediction *pred, *next;
656
657 if (!preds)
658 return;
659
660 for (pred = *preds; pred; pred = next)
661 {
662 next = pred->ep_next;
663 free (pred);
664 }
665 *preds = NULL;
666 }
667
 668 /* Return true when we can store a prediction on insn INSN.
 669    At the moment we represent predictions only on conditional
 670    jumps, not on computed jumps or other complicated cases.  */
671 static bool
672 can_predict_insn_p (const rtx_insn *insn)
673 {
674 return (JUMP_P (insn)
675 && any_condjump_p (insn)
676 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
677 }
678
679 /* Predict edge E by given predictor if possible. */
680
681 void
682 predict_edge_def (edge e, enum br_predictor predictor,
683 enum prediction taken)
684 {
685 int probability = predictor_info[(int) predictor].hitrate;
686
687 if (taken != TAKEN)
688 probability = REG_BR_PROB_BASE - probability;
689
690 predict_edge (e, predictor, probability);
691 }
692
693 /* Invert all branch predictions or probability notes in the INSN. This needs
694 to be done each time we invert the condition used by the jump. */
695
696 void
697 invert_br_probabilities (rtx insn)
698 {
699 rtx note;
700
701 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
702 if (REG_NOTE_KIND (note) == REG_BR_PROB)
703 XINT (note, 0) = profile_probability::from_reg_br_prob_note
704 (XINT (note, 0)).invert ().to_reg_br_prob_note ();
705 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
706 XEXP (XEXP (note, 0), 1)
707 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
708 }
709
710 /* Dump information about the branch prediction to the output file. */
711
712 static void
713 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
714 basic_block bb, enum predictor_reason reason = REASON_NONE,
715 edge ep_edge = NULL)
716 {
717 edge e = ep_edge;
718 edge_iterator ei;
719
720 if (!file)
721 return;
722
723 if (e == NULL)
724 FOR_EACH_EDGE (e, ei, bb->succs)
725 if (! (e->flags & EDGE_FALLTHRU))
726 break;
727
728 char edge_info_str[128];
729 if (ep_edge)
730 sprintf (edge_info_str, " of edge %d->%d", ep_edge->src->index,
731 ep_edge->dest->index);
732 else
733 edge_info_str[0] = '\0';
734
735 fprintf (file, " %s heuristics%s%s: %.2f%%",
736 predictor_info[predictor].name,
737 edge_info_str, reason_messages[reason],
738 probability * 100.0 / REG_BR_PROB_BASE);
739
740 if (bb->count.initialized_p ())
741 {
742 fprintf (file, " exec ");
743 bb->count.dump (file);
744 if (e)
745 {
746 fprintf (file, " hit ");
747 e->count ().dump (file);
748 fprintf (file, " (%.1f%%)", e->count ().to_gcov_type() * 100.0
749 / bb->count.to_gcov_type ());
750 }
751 }
752
753 fprintf (file, "\n");
754
 755   /* Print output that can be easily read by the analyze_brprob.py script.  We
 756      are interested only in counts that are read from GCDA files.  */
757 if (dump_file && (dump_flags & TDF_DETAILS)
758 && bb->count.precise_p ()
759 && reason == REASON_NONE)
760 {
761 gcc_assert (e->count ().precise_p ());
762 fprintf (file, ";;heuristics;%s;%" PRId64 ";%" PRId64 ";%.1f;\n",
763 predictor_info[predictor].name,
764 bb->count.to_gcov_type (), e->count ().to_gcov_type (),
765 probability * 100.0 / REG_BR_PROB_BASE);
766 }
767 }
768
769 /* Return true if STMT is known to be unlikely executed. */
770
771 static bool
772 unlikely_executed_stmt_p (gimple *stmt)
773 {
774 if (!is_gimple_call (stmt))
775 return false;
 776   /* The NORETURN attribute alone is not strong enough: exit () may well be
 777      executed once during the program run.  */
778 if (gimple_call_fntype (stmt)
779 && lookup_attribute ("cold",
780 TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))
781 && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
782 return true;
783 tree decl = gimple_call_fndecl (stmt);
784 if (!decl)
785 return false;
786 if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl))
787 && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
788 return true;
789
790 cgraph_node *n = cgraph_node::get (decl);
791 if (!n)
792 return false;
793
794 availability avail;
795 n = n->ultimate_alias_target (&avail);
796 if (avail < AVAIL_AVAILABLE)
797 return false;
798 if (!n->analyzed
799 || n->decl == current_function_decl)
800 return false;
801 return n->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED;
802 }
803
804 /* Return true if BB is unlikely executed. */
805
806 static bool
807 unlikely_executed_bb_p (basic_block bb)
808 {
809 if (bb->count == profile_count::zero ())
810 return true;
811 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
812 return false;
813 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
814 !gsi_end_p (gsi); gsi_next (&gsi))
815 {
816 if (unlikely_executed_stmt_p (gsi_stmt (gsi)))
817 return true;
818 if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
819 return false;
820 }
821 return false;
822 }
823
 824 /* We cannot predict the probabilities of the outgoing edges of BB.  Set them
 825    evenly and hope for the best.  If UNLIKELY_EDGES is not null, distribute
 826    the probability evenly over all edges not mentioned in the set; the edges
 827    in the set are given PROB_VERY_UNLIKELY probability.  Similarly for
 828    LIKELY_EDGES: if it contains exactly one likely edge, use its probability
 829    and predict the remaining edges as not probable.  */
830
831 static void
832 set_even_probabilities (basic_block bb,
833 hash_set<edge> *unlikely_edges = NULL,
834 hash_set<edge_prediction *> *likely_edges = NULL)
835 {
836 unsigned nedges = 0, unlikely_count = 0;
837 edge e = NULL;
838 edge_iterator ei;
839 profile_probability all = profile_probability::always ();
840
841 FOR_EACH_EDGE (e, ei, bb->succs)
842 if (e->probability.initialized_p ())
843 all -= e->probability;
844 else if (!unlikely_executed_edge_p (e))
845 {
846 nedges++;
847 if (unlikely_edges != NULL && unlikely_edges->contains (e))
848 {
849 all -= profile_probability::very_unlikely ();
850 unlikely_count++;
851 }
852 }
853
854 /* Make the distribution even if all edges are unlikely. */
855 unsigned likely_count = likely_edges ? likely_edges->elements () : 0;
856 if (unlikely_count == nedges)
857 {
858 unlikely_edges = NULL;
859 unlikely_count = 0;
860 }
861
 862   /* If we have one likely edge, then use its probability and distribute
 863      the remaining probability evenly.  */
864 if (likely_count == 1)
865 {
866 FOR_EACH_EDGE (e, ei, bb->succs)
867 if (e->probability.initialized_p ())
868 ;
869 else if (!unlikely_executed_edge_p (e))
870 {
871 edge_prediction *prediction = *likely_edges->begin ();
872 int p = prediction->ep_probability;
873 profile_probability prob
874 = profile_probability::from_reg_br_prob_base (p);
875
876 if (prediction->ep_edge == e)
877 e->probability = prob;
878 else if (unlikely_edges != NULL && unlikely_edges->contains (e))
879 e->probability = profile_probability::very_unlikely ();
880 else
881 {
882 profile_probability remainder = prob.invert ();
883 remainder -= profile_probability::very_unlikely ()
884 .apply_scale (unlikely_count, 1);
885 int count = nedges - unlikely_count - 1;
886 gcc_assert (count >= 0);
887
888 e->probability = remainder.apply_scale (1, count);
889 }
890 }
891 else
892 e->probability = profile_probability::never ();
893 }
894 else
895 {
896 /* Make all unlikely edges unlikely and the rest will have even
897 probability. */
898 unsigned scale = nedges - unlikely_count;
899 FOR_EACH_EDGE (e, ei, bb->succs)
900 if (e->probability.initialized_p ())
901 ;
902 else if (!unlikely_executed_edge_p (e))
903 {
904 if (unlikely_edges != NULL && unlikely_edges->contains (e))
905 e->probability = profile_probability::very_unlikely ();
906 else
907 e->probability = all.apply_scale (1, scale);
908 }
909 else
910 e->probability = profile_probability::never ();
911 }
912 }
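 /* Worked example (hypothetical CFG): a block with four uninitialized
    successor edges, one of them in UNLIKELY_EDGES and none likely, ends up
    with that edge at very_unlikely () and each remaining edge at
    (all - very_unlikely ()) / 3, i.e. just under one third each.  */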
913
914 /* Add REG_BR_PROB note to JUMP with PROB. */
915
916 void
917 add_reg_br_prob_note (rtx_insn *jump, profile_probability prob)
918 {
919 gcc_checking_assert (JUMP_P (jump) && !find_reg_note (jump, REG_BR_PROB, 0));
920 add_int_reg_note (jump, REG_BR_PROB, prob.to_reg_br_prob_note ());
921 }
922
923 /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB
924 note if not already present. Remove now useless REG_BR_PRED notes. */
925
926 static void
927 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
928 {
929 rtx prob_note;
930 rtx *pnote;
931 rtx note;
932 int best_probability = PROB_EVEN;
933 enum br_predictor best_predictor = END_PREDICTORS;
934 int combined_probability = REG_BR_PROB_BASE / 2;
935 int d;
936 bool first_match = false;
937 bool found = false;
938
939 if (!can_predict_insn_p (insn))
940 {
941 set_even_probabilities (bb);
942 return;
943 }
944
945 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
946 pnote = &REG_NOTES (insn);
947 if (dump_file)
948 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
949 bb->index);
950
 951   /* We implement "first match" heuristics and use the probability guessed
 952      by the predictor with the smallest index.  */
953 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
954 if (REG_NOTE_KIND (note) == REG_BR_PRED)
955 {
956 enum br_predictor predictor = ((enum br_predictor)
957 INTVAL (XEXP (XEXP (note, 0), 0)));
958 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
959
960 found = true;
961 if (best_predictor > predictor
962 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
963 best_probability = probability, best_predictor = predictor;
964
965 d = (combined_probability * probability
966 + (REG_BR_PROB_BASE - combined_probability)
967 * (REG_BR_PROB_BASE - probability));
968
 969 	/* Use FP math to avoid overflows of 32-bit integers.  */
970 if (d == 0)
971 /* If one probability is 0% and one 100%, avoid division by zero. */
972 combined_probability = REG_BR_PROB_BASE / 2;
973 else
974 combined_probability = (((double) combined_probability) * probability
975 * REG_BR_PROB_BASE / d + 0.5);
976 }
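 /* Worked instance of the formula above, in fractions of REG_BR_PROB_BASE:
    folding a 0.78 prediction into the neutral 0.5 gives
    d = 0.5*0.78 + 0.5*0.22 = 0.5 and combined = 0.5*0.78/0.5 = 0.78;
    folding in a further 0.6 gives d = 0.78*0.6 + 0.22*0.4 = 0.556 and
    combined = 0.78*0.6/0.556 ~= 0.84, so agreeing predictors reinforce
    each other.  */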
977
 978   /* Decide which heuristic to use.  In case we didn't match anything,
 979      use the no_prediction heuristic; in case we did match, use either
 980      first match or Dempster-Shafer theory depending on the flags.  */
981
982 if (best_predictor != END_PREDICTORS)
983 first_match = true;
984
985 if (!found)
986 dump_prediction (dump_file, PRED_NO_PREDICTION,
987 combined_probability, bb);
988 else
989 {
990 if (!first_match)
991 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
992 bb, !first_match ? REASON_NONE : REASON_IGNORED);
993 else
994 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
995 bb, first_match ? REASON_NONE : REASON_IGNORED);
996 }
997
998 if (first_match)
999 combined_probability = best_probability;
1000 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1001
1002 while (*pnote)
1003 {
1004 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
1005 {
1006 enum br_predictor predictor = ((enum br_predictor)
1007 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
1008 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
1009
1010 dump_prediction (dump_file, predictor, probability, bb,
1011 (!first_match || best_predictor == predictor)
1012 ? REASON_NONE : REASON_IGNORED);
1013 *pnote = XEXP (*pnote, 1);
1014 }
1015 else
1016 pnote = &XEXP (*pnote, 1);
1017 }
1018
1019 if (!prob_note)
1020 {
1021 profile_probability p
1022 = profile_probability::from_reg_br_prob_base (combined_probability);
1023 add_reg_br_prob_note (insn, p);
1024
1025       /* Save the prediction into the CFG in case we are seeing a
1026 	 non-degenerate conditional jump.  */
1027 if (!single_succ_p (bb))
1028 {
1029 BRANCH_EDGE (bb)->probability = p;
1030 FALLTHRU_EDGE (bb)->probability
1031 = BRANCH_EDGE (bb)->probability.invert ();
1032 }
1033 }
1034 else if (!single_succ_p (bb))
1035 {
1036 profile_probability prob = profile_probability::from_reg_br_prob_note
1037 (XINT (prob_note, 0));
1038
1039 BRANCH_EDGE (bb)->probability = prob;
1040 FALLTHRU_EDGE (bb)->probability = prob.invert ();
1041 }
1042 else
1043 single_succ_edge (bb)->probability = profile_probability::always ();
1044 }
1045
1046 /* Edge prediction hash traits. */
1047
1048 struct predictor_hash: pointer_hash <edge_prediction>
1049 {
1050
1051 static inline hashval_t hash (const edge_prediction *);
1052 static inline bool equal (const edge_prediction *, const edge_prediction *);
1053 };
1054
1055 /* Calculate hash value of an edge prediction P based on predictor and
1056 normalized probability. */
1057
1058 inline hashval_t
1059 predictor_hash::hash (const edge_prediction *p)
1060 {
1061 inchash::hash hstate;
1062 hstate.add_int (p->ep_predictor);
1063
1064 int prob = p->ep_probability;
1065 if (prob > REG_BR_PROB_BASE / 2)
1066 prob = REG_BR_PROB_BASE - prob;
1067
1068 hstate.add_int (prob);
1069
1070 return hstate.end ();
1071 }
1072
1073 /* Return true if edge predictions P1 and P2 use the same predictor and
1074    have equal (or opposite) probabilities.  */
1075
1076 inline bool
1077 predictor_hash::equal (const edge_prediction *p1, const edge_prediction *p2)
1078 {
1079 return (p1->ep_predictor == p2->ep_predictor
1080 && (p1->ep_probability == p2->ep_probability
1081 || p1->ep_probability == REG_BR_PROB_BASE - p2->ep_probability));
1082 }
1083
1084 struct predictor_hash_traits: predictor_hash,
1085 typed_noop_remove <edge_prediction *> {};
1086
1087 /* Return true if edge prediction P is not in DATA hash set. */
1088
1089 static bool
1090 not_removed_prediction_p (edge_prediction *p, void *data)
1091 {
1092 hash_set<edge_prediction *> *remove = (hash_set<edge_prediction *> *) data;
1093 return !remove->contains (p);
1094 }
1095
1096 /* Prune predictions for a basic block BB.  Currently we do the following
1097    clean-up steps:
1098 
1099    1) remove a duplicate prediction that guesses the same probability
1100       (other than 1/2) for both edges
1101    2) remove duplicates of a prediction that attaches the same probability
1102       to a single edge
1103 
1104  */
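 /* For example (hypothetical predictions): two entries from the same
    predictor attaching 8500 to the same edge collapse to one (case 2),
    while a pair from one predictor attaching 8500 to *both* outgoing edges
    is contradictory and both entries are dropped (case 1).  */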
1105
1106 static void
1107 prune_predictions_for_bb (basic_block bb)
1108 {
1109 edge_prediction **preds = bb_predictions->get (bb);
1110
1111 if (preds)
1112 {
1113 hash_table <predictor_hash_traits> s (13);
1114 hash_set <edge_prediction *> remove;
1115
1116 /* Step 1: identify predictors that should be removed. */
1117 for (edge_prediction *pred = *preds; pred; pred = pred->ep_next)
1118 {
1119 edge_prediction *existing = s.find (pred);
1120 if (existing)
1121 {
1122 if (pred->ep_edge == existing->ep_edge
1123 && pred->ep_probability == existing->ep_probability)
1124 {
1125 /* Remove a duplicate predictor. */
1126 dump_prediction (dump_file, pred->ep_predictor,
1127 pred->ep_probability, bb,
1128 REASON_SINGLE_EDGE_DUPLICATE, pred->ep_edge);
1129
1130 remove.add (pred);
1131 }
1132 else if (pred->ep_edge != existing->ep_edge
1133 && pred->ep_probability == existing->ep_probability
1134 && pred->ep_probability != REG_BR_PROB_BASE / 2)
1135 {
1136 /* Remove both predictors as they predict the same
1137 for both edges. */
1138 dump_prediction (dump_file, existing->ep_predictor,
1139 pred->ep_probability, bb,
1140 REASON_EDGE_PAIR_DUPLICATE,
1141 existing->ep_edge);
1142 dump_prediction (dump_file, pred->ep_predictor,
1143 pred->ep_probability, bb,
1144 REASON_EDGE_PAIR_DUPLICATE,
1145 pred->ep_edge);
1146
1147 remove.add (existing);
1148 remove.add (pred);
1149 }
1150 }
1151
1152 edge_prediction **slot2 = s.find_slot (pred, INSERT);
1153 *slot2 = pred;
1154 }
1155
1156 /* Step 2: Remove predictors. */
1157 filter_predictions (preds, not_removed_prediction_p, &remove);
1158 }
1159 }
1160
1161 /* Combine predictions into a single probability and store it in the CFG.
1162    Remove now-useless prediction entries.
1163    If DRY_RUN is set, only produce dumps and do not modify the profile.  */
1164
1165 static void
1166 combine_predictions_for_bb (basic_block bb, bool dry_run)
1167 {
1168 int best_probability = PROB_EVEN;
1169 enum br_predictor best_predictor = END_PREDICTORS;
1170 int combined_probability = REG_BR_PROB_BASE / 2;
1171 int d;
1172 bool first_match = false;
1173 bool found = false;
1174 struct edge_prediction *pred;
1175 int nedges = 0;
1176 edge e, first = NULL, second = NULL;
1177 edge_iterator ei;
1178 int nzero = 0;
1179 int nunknown = 0;
1180
1181 FOR_EACH_EDGE (e, ei, bb->succs)
1182 {
1183 if (!unlikely_executed_edge_p (e))
1184 {
1185 nedges ++;
1186 if (first && !second)
1187 second = e;
1188 if (!first)
1189 first = e;
1190 }
1191 else if (!e->probability.initialized_p ())
1192 e->probability = profile_probability::never ();
1193 if (!e->probability.initialized_p ())
1194 nunknown++;
1195 else if (e->probability == profile_probability::never ())
1196 nzero++;
1197 }
1198
1199   /* When there is no successor or only one choice, prediction is easy.
1200 
1201      When we have a basic block with more than 2 successors, the situation
1202      is more complicated as DS theory cannot be used literally.
1203      More precisely, let's assume we predicted edge e1 with probability p1,
1204      thus: m1({b1}) = p1.  As we're going to combine more than 2 edges, we
1205      need to find the probability of e.g. m1({b2}), which we don't know.
1206      The only approximation is to equally distribute 1-p1 to all edges
1207      different from b1.
1208 
1209      According to numbers we've got from the SPEC2006 benchmark, there's only
1210      one interesting reliable predictor (noreturn call), which can be
1211      handled with a somewhat simpler approach.  */
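 /* Illustration (hypothetical numbers): with successors b1..b4 and a single
    prediction of p1 = 0.9 on e1, a literal DS combination would need
    m1({b2}), m1({b3}) and m1({b4}), which we do not have; the code below
    instead spreads the remaining 0.1 evenly, roughly 0.033 per other edge.  */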
1212 if (nedges != 2)
1213 {
1214 hash_set<edge> unlikely_edges (4);
1215 hash_set<edge_prediction *> likely_edges (4);
1216
1217       /* Identify all edges that have a probability close to very unlikely.
1218 	 Restricting the approach to exactly very unlikely probabilities is
1219 	 not worth it, as no such probability occurs in the SPEC2006 benchmark.  */
1220 edge_prediction **preds = bb_predictions->get (bb);
1221 if (preds)
1222 for (pred = *preds; pred; pred = pred->ep_next)
1223 {
1224 if (pred->ep_probability <= PROB_VERY_UNLIKELY
1225 || pred->ep_predictor == PRED_COLD_LABEL)
1226 unlikely_edges.add (pred->ep_edge);
1227 else if (pred->ep_probability >= PROB_VERY_LIKELY
1228 || pred->ep_predictor == PRED_BUILTIN_EXPECT
1229 || pred->ep_predictor == PRED_HOT_LABEL)
1230 likely_edges.add (pred);
1231 }
1232
1233 /* It can happen that an edge is both in likely_edges and unlikely_edges.
1234 Clear both sets in that situation. */
1235 for (hash_set<edge_prediction *>::iterator it = likely_edges.begin ();
1236 it != likely_edges.end (); ++it)
1237 if (unlikely_edges.contains ((*it)->ep_edge))
1238 {
1239 likely_edges.empty ();
1240 unlikely_edges.empty ();
1241 break;
1242 }
1243
1244 if (!dry_run)
1245 set_even_probabilities (bb, &unlikely_edges, &likely_edges);
1246 clear_bb_predictions (bb);
1247 if (dump_file)
1248 {
1249 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1250 if (unlikely_edges.is_empty ())
1251 fprintf (dump_file,
1252 "%i edges in bb %i predicted to even probabilities\n",
1253 nedges, bb->index);
1254 else
1255 {
1256 fprintf (dump_file,
1257 "%i edges in bb %i predicted with some unlikely edges\n",
1258 nedges, bb->index);
1259 FOR_EACH_EDGE (e, ei, bb->succs)
1260 if (!unlikely_executed_edge_p (e))
1261 dump_prediction (dump_file, PRED_COMBINED,
1262 e->probability.to_reg_br_prob_base (), bb, REASON_NONE, e);
1263 }
1264 }
1265 return;
1266 }
1267
1268 if (dump_file)
1269 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1270
1271 prune_predictions_for_bb (bb);
1272
1273 edge_prediction **preds = bb_predictions->get (bb);
1274
1275 if (preds)
1276 {
1277       /* We implement "first match" heuristics and use the probability guessed
1278 	 by the predictor with the smallest index.  */
1279 for (pred = *preds; pred; pred = pred->ep_next)
1280 {
1281 enum br_predictor predictor = pred->ep_predictor;
1282 int probability = pred->ep_probability;
1283
1284 if (pred->ep_edge != first)
1285 probability = REG_BR_PROB_BASE - probability;
1286
1287 found = true;
1288 	  /* The first-match heuristic would be wildly confused if we predicted
1289 	     both directions.  */
1290 if (best_predictor > predictor
1291 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
1292 {
1293 struct edge_prediction *pred2;
1294 int prob = probability;
1295
1296 for (pred2 = (struct edge_prediction *) *preds;
1297 pred2; pred2 = pred2->ep_next)
1298 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
1299 {
1300 int probability2 = pred2->ep_probability;
1301
1302 if (pred2->ep_edge != first)
1303 probability2 = REG_BR_PROB_BASE - probability2;
1304
1305 if ((probability < REG_BR_PROB_BASE / 2) !=
1306 (probability2 < REG_BR_PROB_BASE / 2))
1307 break;
1308
1309 		/* If the same predictor later gave a better result, go for it!  */
1310 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
1311 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
1312 prob = probability2;
1313 }
1314 if (!pred2)
1315 best_probability = prob, best_predictor = predictor;
1316 }
1317
1318 d = (combined_probability * probability
1319 + (REG_BR_PROB_BASE - combined_probability)
1320 * (REG_BR_PROB_BASE - probability));
1321
1322 	  /* Use FP math to avoid overflows of 32-bit integers.  */
1323 if (d == 0)
1324 /* If one probability is 0% and one 100%, avoid division by zero. */
1325 combined_probability = REG_BR_PROB_BASE / 2;
1326 else
1327 combined_probability = (((double) combined_probability)
1328 * probability
1329 * REG_BR_PROB_BASE / d + 0.5);
1330 }
1331 }
1332
1333   /* Decide which heuristic to use.  In case we didn't match anything,
1334      use the no_prediction heuristic; in case we did match, use either
1335      first match or Dempster-Shafer theory depending on the flags.  */
1336
1337 if (best_predictor != END_PREDICTORS)
1338 first_match = true;
1339
1340 if (!found)
1341 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb);
1342 else
1343 {
1344 if (!first_match)
1345 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
1346 !first_match ? REASON_NONE : REASON_IGNORED);
1347 else
1348 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
1349 first_match ? REASON_NONE : REASON_IGNORED);
1350 }
1351
1352 if (first_match)
1353 combined_probability = best_probability;
1354 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1355
1356 if (preds)
1357 {
1358 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
1359 {
1360 enum br_predictor predictor = pred->ep_predictor;
1361 int probability = pred->ep_probability;
1362
1363 dump_prediction (dump_file, predictor, probability, bb,
1364 (!first_match || best_predictor == predictor)
1365 ? REASON_NONE : REASON_IGNORED, pred->ep_edge);
1366 }
1367 }
1368 clear_bb_predictions (bb);
1369
1370
1371   /* If we have only one successor whose probability is unknown, we can
1372      compute the missing probability.  */
1373 if (nunknown == 1)
1374 {
1375 profile_probability prob = profile_probability::always ();
1376 edge missing = NULL;
1377
1378 FOR_EACH_EDGE (e, ei, bb->succs)
1379 if (e->probability.initialized_p ())
1380 prob -= e->probability;
1381 else if (missing == NULL)
1382 missing = e;
1383 else
1384 gcc_unreachable ();
1385 missing->probability = prob;
1386 }
1387 /* If nothing is unknown, we have nothing to update. */
1388 else if (!nunknown && nzero != (int)EDGE_COUNT (bb->succs))
1389 ;
1390 else if (!dry_run)
1391 {
1392 first->probability
1393 = profile_probability::from_reg_br_prob_base (combined_probability);
1394 second->probability = first->probability.invert ();
1395 }
1396 }
1397
1398 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
1399    Return the SSA_NAME if the condition is satisfied, NULL otherwise.
1400
1401 T1 and T2 should be one of the following cases:
1402 1. T1 is SSA_NAME, T2 is NULL
1403 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
1404 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
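 /* Illustrative calls: strips_small_constant (i_5, 3) returns i_5;
    strips_small_constant (7, i_5) returns NULL, since 7 is outside [-4, 4];
    strips_small_constant (i_5, j_6) returns NULL, as both are SSA_NAMEs.  */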
1405
1406 static tree
1407 strips_small_constant (tree t1, tree t2)
1408 {
1409 tree ret = NULL;
1410 int value = 0;
1411
1412 if (!t1)
1413 return NULL;
1414 else if (TREE_CODE (t1) == SSA_NAME)
1415 ret = t1;
1416 else if (tree_fits_shwi_p (t1))
1417 value = tree_to_shwi (t1);
1418 else
1419 return NULL;
1420
1421 if (!t2)
1422 return ret;
1423 else if (tree_fits_shwi_p (t2))
1424 value = tree_to_shwi (t2);
1425 else if (TREE_CODE (t2) == SSA_NAME)
1426 {
1427 if (ret)
1428 return NULL;
1429 else
1430 ret = t2;
1431 }
1432
1433 if (value <= 4 && value >= -4)
1434 return ret;
1435 else
1436 return NULL;
1437 }
1438
1439 /* Return the SSA_NAME in T or T's operands.
1440    Return NULL if no SSA_NAME can be found.  */
1441
1442 static tree
1443 get_base_value (tree t)
1444 {
1445 if (TREE_CODE (t) == SSA_NAME)
1446 return t;
1447
1448 if (!BINARY_CLASS_P (t))
1449 return NULL;
1450
1451 switch (TREE_OPERAND_LENGTH (t))
1452 {
1453 case 1:
1454 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1455 case 2:
1456 return strips_small_constant (TREE_OPERAND (t, 0),
1457 TREE_OPERAND (t, 1));
1458 default:
1459 return NULL;
1460 }
1461 }
1462
1463 /* Check the compare STMT in LOOP.  If it compares an induction
1464    variable to a loop invariant, return true, and save the
1465    LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
1466    Otherwise return false and set LOOP_INVARIANT to NULL.  */
1467
1468 static bool
1469 is_comparison_with_loop_invariant_p (gcond *stmt, class loop *loop,
1470 tree *loop_invariant,
1471 enum tree_code *compare_code,
1472 tree *loop_step,
1473 tree *loop_iv_base)
1474 {
1475 tree op0, op1, bound, base;
1476 affine_iv iv0, iv1;
1477 enum tree_code code;
1478 tree step;
1479
1480 code = gimple_cond_code (stmt);
1481 *loop_invariant = NULL;
1482
1483 switch (code)
1484 {
1485 case GT_EXPR:
1486 case GE_EXPR:
1487 case NE_EXPR:
1488 case LT_EXPR:
1489 case LE_EXPR:
1490 case EQ_EXPR:
1491 break;
1492
1493 default:
1494 return false;
1495 }
1496
1497 op0 = gimple_cond_lhs (stmt);
1498 op1 = gimple_cond_rhs (stmt);
1499
1500 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1501 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1502 return false;
1503 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1504 return false;
1505 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1506 return false;
1507 if (TREE_CODE (iv0.step) != INTEGER_CST
1508 || TREE_CODE (iv1.step) != INTEGER_CST)
1509 return false;
1510 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1511 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1512 return false;
1513
1514 if (integer_zerop (iv0.step))
1515 {
1516 if (code != NE_EXPR && code != EQ_EXPR)
1517 code = invert_tree_comparison (code, false);
1518 bound = iv0.base;
1519 base = iv1.base;
1520 if (tree_fits_shwi_p (iv1.step))
1521 step = iv1.step;
1522 else
1523 return false;
1524 }
1525 else
1526 {
1527 bound = iv1.base;
1528 base = iv0.base;
1529 if (tree_fits_shwi_p (iv0.step))
1530 step = iv0.step;
1531 else
1532 return false;
1533 }
1534
1535 if (TREE_CODE (bound) != INTEGER_CST)
1536 bound = get_base_value (bound);
1537 if (!bound)
1538 return false;
1539 if (TREE_CODE (base) != INTEGER_CST)
1540 base = get_base_value (base);
1541 if (!base)
1542 return false;
1543
1544 *loop_invariant = bound;
1545 *compare_code = code;
1546 *loop_step = step;
1547 *loop_iv_base = base;
1548 return true;
1549 }
1550
1551 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1552
1553 static bool
1554 expr_coherent_p (tree t1, tree t2)
1555 {
1556 gimple *stmt;
1557 tree ssa_name_1 = NULL;
1558 tree ssa_name_2 = NULL;
1559
1560 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1561 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1562
1563 if (t1 == t2)
1564 return true;
1565
1566 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1567 return true;
1568 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1569 return false;
1570
1571 /* Check to see if t1 is expressed/defined with t2. */
1572 stmt = SSA_NAME_DEF_STMT (t1);
1573 gcc_assert (stmt != NULL);
1574 if (is_gimple_assign (stmt))
1575 {
1576 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1577 if (ssa_name_1 && ssa_name_1 == t2)
1578 return true;
1579 }
1580
1581 /* Check to see if t2 is expressed/defined with t1. */
1582 stmt = SSA_NAME_DEF_STMT (t2);
1583 gcc_assert (stmt != NULL);
1584 if (is_gimple_assign (stmt))
1585 {
1586 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1587 if (ssa_name_2 && ssa_name_2 == t1)
1588 return true;
1589 }
1590
1591 /* Compare if t1 and t2's def_stmts are identical. */
1592 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1593 return true;
1594 else
1595 return false;
1596 }
1597
1598 /* Return true if BB is predicted by one of the loop heuristics.  */
1599
1600 static bool
1601 predicted_by_loop_heuristics_p (basic_block bb)
1602 {
1603 struct edge_prediction *i;
1604 edge_prediction **preds = bb_predictions->get (bb);
1605
1606 if (!preds)
1607 return false;
1608
1609 for (i = *preds; i; i = i->ep_next)
1610 if (i->ep_predictor == PRED_LOOP_ITERATIONS_GUESSED
1611 || i->ep_predictor == PRED_LOOP_ITERATIONS_MAX
1612 || i->ep_predictor == PRED_LOOP_ITERATIONS
1613 || i->ep_predictor == PRED_LOOP_EXIT
1614 || i->ep_predictor == PRED_LOOP_EXIT_WITH_RECURSION
1615 || i->ep_predictor == PRED_LOOP_EXTRA_EXIT)
1616 return true;
1617 return false;
1618 }
1619
1620 /* Predict branch probability of BB when BB contains a branch that compares
1621 an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
1622    loop exit is compared using LOOP_BOUND_CODE, with a step of LOOP_BOUND_STEP.
1623
1624 E.g.
1625 for (int i = 0; i < bound; i++) {
1626 if (i < bound - 2)
1627 computation_1();
1628 else
1629 computation_2();
1630 }
1631
1632 In this loop, we will predict the branch inside the loop to be taken. */
1633
1634 static void
1635 predict_iv_comparison (class loop *loop, basic_block bb,
1636 tree loop_bound_var,
1637 tree loop_iv_base_var,
1638 enum tree_code loop_bound_code,
1639 int loop_bound_step)
1640 {
1641 gimple *stmt;
1642 tree compare_var, compare_base;
1643 enum tree_code compare_code;
1644 tree compare_step_var;
1645 edge then_edge;
1646 edge_iterator ei;
1647
1648 if (predicted_by_loop_heuristics_p (bb))
1649 return;
1650
1651 stmt = last_stmt (bb);
1652 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1653 return;
1654 if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
1655 loop, &compare_var,
1656 &compare_code,
1657 &compare_step_var,
1658 &compare_base))
1659 return;
1660
1661 /* Find the taken edge. */
1662 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1663 if (then_edge->flags & EDGE_TRUE_VALUE)
1664 break;
1665
1666 /* When comparing an IV to a loop invariant, NE is more likely to be
1667 taken while EQ is more likely to be not-taken. */
1668 if (compare_code == NE_EXPR)
1669 {
1670 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1671 return;
1672 }
1673 else if (compare_code == EQ_EXPR)
1674 {
1675 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1676 return;
1677 }
1678
1679 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1680 return;
1681
1682 /* If loop bound, base and compare bound are all constants, we can
1683 calculate the probability directly. */
1684 if (tree_fits_shwi_p (loop_bound_var)
1685 && tree_fits_shwi_p (compare_var)
1686 && tree_fits_shwi_p (compare_base))
1687 {
1688 int probability;
1689 wi::overflow_type overflow;
1690 bool overall_overflow = false;
1691 widest_int compare_count, tem;
1692
1693 /* (loop_bound - base) / compare_step */
1694 tem = wi::sub (wi::to_widest (loop_bound_var),
1695 wi::to_widest (compare_base), SIGNED, &overflow);
1696 overall_overflow |= overflow;
1697 widest_int loop_count = wi::div_trunc (tem,
1698 wi::to_widest (compare_step_var),
1699 SIGNED, &overflow);
1700 overall_overflow |= overflow;
1701
1702 if (!wi::neg_p (wi::to_widest (compare_step_var))
1703 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1704 {
1705 /* (loop_bound - compare_bound) / compare_step */
1706 tem = wi::sub (wi::to_widest (loop_bound_var),
1707 wi::to_widest (compare_var), SIGNED, &overflow);
1708 overall_overflow |= overflow;
1709 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1710 SIGNED, &overflow);
1711 overall_overflow |= overflow;
1712 }
1713 else
1714 {
1715 /* (compare_bound - base) / compare_step */
1716 tem = wi::sub (wi::to_widest (compare_var),
1717 wi::to_widest (compare_base), SIGNED, &overflow);
1718 overall_overflow |= overflow;
1719 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1720 SIGNED, &overflow);
1721 overall_overflow |= overflow;
1722 }
1723 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1724 ++compare_count;
1725 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1726 ++loop_count;
1727 if (wi::neg_p (compare_count))
1728 compare_count = 0;
1729 if (wi::neg_p (loop_count))
1730 loop_count = 0;
1731 if (loop_count == 0)
1732 probability = 0;
1733 else if (wi::cmps (compare_count, loop_count) == 1)
1734 probability = REG_BR_PROB_BASE;
1735 else
1736 {
1737 tem = compare_count * REG_BR_PROB_BASE;
1738 tem = wi::udiv_trunc (tem, loop_count);
1739 probability = tem.to_uhwi ();
1740 }
1741
1742 /* FIXME: The branch prediction seems broken. It has only 20% hitrate. */
1743 if (!overall_overflow)
1744 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1745
1746 return;
1747 }
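 /* Numeric illustration of the constant case above: for
    for (i = 0; i < 100; i++) if (i < 40) ... with step 1, loop_count is
    (100 - 0) / 1 == 100 and compare_count is (40 - 0) / 1 == 40, so the
    then-edge gets probability 40 * REG_BR_PROB_BASE / 100, matching the
    40 of 100 iterations for which i < 40 holds.  */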
1748
1749 if (expr_coherent_p (loop_bound_var, compare_var))
1750 {
1751 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1752 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1753 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1754 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1755 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1756 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1757 else if (loop_bound_code == NE_EXPR)
1758 {
1759 /* If the loop backedge condition is "(i != bound)", we do
1760 the comparison based on the step of IV:
1761 * step < 0 : backedge condition is like (i > bound)
1762 * step > 0 : backedge condition is like (i < bound) */
1763 gcc_assert (loop_bound_step != 0);
1764 if (loop_bound_step > 0
1765 && (compare_code == LT_EXPR
1766 || compare_code == LE_EXPR))
1767 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1768 else if (loop_bound_step < 0
1769 && (compare_code == GT_EXPR
1770 || compare_code == GE_EXPR))
1771 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1772 else
1773 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1774 }
1775 else
1776 /* The branch is predicted not-taken if loop_bound_code is
1777 opposite with compare_code. */
1778 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1779 }
1780 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1781 {
1782 /* For cases like:
1783 for (i = s; i < h; i++)
1784 if (i > s + 2) ....
1785 The branch should be predicted taken. */
1786 if (loop_bound_step > 0
1787 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1788 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1789 else if (loop_bound_step < 0
1790 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1791 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1792 else
1793 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1794 }
1795 }
1796
1797 /* Predict for extra loop exits that will lead to EXIT_EDGE.  The extra loop
1798    exits result from short-circuit conditions that will generate an
1799    if_tmp.  E.g.:
1800
1801 if (foo() || global > 10)
1802 break;
1803
1804 This will be translated into:
1805
1806 BB3:
1807 loop header...
1808 BB4:
1809 if foo() goto BB6 else goto BB5
1810 BB5:
1811 if global > 10 goto BB6 else goto BB7
1812 BB6:
1813 goto BB7
1814 BB7:
1815 iftmp = (PHI 0(BB5), 1(BB6))
1816 if iftmp == 1 goto BB8 else goto BB3
1817 BB8:
1818 outside of the loop...
1819
1820 The edge BB7->BB8 is loop exit because BB8 is outside of the loop.
1821 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
1822 exits. This function takes BB7->BB8 as input, and finds out the extra loop
1823 exits to predict them using PRED_LOOP_EXTRA_EXIT. */
1824
1825 static void
1826 predict_extra_loop_exits (edge exit_edge)
1827 {
1828 unsigned i;
1829 bool check_value_one;
1830 gimple *lhs_def_stmt;
1831 gphi *phi_stmt;
1832 tree cmp_rhs, cmp_lhs;
1833 gimple *last;
1834 gcond *cmp_stmt;
1835
1836 last = last_stmt (exit_edge->src);
1837 if (!last)
1838 return;
1839 cmp_stmt = dyn_cast <gcond *> (last);
1840 if (!cmp_stmt)
1841 return;
1842
1843 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1844 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1845 if (!TREE_CONSTANT (cmp_rhs)
1846 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1847 return;
1848 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1849 return;
1850
1851 /* If check_value_one is true, only the phi_args with value '1' will lead
1852 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1853 loop exit. */
1854 check_value_one = (((integer_onep (cmp_rhs))
1855 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1856 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
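   /* For instance, for "if (iftmp_3 == 1) goto <exit>" where EXIT_EDGE is
      the true edge: integer_onep (cmp_rhs) == 1, (code == EQ_EXPR) == 1 and
      the true-edge test == 1, so check_value_one = (1 ^ 1) ^ 1 == 1: only
      PHI arguments equal to 1 feed the exit.  */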
1857
1858 lhs_def_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1859 if (!lhs_def_stmt)
1860 return;
1861
1862 phi_stmt = dyn_cast <gphi *> (lhs_def_stmt);
1863 if (!phi_stmt)
1864 return;
1865
1866 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1867 {
1868 edge e1;
1869 edge_iterator ei;
1870 tree val = gimple_phi_arg_def (phi_stmt, i);
1871 edge e = gimple_phi_arg_edge (phi_stmt, i);
1872
1873 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1874 continue;
1875 if ((check_value_one ^ integer_onep (val)) == 1)
1876 continue;
1877 if (EDGE_COUNT (e->src->succs) != 1)
1878 {
1879 predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1880 continue;
1881 }
1882
1883 FOR_EACH_EDGE (e1, ei, e->src->preds)
1884 predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1885 }
1886 }
1887
1888
1889 /* Predict edge probabilities by exploiting loop structure. */
1890
1891 static void
1892 predict_loops (void)
1893 {
1894 class loop *loop;
1895 basic_block bb;
1896 hash_set <class loop *> with_recursion(10);
1897
1898 FOR_EACH_BB_FN (bb, cfun)
1899 {
1900 gimple_stmt_iterator gsi;
1901 tree decl;
1902
1903 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1904 if (is_gimple_call (gsi_stmt (gsi))
1905 && (decl = gimple_call_fndecl (gsi_stmt (gsi))) != NULL
1906 && recursive_call_p (current_function_decl, decl))
1907 {
1908 loop = bb->loop_father;
1909 while (loop && !with_recursion.add (loop))
1910 loop = loop_outer (loop);
1911 }
1912 }
1913
1914 /* Predict exits and other branches of each natural loop, processing
1915 loops from the innermost to the outermost. */
1916 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1917 {
1918 basic_block bb, *bbs;
1919 unsigned j, n_exits = 0;
1920 vec<edge> exits;
1921 class tree_niter_desc niter_desc;
1922 edge ex;
1923 class nb_iter_bound *nb_iter;
1924 enum tree_code loop_bound_code = ERROR_MARK;
1925 tree loop_bound_step = NULL;
1926 tree loop_bound_var = NULL;
1927 tree loop_iv_base = NULL;
1928 gcond *stmt = NULL;
1929 bool recursion = with_recursion.contains (loop);
1930
1931 exits = get_loop_exit_edges (loop);
1932 FOR_EACH_VEC_ELT (exits, j, ex)
1933 if (!unlikely_executed_edge_p (ex) && !(ex->flags & EDGE_ABNORMAL_CALL))
1934 n_exits ++;
1935 if (!n_exits)
1936 {
1937 exits.release ();
1938 continue;
1939 }
1940
1941 if (dump_file && (dump_flags & TDF_DETAILS))
1942 fprintf (dump_file, "Predicting loop %i%s with %i exits.\n",
1943 loop->num, recursion ? " (with recursion)":"", n_exits);
1944 if (dump_file && (dump_flags & TDF_DETAILS)
1945 && max_loop_iterations_int (loop) >= 0)
1946 {
1947 fprintf (dump_file,
1948 "Loop %d iterates at most %i times.\n", loop->num,
1949 (int)max_loop_iterations_int (loop));
1950 }
1951 if (dump_file && (dump_flags & TDF_DETAILS)
1952 && likely_max_loop_iterations_int (loop) >= 0)
1953 {
1954 fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
1955 loop->num, (int)likely_max_loop_iterations_int (loop));
1956 }
1957
1958 FOR_EACH_VEC_ELT (exits, j, ex)
1959 {
1960 tree niter = NULL;
1961 HOST_WIDE_INT nitercst;
1962 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1963 int probability;
1964 enum br_predictor predictor;
1965 widest_int nit;
1966
1967 if (unlikely_executed_edge_p (ex)
1968 || (ex->flags & EDGE_ABNORMAL_CALL))
1969 continue;
1970 /* Loop heuristics do not expect the exit conditional to be inside
1971 an inner loop. We predict from the innermost to the outermost loop. */
1972 if (predicted_by_loop_heuristics_p (ex->src))
1973 {
1974 if (dump_file && (dump_flags & TDF_DETAILS))
1975 fprintf (dump_file, "Skipping exit %i->%i because "
1976 "it is already predicted.\n",
1977 ex->src->index, ex->dest->index);
1978 continue;
1979 }
1980 predict_extra_loop_exits (ex);
1981
1982 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1983 niter = niter_desc.niter;
1984 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1985 niter = loop_niter_by_eval (loop, ex);
1986 if (dump_file && (dump_flags & TDF_DETAILS)
1987 && TREE_CODE (niter) == INTEGER_CST)
1988 {
1989 fprintf (dump_file, "Exit %i->%i %d iterates ",
1990 ex->src->index, ex->dest->index,
1991 loop->num);
1992 print_generic_expr (dump_file, niter, TDF_SLIM);
1993 fprintf (dump_file, " times.\n");
1994 }
1995
1996 if (TREE_CODE (niter) == INTEGER_CST)
1997 {
1998 if (tree_fits_uhwi_p (niter)
1999 && max
2000 && compare_tree_int (niter, max - 1) == -1)
2001 nitercst = tree_to_uhwi (niter) + 1;
2002 else
2003 nitercst = max;
2004 predictor = PRED_LOOP_ITERATIONS;
2005 }
2006 /* If we have just one exit and we can derive some information about
2007 the number of iterations of the loop from the statements inside
2008 the loop, use it to predict this exit. */
2009 else if (n_exits == 1
2010 && estimated_stmt_executions (loop, &nit))
2011 {
2012 if (wi::gtu_p (nit, max))
2013 nitercst = max;
2014 else
2015 nitercst = nit.to_shwi ();
2016 predictor = PRED_LOOP_ITERATIONS_GUESSED;
2017 }
2018 /* If we have a likely upper bound, trust it for very small iteration
2019 counts. Such loops would otherwise get mispredicted by standard
2020 LOOP_EXIT heuristics. */
2021 else if (n_exits == 1
2022 && likely_max_stmt_executions (loop, &nit)
2023 && wi::ltu_p (nit,
2024 RDIV (REG_BR_PROB_BASE,
2025 REG_BR_PROB_BASE
2026 - predictor_info
2027 [recursion
2028 ? PRED_LOOP_EXIT_WITH_RECURSION
2029 : PRED_LOOP_EXIT].hitrate)))
2030 {
2031 nitercst = nit.to_shwi ();
2032 predictor = PRED_LOOP_ITERATIONS_MAX;
2033 }
2034 else
2035 {
2036 if (dump_file && (dump_flags & TDF_DETAILS))
2037 fprintf (dump_file, "Nothing known about exit %i->%i.\n",
2038 ex->src->index, ex->dest->index);
2039 continue;
2040 }
2041
2042 if (dump_file && (dump_flags & TDF_DETAILS))
2043 fprintf (dump_file, "Recording prediction to %i iterations by %s.\n",
2044 (int)nitercst, predictor_info[predictor].name);
2045 /* If the predicted number of iterations is zero, do not predict
2046 the exit edges (this also avoids the division by zero below). */
2047 if (nitercst == 0)
2048 continue;
2049
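/* For illustration: a loop predicted to iterate 10 times gets an exit
probability of REG_BR_PROB_BASE / 10, i.e. 10% (1000 out of the usual
REG_BR_PROB_BASE of 10000). */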
2050 probability = RDIV (REG_BR_PROB_BASE, nitercst);
2051 predict_edge (ex, predictor, probability);
2052 }
2053 exits.release ();
2054
2055 /* Find information about loop bound variables. */
2056 for (nb_iter = loop->bounds; nb_iter;
2057 nb_iter = nb_iter->next)
2058 if (nb_iter->stmt
2059 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
2060 {
2061 stmt = as_a <gcond *> (nb_iter->stmt);
2062 break;
2063 }
2064 if (!stmt && last_stmt (loop->header)
2065 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
2066 stmt = as_a <gcond *> (last_stmt (loop->header));
2067 if (stmt)
2068 is_comparison_with_loop_invariant_p (stmt, loop,
2069 &loop_bound_var,
2070 &loop_bound_code,
2071 &loop_bound_step,
2072 &loop_iv_base);
2073
2074 bbs = get_loop_body (loop);
2075
2076 for (j = 0; j < loop->num_nodes; j++)
2077 {
2078 edge e;
2079 edge_iterator ei;
2080
2081 bb = bbs[j];
2082
2083 /* Bypass loop heuristics on continue statements. These
2084 statements construct loops via "non-loop" constructs
2085 in the source language and are better handled
2086 separately. */
2087 if (predicted_by_p (bb, PRED_CONTINUE))
2088 {
2089 if (dump_file && (dump_flags & TDF_DETAILS))
2090 fprintf (dump_file, "BB %i predicted by continue.\n",
2091 bb->index);
2092 continue;
2093 }
2094
2095 /* If we already used more reliable loop exit predictors, do not
2096 bother with PRED_LOOP_EXIT. */
2097 if (!predicted_by_loop_heuristics_p (bb))
2098 {
2099 /* For a loop with many exits we don't want to predict all exits
2100 with the same pretty large probability, because if all exits are
2101 considered in a row, the loop would be predicted to iterate
2102 almost never. The code dividing the probability by the number of
2103 exits is very rough. It should compute the number of exits
2104 taken in each path through the function (not the overall number
2105 of exits, which might be a lot higher for loops with wide switch
2106 statements in them) and compute the n-th root.
2107 
2108 We limit the minimal probability to 2% to avoid
2109 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
2110 as this was causing a regression in the perl benchmark containing
2111 such a wide loop. */
2112
2113 int probability = ((REG_BR_PROB_BASE
2114 - predictor_info
2115 [recursion
2116 ? PRED_LOOP_EXIT_WITH_RECURSION
2117 : PRED_LOOP_EXIT].hitrate)
2118 / n_exits);
2119 if (probability < HITRATE (2))
2120 probability = HITRATE (2);
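/* Worked example (with an assumed hitrate): if PRED_LOOP_EXIT has a
hitrate of, say, 89%, a loop with four exits yields
(10000 - 8900) / 4 == 275, i.e. each exit is predicted with about a
2.75% probability, above the 2% floor applied above. */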
2121 FOR_EACH_EDGE (e, ei, bb->succs)
2122 if (e->dest->index < NUM_FIXED_BLOCKS
2123 || !flow_bb_inside_loop_p (loop, e->dest))
2124 {
2125 if (dump_file && (dump_flags & TDF_DETAILS))
2126 fprintf (dump_file,
2127 "Predicting exit %i->%i with prob %i.\n",
2128 e->src->index, e->dest->index, probability);
2129 predict_edge (e,
2130 recursion ? PRED_LOOP_EXIT_WITH_RECURSION
2131 : PRED_LOOP_EXIT, probability);
2132 }
2133 }
2134 if (loop_bound_var)
2135 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
2136 loop_bound_code,
2137 tree_to_shwi (loop_bound_step));
2138 }
2139
2140 /* In the following code
2141 for (loop1)
2142 if (cond)
2143 for (loop2)
2144 body;
2145 guess that cond is unlikely. */
2146 if (loop_outer (loop)->num)
2147 {
2148 basic_block bb = NULL;
2149 edge preheader_edge = loop_preheader_edge (loop);
2150
2151 if (single_pred_p (preheader_edge->src)
2152 && single_succ_p (preheader_edge->src))
2153 preheader_edge = single_pred_edge (preheader_edge->src);
2154
2155 gimple *stmt = last_stmt (preheader_edge->src);
2156 /* Pattern match the Fortran loop preheader:
2157 _16 = BUILTIN_EXPECT (_15, 1, PRED_FORTRAN_LOOP_PREHEADER);
2158 _17 = (logical(kind=4)) _16;
2159 if (_17 != 0)
2160 goto <bb 11>;
2161 else
2162 goto <bb 13>;
2163
2164 Loop guard branch prediction says nothing about duplicated loop
2165 headers produced by the Fortran frontend, and in this case we want
2166 to predict paths leading to this preheader. */
2167
2168 if (stmt
2169 && gimple_code (stmt) == GIMPLE_COND
2170 && gimple_cond_code (stmt) == NE_EXPR
2171 && TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
2172 && integer_zerop (gimple_cond_rhs (stmt)))
2173 {
2174 gimple *call_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2175 if (gimple_code (call_stmt) == GIMPLE_ASSIGN
2176 && gimple_expr_code (call_stmt) == NOP_EXPR
2177 && TREE_CODE (gimple_assign_rhs1 (call_stmt)) == SSA_NAME)
2178 call_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (call_stmt));
2179 if (gimple_call_internal_p (call_stmt, IFN_BUILTIN_EXPECT)
2180 && TREE_CODE (gimple_call_arg (call_stmt, 2)) == INTEGER_CST
2181 && tree_fits_uhwi_p (gimple_call_arg (call_stmt, 2))
2182 && tree_to_uhwi (gimple_call_arg (call_stmt, 2))
2183 == PRED_FORTRAN_LOOP_PREHEADER)
2184 bb = preheader_edge->src;
2185 }
2186 if (!bb)
2187 {
2188 if (!dominated_by_p (CDI_DOMINATORS,
2189 loop_outer (loop)->latch, loop->header))
2190 predict_paths_leading_to_edge (loop_preheader_edge (loop),
2191 recursion
2192 ? PRED_LOOP_GUARD_WITH_RECURSION
2193 : PRED_LOOP_GUARD,
2194 NOT_TAKEN,
2195 loop_outer (loop));
2196 }
2197 else
2198 {
2199 if (!dominated_by_p (CDI_DOMINATORS,
2200 loop_outer (loop)->latch, bb))
2201 predict_paths_leading_to (bb,
2202 recursion
2203 ? PRED_LOOP_GUARD_WITH_RECURSION
2204 : PRED_LOOP_GUARD,
2205 NOT_TAKEN,
2206 loop_outer (loop));
2207 }
2208 }
2209
2210 /* Free basic blocks from get_loop_body. */
2211 free (bbs);
2212 }
2213 }
2214
2215 /* Attempt to predict probabilities of BB outgoing edges using local
2216 properties. */
2217 static void
2218 bb_estimate_probability_locally (basic_block bb)
2219 {
2220 rtx_insn *last_insn = BB_END (bb);
2221 rtx cond;
2222
2223 if (! can_predict_insn_p (last_insn))
2224 return;
2225 cond = get_condition (last_insn, NULL, false, false);
2226 if (! cond)
2227 return;
2228
2229 /* Try "pointer heuristic."
2230 A comparison ptr == 0 is predicted as false.
2231 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
2232 if (COMPARISON_P (cond)
2233 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
2234 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
2235 {
2236 if (GET_CODE (cond) == EQ)
2237 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
2238 else if (GET_CODE (cond) == NE)
2239 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
2240 }
2241 else
2242
2243 /* Try "opcode heuristic."
2244 EQ tests are usually false and NE tests are usually true. Also,
2245 most quantities are positive, so we can make the appropriate guesses
2246 about signed comparisons against zero. */
2247 switch (GET_CODE (cond))
2248 {
2249 case CONST_INT:
2250 /* Unconditional branch. */
2251 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
2252 cond == const0_rtx ? NOT_TAKEN : TAKEN);
2253 break;
2254
2255 case EQ:
2256 case UNEQ:
2257 /* Floating point comparisons appear to behave in a very
2258 unpredictable way because of the special role of = tests in
2259 FP code. */
2260 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
2261 ;
2262 /* Comparisons with 0 are often used for booleans and there is
2263 nothing useful to predict about them. */
2264 else if (XEXP (cond, 1) == const0_rtx
2265 || XEXP (cond, 0) == const0_rtx)
2266 ;
2267 else
2268 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
2269 break;
2270
2271 case NE:
2272 case LTGT:
2273 /* Floating point comparisons appear to behave in a very
2274 unpredictable way because of the special role of = tests in
2275 FP code. */
2276 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
2277 ;
2278 /* Comparisons with 0 are often used for booleans and there is
2279 nothing useful to predict about them. */
2280 else if (XEXP (cond, 1) == const0_rtx
2281 || XEXP (cond, 0) == const0_rtx)
2282 ;
2283 else
2284 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
2285 break;
2286
2287 case ORDERED:
2288 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
2289 break;
2290
2291 case UNORDERED:
2292 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
2293 break;
2294
2295 case LE:
2296 case LT:
2297 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2298 || XEXP (cond, 1) == constm1_rtx)
2299 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
2300 break;
2301
2302 case GE:
2303 case GT:
2304 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2305 || XEXP (cond, 1) == constm1_rtx)
2306 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
2307 break;
2308
2309 default:
2310 break;
2311 }
2312 }
2313
2314 /* Set edge->probability for each successor edge of BB. */
2315 void
2316 guess_outgoing_edge_probabilities (basic_block bb)
2317 {
2318 bb_estimate_probability_locally (bb);
2319 combine_predictions_for_insn (BB_END (bb), bb);
2320 }
2321 \f
2322 static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor,
2323 HOST_WIDE_INT *probability);
2324
2325 /* Helper function for expr_expected_value. */
2326
2327 static tree
2328 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
2329 tree op1, bitmap visited, enum br_predictor *predictor,
2330 HOST_WIDE_INT *probability)
2331 {
2332 gimple *def;
2333
2334 /* Reset returned probability value. */
2335 *probability = -1;
2336 *predictor = PRED_UNCONDITIONAL;
2337
2338 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2339 {
2340 if (TREE_CONSTANT (op0))
2341 return op0;
2342
2343 if (code == IMAGPART_EXPR)
2344 {
2345 if (TREE_CODE (TREE_OPERAND (op0, 0)) == SSA_NAME)
2346 {
2347 def = SSA_NAME_DEF_STMT (TREE_OPERAND (op0, 0));
2348 if (is_gimple_call (def)
2349 && gimple_call_internal_p (def)
2350 && (gimple_call_internal_fn (def)
2351 == IFN_ATOMIC_COMPARE_EXCHANGE))
2352 {
2353 /* Assume that any given atomic operation has low contention,
2354 and thus the compare-and-swap operation succeeds. */
2355 *predictor = PRED_COMPARE_AND_SWAP;
2356 return build_one_cst (TREE_TYPE (op0));
2357 }
2358 }
2359 }
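/* Note for illustration: IFN_ATOMIC_COMPARE_EXCHANGE returns a complex
value whose IMAGPART is the boolean success flag, so returning 1 here
predicts that the compare-and-swap succeeded. */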
2360
2361 if (code != SSA_NAME)
2362 return NULL_TREE;
2363
2364 def = SSA_NAME_DEF_STMT (op0);
2365
2366 /* If we were already here, break the infinite cycle. */
2367 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
2368 return NULL;
2369
2370 if (gimple_code (def) == GIMPLE_PHI)
2371 {
2372 /* All the arguments of the PHI node must have the same
2373 expected constant value. */
2374 int i, n = gimple_phi_num_args (def);
2375 tree val = NULL, new_val;
2376
2377 for (i = 0; i < n; i++)
2378 {
2379 tree arg = PHI_ARG_DEF (def, i);
2380 enum br_predictor predictor2;
2381
2382 /* If this PHI has itself as an argument, we cannot
2383 determine the expected value of this argument. However,
2384 if we can find an expected constant value for the other
2385 PHI args then we can still be sure that this is
2386 likely a constant. So be optimistic and just
2387 continue with the next argument. */
2388 if (arg == PHI_RESULT (def))
2389 continue;
2390
2391 HOST_WIDE_INT probability2;
2392 new_val = expr_expected_value (arg, visited, &predictor2,
2393 &probability2);
2394
2395 /* It is difficult to combine value predictors. Simply assume
2396 that the later predictor is weaker and take its prediction. */
2397 if (*predictor < predictor2)
2398 {
2399 *predictor = predictor2;
2400 *probability = probability2;
2401 }
2402 if (!new_val)
2403 return NULL;
2404 if (!val)
2405 val = new_val;
2406 else if (!operand_equal_p (val, new_val, false))
2407 return NULL;
2408 }
2409 return val;
2410 }
2411 if (is_gimple_assign (def))
2412 {
2413 if (gimple_assign_lhs (def) != op0)
2414 return NULL;
2415
2416 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
2417 gimple_assign_rhs1 (def),
2418 gimple_assign_rhs_code (def),
2419 gimple_assign_rhs2 (def),
2420 visited, predictor, probability);
2421 }
2422
2423 if (is_gimple_call (def))
2424 {
2425 tree decl = gimple_call_fndecl (def);
2426 if (!decl)
2427 {
2428 if (gimple_call_internal_p (def)
2429 && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
2430 {
2431 gcc_assert (gimple_call_num_args (def) == 3);
2432 tree val = gimple_call_arg (def, 0);
2433 if (TREE_CONSTANT (val))
2434 return val;
2435 tree val2 = gimple_call_arg (def, 2);
2436 gcc_assert (TREE_CODE (val2) == INTEGER_CST
2437 && tree_fits_uhwi_p (val2)
2438 && tree_to_uhwi (val2) < END_PREDICTORS);
2439 *predictor = (enum br_predictor) tree_to_uhwi (val2);
2440 if (*predictor == PRED_BUILTIN_EXPECT)
2441 *probability
2442 = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
2443 return gimple_call_arg (def, 1);
2444 }
2445 return NULL;
2446 }
2447
2448 if (DECL_IS_MALLOC (decl) || DECL_IS_OPERATOR_NEW_P (decl))
2449 {
2450 if (predictor)
2451 *predictor = PRED_MALLOC_NONNULL;
2452 return boolean_true_node;
2453 }
2454
2455 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
2456 switch (DECL_FUNCTION_CODE (decl))
2457 {
2458 case BUILT_IN_EXPECT:
2459 {
2460 tree val;
2461 if (gimple_call_num_args (def) != 2)
2462 return NULL;
2463 val = gimple_call_arg (def, 0);
2464 if (TREE_CONSTANT (val))
2465 return val;
2466 *predictor = PRED_BUILTIN_EXPECT;
2467 *probability
2468 = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
2469 return gimple_call_arg (def, 1);
2470 }
2471 case BUILT_IN_EXPECT_WITH_PROBABILITY:
2472 {
2473 tree val;
2474 if (gimple_call_num_args (def) != 3)
2475 return NULL;
2476 val = gimple_call_arg (def, 0);
2477 if (TREE_CONSTANT (val))
2478 return val;
2479 /* Compute final probability as:
2480 probability * REG_BR_PROB_BASE. */
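/* For illustration: with REG_BR_PROB_BASE of 10000,
__builtin_expect_with_probability (cond, 1, 0.9) makes PROB the
REAL_CST 0.9, and PROBI below becomes 9000. */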
2481 tree prob = gimple_call_arg (def, 2);
2482 tree t = TREE_TYPE (prob);
2483 tree base = build_int_cst (integer_type_node,
2484 REG_BR_PROB_BASE);
2485 base = build_real_from_int_cst (t, base);
2486 tree r = fold_build2_initializer_loc (UNKNOWN_LOCATION,
2487 MULT_EXPR, t, prob, base);
2488 if (TREE_CODE (r) != REAL_CST)
2489 {
2490 error_at (gimple_location (def),
2491 "probability %qE must be "
2492 "constant floating-point expression", prob);
2493 return NULL;
2494 }
2495 HOST_WIDE_INT probi
2496 = real_to_integer (TREE_REAL_CST_PTR (r));
2497 if (probi >= 0 && probi <= REG_BR_PROB_BASE)
2498 {
2499 *predictor = PRED_BUILTIN_EXPECT_WITH_PROBABILITY;
2500 *probability = probi;
2501 }
2502 else
2503 error_at (gimple_location (def),
2504 "probability %qE is outside "
2505 "the range [0.0, 1.0]", prob);
2506
2507 return gimple_call_arg (def, 1);
2508 }
2509
2510 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
2511 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
2512 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
2513 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
2514 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
2515 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
2516 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
2517 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
2518 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
2519 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
2520 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
2521 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
2522 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
2523 /* Assume that any given atomic operation has low contention,
2524 and thus the compare-and-swap operation succeeds. */
2525 *predictor = PRED_COMPARE_AND_SWAP;
2526 return boolean_true_node;
2527 case BUILT_IN_REALLOC:
2528 if (predictor)
2529 *predictor = PRED_MALLOC_NONNULL;
2530 return boolean_true_node;
2531 default:
2532 break;
2533 }
2534 }
2535
2536 return NULL;
2537 }
2538
2539 if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
2540 {
2541 tree res;
2542 enum br_predictor predictor2;
2543 HOST_WIDE_INT probability2;
2544 op0 = expr_expected_value (op0, visited, predictor, probability);
2545 if (!op0)
2546 return NULL;
2547 op1 = expr_expected_value (op1, visited, &predictor2, &probability2);
2548 if (!op1)
2549 return NULL;
2550 res = fold_build2 (code, type, op0, op1);
2551 if (TREE_CODE (res) == INTEGER_CST
2552 && TREE_CODE (op0) == INTEGER_CST
2553 && TREE_CODE (op1) == INTEGER_CST)
2554 {
2555 /* Combine binary predictions. */
2556 if (*probability != -1 || probability2 != -1)
2557 {
2558 HOST_WIDE_INT p1 = get_predictor_value (*predictor, *probability);
2559 HOST_WIDE_INT p2 = get_predictor_value (predictor2, probability2);
2560 *probability = RDIV (p1 * p2, REG_BR_PROB_BASE);
2561 }
2562
2563 if (*predictor < predictor2)
2564 *predictor = predictor2;
2565
2566 return res;
2567 }
2568 return NULL;
2569 }
2570 if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
2571 {
2572 tree res;
2573 op0 = expr_expected_value (op0, visited, predictor, probability);
2574 if (!op0)
2575 return NULL;
2576 res = fold_build1 (code, type, op0);
2577 if (TREE_CONSTANT (res))
2578 return res;
2579 return NULL;
2580 }
2581 return NULL;
2582 }
2583
2584 /* Return the constant EXPR is likely to have at execution time, or NULL
2585 if unknown. The function is used by the builtin_expect branch predictor,
2586 so the evidence must come from this construct and possible constant folding.
2587 
2588 We may want to implement a more involved value guess (such as value range
2589 propagation based prediction), but such tricks shall go to a new
2590 implementation. */
2591
2592 static tree
2593 expr_expected_value (tree expr, bitmap visited,
2594 enum br_predictor *predictor,
2595 HOST_WIDE_INT *probability)
2596 {
2597 enum tree_code code;
2598 tree op0, op1;
2599
2600 if (TREE_CONSTANT (expr))
2601 {
2602 *predictor = PRED_UNCONDITIONAL;
2603 *probability = -1;
2604 return expr;
2605 }
2606
2607 extract_ops_from_tree (expr, &code, &op0, &op1);
2608 return expr_expected_value_1 (TREE_TYPE (expr),
2609 op0, code, op1, visited, predictor,
2610 probability);
2611 }
2612 \f
2613
2614 /* Return the probability of a PREDICTOR. If the predictor has a variable
2615 probability, return the passed PROBABILITY. */
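/* For illustration: get_predictor_value (PRED_BUILTIN_EXPECT, 9000)
returns 9000, while a fixed-outcome predictor such as PRED_LOOP_EXIT
must be passed -1 and returns its hitrate from predict.def. */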
2616
2617 static HOST_WIDE_INT
2618 get_predictor_value (br_predictor predictor, HOST_WIDE_INT probability)
2619 {
2620 switch (predictor)
2621 {
2622 case PRED_BUILTIN_EXPECT:
2623 case PRED_BUILTIN_EXPECT_WITH_PROBABILITY:
2624 gcc_assert (probability != -1);
2625 return probability;
2626 default:
2627 gcc_assert (probability == -1);
2628 return predictor_info[(int) predictor].hitrate;
2629 }
2630 }
2631
2632 /* Predict using the opcode of the last statement in the basic block. */
2633 static void
2634 tree_predict_by_opcode (basic_block bb)
2635 {
2636 gimple *stmt = last_stmt (bb);
2637 edge then_edge;
2638 tree op0, op1;
2639 tree type;
2640 tree val;
2641 enum tree_code cmp;
2642 edge_iterator ei;
2643 enum br_predictor predictor;
2644 HOST_WIDE_INT probability;
2645
2646 if (!stmt)
2647 return;
2648
2649 if (gswitch *sw = dyn_cast <gswitch *> (stmt))
2650 {
2651 tree index = gimple_switch_index (sw);
2652 tree val = expr_expected_value (index, auto_bitmap (),
2653 &predictor, &probability);
2654 if (val && TREE_CODE (val) == INTEGER_CST)
2655 {
2656 edge e = find_taken_edge_switch_expr (sw, val);
2657 if (predictor == PRED_BUILTIN_EXPECT)
2658 {
2659 int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
2660 gcc_assert (percent >= 0 && percent <= 100);
2661 predict_edge (e, PRED_BUILTIN_EXPECT,
2662 HITRATE (percent));
2663 }
2664 else
2665 predict_edge_def (e, predictor, TAKEN);
2666 }
2667 }
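/* For illustration: given "switch (__builtin_expect (x, 3))", the
expected value 3 is recovered above and the edge to the matching case
label is predicted taken with the __builtin_expect probability
(90% by default). */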
2668
2669 if (gimple_code (stmt) != GIMPLE_COND)
2670 return;
2671 FOR_EACH_EDGE (then_edge, ei, bb->succs)
2672 if (then_edge->flags & EDGE_TRUE_VALUE)
2673 break;
2674 op0 = gimple_cond_lhs (stmt);
2675 op1 = gimple_cond_rhs (stmt);
2676 cmp = gimple_cond_code (stmt);
2677 type = TREE_TYPE (op0);
2678 val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, auto_bitmap (),
2679 &predictor, &probability);
2680 if (val && TREE_CODE (val) == INTEGER_CST)
2681 {
2682 HOST_WIDE_INT prob = get_predictor_value (predictor, probability);
2683 if (integer_zerop (val))
2684 prob = REG_BR_PROB_BASE - prob;
2685 predict_edge (then_edge, predictor, prob);
2686 }
2687 /* Try "pointer heuristic."
2688 A comparison ptr == 0 is predicted as false.
2689 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
2690 if (POINTER_TYPE_P (type))
2691 {
2692 if (cmp == EQ_EXPR)
2693 predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
2694 else if (cmp == NE_EXPR)
2695 predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
2696 }
2697 else
2698
2699 /* Try "opcode heuristic."
2700 EQ tests are usually false and NE tests are usually true. Also,
2701 most quantities are positive, so we can make the appropriate guesses
2702 about signed comparisons against zero. */
2703 switch (cmp)
2704 {
2705 case EQ_EXPR:
2706 case UNEQ_EXPR:
2707 /* Floating point comparisons appear to behave in a very
2708 unpredictable way because of the special role of = tests in
2709 FP code. */
2710 if (FLOAT_TYPE_P (type))
2711 ;
2712 /* Comparisons with 0 are often used for booleans and there is
2713 nothing useful to predict about them. */
2714 else if (integer_zerop (op0) || integer_zerop (op1))
2715 ;
2716 else
2717 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
2718 break;
2719
2720 case NE_EXPR:
2721 case LTGT_EXPR:
2722 /* Floating point comparisons appear to behave in a very
2723 unpredictable way because of the special role of = tests in
2724 FP code. */
2725 if (FLOAT_TYPE_P (type))
2726 ;
2727 /* Comparisons with 0 are often used for booleans and there is
2728 nothing useful to predict about them. */
2729 else if (integer_zerop (op0)
2730 || integer_zerop (op1))
2731 ;
2732 else
2733 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
2734 break;
2735
2736 case ORDERED_EXPR:
2737 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
2738 break;
2739
2740 case UNORDERED_EXPR:
2741 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
2742 break;
2743
2744 case LE_EXPR:
2745 case LT_EXPR:
2746 if (integer_zerop (op1)
2747 || integer_onep (op1)
2748 || integer_all_onesp (op1)
2749 || real_zerop (op1)
2750 || real_onep (op1)
2751 || real_minus_onep (op1))
2752 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
2753 break;
2754
2755 case GE_EXPR:
2756 case GT_EXPR:
2757 if (integer_zerop (op1)
2758 || integer_onep (op1)
2759 || integer_all_onesp (op1)
2760 || real_zerop (op1)
2761 || real_onep (op1)
2762 || real_minus_onep (op1))
2763 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
2764 break;
2765
2766 default:
2767 break;
2768 }
2769 }
2770
2771 /* Returns TRUE if STMT is an exit(0)-like statement. */
2772
2773 static bool
2774 is_exit_with_zero_arg (const gimple *stmt)
2775 {
2776 /* This is not exit, _exit or _Exit. */
2777 if (!gimple_call_builtin_p (stmt, BUILT_IN_EXIT)
2778 && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT)
2779 && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT2))
2780 return false;
2781
2782 /* The argument is an integer zero. */
2783 return integer_zerop (gimple_call_arg (stmt, 0));
2784 }
2785
2786 /* Try to guess whether the return value means an error code. */
2787
2788 static enum br_predictor
2789 return_prediction (tree val, enum prediction *prediction)
2790 {
2791 /* VOID. */
2792 if (!val)
2793 return PRED_NO_PREDICTION;
2794 /* Different heuristics for pointers and scalars. */
2795 if (POINTER_TYPE_P (TREE_TYPE (val)))
2796 {
2797 /* NULL is usually not returned. */
2798 if (integer_zerop (val))
2799 {
2800 *prediction = NOT_TAKEN;
2801 return PRED_NULL_RETURN;
2802 }
2803 }
2804 else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
2805 {
2806 /* Negative return values are often used to indicate
2807 errors. */
2808 if (TREE_CODE (val) == INTEGER_CST
2809 && tree_int_cst_sgn (val) < 0)
2810 {
2811 *prediction = NOT_TAKEN;
2812 return PRED_NEGATIVE_RETURN;
2813 }
2814 /* Constant return values seem to be commonly taken.
2815 Zero/one often represent booleans, so exclude them from the
2816 heuristics. */
2817 if (TREE_CONSTANT (val)
2818 && (!integer_zerop (val) && !integer_onep (val)))
2819 {
2820 *prediction = NOT_TAKEN;
2821 return PRED_CONST_RETURN;
2822 }
2823 }
2824 return PRED_NO_PREDICTION;
2825 }
2826
2827 /* Return zero if phi result could have values other than -1, 0 or 1,
2828 otherwise return a bitmask, with bits 0, 1 and 2 set if -1, 0 and 1
2829 values are used or likely. */
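/* For illustration: a PHI merging the constant -1 with an SSA name
defined by a comparison returns 1 | 2 | 4 == 7, since a comparison
result is known to be 0 or 1. */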
2830
2831 static int
2832 zero_one_minusone (gphi *phi, int limit)
2833 {
2834 int phi_num_args = gimple_phi_num_args (phi);
2835 int ret = 0;
2836 for (int i = 0; i < phi_num_args; i++)
2837 {
2838 tree t = PHI_ARG_DEF (phi, i);
2839 if (TREE_CODE (t) != INTEGER_CST)
2840 continue;
2841 wide_int w = wi::to_wide (t);
2842 if (w == -1)
2843 ret |= 1;
2844 else if (w == 0)
2845 ret |= 2;
2846 else if (w == 1)
2847 ret |= 4;
2848 else
2849 return 0;
2850 }
2851 for (int i = 0; i < phi_num_args; i++)
2852 {
2853 tree t = PHI_ARG_DEF (phi, i);
2854 if (TREE_CODE (t) == INTEGER_CST)
2855 continue;
2856 if (TREE_CODE (t) != SSA_NAME)
2857 return 0;
2858 gimple *g = SSA_NAME_DEF_STMT (t);
2859 if (gimple_code (g) == GIMPLE_PHI && limit > 0)
2860 if (int r = zero_one_minusone (as_a <gphi *> (g), limit - 1))
2861 {
2862 ret |= r;
2863 continue;
2864 }
2865 if (!is_gimple_assign (g))
2866 return 0;
2867 if (gimple_assign_cast_p (g))
2868 {
2869 tree rhs1 = gimple_assign_rhs1 (g);
2870 if (TREE_CODE (rhs1) != SSA_NAME
2871 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2872 || TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
2873 || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2874 return 0;
2875 ret |= (2 | 4);
2876 continue;
2877 }
2878 if (TREE_CODE_CLASS (gimple_assign_rhs_code (g)) != tcc_comparison)
2879 return 0;
2880 ret |= (2 | 4);
2881 }
2882 return ret;
2883 }
2884
2885 /* Find the basic block with the return expression and look for a possible
2886 return value, trying to apply the RETURN_PREDICTION heuristics. */
2887 static void
2888 apply_return_prediction (void)
2889 {
2890 greturn *return_stmt = NULL;
2891 tree return_val;
2892 edge e;
2893 gphi *phi;
2894 int phi_num_args, i;
2895 enum br_predictor pred;
2896 enum prediction direction;
2897 edge_iterator ei;
2898
2899 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2900 {
2901 gimple *last = last_stmt (e->src);
2902 if (last
2903 && gimple_code (last) == GIMPLE_RETURN)
2904 {
2905 return_stmt = as_a <greturn *> (last);
2906 break;
2907 }
2908 }
2909 if (!e)
2910 return;
2911 return_val = gimple_return_retval (return_stmt);
2912 if (!return_val)
2913 return;
2914 if (TREE_CODE (return_val) != SSA_NAME
2915 || !SSA_NAME_DEF_STMT (return_val)
2916 || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
2917 return;
2918 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (return_val));
2919 phi_num_args = gimple_phi_num_args (phi);
2920 pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
2921
2922 /* Avoid the case where the function returns -1, 0 and 1 values and
2923 nothing else. Those could be qsort etc. comparison functions
2924 where the negative return isn't less probable than positive.
2925 For this, require that the function returns both -1 and 1, or -1
2926 and a boolean value or comparison result, so that functions
2927 returning just -1 and 0 are treated as if -1 represents an error value. */
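/* For illustration: a qsort-style comparator such as
"return a < b ? -1 : a > b;" can return -1, 0 and 1, so the check
below suppresses the return-value prediction for it. */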
2928 if (INTEGRAL_TYPE_P (TREE_TYPE (return_val))
2929 && !TYPE_UNSIGNED (TREE_TYPE (return_val))
2930 && TYPE_PRECISION (TREE_TYPE (return_val)) > 1)
2931 if (int r = zero_one_minusone (phi, 3))
2932 if ((r & (1 | 4)) == (1 | 4))
2933 return;
2934
2935 /* Avoid the degenerate case where all return values from the function
2936 belong to the same category (i.e. they are all positive constants),
2937 in which case we can hardly say anything about them. */
2938 for (i = 1; i < phi_num_args; i++)
2939 if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
2940 break;
2941 if (i != phi_num_args)
2942 for (i = 0; i < phi_num_args; i++)
2943 {
2944 pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
2945 if (pred != PRED_NO_PREDICTION)
2946 predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
2947 direction);
2948 }
2949 }
2950
2951 /* Look for basic blocks that contain unlikely-to-happen events
2952 (such as noreturn calls) and mark all paths leading to execution
2953 of these basic blocks as unlikely. */
2954
2955 static void
2956 tree_bb_level_predictions (void)
2957 {
2958 basic_block bb;
2959 bool has_return_edges = false;
2960 edge e;
2961 edge_iterator ei;
2962
2963 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2964 if (!unlikely_executed_edge_p (e) && !(e->flags & EDGE_ABNORMAL_CALL))
2965 {
2966 has_return_edges = true;
2967 break;
2968 }
2969
2970 apply_return_prediction ();
2971
2972 FOR_EACH_BB_FN (bb, cfun)
2973 {
2974 gimple_stmt_iterator gsi;
2975
2976 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2977 {
2978 gimple *stmt = gsi_stmt (gsi);
2979 tree decl;
2980
2981 if (is_gimple_call (stmt))
2982 {
2983 if (gimple_call_noreturn_p (stmt)
2984 && has_return_edges
2985 && !is_exit_with_zero_arg (stmt))
2986 predict_paths_leading_to (bb, PRED_NORETURN,
2987 NOT_TAKEN);
2988 decl = gimple_call_fndecl (stmt);
2989 if (decl
2990 && lookup_attribute ("cold",
2991 DECL_ATTRIBUTES (decl)))
2992 predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
2993 NOT_TAKEN);
2994 if (decl && recursive_call_p (current_function_decl, decl))
2995 predict_paths_leading_to (bb, PRED_RECURSIVE_CALL,
2996 NOT_TAKEN);
2997 }
2998 else if (gimple_code (stmt) == GIMPLE_PREDICT)
2999 {
3000 predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
3001 gimple_predict_outcome (stmt));
3002 /* Keep GIMPLE_PREDICT around so early inlining will propagate
3003 hints to callers. */
3004 }
3005 }
3006 }
3007 }
3008
3009 /* Callback for hash_map::traverse, asserts that the pointer map is
3010 empty. */
3011
3012 bool
3013 assert_is_empty (const_basic_block const &, edge_prediction *const &value,
3014 void *)
3015 {
3016 gcc_assert (!value);
3017 return false;
3018 }
3019
3020 /* Predict branch probabilities and estimate profile for basic block BB.
3021 When LOCAL_ONLY is set do not use any global properties of CFG. */
3022
3023 static void
3024 tree_estimate_probability_bb (basic_block bb, bool local_only)
3025 {
3026 edge e;
3027 edge_iterator ei;
3028
3029 FOR_EACH_EDGE (e, ei, bb->succs)
3030 {
3031 /* Look for a block we are guarding (i.e. we dominate it,
3032 but it doesn't postdominate us). */
3033 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
3034 && !local_only
3035 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
3036 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
3037 {
3038 gimple_stmt_iterator bi;
3039
3040 /* The call heuristic claims that a guarded function call
3041 is improbable. This is because such calls are often used
3042 to signal exceptional situations such as printing error
3043 messages. */
3044 for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
3045 gsi_next (&bi))
3046 {
3047 gimple *stmt = gsi_stmt (bi);
3048 if (is_gimple_call (stmt)
3049 && !gimple_inexpensive_call_p (as_a <gcall *> (stmt))
3050 /* Constant and pure calls are hardly used to signal
3051 something exceptional. */
3052 && gimple_has_side_effects (stmt))
3053 {
3054 if (gimple_call_fndecl (stmt))
3055 predict_edge_def (e, PRED_CALL, NOT_TAKEN);
3056 else if (virtual_method_call_p (gimple_call_fn (stmt)))
3057 predict_edge_def (e, PRED_POLYMORPHIC_CALL, NOT_TAKEN);
3058 else
3059 predict_edge_def (e, PRED_INDIR_CALL, TAKEN);
3060 break;
3061 }
3062 }
3063 }
3064 }
3065 tree_predict_by_opcode (bb);
3066 }
3067
3068 /* Predict branch probabilities and estimate profile of the tree CFG.
3069 This function can be called from the loop optimizers to recompute
3070 the profile information.
3071 If DRY_RUN is set, do not modify CFG and only produce dump files. */
3072
3073 void
3074 tree_estimate_probability (bool dry_run)
3075 {
3076 basic_block bb;
3077
3078 add_noreturn_fake_exit_edges ();
3079 connect_infinite_loops_to_exit ();
3080 /* We use loop_niter_by_eval, which requires that the loops have
3081 preheaders. */
3082 create_preheaders (CP_SIMPLE_PREHEADERS);
3083 calculate_dominance_info (CDI_POST_DOMINATORS);
3084 /* Decide which edges are known to be unlikely. This improves later
3085 branch prediction. */
3086 determine_unlikely_bbs ();
3087
3088 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
3089 tree_bb_level_predictions ();
3090 record_loop_exits ();
3091
3092 if (number_of_loops (cfun) > 1)
3093 predict_loops ();
3094
3095 FOR_EACH_BB_FN (bb, cfun)
3096 tree_estimate_probability_bb (bb, false);
3097
3098 FOR_EACH_BB_FN (bb, cfun)
3099 combine_predictions_for_bb (bb, dry_run);
3100
3101 if (flag_checking)
3102 bb_predictions->traverse<void *, assert_is_empty> (NULL);
3103
3104 delete bb_predictions;
3105 bb_predictions = NULL;
3106
3107 if (!dry_run)
3108 estimate_bb_frequencies (false);
3109 free_dominance_info (CDI_POST_DOMINATORS);
3110 remove_fake_exit_edges ();
3111 }
3112
3113 /* Set edge->probability for each successor edge of BB. */
3114 void
3115 tree_guess_outgoing_edge_probabilities (basic_block bb)
3116 {
3117 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
3118 tree_estimate_probability_bb (bb, true);
3119 combine_predictions_for_bb (bb, false);
3120 if (flag_checking)
3121 bb_predictions->traverse<void *, assert_is_empty> (NULL);
3122 delete bb_predictions;
3123 bb_predictions = NULL;
3124 }
3125 \f
3126 /* Predict the edges into CUR whose sources are not postdominated by
3127 BB using predictor PRED, and recurse to all postdominators. */
3128
3129 static void
3130 predict_paths_for_bb (basic_block cur, basic_block bb,
3131 enum br_predictor pred,
3132 enum prediction taken,
3133 bitmap visited, class loop *in_loop = NULL)
3134 {
3135 edge e;
3136 edge_iterator ei;
3137 basic_block son;
3138
3139 /* If we exited the loop or CUR is unconditional in the loop, there is
3140 nothing to do. */
3141 if (in_loop
3142 && (!flow_bb_inside_loop_p (in_loop, cur)
3143 || dominated_by_p (CDI_DOMINATORS, in_loop->latch, cur)))
3144 return;
3145
3146 /* We are looking for all edges forming an edge cut induced by the
3147 set of all blocks postdominated by BB. */
3148 FOR_EACH_EDGE (e, ei, cur->preds)
3149 if (e->src->index >= NUM_FIXED_BLOCKS
3150 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
3151 {
3152 edge e2;
3153 edge_iterator ei2;
3154 bool found = false;
3155
3156 /* Ignore fake and EH edges; we predict them as not taken anyway. */
3157 if (unlikely_executed_edge_p (e))
3158 continue;
3159 gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));
3160
3161 /* See if there is an edge from e->src that is not abnormal
3162 and does not lead to BB and does not exit the loop. */
3163 FOR_EACH_EDGE (e2, ei2, e->src->succs)
3164 if (e2 != e
3165 && !unlikely_executed_edge_p (e2)
3166 && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb)
3167 && (!in_loop || !loop_exit_edge_p (in_loop, e2)))
3168 {
3169 found = true;
3170 break;
3171 }
3172
3173 /* If there is a non-abnormal path leaving e->src, predict the edge
3174 using the predictor. Otherwise we need to look for paths
3175 leading to e->src.
3176 
3177 The second case may lead to an infinite loop when we are predicting
3178 regions that are only reachable by abnormal edges. We simply
3179 prevent visiting a given BB twice. */
3180 if (found)
3181 {
3182 if (!edge_predicted_by_p (e, pred, taken))
3183 predict_edge_def (e, pred, taken);
3184 }
3185 else if (bitmap_set_bit (visited, e->src->index))
3186 predict_paths_for_bb (e->src, e->src, pred, taken, visited, in_loop);
3187 }
3188 for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
3189 son;
3190 son = next_dom_son (CDI_POST_DOMINATORS, son))
3191 predict_paths_for_bb (son, bb, pred, taken, visited, in_loop);
3192 }
3193
3194 /* Set branch probabilities on all paths leading to BB according to
3195 predictor PRED and prediction TAKEN. */
3196
3197 static void
3198 predict_paths_leading_to (basic_block bb, enum br_predictor pred,
3199 enum prediction taken, class loop *in_loop)
3200 {
3201 predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
3202 }
3203
3204 /* Like predict_paths_leading_to but take edge instead of basic block. */
3205
3206 static void
3207 predict_paths_leading_to_edge (edge e, enum br_predictor pred,
3208 enum prediction taken, class loop *in_loop)
3209 {
3210 bool has_nonloop_edge = false;
3211 edge_iterator ei;
3212 edge e2;
3213
3214 basic_block bb = e->src;
3215 FOR_EACH_EDGE (e2, ei, bb->succs)
3216 if (e2->dest != e->src && e2->dest != e->dest
3217 && !unlikely_executed_edge_p (e2)
3218 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
3219 {
3220 has_nonloop_edge = true;
3221 break;
3222 }
3223 if (!has_nonloop_edge)
3224 {
3225 predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
3226 }
3227 else
3228 predict_edge_def (e, pred, taken);
3229 }
3230 \f
3231 /* This is used to carry information about basic blocks. It is
3232 attached to the AUX field of the standard CFG block. */
3233
3234 class block_info
3235 {
3236 public:
3237 /* Estimated frequency of execution of basic_block. */
3238 sreal frequency;
3239
3240 /* To keep queue of basic blocks to process. */
3241 basic_block next;
3242
3243 /* Number of predecessors we need to visit first. */
3244 int npredecessors;
3245 };
3246
3247 /* Similar information for edges. */
3248 class edge_prob_info
3249 {
3250 public:
3251 /* In case the edge is a loopback edge, the probability that the edge will
3252 be reached provided that the header is. The estimated number of iterations
3253 of the loop can then be computed as 1 / (1 - back_edge_prob). */
3254 sreal back_edge_prob;
3255 /* True if the edge is a loopback edge in the natural loop. */
3256 unsigned int back_edge:1;
3257 };
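/* For illustration: a back edge taken with probability 0.9 gives
back_edge_prob == 0.9 and an estimated 1 / (1 - 0.9) == 10 iterations
per entry into the loop. */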
3258
3259 #define BLOCK_INFO(B) ((block_info *) (B)->aux)
3260 #undef EDGE_INFO
3261 #define EDGE_INFO(E) ((edge_prob_info *) (E)->aux)
3262
3263 /* Helper function for estimate_bb_frequencies.
3264 Propagate the frequencies in blocks marked in
3265 TOVISIT, starting in HEAD. */
3266
3267 static void
3268 propagate_freq (basic_block head, bitmap tovisit)
3269 {
3270 basic_block bb;
3271 basic_block last;
3272 unsigned i;
3273 edge e;
3274 basic_block nextbb;
3275 bitmap_iterator bi;
3276
3277 /* For each basic block we need to visit, count the number of its
3278 predecessors that we need to visit first. */
3279 EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
3280 {
3281 edge_iterator ei;
3282 int count = 0;
3283
3284 bb = BASIC_BLOCK_FOR_FN (cfun, i);
3285
3286 FOR_EACH_EDGE (e, ei, bb->preds)
3287 {
3288 bool visit = bitmap_bit_p (tovisit, e->src->index);
3289
3290 if (visit && !(e->flags & EDGE_DFS_BACK))
3291 count++;
3292 else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
3293 fprintf (dump_file,
3294 "Irreducible region hit, ignoring edge to %i->%i\n",
3295 e->src->index, bb->index);
3296 }
3297 BLOCK_INFO (bb)->npredecessors = count;
3298 /* When the function never returns, we will never process the exit block. */
3299 if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
3300 bb->count = profile_count::zero ();
3301 }
3302
3303 BLOCK_INFO (head)->frequency = 1;
3304 last = head;
3305 for (bb = head; bb; bb = nextbb)
3306 {
3307 edge_iterator ei;
3308 sreal cyclic_probability = 0;
3309 sreal frequency = 0;
3310
3311 nextbb = BLOCK_INFO (bb)->next;
3312 BLOCK_INFO (bb)->next = NULL;
3313
3314 /* Compute frequency of basic block. */
3315 if (bb != head)
3316 {
3317 if (flag_checking)
3318 FOR_EACH_EDGE (e, ei, bb->preds)
3319 gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
3320 || (e->flags & EDGE_DFS_BACK));
3321
3322 FOR_EACH_EDGE (e, ei, bb->preds)
3323 if (EDGE_INFO (e)->back_edge)
3324 {
3325 cyclic_probability += EDGE_INFO (e)->back_edge_prob;
3326 }
3327 else if (!(e->flags & EDGE_DFS_BACK))
3328 {
3329 /* frequency += (e->probability
3330 * BLOCK_INFO (e->src)->frequency /
3331 REG_BR_PROB_BASE); */
3332
3333 /* FIXME: Graphite is producing edges with no profile. Once
3334 this is fixed, drop this. */
3335 sreal tmp = e->probability.initialized_p () ?
3336 e->probability.to_reg_br_prob_base () : 0;
3337 tmp *= BLOCK_INFO (e->src)->frequency;
3338 tmp *= real_inv_br_prob_base;
3339 frequency += tmp;
3340 }
3341
3342 if (cyclic_probability == 0)
3343 {
3344 BLOCK_INFO (bb)->frequency = frequency;
3345 }
3346 else
3347 {
3348 if (cyclic_probability > real_almost_one)
3349 cyclic_probability = real_almost_one;
3350
3351 /* BLOCK_INFO (bb)->frequency = frequency
3352 / (1 - cyclic_probability) */
3353
3354 cyclic_probability = sreal (1) - cyclic_probability;
3355 BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
3356 }
3357 }
3358
3359 bitmap_clear_bit (tovisit, bb->index);
3360
3361 e = find_edge (bb, head);
3362 if (e)
3363 {
3364 /* EDGE_INFO (e)->back_edge_prob
3365 = ((e->probability * BLOCK_INFO (bb)->frequency)
3366 / REG_BR_PROB_BASE); */
3367
3368 /* FIXME: Graphite is producing edges with no profile. Once
3369 this is fixed, drop this. */
3370 sreal tmp = e->probability.initialized_p () ?
3371 e->probability.to_reg_br_prob_base () : 0;
3372 tmp *= BLOCK_INFO (bb)->frequency;
3373 EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
3374 }
3375
3376 /* Propagate to successor blocks. */
3377 FOR_EACH_EDGE (e, ei, bb->succs)
3378 if (!(e->flags & EDGE_DFS_BACK)
3379 && BLOCK_INFO (e->dest)->npredecessors)
3380 {
3381 BLOCK_INFO (e->dest)->npredecessors--;
3382 if (!BLOCK_INFO (e->dest)->npredecessors)
3383 {
3384 if (!nextbb)
3385 nextbb = e->dest;
3386 else
3387 BLOCK_INFO (last)->next = e->dest;
3388
3389 last = e->dest;
3390 }
3391 }
3392 }
3393 }
3394
3395 /* Estimate frequencies in loops at the same nest level. */
3396
3397 static void
3398 estimate_loops_at_level (class loop *first_loop)
3399 {
3400 class loop *loop;
3401
3402 for (loop = first_loop; loop; loop = loop->next)
3403 {
3404 edge e;
3405 basic_block *bbs;
3406 unsigned i;
3407 auto_bitmap tovisit;
3408
3409 estimate_loops_at_level (loop->inner);
3410
3411 /* Find current loop back edge and mark it. */
3412 e = loop_latch_edge (loop);
3413 EDGE_INFO (e)->back_edge = 1;
3414
3415 bbs = get_loop_body (loop);
3416 for (i = 0; i < loop->num_nodes; i++)
3417 bitmap_set_bit (tovisit, bbs[i]->index);
3418 free (bbs);
3419 propagate_freq (loop->header, tovisit);
3420 }
3421 }
3422
3423 /* Propagates frequencies through the structure of loops. */
3424
3425 static void
3426 estimate_loops (void)
3427 {
3428 auto_bitmap tovisit;
3429 basic_block bb;
3430
3431 /* Start by estimating the frequencies in the loops. */
3432 if (number_of_loops (cfun) > 1)
3433 estimate_loops_at_level (current_loops->tree_root->inner);
3434
3435 /* Now propagate the frequencies through all the blocks. */
3436 FOR_ALL_BB_FN (bb, cfun)
3437 {
3438 bitmap_set_bit (tovisit, bb->index);
3439 }
3440 propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
3441 }
3442
3443 /* Drop the profile for NODE to guessed, and update its frequency based on
3444 whether it is expected to be hot given the CALL_COUNT. */
3445
3446 static void
3447 drop_profile (struct cgraph_node *node, profile_count call_count)
3448 {
3449 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
3450 /* In the case where this was called by another function with a
3451 dropped profile, call_count will be 0. Since there are no
3452 non-zero call counts to this function, we don't know for sure
3453 whether it is hot, and therefore it will be marked normal below. */
3454 bool hot = maybe_hot_count_p (NULL, call_count);
3455
3456 if (dump_file)
3457 fprintf (dump_file,
3458 "Dropping 0 profile for %s. %s based on calls.\n",
3459 node->dump_name (),
3460 hot ? "Function is hot" : "Function is normal");
3461 /* We only expect to miss profiles for functions that are reached
3462 via non-zero call edges in cases where the function may have
3463 been linked from another module or library (COMDATs and extern
3464 templates). See the comments below for handle_missing_profiles.
3465 Also, only warn in cases where the missing counts exceed the
3466 number of training runs. In certain cases with an execv followed
3467 by a no-return call the profile for the no-return call is not
3468 dumped and there can be a mismatch. */
3469 if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
3470 && call_count > profile_info->runs)
3471 {
3472 if (flag_profile_correction)
3473 {
3474 if (dump_file)
3475 fprintf (dump_file,
3476 "Missing counts for called function %s\n",
3477 node->dump_name ());
3478 }
3479 else
3480 warning (0, "Missing counts for called function %s",
3481 node->dump_name ());
3482 }
3483
3484 basic_block bb;
3485 if (opt_for_fn (node->decl, flag_guess_branch_prob))
3486 {
3487 bool clear_zeros
3488 = !ENTRY_BLOCK_PTR_FOR_FN (fn)->count.nonzero_p ();
3489 FOR_ALL_BB_FN (bb, fn)
3490 if (clear_zeros || !(bb->count == profile_count::zero ()))
3491 bb->count = bb->count.guessed_local ();
3492 fn->cfg->count_max = fn->cfg->count_max.guessed_local ();
3493 }
3494 else
3495 {
3496 FOR_ALL_BB_FN (bb, fn)
3497 bb->count = profile_count::uninitialized ();
3498 fn->cfg->count_max = profile_count::uninitialized ();
3499 }
3500
3501 struct cgraph_edge *e;
3502 for (e = node->callees; e; e = e->next_callee)
3503 e->count = gimple_bb (e->call_stmt)->count;
3504 for (e = node->indirect_calls; e; e = e->next_callee)
3505 e->count = gimple_bb (e->call_stmt)->count;
3506 node->count = ENTRY_BLOCK_PTR_FOR_FN (fn)->count;
3507
3508 profile_status_for_fn (fn)
3509 = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
3510 node->frequency
3511 = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
3512 }
3513
3514 /* In the case of COMDAT routines, multiple object files will contain the same
3515 function and the linker will select one for the binary. In that case
3516 all the other copies from the profile-instrumented binary will be missing
3517 profile counts. Look for cases where this happened, due to non-zero
3518 call counts going to 0-count functions, and drop the profile to guessed
3519 so that we can use the estimated probabilities and avoid optimizing only
3520 for size.
3521
3522 The other case where the profile may be missing is when the routine
3523 is not going to be emitted to the object file, e.g. for "extern template"
3524 class methods. Those will be marked DECL_EXTERNAL. Emit a warning in
3525 all other cases of non-zero calls to 0-count functions. */
3526
3527 void
3528 handle_missing_profiles (void)
3529 {
3530 const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
3531 struct cgraph_node *node;
3532 auto_vec<struct cgraph_node *, 64> worklist;
3533
3534 /* See if a 0-count function has non-0-count callers. In this case we
3535 lost some profile. Drop its function profile to PROFILE_GUESSED. */
3536 FOR_EACH_DEFINED_FUNCTION (node)
3537 {
3538 struct cgraph_edge *e;
3539 profile_count call_count = profile_count::zero ();
3540 gcov_type max_tp_first_run = 0;
3541 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
3542
3543 if (node->count.ipa ().nonzero_p ())
3544 continue;
3545 for (e = node->callers; e; e = e->next_caller)
3546 if (e->count.ipa ().initialized_p () && e->count.ipa () > 0)
3547 {
3548 call_count = call_count + e->count.ipa ();
3549
3550 if (e->caller->tp_first_run > max_tp_first_run)
3551 max_tp_first_run = e->caller->tp_first_run;
3552 }
3553
3554 /* If the time profile is missing, assign the maximum that comes from
3555 the caller functions. */
3556 if (!node->tp_first_run && max_tp_first_run)
3557 node->tp_first_run = max_tp_first_run + 1;
3558
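/* Illustrative note (assuming the default UNLIKELY_BB_COUNT_FRACTION
of 20): the check below drops the profile only when the function was
called at least once per 20 train runs. */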
3559 if (call_count > 0
3560 && fn && fn->cfg
3561 && call_count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
3562 {
3563 drop_profile (node, call_count);
3564 worklist.safe_push (node);
3565 }
3566 }
3567
3568 /* Propagate the profile dropping to other 0-count COMDATs that are
3569 potentially called by COMDATs we already dropped the profile on. */
3570 while (worklist.length () > 0)
3571 {
3572 struct cgraph_edge *e;
3573
3574 node = worklist.pop ();
3575 for (e = node->callees; e; e = e->next_callee)
3576 {
3577 struct cgraph_node *callee = e->callee;
3578 struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);
3579
3580 if (!(e->count.ipa () == profile_count::zero ())
3581 && callee->count.ipa ().nonzero_p ())
3582 continue;
3583 if ((DECL_COMDAT (callee->decl) || DECL_EXTERNAL (callee->decl))
3584 && fn && fn->cfg
3585 && profile_status_for_fn (fn) == PROFILE_READ)
3586 {
3587 drop_profile (node, profile_count::zero ());
3588 worklist.safe_push (callee);
3589 }
3590 }
3591 }
3592 }
3593
3594 /* Record the maximal count of the basic blocks in CFUN->CFG->COUNT_MAX.
3595 Return true iff there was any nonzero IPA execution count. */
3596
3597 bool
3598 update_max_bb_count (void)
3599 {
3600 profile_count true_count_max = profile_count::uninitialized ();
3601 basic_block bb;
3602
3603 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3604 true_count_max = true_count_max.max (bb->count);
3605
3606 cfun->cfg->count_max = true_count_max;
3607
3608 return true_count_max.ipa ().nonzero_p ();
3609 }
3610
3611 /* Return true if the function is likely to be expensive, so there is no
3612 point in optimizing the prologue or epilogue or doing inlining at the
3613 expense of code size growth. THRESHOLD is the limit on the average number
3614 of instructions the function can execute while still being considered not expensive. */
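/* For illustration: with THRESHOLD == 100 and an entry block count of 50,
LIMIT below is 5000; the function is deemed expensive once the sum of
the counts of its active insns exceeds that, i.e. once it averages more
than 100 executed instructions per invocation. */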
3615
3616 bool
3617 expensive_function_p (int threshold)
3618 {
3619 basic_block bb;
3620
3621 /* If the profile was scaled in a way that the entry block has count 0,
3622 then the function is definitely taking a lot of time. */
3623 if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.nonzero_p ())
3624 return true;
3625
3626 profile_count limit = ENTRY_BLOCK_PTR_FOR_FN
3627 (cfun)->count.apply_scale (threshold, 1);
3628 profile_count sum = profile_count::zero ();
3629 FOR_EACH_BB_FN (bb, cfun)
3630 {
3631 rtx_insn *insn;
3632
3633 if (!bb->count.initialized_p ())
3634 {
3635 if (dump_file)
3636 fprintf (dump_file, "Function is considered expensive because"
3637 " count of bb %i is not initialized\n", bb->index);
3638 return true;
3639 }
3640
3641 FOR_BB_INSNS (bb, insn)
3642 if (active_insn_p (insn))
3643 {
3644 sum += bb->count;
3645 if (sum > limit)
3646 return true;
3647 }
3648 }
3649
3650 return false;
3651 }
3652
3653 /* All basic blocks that are reachable only from unlikely basic blocks are
3654 unlikely. */
3655
3656 void
3657 propagate_unlikely_bbs_forward (void)
3658 {
3659 auto_vec<basic_block, 64> worklist;
3660 basic_block bb;
3661 edge_iterator ei;
3662 edge e;
3663
3664 if (!(ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ()))
3665 {
3666 ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = (void *)(size_t) 1;
3667 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
3668
3669 while (worklist.length () > 0)
3670 {
3671 bb = worklist.pop ();
3672 FOR_EACH_EDGE (e, ei, bb->succs)
3673 if (!(e->count () == profile_count::zero ())
3674 && !(e->dest->count == profile_count::zero ())
3675 && !e->dest->aux)
3676 {
3677 e->dest->aux = (void *)(size_t) 1;
3678 worklist.safe_push (e->dest);
3679 }
3680 }
3681 }
3682
3683 FOR_ALL_BB_FN (bb, cfun)
3684 {
3685 if (!bb->aux)
3686 {
3687 if (!(bb->count == profile_count::zero ())
3688 && (dump_file && (dump_flags & TDF_DETAILS)))
3689 fprintf (dump_file,
3690 "Basic block %i is marked unlikely by forward prop\n",
3691 bb->index);
3692 bb->count = profile_count::zero ();
3693 }
3694 else
3695 bb->aux = NULL;
3696 }
3697 }
3698
3699 /* Determine basic blocks/edges that are known to be unlikely executed and set
3700 their counters to zero.
3701 This is done by first identifying obviously unlikely BBs/edges and then
3702 propagating in both directions. */
3703
3704 static void
3705 determine_unlikely_bbs ()
3706 {
3707 basic_block bb;
3708 auto_vec<basic_block, 64> worklist;
3709 edge_iterator ei;
3710 edge e;
3711
3712 FOR_EACH_BB_FN (bb, cfun)
3713 {
3714 if (!(bb->count == profile_count::zero ())
3715 && unlikely_executed_bb_p (bb))
3716 {
3717 if (dump_file && (dump_flags & TDF_DETAILS))
3718 fprintf (dump_file, "Basic block %i is locally unlikely\n",
3719 bb->index);
3720 bb->count = profile_count::zero ();
3721 }
3722
3723 FOR_EACH_EDGE (e, ei, bb->succs)
3724 if (!(e->probability == profile_probability::never ())
3725 && unlikely_executed_edge_p (e))
3726 {
3727 if (dump_file && (dump_flags & TDF_DETAILS))
3728 fprintf (dump_file, "Edge %i->%i is locally unlikely\n",
3729 bb->index, e->dest->index);
3730 e->probability = profile_probability::never ();
3731 }
3732
3733 gcc_checking_assert (!bb->aux);
3734 }
3735 propagate_unlikely_bbs_forward ();
3736
3737 auto_vec<int, 64> nsuccs;
3738 nsuccs.safe_grow_cleared (last_basic_block_for_fn (cfun));
3739 FOR_ALL_BB_FN (bb, cfun)
3740 if (!(bb->count == profile_count::zero ())
3741 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3742 {
3743 nsuccs[bb->index] = 0;
3744 FOR_EACH_EDGE (e, ei, bb->succs)
3745 if (!(e->probability == profile_probability::never ())
3746 && !(e->dest->count == profile_count::zero ()))
3747 nsuccs[bb->index]++;
3748 if (!nsuccs[bb->index])
3749 worklist.safe_push (bb);
3750 }
3751 while (worklist.length () > 0)
3752 {
3753 bb = worklist.pop ();
3754 if (bb->count == profile_count::zero ())
3755 continue;
3756 if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
3757 {
3758 bool found = false;
3759 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
3760 !gsi_end_p (gsi); gsi_next (&gsi))
3761 if (stmt_can_terminate_bb_p (gsi_stmt (gsi))
3762 /* stmt_can_terminate_bb_p special-cases noreturns because it
3763 assumes that fake edges are created. We want to know that a
3764 noreturn call alone does not imply that the BB is unlikely. */
3765 || (is_gimple_call (gsi_stmt (gsi))
3766 && (gimple_call_flags (gsi_stmt (gsi)) & ECF_NORETURN)))
3767 {
3768 found = true;
3769 break;
3770 }
3771 if (found)
3772 continue;
3773 }
3774 if (dump_file && (dump_flags & TDF_DETAILS))
3775 fprintf (dump_file,
3776 "Basic block %i is marked unlikely by backward prop\n",
3777 bb->index);
3778 bb->count = profile_count::zero ();
3779 FOR_EACH_EDGE (e, ei, bb->preds)
3780 if (!(e->probability == profile_probability::never ()))
3781 {
3782 if (!(e->src->count == profile_count::zero ()))
3783 {
3784 gcc_checking_assert (nsuccs[e->src->index] > 0);
3785 nsuccs[e->src->index]--;
3786 if (!nsuccs[e->src->index])
3787 worklist.safe_push (e->src);
3788 }
3789 }
3790 }
3791 /* Finally, all edges from non-zero count regions into zero count regions are unlikely. */
3792 FOR_ALL_BB_FN (bb, cfun)
3793 {
3794 if (!(bb->count == profile_count::zero ()))
3795 FOR_EACH_EDGE (e, ei, bb->succs)
3796 if (!(e->probability == profile_probability::never ())
3797 && e->dest->count == profile_count::zero ())
3798 {
3799 if (dump_file && (dump_flags & TDF_DETAILS))
3800 fprintf (dump_file, "Edge %i->%i is unlikely because "
3801 "it enters unlikely block\n",
3802 bb->index, e->dest->index);
3803 e->probability = profile_probability::never ();
3804 }
3805
3806 edge other = NULL;
3807
3808 FOR_EACH_EDGE (e, ei, bb->succs)
3809 if (e->probability == profile_probability::never ())
3810 ;
3811 else if (other)
3812 {
3813 other = NULL;
3814 break;
3815 }
3816 else
3817 other = e;
3818 if (other
3819 && !(other->probability == profile_probability::always ()))
3820 {
3821 if (dump_file && (dump_flags & TDF_DETAILS))
3822 fprintf (dump_file, "Edge %i->%i is locally likely\n",
3823 bb->index, other->dest->index);
3824 other->probability = profile_probability::always ();
3825 }
3826 }
3827 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ())
3828 cgraph_node::get (current_function_decl)->count = profile_count::zero ();
3829 }
3830
3831 /* Estimate and propagate basic block frequencies using the given branch
3832 probabilities. If FORCE is true, the frequencies are used to estimate
3833 the counts even when there are already non-zero profile counts. */
3834
3835 void
3836 estimate_bb_frequencies (bool force)
3837 {
3838 basic_block bb;
3839 sreal freq_max;
3840
3841 determine_unlikely_bbs ();
3842
3843 if (force || profile_status_for_fn (cfun) != PROFILE_READ
3844 || !update_max_bb_count ())
3845 {
3846 static int real_values_initialized = 0;
3847
3848 if (!real_values_initialized)
3849 {
3850 real_values_initialized = 1;
3851 real_br_prob_base = REG_BR_PROB_BASE;
3852 /* Scaling frequencies up to the maximal profile count may result in
3853 frequent overflows, especially when inlining loops.
3854 Small scaling results in unnecessary precision loss. Stay in
3855 the middle of the (exponential) range. */
3856 real_bb_freq_max = (uint64_t)1 << (profile_count::n_bits / 2);
3857 real_one_half = sreal (1, -1);
3858 real_inv_br_prob_base = sreal (1) / real_br_prob_base;
3859 real_almost_one = sreal (1) - real_inv_br_prob_base;
3860 }
3861
3862 mark_dfs_back_edges ();
3863
3864 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
3865 profile_probability::always ();
3866
3867 /* Set up block info for each basic block. */
3868 alloc_aux_for_blocks (sizeof (block_info));
3869 alloc_aux_for_edges (sizeof (edge_prob_info));
3870 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3871 {
3872 edge e;
3873 edge_iterator ei;
3874
3875 FOR_EACH_EDGE (e, ei, bb->succs)
3876 {
3877 /* FIXME: Graphite is producing edges with no profile. Once
3878 this is fixed, drop this. */
3879 if (e->probability.initialized_p ())
3880 EDGE_INFO (e)->back_edge_prob
3881 = e->probability.to_reg_br_prob_base ();
3882 else
3883 EDGE_INFO (e)->back_edge_prob = REG_BR_PROB_BASE / 2;
3884 EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
3885 }
3886 }
3887
3888 /* First compute frequencies locally for each loop from innermost
3889 to outermost to examine frequencies for back edges. */
3890 estimate_loops ();
3891
3892 freq_max = 0;
3893 FOR_EACH_BB_FN (bb, cfun)
3894 if (freq_max < BLOCK_INFO (bb)->frequency)
3895 freq_max = BLOCK_INFO (bb)->frequency;
3896
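/* FREQ_MAX now becomes the scale factor mapping the most frequent block
to real_bb_freq_max, clamped from below. */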
3897 freq_max = real_bb_freq_max / freq_max;
3898 if (freq_max < 16)
3899 freq_max = 16;
3900 profile_count ipa_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ();
3901 cfun->cfg->count_max = profile_count::uninitialized ();
3902 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3903 {
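/* Convert the sreal frequency to an integral count, rounding to the
nearest integer by adding one half before truncation. */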
3904 sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
3905 profile_count count = profile_count::from_gcov_type (tmp.to_int ());
3906
3907 /* If we have profile feedback in which this function was never
3908 executed, then preserve this info. */
3909 if (!(bb->count == profile_count::zero ()))
3910 bb->count = count.guessed_local ().combine_with_ipa_count (ipa_count);
3911 cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
3912 }
3913
3914 free_aux_for_blocks ();
3915 free_aux_for_edges ();
3916 }
3917 compute_function_frequency ();
3918 }
3919
3920 /* Decide whether the function is hot, cold or unlikely executed. */
3921 void
3922 compute_function_frequency (void)
3923 {
3924 basic_block bb;
3925 struct cgraph_node *node = cgraph_node::get (current_function_decl);
3926
3927 if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3928 || MAIN_NAME_P (DECL_NAME (current_function_decl)))
3929 node->only_called_at_startup = true;
3930 if (DECL_STATIC_DESTRUCTOR (current_function_decl))
3931 node->only_called_at_exit = true;
3932
3933 if (profile_status_for_fn (cfun) != PROFILE_READ)
3934 {
3935 int flags = flags_from_decl_or_type (current_function_decl);
3936 if ((ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa_p ()
3937 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa () == profile_count::zero ())
3938 || lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
3939 != NULL)
3940 {
3941 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3942 warn_function_cold (current_function_decl);
3943 }
3944 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
3945 != NULL)
3946 node->frequency = NODE_FREQUENCY_HOT;
3947 else if (flags & ECF_NORETURN)
3948 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3949 else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
3950 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3951 else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3952 || DECL_STATIC_DESTRUCTOR (current_function_decl))
3953 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3954 return;
3955 }
3956
3957 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3958 warn_function_cold (current_function_decl);
3959 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa () == profile_count::zero ())
3960 return;
3961 FOR_EACH_BB_FN (bb, cfun)
3962 {
3963 if (maybe_hot_bb_p (cfun, bb))
3964 {
3965 node->frequency = NODE_FREQUENCY_HOT;
3966 return;
3967 }
3968 if (!probably_never_executed_bb_p (cfun, bb))
3969 node->frequency = NODE_FREQUENCY_NORMAL;
3970 }
3971 }
3972
3973 /* Build PREDICT_EXPR. */
3974 tree
3975 build_predict_expr (enum br_predictor predictor, enum prediction taken)
3976 {
3977 tree t = build1 (PREDICT_EXPR, void_type_node,
3978 build_int_cst (integer_type_node, predictor));
3979 SET_PREDICT_EXPR_OUTCOME (t, taken);
3980 return t;
3981 }
3982
3983 const char *
3984 predictor_name (enum br_predictor predictor)
3985 {
3986 return predictor_info[predictor].name;
3987 }
3988
3989 /* Predict branch probabilities and estimate profile of the tree CFG. */
3990
3991 namespace {
3992
3993 const pass_data pass_data_profile =
3994 {
3995 GIMPLE_PASS, /* type */
3996 "profile_estimate", /* name */
3997 OPTGROUP_NONE, /* optinfo_flags */
3998 TV_BRANCH_PROB, /* tv_id */
3999 PROP_cfg, /* properties_required */
4000 0, /* properties_provided */
4001 0, /* properties_destroyed */
4002 0, /* todo_flags_start */
4003 0, /* todo_flags_finish */
4004 };
4005
4006 class pass_profile : public gimple_opt_pass
4007 {
4008 public:
4009 pass_profile (gcc::context *ctxt)
4010 : gimple_opt_pass (pass_data_profile, ctxt)
4011 {}
4012
4013 /* opt_pass methods: */
4014 virtual bool gate (function *) { return flag_guess_branch_prob; }
4015 virtual unsigned int execute (function *);
4016
4017 }; // class pass_profile
4018
4019 unsigned int
4020 pass_profile::execute (function *fun)
4021 {
4022 unsigned nb_loops;
4023
4024 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
4025 return 0;
4026
4027 loop_optimizer_init (LOOPS_NORMAL);
4028 if (dump_file && (dump_flags & TDF_DETAILS))
4029 flow_loops_dump (dump_file, NULL, 0);
4030
4031 mark_irreducible_loops ();
4032
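/* number_of_loops also counts the root loop representing the whole
function, so a value greater than 1 means there is at least one real loop
worth initializing SCEV analysis for. */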
4033 nb_loops = number_of_loops (fun);
4034 if (nb_loops > 1)
4035 scev_initialize ();
4036
4037 tree_estimate_probability (false);
4038
4039 if (nb_loops > 1)
4040 scev_finalize ();
4041
4042 loop_optimizer_finalize ();
4043 if (dump_file && (dump_flags & TDF_DETAILS))
4044 gimple_dump_cfg (dump_file, dump_flags);
4045 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
4046 profile_status_for_fn (fun) = PROFILE_GUESSED;
4047 if (dump_file && (dump_flags & TDF_DETAILS))
4048 {
4049 class loop *loop;
4050 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
4051 if (loop->header->count.initialized_p ())
4052 fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
4053 loop->num,
4054 (int)expected_loop_iterations_unbounded (loop));
4055 }
4056 return 0;
4057 }
4058
4059 } // anon namespace
4060
4061 gimple_opt_pass *
4062 make_pass_profile (gcc::context *ctxt)
4063 {
4064 return new pass_profile (ctxt);
4065 }
4066
4067 /* Return true when predictor PRED should be removed after early
4068 tree passes. Most of the predictors are worth keeping, as early
4069 inlining can also distribute them into callers' bodies. */
4070
4071 static bool
4072 strip_predictor_early (enum br_predictor pred)
4073 {
4074 switch (pred)
4075 {
4076 case PRED_TREE_EARLY_RETURN:
4077 return true;
4078 default:
4079 return false;
4080 }
4081 }
4082
4083 /* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
4084 we no longer need. EARLY is set to true when called from early
4085 optimizations. */
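/* For example, a call "x = __builtin_expect (e, 1);" surviving to this
point is rewritten into the plain assignment "x = e;", since the hint has
already been consumed by branch prediction. */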
4086
4087 unsigned int
4088 strip_predict_hints (function *fun, bool early)
4089 {
4090 basic_block bb;
4091 gimple *ass_stmt;
4092 tree var;
4093 bool changed = false;
4094
4095 FOR_EACH_BB_FN (bb, fun)
4096 {
4097 gimple_stmt_iterator bi;
4098 for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
4099 {
4100 gimple *stmt = gsi_stmt (bi);
4101
4102 if (gimple_code (stmt) == GIMPLE_PREDICT)
4103 {
4104 if (!early
4105 || strip_predictor_early (gimple_predict_predictor (stmt)))
4106 {
4107 gsi_remove (&bi, true);
4108 changed = true;
4109 continue;
4110 }
4111 }
4112 else if (is_gimple_call (stmt))
4113 {
4114 tree fndecl = gimple_call_fndecl (stmt);
4115
4116 if (!early
4117 && ((fndecl != NULL_TREE
4118 && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
4119 && gimple_call_num_args (stmt) == 2)
4120 || (fndecl != NULL_TREE
4121 && fndecl_built_in_p (fndecl,
4122 BUILT_IN_EXPECT_WITH_PROBABILITY)
4123 && gimple_call_num_args (stmt) == 3)
4124 || (gimple_call_internal_p (stmt)
4125 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)))
4126 {
4127 var = gimple_call_lhs (stmt);
4128 changed = true;
4129 if (var)
4130 {
4131 ass_stmt
4132 = gimple_build_assign (var, gimple_call_arg (stmt, 0));
4133 gsi_replace (&bi, ass_stmt, true);
4134 }
4135 else
4136 {
4137 gsi_remove (&bi, true);
4138 continue;
4139 }
4140 }
4141 }
4142 gsi_next (&bi);
4143 }
4144 }
4145 return changed ? TODO_cleanup_cfg : 0;
4146 }
4147
4148 namespace {
4149
4150 const pass_data pass_data_strip_predict_hints =
4151 {
4152 GIMPLE_PASS, /* type */
4153 "*strip_predict_hints", /* name */
4154 OPTGROUP_NONE, /* optinfo_flags */
4155 TV_BRANCH_PROB, /* tv_id */
4156 PROP_cfg, /* properties_required */
4157 0, /* properties_provided */
4158 0, /* properties_destroyed */
4159 0, /* todo_flags_start */
4160 0, /* todo_flags_finish */
4161 };
4162
4163 class pass_strip_predict_hints : public gimple_opt_pass
4164 {
4165 public:
4166 pass_strip_predict_hints (gcc::context *ctxt)
4167 : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
4168 {}
4169
4170 /* opt_pass methods: */
4171 opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
4172 void set_pass_param (unsigned int n, bool param)
4173 {
4174 gcc_assert (n == 0);
4175 early_p = param;
4176 }
4177
4178 virtual unsigned int execute (function *);
4179
4180 private:
4181 bool early_p;
4182
4183 }; // class pass_strip_predict_hints
4184
4185 unsigned int
4186 pass_strip_predict_hints::execute (function *fun)
4187 {
4188 return strip_predict_hints (fun, early_p);
4189 }
4190
4191 } // anon namespace
4192
4193 gimple_opt_pass *
4194 make_pass_strip_predict_hints (gcc::context *ctxt)
4195 {
4196 return new pass_strip_predict_hints (ctxt);
4197 }
4198
4199 /* Rebuild function frequencies. Passes are in general expected to
4200 maintain the profile by hand; however, in some cases this is not possible:
4201 for example, when inlining several functions with loops, frequencies might run
4202 out of scale and thus need to be recomputed. */
4203
4204 void
4205 rebuild_frequencies (void)
4206 {
4207 timevar_push (TV_REBUILD_FREQUENCIES);
4208
4209 /* When the max bb count in the function is small, there is a higher
4210 chance that there were truncation errors in the integer scaling
4211 of counts by inlining and other optimizations. This could lead
4212 to incorrect classification of code as being cold when it isn't.
4213 In that case, force the estimation of bb counts/frequencies from the
4214 branch probabilities, rather than computing frequencies from counts,
4215 which may also lead to frequencies incorrectly reduced to 0. There
4216 is less precision in the probabilities, so we only do this for small
4217 max counts. */
4218 cfun->cfg->count_max = profile_count::uninitialized ();
4219 basic_block bb;
4220 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
4221 cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
4222
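/* With a guessed profile, re-estimate everything from branch
probabilities; fake exit edges are added for noreturn calls and infinite
loops beforehand and removed again afterwards. */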
4223 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
4224 {
4225 loop_optimizer_init (0);
4226 add_noreturn_fake_exit_edges ();
4227 mark_irreducible_loops ();
4228 connect_infinite_loops_to_exit ();
4229 estimate_bb_frequencies (true);
4230 remove_fake_exit_edges ();
4231 loop_optimizer_finalize ();
4232 }
4233 else if (profile_status_for_fn (cfun) == PROFILE_READ)
4234 update_max_bb_count ();
4235 else if (profile_status_for_fn (cfun) == PROFILE_ABSENT
4236 && !flag_guess_branch_prob)
4237 ;
4238 else
4239 gcc_unreachable ();
4240 timevar_pop (TV_REBUILD_FREQUENCIES);
4241 }
4242
4243 /* Perform a dry run of the branch prediction pass and report a comparison of
4244 the predicted and real profiles to the dump file. */
4245
4246 void
4247 report_predictor_hitrates (void)
4248 {
4249 unsigned nb_loops;
4250
4251 loop_optimizer_init (LOOPS_NORMAL);
4252 if (dump_file && (dump_flags & TDF_DETAILS))
4253 flow_loops_dump (dump_file, NULL, 0);
4254
4255 mark_irreducible_loops ();
4256
4257 nb_loops = number_of_loops (cfun);
4258 if (nb_loops > 1)
4259 scev_initialize ();
4260
4261 tree_estimate_probability (true);
4262
4263 if (nb_loops > 1)
4264 scev_finalize ();
4265
4266 loop_optimizer_finalize ();
4267 }
4268
4269 /* Force edge E to be cold.
4270 If IMPOSSIBLE is true, force the edge to have count and probability 0;
4271 otherwise keep a low probability to represent a possible error in a guess.
4272 This is used, for example, when we predict that a loop likely iterates a
4273 given number of times but we are not 100% sure.
4274
4275 This function locally updates the profile without attempting to keep global
4276 consistency, which cannot be reached in full generality without a full
4277 profile rebuild from probabilities alone. Doing so is not necessarily a good
4278 idea because frequencies and counts may be more realistic than probabilities.
4279
4280 In some cases (such as the elimination of early exits during full loop
4281 unrolling) the caller can ensure that the profile becomes consistent
4282 afterwards. */
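/* A typical (illustrative) use: when an early exit is removed during
complete loop unrolling, calling force_edge_cold (exit_edge, true) zeroes
the exit edge and lets the adjustment propagate to predecessors where that
is safe. */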
4283
4284 void
4285 force_edge_cold (edge e, bool impossible)
4286 {
4287 profile_count count_sum = profile_count::zero ();
4288 profile_probability prob_sum = profile_probability::never ();
4289 edge_iterator ei;
4290 edge e2;
4291 bool uninitialized_exit = false;
4292
4293 /* When branch probabilities are not known, do nothing. */
4294 if (!impossible && !e->count ().initialized_p ())
4295 return;
4296
4297 profile_probability goal = (impossible ? profile_probability::never ()
4298 : profile_probability::very_unlikely ());
4299
4300 /* If the edge is already improbable or cold, just return. */
4301 if (e->probability <= goal
4302 && (!impossible || e->count () == profile_count::zero ()))
4303 return;
4304 FOR_EACH_EDGE (e2, ei, e->src->succs)
4305 if (e2 != e)
4306 {
4307 if (e2->flags & EDGE_FAKE)
4308 continue;
4309 if (e2->count ().initialized_p ())
4310 count_sum += e2->count ();
4311 if (e2->probability.initialized_p ())
4312 prob_sum += e2->probability;
4313 else
4314 uninitialized_exit = true;
4315 }
4316
4317 /* If we are not guessing profiles but have some other edges out,
4318 just assume the control flow goes elsewhere. */
4319 if (uninitialized_exit)
4320 e->probability = goal;
4321 /* If there are other edges out of e->src, redistribute the probability
4322 there. */
4323 else if (prob_sum > profile_probability::never ())
4324 {
4325 if (!(e->probability < goal))
4326 e->probability = goal;
4327
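/* Factor by which the remaining edges must be scaled so that the outgoing
probabilities sum to 1 again once E is fixed at its new value. */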
4328 profile_probability prob_comp = prob_sum / e->probability.invert ();
4329
4330 if (dump_file && (dump_flags & TDF_DETAILS))
4331 fprintf (dump_file, "Making edge %i->%i %s by redistributing "
4332 "probability to other edges.\n",
4333 e->src->index, e->dest->index,
4334 impossible ? "impossible" : "cold");
4335 FOR_EACH_EDGE (e2, ei, e->src->succs)
4336 if (e2 != e)
4337 {
4338 e2->probability /= prob_comp;
4339 }
4340 if (current_ir_type () != IR_GIMPLE
4341 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
4342 update_br_prob_note (e->src);
4343 }
4344 /* If all edges out of e->src are unlikely, the basic block itself
4345 is unlikely. */
4346 else
4347 {
4348 if (prob_sum == profile_probability::never ())
4349 e->probability = profile_probability::always ();
4350 else
4351 {
4352 if (impossible)
4353 e->probability = profile_probability::never ();
4354 /* If BB has some edges out that are not impossible, we cannot
4355 assume that BB itself is impossible. */
4356 impossible = false;
4357 }
4358 if (current_ir_type () != IR_GIMPLE
4359 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
4360 update_br_prob_note (e->src);
4361 if (e->src->count == profile_count::zero ())
4362 return;
4363 if (count_sum == profile_count::zero () && impossible)
4364 {
4365 bool found = false;
4366 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
4367 ;
4368 else if (current_ir_type () == IR_GIMPLE)
4369 for (gimple_stmt_iterator gsi = gsi_start_bb (e->src);
4370 !gsi_end_p (gsi); gsi_next (&gsi))
4371 {
4372 if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
4373 {
4374 found = true;
4375 break;
4376 }
4377 }
4378 /* FIXME: Implement RTL path. */
4379 else
4380 found = true;
4381 if (!found)
4382 {
4383 if (dump_file && (dump_flags & TDF_DETAILS))
4384 fprintf (dump_file,
4385 "Making bb %i impossible and dropping count to 0.\n",
4386 e->src->index);
4387 e->src->count = profile_count::zero ();
4388 FOR_EACH_EDGE (e2, ei, e->src->preds)
4389 force_edge_cold (e2, impossible);
4390 return;
4391 }
4392 }
4393
4394 /* If we did not adjust anything, the source basic block has no likely edges
4395 leaving in the other direction. In that case force that bb cold, too.
4396 This is in general a difficult task to do, but handle the special case when
4397 BB has only one predecessor. This is a common case when we are updating the
4398 profile after loop transforms. */
4399 if (!(prob_sum > profile_probability::never ())
4400 && count_sum == profile_count::zero ()
4401 && single_pred_p (e->src) && e->src->count.to_frequency (cfun)
4402 > (impossible ? 0 : 1))
4403 {
4404 int old_frequency = e->src->count.to_frequency (cfun);
4405 if (dump_file && (dump_flags & TDF_DETAILS))
4406 fprintf (dump_file, "Making bb %i %s.\n", e->src->index,
4407 impossible ? "impossible" : "cold");
4408 int new_frequency = MIN (e->src->count.to_frequency (cfun),
4409 impossible ? 0 : 1);
4410 if (impossible)
4411 e->src->count = profile_count::zero ();
4412 else
4413 e->src->count = e->count ().apply_scale (new_frequency,
4414 old_frequency);
4415 force_edge_cold (single_pred_edge (e->src), impossible);
4416 }
4417 else if (dump_file && (dump_flags & TDF_DETAILS)
4418 && maybe_hot_bb_p (cfun, e->src))
4419 fprintf (dump_file, "Giving up on making bb %i %s.\n", e->src->index,
4420 impossible ? "impossible" : "cold");
4421 }
4422 }
4423
4424 #if CHECKING_P
4425
4426 namespace selftest {
4427
4428 /* Test that the hitrate of each predictor defined in predict.def
4429 lies within the range [50, 100]. */
4430
4431 struct branch_predictor
4432 {
4433 const char *name;
4434 int probability;
4435 };
4436
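/* Include predict.def as an X-macro: each DEF_PREDICTOR entry expands
into a { NAME, HITRATE } initializer for the table below. */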
4437 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) { NAME, HITRATE },
4438
4439 static void
4440 test_prediction_value_range ()
4441 {
4442 branch_predictor predictors[] = {
4443 #include "predict.def"
4444 { NULL, PROB_UNINITIALIZED }
4445 };
4446
4447 for (unsigned i = 0; predictors[i].name != NULL; i++)
4448 {
4449 if (predictors[i].probability == PROB_UNINITIALIZED)
4450 continue;
4451
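/* Hitrates are stored scaled by REG_BR_PROB_BASE; convert back to a
percentage before checking the range. */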
4452 unsigned p = 100 * predictors[i].probability / REG_BR_PROB_BASE;
4453 ASSERT_TRUE (p >= 50 && p <= 100);
4454 }
4455 }
4456
4457 #undef DEF_PREDICTOR
4458
4459 /* Run all of the selftests within this file. */
4460
4461 void
4462 predict_c_tests ()
4463 {
4464 test_prediction_value_range ();
4465 }
4466
4467 } // namespace selftest
4468 #endif /* CHECKING_P. */