1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* References:
21
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
28
29
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "backend.h"
34 #include "target.h"
35 #include "rtl.h"
36 #include "tree.h"
37 #include "gimple.h"
38 #include "cfghooks.h"
39 #include "tree-pass.h"
40 #include "tm_p.h"
41 #include "ssa.h"
42 #include "expmed.h"
43 #include "insn-config.h"
44 #include "regs.h"
45 #include "emit-rtl.h"
46 #include "recog.h"
47 #include "cgraph.h"
48 #include "coverage.h"
49 #include "diagnostic-core.h"
50 #include "gimple-predict.h"
51 #include "alias.h"
52 #include "fold-const.h"
53 #include "calls.h"
54 #include "cfganal.h"
55 #include "flags.h"
56 #include "profile.h"
57 #include "except.h"
58 #include "dojump.h"
59 #include "explow.h"
60 #include "varasm.h"
61 #include "stmt.h"
62 #include "expr.h"
63 #include "sreal.h"
64 #include "params.h"
65 #include "cfgloop.h"
66 #include "internal-fn.h"
67 #include "gimple-iterator.h"
68 #include "tree-cfg.h"
69 #include "tree-ssa-loop-niter.h"
70 #include "tree-ssa-loop.h"
71 #include "tree-scalar-evolution.h"
72
73 /* real constants: 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
74 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */
75 static sreal real_almost_one, real_br_prob_base,
76 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
77
78 static void combine_predictions_for_insn (rtx_insn *, basic_block);
79 static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int);
80 static void predict_paths_leading_to (basic_block, enum br_predictor, enum prediction);
81 static void predict_paths_leading_to_edge (edge, enum br_predictor, enum prediction);
82 static bool can_predict_insn_p (const rtx_insn *);
83
84 /* Information we hold about each branch predictor.
85 Filled using information from predict.def. */
86
87 struct predictor_info
88 {
89 const char *const name; /* Name used in the debugging dumps. */
90 const int hitrate; /* Expected hitrate used by
91 predict_insn_def call. */
92 const int flags;
93 };
94
95 /* Use the given predictor without Dempster-Shafer theory if it matches
96 using first_match heuristics. */
97 #define PRED_FLAG_FIRST_MATCH 1
98
99 /* Convert a hitrate in percent to our representation. */
100
101 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
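/* A worked example, assuming the usual REG_BR_PROB_BASE of 10000:
   HITRATE (99) = (99 * 10000 + 50) / 100 = 9900, so a 99% hitrate maps
   to 9900 on the fixed-point probability scale; the +50 term rounds to
   the nearest unit.  */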
102
103 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
104 static const struct predictor_info predictor_info[]= {
105 #include "predict.def"
106
107 /* Upper bound on predictors. */
108 {NULL, 0, 0}
109 };
110 #undef DEF_PREDICTOR
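/* For illustration, a predict.def entry has roughly this shape (the
   exact names and values here are a sketch, not quoted from the file):

     DEF_PREDICTOR (PRED_NORETURN, "noreturn call", PROB_VERY_LIKELY,
                    PRED_FLAG_FIRST_MATCH)

   which the expansion above turns into the predictor_info element
   { "noreturn call", PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH }.  */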
111
112 /* Return TRUE if frequency FREQ is considered to be hot. */
113
114 static inline bool
115 maybe_hot_frequency_p (struct function *fun, int freq)
116 {
117 struct cgraph_node *node = cgraph_node::get (fun->decl);
118 if (!profile_info
119 || !opt_for_fn (fun->decl, flag_branch_probabilities))
120 {
121 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
122 return false;
123 if (node->frequency == NODE_FREQUENCY_HOT)
124 return true;
125 }
126 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
127 return true;
128 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
129 && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
130 return false;
131 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
132 return false;
133 if (freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency
134 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
135 return false;
136 return true;
137 }
138
139 static gcov_type min_count = -1;
140
141 /* Determine the threshold for hot BB counts. */
142
143 gcov_type
144 get_hot_bb_threshold ()
145 {
146 gcov_working_set_t *ws;
147 if (min_count == -1)
148 {
149 ws = find_working_set (PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE));
150 gcc_assert (ws);
151 min_count = ws->min_counter;
152 }
153 return min_count;
154 }
155
156 /* Set the threshold for hot BB counts. */
157
158 void
159 set_hot_bb_threshold (gcov_type min)
160 {
161 min_count = min;
162 }
163
164 /* Return TRUE if profile count COUNT is considered to be hot. */
165
166 bool
167 maybe_hot_count_p (struct function *fun, gcov_type count)
168 {
169 if (fun && profile_status_for_fn (fun) != PROFILE_READ)
170 return true;
171 /* Code executed at most once is not hot. */
172 if (profile_info->runs >= count)
173 return false;
174 return (count >= get_hot_bb_threshold ());
175 }
176
177 /* Return true in case BB can be CPU intensive and should be optimized
178 for maximal performance. */
179
180 bool
181 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
182 {
183 gcc_checking_assert (fun);
184 if (profile_status_for_fn (fun) == PROFILE_READ)
185 return maybe_hot_count_p (fun, bb->count);
186 return maybe_hot_frequency_p (fun, bb->frequency);
187 }
188
189 /* Return true in case edge E can be CPU intensive and should be
190 optimized for maximal performance. */
191
192 bool
193 maybe_hot_edge_p (edge e)
194 {
195 if (profile_status_for_fn (cfun) == PROFILE_READ)
196 return maybe_hot_count_p (cfun, e->count);
197 return maybe_hot_frequency_p (cfun, EDGE_FREQUENCY (e));
198 }
199
200 /* Return true if profile COUNT and FREQUENCY, or the static node
201 frequency of function FUN, reflect never being executed. */
202
203 static bool
204 probably_never_executed (struct function *fun,
205 gcov_type count, int frequency)
206 {
207 gcc_checking_assert (fun);
208 if (profile_status_for_fn (fun) == PROFILE_READ)
209 {
210 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
211 if (count * unlikely_count_fraction >= profile_info->runs)
212 return false;
213 if (!frequency)
214 return true;
215 if (!ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency)
216 return false;
217 if (ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
218 {
219 gcov_type computed_count;
220 /* Check for the possibility of overflow, in which case the entry
221 bb count is large enough to do the division first without losing
222 much precision. */
223 if (ENTRY_BLOCK_PTR_FOR_FN (fun)->count < REG_BR_PROB_BASE *
224 REG_BR_PROB_BASE)
225 {
226 gcov_type scaled_count
227 = frequency * ENTRY_BLOCK_PTR_FOR_FN (fun)->count *
228 unlikely_count_fraction;
229 computed_count = RDIV (scaled_count,
230 ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency);
231 }
232 else
233 {
234 computed_count = RDIV (ENTRY_BLOCK_PTR_FOR_FN (fun)->count,
235 ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency);
236 computed_count *= frequency * unlikely_count_fraction;
237 }
238 if (computed_count >= profile_info->runs)
239 return false;
240 }
241 return true;
242 }
243 if ((!profile_info || !(opt_for_fn (fun->decl, flag_branch_probabilities)))
244 && (cgraph_node::get (fun->decl)->frequency
245 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
246 return true;
247 return false;
248 }
249
250
251 /* Return true in case BB is probably never executed. */
252
253 bool
254 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
255 {
256 return probably_never_executed (fun, bb->count, bb->frequency);
257 }
258
259
260 /* Return true in case edge E is probably never executed. */
261
262 bool
263 probably_never_executed_edge_p (struct function *fun, edge e)
264 {
265 return probably_never_executed (fun, e->count, EDGE_FREQUENCY (e));
266 }
267
268 /* Return true when the current function should always be optimized for size. */
269
270 bool
271 optimize_function_for_size_p (struct function *fun)
272 {
273 if (!fun || !fun->decl)
274 return optimize_size;
275 cgraph_node *n = cgraph_node::get (fun->decl);
276 return n && n->optimize_for_size_p ();
277 }
278
279 /* Return true when the current function should always be optimized for speed. */
280
281 bool
282 optimize_function_for_speed_p (struct function *fun)
283 {
284 return !optimize_function_for_size_p (fun);
285 }
286
287 /* Return TRUE when BB should be optimized for size. */
288
289 bool
290 optimize_bb_for_size_p (const_basic_block bb)
291 {
292 return (optimize_function_for_size_p (cfun)
293 || (bb && !maybe_hot_bb_p (cfun, bb)));
294 }
295
296 /* Return TRUE when BB should be optimized for speed. */
297
298 bool
299 optimize_bb_for_speed_p (const_basic_block bb)
300 {
301 return !optimize_bb_for_size_p (bb);
302 }
303
304 /* Return TRUE when edge E should be optimized for size. */
305
306 bool
307 optimize_edge_for_size_p (edge e)
308 {
309 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
310 }
311
312 /* Return TRUE when edge E should be optimized for speed. */
313
314 bool
315 optimize_edge_for_speed_p (edge e)
316 {
317 return !optimize_edge_for_size_p (e);
318 }
319
320 /* Return TRUE when the current insn should be optimized for size. */
321
322 bool
323 optimize_insn_for_size_p (void)
324 {
325 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
326 }
327
328 /* Return TRUE when the current insn should be optimized for speed. */
329
330 bool
331 optimize_insn_for_speed_p (void)
332 {
333 return !optimize_insn_for_size_p ();
334 }
335
336 /* Return TRUE when LOOP should be optimized for size. */
337
338 bool
339 optimize_loop_for_size_p (struct loop *loop)
340 {
341 return optimize_bb_for_size_p (loop->header);
342 }
343
344 /* Return TRUE when LOOP should be optimized for speed. */
345
346 bool
347 optimize_loop_for_speed_p (struct loop *loop)
348 {
349 return optimize_bb_for_speed_p (loop->header);
350 }
351
352 /* Return TRUE when LOOP nest should be optimized for speed. */
353
354 bool
355 optimize_loop_nest_for_speed_p (struct loop *loop)
356 {
357 struct loop *l = loop;
358 if (optimize_loop_for_speed_p (loop))
359 return true;
360 l = loop->inner;
361 while (l && l != loop)
362 {
363 if (optimize_loop_for_speed_p (l))
364 return true;
365 if (l->inner)
366 l = l->inner;
367 else if (l->next)
368 l = l->next;
369 else
370 {
371 while (l != loop && !l->next)
372 l = loop_outer (l);
373 if (l != loop)
374 l = l->next;
375 }
376 }
377 return false;
378 }
379
380 /* Return TRUE when LOOP nest should be optimized for size. */
381
382 bool
383 optimize_loop_nest_for_size_p (struct loop *loop)
384 {
385 return !optimize_loop_nest_for_speed_p (loop);
386 }
387
388 /* Return true when edge E is likely to be well predictable by the
389 branch predictor. */
390
391 bool
392 predictable_edge_p (edge e)
393 {
394 if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
395 return false;
396 if ((e->probability
397 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
398 || (REG_BR_PROB_BASE - e->probability
399 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
400 return true;
401 return false;
402 }
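/* Example, assuming the default PARAM_PREDICTABLE_BRANCH_OUTCOME of 2
   (an assumption about the parameter default) and REG_BR_PROB_BASE of
   10000: an edge counts as predictable when its probability is at most
   200 or at least 9800, i.e. the branch goes one way at least 98% of
   the time.  */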
403
404
405 /* Set RTL expansion for BB profile. */
406
407 void
408 rtl_profile_for_bb (basic_block bb)
409 {
410 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
411 }
412
413 /* Set RTL expansion for edge profile. */
414
415 void
416 rtl_profile_for_edge (edge e)
417 {
418 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
419 }
420
421 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
422 void
423 default_rtl_profile (void)
424 {
425 crtl->maybe_hot_insn_p = true;
426 }
427
428 /* Return true if one of the outgoing edges is already predicted by
429 PREDICTOR. */
430
431 bool
432 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
433 {
434 rtx note;
435 if (!INSN_P (BB_END (bb)))
436 return false;
437 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
438 if (REG_NOTE_KIND (note) == REG_BR_PRED
439 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
440 return true;
441 return false;
442 }
443
444 /* Structure representing predictions at the tree level. */
445
446 struct edge_prediction {
447 struct edge_prediction *ep_next;
448 edge ep_edge;
449 enum br_predictor ep_predictor;
450 int ep_probability;
451 };
452
453 /* This map contains, for each basic block, the list of predictions
454 for its outgoing edges. */
455
456 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
457
458 /* Return true if one of the outgoing edges is already predicted by
459 PREDICTOR. */
460
461 bool
462 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
463 {
464 struct edge_prediction *i;
465 edge_prediction **preds = bb_predictions->get (bb);
466
467 if (!preds)
468 return false;
469
470 for (i = *preds; i; i = i->ep_next)
471 if (i->ep_predictor == predictor)
472 return true;
473 return false;
474 }
475
476 /* Return true when the probability of edge is reliable.
477
478 The profile guessing code is good at predicting branch outcome (i.e.
479 taken/not taken), which it gets right slightly over 75% of the time.
480 It is, however, notoriously poor at predicting the probability itself.
481 In general the guessed profile appears a lot flatter (with probabilities
482 closer to 50%) than reality, so it is a bad idea to use it to drive
483 optimizations such as those disabling dynamic branch prediction for
484 well predictable branches.
485
486 There are two exceptions - edges leading to noreturn calls and edges
487 predicted by the number of iterations heuristics are predicted well.
488 This predicate should be able to distinguish those, but at the moment it
489 simply checks for the noreturn heuristic, which is the only one giving a
490 probability over 99% or below 1%. In the future we might want to propagate
491 reliability information across the CFG if we find it useful in multiple places. */
492 static bool
493 probability_reliable_p (int prob)
494 {
495 return (profile_status_for_fn (cfun) == PROFILE_READ
496 || (profile_status_for_fn (cfun) == PROFILE_GUESSED
497 && (prob <= HITRATE (1) || prob >= HITRATE (99))));
498 }
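/* Concretely: a read (feedback-directed) profile is always trusted,
   while for a guessed profile only the extreme predictions are, i.e.
   prob <= HITRATE (1) = 100 or prob >= HITRATE (99) = 9900 out of
   REG_BR_PROB_BASE = 10000.  */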
499
500 /* Same predicate as above, working on edges. */
501 bool
502 edge_probability_reliable_p (const_edge e)
503 {
504 return probability_reliable_p (e->probability);
505 }
506
507 /* Same predicate as edge_probability_reliable_p, working on notes. */
508 bool
509 br_prob_note_reliable_p (const_rtx note)
510 {
511 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
512 return probability_reliable_p (XINT (note, 0));
513 }
514
515 static void
516 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
517 {
518 gcc_assert (any_condjump_p (insn));
519 if (!flag_guess_branch_prob)
520 return;
521
522 add_reg_note (insn, REG_BR_PRED,
523 gen_rtx_CONCAT (VOIDmode,
524 GEN_INT ((int) predictor),
525 GEN_INT ((int) probability)));
526 }
527
528 /* Predict insn by given predictor. */
529
530 void
531 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
532 enum prediction taken)
533 {
534 int probability = predictor_info[(int) predictor].hitrate;
535
536 if (taken != TAKEN)
537 probability = REG_BR_PROB_BASE - probability;
538
539 predict_insn (insn, predictor, probability);
540 }
541
542 /* Predict edge E with given probability if possible. */
543
544 void
545 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
546 {
547 rtx_insn *last_insn;
548 last_insn = BB_END (e->src);
549
550 /* We can store branch prediction information only for
551 conditional jumps. */
552 if (!any_condjump_p (last_insn))
553 return;
554
555 /* We always store probability of branching. */
556 if (e->flags & EDGE_FALLTHRU)
557 probability = REG_BR_PROB_BASE - probability;
558
559 predict_insn (last_insn, predictor, probability);
560 }
561
562 /* Predict edge E with the given PROBABILITY. */
563 void
564 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
565 {
566 gcc_assert (profile_status_for_fn (cfun) != PROFILE_GUESSED);
567 if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && EDGE_COUNT (e->src->succs) >
568 1)
569 && flag_guess_branch_prob && optimize)
570 {
571 struct edge_prediction *i = XNEW (struct edge_prediction);
572 edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
573
574 i->ep_next = preds;
575 preds = i;
576 i->ep_probability = probability;
577 i->ep_predictor = predictor;
578 i->ep_edge = e;
579 }
580 }
581
582 /* Remove all predictions on given basic block that are attached
583 to edge E. */
584 void
585 remove_predictions_associated_with_edge (edge e)
586 {
587 if (!bb_predictions)
588 return;
589
590 edge_prediction **preds = bb_predictions->get (e->src);
591
592 if (preds)
593 {
594 struct edge_prediction **prediction = preds;
595 struct edge_prediction *next;
596
597 while (*prediction)
598 {
599 if ((*prediction)->ep_edge == e)
600 {
601 next = (*prediction)->ep_next;
602 free (*prediction);
603 *prediction = next;
604 }
605 else
606 prediction = &((*prediction)->ep_next);
607 }
608 }
609 }
610
611 /* Clears the list of predictions stored for BB. */
612
613 static void
614 clear_bb_predictions (basic_block bb)
615 {
616 edge_prediction **preds = bb_predictions->get (bb);
617 struct edge_prediction *pred, *next;
618
619 if (!preds)
620 return;
621
622 for (pred = *preds; pred; pred = next)
623 {
624 next = pred->ep_next;
625 free (pred);
626 }
627 *preds = NULL;
628 }
629
630 /* Return true when we can store a prediction on insn INSN.
631 At the moment we represent predictions only on conditional
632 jumps, not on computed jumps or other complicated cases. */
633 static bool
634 can_predict_insn_p (const rtx_insn *insn)
635 {
636 return (JUMP_P (insn)
637 && any_condjump_p (insn)
638 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
639 }
640
641 /* Predict edge E by given predictor if possible. */
642
643 void
644 predict_edge_def (edge e, enum br_predictor predictor,
645 enum prediction taken)
646 {
647 int probability = predictor_info[(int) predictor].hitrate;
648
649 if (taken != TAKEN)
650 probability = REG_BR_PROB_BASE - probability;
651
652 predict_edge (e, predictor, probability);
653 }
654
655 /* Invert all branch predictions or probability notes in the INSN. This needs
656 to be done each time we invert the condition used by the jump. */
657
658 void
659 invert_br_probabilities (rtx insn)
660 {
661 rtx note;
662
663 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
664 if (REG_NOTE_KIND (note) == REG_BR_PROB)
665 XINT (note, 0) = REG_BR_PROB_BASE - XINT (note, 0);
666 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
667 XEXP (XEXP (note, 0), 1)
668 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
669 }
670
671 /* Dump information about the branch prediction to the output file. */
672
673 static void
674 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
675 basic_block bb, int used)
676 {
677 edge e;
678 edge_iterator ei;
679
680 if (!file)
681 return;
682
683 FOR_EACH_EDGE (e, ei, bb->succs)
684 if (! (e->flags & EDGE_FALLTHRU))
685 break;
686
687 fprintf (file, " %s heuristics%s: %.1f%%",
688 predictor_info[predictor].name,
689 used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE);
690
691 if (bb->count)
692 {
693 fprintf (file, " exec %" PRId64, bb->count);
694 if (e)
695 {
696 fprintf (file, " hit %" PRId64, e->count);
697 fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count);
698 }
699 }
700
701 fprintf (file, "\n");
702 }
703
704 /* We cannot predict the probabilities of the outgoing edges of BB.
705 Set them evenly and hope for the best. */
706 static void
707 set_even_probabilities (basic_block bb)
708 {
709 int nedges = 0;
710 edge e;
711 edge_iterator ei;
712
713 FOR_EACH_EDGE (e, ei, bb->succs)
714 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
715 nedges ++;
716 FOR_EACH_EDGE (e, ei, bb->succs)
717 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
718 e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
719 else
720 e->probability = 0;
721 }
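/* Example: a block with three successors, none of them EH or fake,
   gets (10000 + 1) / 3 = 3333 on each such edge (with REG_BR_PROB_BASE
   of 10000); EH and fake edges get probability 0.  */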
722
723 /* Combine all REG_BR_PRED notes into a single probability and attach a
724 REG_BR_PROB note if not already present. Remove now useless REG_BR_PRED notes. */
725
726 static void
727 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
728 {
729 rtx prob_note;
730 rtx *pnote;
731 rtx note;
732 int best_probability = PROB_EVEN;
733 enum br_predictor best_predictor = END_PREDICTORS;
734 int combined_probability = REG_BR_PROB_BASE / 2;
735 int d;
736 bool first_match = false;
737 bool found = false;
738
739 if (!can_predict_insn_p (insn))
740 {
741 set_even_probabilities (bb);
742 return;
743 }
744
745 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
746 pnote = &REG_NOTES (insn);
747 if (dump_file)
748 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
749 bb->index);
750
751 /* We implement "first match" heuristics and use the probability guessed
752 by the predictor with the smallest index. */
753 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
754 if (REG_NOTE_KIND (note) == REG_BR_PRED)
755 {
756 enum br_predictor predictor = ((enum br_predictor)
757 INTVAL (XEXP (XEXP (note, 0), 0)));
758 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
759
760 found = true;
761 if (best_predictor > predictor)
762 best_probability = probability, best_predictor = predictor;
763
764 d = (combined_probability * probability
765 + (REG_BR_PROB_BASE - combined_probability)
766 * (REG_BR_PROB_BASE - probability));
767
768 /* Use FP math to avoid overflows of 32-bit integers. */
769 if (d == 0)
770 /* If one probability is 0% and one 100%, avoid division by zero. */
771 combined_probability = REG_BR_PROB_BASE / 2;
772 else
773 combined_probability = (((double) combined_probability) * probability
774 * REG_BR_PROB_BASE / d + 0.5);
775 }
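  /* The update above is the two-hypothesis Dempster-Shafer combination:
     with running estimate c and new predictor probability p (both
     scaled by REG_BR_PROB_BASE), the new estimate is
     c*p / (c*p + (1-c)*(1-p)).  E.g. combining 70% with 80% gives
     0.56 / (0.56 + 0.06), roughly 90.3%, i.e. about 9032 on the
     fixed-point scale.  */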
776
777 /* Decide which heuristic to use. If we didn't match anything,
778 use the no_prediction heuristic; if we did match, use either
779 first match or Dempster-Shafer theory depending on the flags. */
780
781 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
782 first_match = true;
783
784 if (!found)
785 dump_prediction (dump_file, PRED_NO_PREDICTION,
786 combined_probability, bb, true);
787 else
788 {
789 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
790 bb, !first_match);
791 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
792 bb, first_match);
793 }
794
795 if (first_match)
796 combined_probability = best_probability;
797 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
798
799 while (*pnote)
800 {
801 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
802 {
803 enum br_predictor predictor = ((enum br_predictor)
804 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
805 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
806
807 dump_prediction (dump_file, predictor, probability, bb,
808 !first_match || best_predictor == predictor);
809 *pnote = XEXP (*pnote, 1);
810 }
811 else
812 pnote = &XEXP (*pnote, 1);
813 }
814
815 if (!prob_note)
816 {
817 add_int_reg_note (insn, REG_BR_PROB, combined_probability);
818
819 /* Save the prediction into the CFG in case we are seeing a
820 non-degenerate conditional jump. */
821 if (!single_succ_p (bb))
822 {
823 BRANCH_EDGE (bb)->probability = combined_probability;
824 FALLTHRU_EDGE (bb)->probability
825 = REG_BR_PROB_BASE - combined_probability;
826 }
827 }
828 else if (!single_succ_p (bb))
829 {
830 int prob = XINT (prob_note, 0);
831
832 BRANCH_EDGE (bb)->probability = prob;
833 FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
834 }
835 else
836 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
837 }
838
839 /* Combine predictions into a single probability and store them into the
840 CFG. Remove now useless prediction entries. */
841
842 static void
843 combine_predictions_for_bb (basic_block bb)
844 {
845 int best_probability = PROB_EVEN;
846 enum br_predictor best_predictor = END_PREDICTORS;
847 int combined_probability = REG_BR_PROB_BASE / 2;
848 int d;
849 bool first_match = false;
850 bool found = false;
851 struct edge_prediction *pred;
852 int nedges = 0;
853 edge e, first = NULL, second = NULL;
854 edge_iterator ei;
855
856 FOR_EACH_EDGE (e, ei, bb->succs)
857 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
858 {
859 nedges ++;
860 if (first && !second)
861 second = e;
862 if (!first)
863 first = e;
864 }
865
866 /* When there is no successor or only one choice, prediction is easy.
867
868 We are lazy for now and predict only basic blocks with two outgoing
869 edges. It is possible to predict the generic case too, but we have to
870 ignore first match heuristics and do more involved combining. Implement
871 this later. */
872 if (nedges != 2)
873 {
874 if (!bb->count)
875 set_even_probabilities (bb);
876 clear_bb_predictions (bb);
877 if (dump_file)
878 fprintf (dump_file, "%i edges in bb %i predicted to even probabilities\n",
879 nedges, bb->index);
880 return;
881 }
882
883 if (dump_file)
884 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
885
886 edge_prediction **preds = bb_predictions->get (bb);
887 if (preds)
888 {
889 /* We implement "first match" heuristics and use the probability guessed
890 by the predictor with the smallest index. */
891 for (pred = *preds; pred; pred = pred->ep_next)
892 {
893 enum br_predictor predictor = pred->ep_predictor;
894 int probability = pred->ep_probability;
895
896 if (pred->ep_edge != first)
897 probability = REG_BR_PROB_BASE - probability;
898
899 found = true;
900 /* First match heuristics would be wildly confused if we predicted
901 both directions. */
902 if (best_predictor > predictor)
903 {
904 struct edge_prediction *pred2;
905 int prob = probability;
906
907 for (pred2 = (struct edge_prediction *) *preds;
908 pred2; pred2 = pred2->ep_next)
909 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
910 {
911 int probability2 = pred2->ep_probability;
912
913 if (pred2->ep_edge != first)
914 probability2 = REG_BR_PROB_BASE - probability2;
915
916 if ((probability < REG_BR_PROB_BASE / 2) !=
917 (probability2 < REG_BR_PROB_BASE / 2))
918 break;
919
920 /* If the same predictor later gave a better result, go for it! */
921 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
922 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
923 prob = probability2;
924 }
925 if (!pred2)
926 best_probability = prob, best_predictor = predictor;
927 }
928
929 d = (combined_probability * probability
930 + (REG_BR_PROB_BASE - combined_probability)
931 * (REG_BR_PROB_BASE - probability));
932
933 /* Use FP math to avoid overflows of 32-bit integers. */
934 if (d == 0)
935 /* If one probability is 0% and one 100%, avoid division by zero. */
936 combined_probability = REG_BR_PROB_BASE / 2;
937 else
938 combined_probability = (((double) combined_probability)
939 * probability
940 * REG_BR_PROB_BASE / d + 0.5);
941 }
942 }
943
944 /* Decide which heuristic to use. If we didn't match anything,
945 use the no_prediction heuristic; if we did match, use either
946 first match or Dempster-Shafer theory depending on the flags. */
947
948 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
949 first_match = true;
950
951 if (!found)
952 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb, true);
953 else
954 {
955 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
956 !first_match);
957 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
958 first_match);
959 }
960
961 if (first_match)
962 combined_probability = best_probability;
963 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
964
965 if (preds)
966 {
967 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
968 {
969 enum br_predictor predictor = pred->ep_predictor;
970 int probability = pred->ep_probability;
971
972 if (pred->ep_edge != EDGE_SUCC (bb, 0))
973 probability = REG_BR_PROB_BASE - probability;
974 dump_prediction (dump_file, predictor, probability, bb,
975 !first_match || best_predictor == predictor);
976 }
977 }
978 clear_bb_predictions (bb);
979
980 if (!bb->count)
981 {
982 first->probability = combined_probability;
983 second->probability = REG_BR_PROB_BASE - combined_probability;
984 }
985 }
986
987 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
988 Return the SSA_NAME if the condition is satisfied, NULL otherwise.
989
990 T1 and T2 should be one of the following cases:
991 1. T1 is SSA_NAME, T2 is NULL
992 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
993 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
994
995 static tree
996 strips_small_constant (tree t1, tree t2)
997 {
998 tree ret = NULL;
999 int value = 0;
1000
1001 if (!t1)
1002 return NULL;
1003 else if (TREE_CODE (t1) == SSA_NAME)
1004 ret = t1;
1005 else if (tree_fits_shwi_p (t1))
1006 value = tree_to_shwi (t1);
1007 else
1008 return NULL;
1009
1010 if (!t2)
1011 return ret;
1012 else if (tree_fits_shwi_p (t2))
1013 value = tree_to_shwi (t2);
1014 else if (TREE_CODE (t2) == SSA_NAME)
1015 {
1016 if (ret)
1017 return NULL;
1018 else
1019 ret = t2;
1020 }
1021
1022 if (value <= 4 && value >= -4)
1023 return ret;
1024 else
1025 return NULL;
1026 }
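/* Usage sketch: for an expression such as i + 3, get_base_value below
   ends up calling strips_small_constant (i, 3) and gets back the
   SSA_NAME i, since 3 lies in [-4, 4]; for i + 100 the result is NULL
   and no base value is found.  */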
1027
1028 /* Return the SSA_NAME in T or T's operands.
1029 Return NULL if SSA_NAME cannot be found. */
1030
1031 static tree
1032 get_base_value (tree t)
1033 {
1034 if (TREE_CODE (t) == SSA_NAME)
1035 return t;
1036
1037 if (!BINARY_CLASS_P (t))
1038 return NULL;
1039
1040 switch (TREE_OPERAND_LENGTH (t))
1041 {
1042 case 1:
1043 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1044 case 2:
1045 return strips_small_constant (TREE_OPERAND (t, 0),
1046 TREE_OPERAND (t, 1));
1047 default:
1048 return NULL;
1049 }
1050 }
1051
1052 /* Check the compare STMT in LOOP. If it compares an induction
1053 variable to a loop invariant, return true, and save
1054 LOOP_INVARIANT, COMPARE_CODE, LOOP_STEP and LOOP_IV_BASE.
1055 Otherwise return false and set LOOP_INVARIANT to NULL. */
1056
1057 static bool
1058 is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
1059 tree *loop_invariant,
1060 enum tree_code *compare_code,
1061 tree *loop_step,
1062 tree *loop_iv_base)
1063 {
1064 tree op0, op1, bound, base;
1065 affine_iv iv0, iv1;
1066 enum tree_code code;
1067 tree step;
1068
1069 code = gimple_cond_code (stmt);
1070 *loop_invariant = NULL;
1071
1072 switch (code)
1073 {
1074 case GT_EXPR:
1075 case GE_EXPR:
1076 case NE_EXPR:
1077 case LT_EXPR:
1078 case LE_EXPR:
1079 case EQ_EXPR:
1080 break;
1081
1082 default:
1083 return false;
1084 }
1085
1086 op0 = gimple_cond_lhs (stmt);
1087 op1 = gimple_cond_rhs (stmt);
1088
1089 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1090 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1091 return false;
1092 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1093 return false;
1094 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1095 return false;
1096 if (TREE_CODE (iv0.step) != INTEGER_CST
1097 || TREE_CODE (iv1.step) != INTEGER_CST)
1098 return false;
1099 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1100 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1101 return false;
1102
1103 if (integer_zerop (iv0.step))
1104 {
1105 if (code != NE_EXPR && code != EQ_EXPR)
1106 code = invert_tree_comparison (code, false);
1107 bound = iv0.base;
1108 base = iv1.base;
1109 if (tree_fits_shwi_p (iv1.step))
1110 step = iv1.step;
1111 else
1112 return false;
1113 }
1114 else
1115 {
1116 bound = iv1.base;
1117 base = iv0.base;
1118 if (tree_fits_shwi_p (iv0.step))
1119 step = iv0.step;
1120 else
1121 return false;
1122 }
1123
1124 if (TREE_CODE (bound) != INTEGER_CST)
1125 bound = get_base_value (bound);
1126 if (!bound)
1127 return false;
1128 if (TREE_CODE (base) != INTEGER_CST)
1129 base = get_base_value (base);
1130 if (!base)
1131 return false;
1132
1133 *loop_invariant = bound;
1134 *compare_code = code;
1135 *loop_step = step;
1136 *loop_iv_base = base;
1137 return true;
1138 }
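/* Example: for a loop controlled by i < n, where i is an induction
   variable with base 0 and step 1 and n is loop invariant, this
   returns true with *loop_invariant = n, *compare_code = LT_EXPR,
   *loop_step = 1 and *loop_iv_base = 0.  */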
1139
1140 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1141
1142 static bool
1143 expr_coherent_p (tree t1, tree t2)
1144 {
1145 gimple *stmt;
1146 tree ssa_name_1 = NULL;
1147 tree ssa_name_2 = NULL;
1148
1149 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1150 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1151
1152 if (t1 == t2)
1153 return true;
1154
1155 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1156 return true;
1157 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1158 return false;
1159
1160 /* Check to see if t1 is expressed/defined with t2. */
1161 stmt = SSA_NAME_DEF_STMT (t1);
1162 gcc_assert (stmt != NULL);
1163 if (is_gimple_assign (stmt))
1164 {
1165 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1166 if (ssa_name_1 && ssa_name_1 == t2)
1167 return true;
1168 }
1169
1170 /* Check to see if t2 is expressed/defined with t1. */
1171 stmt = SSA_NAME_DEF_STMT (t2);
1172 gcc_assert (stmt != NULL);
1173 if (is_gimple_assign (stmt))
1174 {
1175 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1176 if (ssa_name_2 && ssa_name_2 == t1)
1177 return true;
1178 }
1179
1180 /* Compare if t1 and t2's def_stmts are identical. */
1181 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1182 return true;
1183 else
1184 return false;
1185 }
1186
1187 /* Predict branch probability of BB when BB contains a branch that compares
1188 an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
1189 loop exit is compared using LOOP_BOUND_CODE, with step of LOOP_BOUND_STEP.
1190
1191 E.g.
1192 for (int i = 0; i < bound; i++) {
1193 if (i < bound - 2)
1194 computation_1();
1195 else
1196 computation_2();
1197 }
1198
1199 In this loop, we will predict the branch inside the loop to be taken. */
1200
1201 static void
1202 predict_iv_comparison (struct loop *loop, basic_block bb,
1203 tree loop_bound_var,
1204 tree loop_iv_base_var,
1205 enum tree_code loop_bound_code,
1206 int loop_bound_step)
1207 {
1208 gimple *stmt;
1209 tree compare_var, compare_base;
1210 enum tree_code compare_code;
1211 tree compare_step_var;
1212 edge then_edge;
1213 edge_iterator ei;
1214
1215 if (predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1216 || predicted_by_p (bb, PRED_LOOP_ITERATIONS)
1217 || predicted_by_p (bb, PRED_LOOP_EXIT))
1218 return;
1219
1220 stmt = last_stmt (bb);
1221 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1222 return;
1223 if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
1224 loop, &compare_var,
1225 &compare_code,
1226 &compare_step_var,
1227 &compare_base))
1228 return;
1229
1230 /* Find the taken edge. */
1231 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1232 if (then_edge->flags & EDGE_TRUE_VALUE)
1233 break;
1234
1235 /* When comparing an IV to a loop invariant, NE is more likely to be
1236 taken while EQ is more likely to be not-taken. */
1237 if (compare_code == NE_EXPR)
1238 {
1239 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1240 return;
1241 }
1242 else if (compare_code == EQ_EXPR)
1243 {
1244 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1245 return;
1246 }
1247
1248 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1249 return;
1250
1251 /* If loop bound, base and compare bound are all constants, we can
1252 calculate the probability directly. */
1253 if (tree_fits_shwi_p (loop_bound_var)
1254 && tree_fits_shwi_p (compare_var)
1255 && tree_fits_shwi_p (compare_base))
1256 {
1257 int probability;
1258 bool overflow, overall_overflow = false;
1259 widest_int compare_count, tem;
1260
1261 /* (loop_bound - base) / compare_step */
1262 tem = wi::sub (wi::to_widest (loop_bound_var),
1263 wi::to_widest (compare_base), SIGNED, &overflow);
1264 overall_overflow |= overflow;
1265 widest_int loop_count = wi::div_trunc (tem,
1266 wi::to_widest (compare_step_var),
1267 SIGNED, &overflow);
1268 overall_overflow |= overflow;
1269
1270 if (!wi::neg_p (wi::to_widest (compare_step_var))
1271 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1272 {
1273 /* (loop_bound - compare_bound) / compare_step */
1274 tem = wi::sub (wi::to_widest (loop_bound_var),
1275 wi::to_widest (compare_var), SIGNED, &overflow);
1276 overall_overflow |= overflow;
1277 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1278 SIGNED, &overflow);
1279 overall_overflow |= overflow;
1280 }
1281 else
1282 {
1283 /* (compare_bound - base) / compare_step */
1284 tem = wi::sub (wi::to_widest (compare_var),
1285 wi::to_widest (compare_base), SIGNED, &overflow);
1286 overall_overflow |= overflow;
1287 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1288 SIGNED, &overflow);
1289 overall_overflow |= overflow;
1290 }
1291 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1292 ++compare_count;
1293 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1294 ++loop_count;
1295 if (wi::neg_p (compare_count))
1296 compare_count = 0;
1297 if (wi::neg_p (loop_count))
1298 loop_count = 0;
1299 if (loop_count == 0)
1300 probability = 0;
1301 else if (wi::cmps (compare_count, loop_count) == 1)
1302 probability = REG_BR_PROB_BASE;
1303 else
1304 {
1305 tem = compare_count * REG_BR_PROB_BASE;
1306 tem = wi::udiv_trunc (tem, loop_count);
1307 probability = tem.to_uhwi ();
1308 }
1309
1310 if (!overall_overflow)
1311 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1312
1313 return;
1314 }
1315
1316 if (expr_coherent_p (loop_bound_var, compare_var))
1317 {
1318 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1319 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1320 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1321 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1322 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1323 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1324 else if (loop_bound_code == NE_EXPR)
1325 {
1326 /* If the loop backedge condition is "(i != bound)", we do
1327 the comparison based on the step of IV:
1328 * step < 0 : backedge condition is like (i > bound)
1329 * step > 0 : backedge condition is like (i < bound) */
1330 gcc_assert (loop_bound_step != 0);
1331 if (loop_bound_step > 0
1332 && (compare_code == LT_EXPR
1333 || compare_code == LE_EXPR))
1334 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1335 else if (loop_bound_step < 0
1336 && (compare_code == GT_EXPR
1337 || compare_code == GE_EXPR))
1338 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1339 else
1340 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1341 }
1342 else
1343 /* The branch is predicted not-taken if loop_bound_code is
1344 opposite with compare_code. */
1345 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1346 }
1347 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1348 {
1349 /* For cases like:
1350 for (i = s; i < h; i++)
1351 if (i > s + 2) ....
1352 The branch should be predicted taken. */
1353 if (loop_bound_step > 0
1354 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1355 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1356 else if (loop_bound_step < 0
1357 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1358 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1359 else
1360 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1361 }
1362 }
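/* A worked example of the all-constant case above: with iv base 0,
   loop bound 10, compare bound 5 and step 1, loop_count = 10 and
   compare_count = 5, so the then edge gets PRED_LOOP_IV_COMPARE with
   probability 5 * REG_BR_PROB_BASE / 10, i.e. 50%.  */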
1363
1364 /* Predict for extra loop exits that will lead to EXIT_EDGE. The extra loop
1365 exits result from short-circuit conditions that will generate an
1366 if_tmp. E.g.:
1367
1368 if (foo() || global > 10)
1369 break;
1370
1371 This will be translated into:
1372
1373 BB3:
1374 loop header...
1375 BB4:
1376 if foo() goto BB6 else goto BB5
1377 BB5:
1378 if global > 10 goto BB6 else goto BB7
1379 BB6:
1380 goto BB7
1381 BB7:
1382 iftmp = (PHI 0(BB5), 1(BB6))
1383 if iftmp == 1 goto BB8 else goto BB3
1384 BB8:
1385 outside of the loop...
1386
1387 The edge BB7->BB8 is loop exit because BB8 is outside of the loop.
1388 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
1389 exits. This function takes BB7->BB8 as input, and finds out the extra loop
1390 exits to predict them using PRED_LOOP_EXIT. */
1391
1392 static void
1393 predict_extra_loop_exits (edge exit_edge)
1394 {
1395 unsigned i;
1396 bool check_value_one;
1397 gimple *lhs_def_stmt;
1398 gphi *phi_stmt;
1399 tree cmp_rhs, cmp_lhs;
1400 gimple *last;
1401 gcond *cmp_stmt;
1402
1403 last = last_stmt (exit_edge->src);
1404 if (!last)
1405 return;
1406 cmp_stmt = dyn_cast <gcond *> (last);
1407 if (!cmp_stmt)
1408 return;
1409
1410 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1411 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1412 if (!TREE_CONSTANT (cmp_rhs)
1413 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1414 return;
1415 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1416 return;
1417
1418 /* If check_value_one is true, only the phi_args with value '1' will lead
1419 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1420 loop exit. */
1421 check_value_one = (((integer_onep (cmp_rhs))
1422 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1423 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
1424
1425 lhs_def_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1426 if (!lhs_def_stmt)
1427 return;
1428
1429 phi_stmt = dyn_cast <gphi *> (lhs_def_stmt);
1430 if (!phi_stmt)
1431 return;
1432
1433 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1434 {
1435 edge e1;
1436 edge_iterator ei;
1437 tree val = gimple_phi_arg_def (phi_stmt, i);
1438 edge e = gimple_phi_arg_edge (phi_stmt, i);
1439
1440 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1441 continue;
1442 if ((check_value_one ^ integer_onep (val)) == 1)
1443 continue;
1444 if (EDGE_COUNT (e->src->succs) != 1)
1445 {
1446 predict_paths_leading_to_edge (e, PRED_LOOP_EXIT, NOT_TAKEN);
1447 continue;
1448 }
1449
1450 FOR_EACH_EDGE (e1, ei, e->src->preds)
1451 predict_paths_leading_to_edge (e1, PRED_LOOP_EXIT, NOT_TAKEN);
1452 }
1453 }
1454
1455 /* Predict edge probabilities by exploiting loop structure. */
1456
1457 static void
1458 predict_loops (void)
1459 {
1460 struct loop *loop;
1461
1462 /* Try to predict out blocks in a loop that are not part of a
1463 natural loop. */
1464 FOR_EACH_LOOP (loop, 0)
1465 {
1466 basic_block bb, *bbs;
1467 unsigned j, n_exits;
1468 vec<edge> exits;
1469 struct tree_niter_desc niter_desc;
1470 edge ex;
1471 struct nb_iter_bound *nb_iter;
1472 enum tree_code loop_bound_code = ERROR_MARK;
1473 tree loop_bound_step = NULL;
1474 tree loop_bound_var = NULL;
1475 tree loop_iv_base = NULL;
1476 gcond *stmt = NULL;
1477
1478 exits = get_loop_exit_edges (loop);
1479 n_exits = exits.length ();
1480 if (!n_exits)
1481 {
1482 exits.release ();
1483 continue;
1484 }
1485
1486 FOR_EACH_VEC_ELT (exits, j, ex)
1487 {
1488 tree niter = NULL;
1489 HOST_WIDE_INT nitercst;
1490 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1491 int probability;
1492 enum br_predictor predictor;
1493
1494 predict_extra_loop_exits (ex);
1495
1496 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1497 niter = niter_desc.niter;
1498 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1499 niter = loop_niter_by_eval (loop, ex);
1500
1501 if (TREE_CODE (niter) == INTEGER_CST)
1502 {
1503 if (tree_fits_uhwi_p (niter)
1504 && max
1505 && compare_tree_int (niter, max - 1) == -1)
1506 nitercst = tree_to_uhwi (niter) + 1;
1507 else
1508 nitercst = max;
1509 predictor = PRED_LOOP_ITERATIONS;
1510 }
1511 /* If we have just one exit and we can derive some information about
1512 the number of iterations of the loop from the statements inside
1513 the loop, use it to predict this exit. */
1514 else if (n_exits == 1)
1515 {
1516 nitercst = estimated_stmt_executions_int (loop);
1517 if (nitercst < 0)
1518 continue;
1519 if (nitercst > max)
1520 nitercst = max;
1521
1522 predictor = PRED_LOOP_ITERATIONS_GUESSED;
1523 }
1524 else
1525 continue;
1526
1527 /* If the prediction for the number of iterations is zero, do not
1528 predict the exit edges. */
1529 if (nitercst == 0)
1530 continue;
1531
1532 probability = ((REG_BR_PROB_BASE + nitercst / 2) / nitercst);
1533 predict_edge (ex, predictor, probability);
1534 }
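      /* E.g. an exit of a loop predicted to iterate 10 times gets
	 probability (REG_BR_PROB_BASE + 5) / 10 from the computation
	 above, roughly 10% of REG_BR_PROB_BASE - the exit is expected
	 to be taken on about one in ten executions of its test.  */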
1535 exits.release ();
1536
1537 /* Find information about loop bound variables. */
1538 for (nb_iter = loop->bounds; nb_iter;
1539 nb_iter = nb_iter->next)
1540 if (nb_iter->stmt
1541 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
1542 {
1543 stmt = as_a <gcond *> (nb_iter->stmt);
1544 break;
1545 }
1546 if (!stmt && last_stmt (loop->header)
1547 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
1548 stmt = as_a <gcond *> (last_stmt (loop->header));
1549 if (stmt)
1550 is_comparison_with_loop_invariant_p (stmt, loop,
1551 &loop_bound_var,
1552 &loop_bound_code,
1553 &loop_bound_step,
1554 &loop_iv_base);
1555
1556 bbs = get_loop_body (loop);
1557
1558 for (j = 0; j < loop->num_nodes; j++)
1559 {
1560 int header_found = 0;
1561 edge e;
1562 edge_iterator ei;
1563
1564 bb = bbs[j];
1565
1566 /* Bypass loop heuristics on continue statement. These
1567 statements construct loops via "non-loop" constructs
1568 in the source language and are better handled
1569 separately. */
1570 if (predicted_by_p (bb, PRED_CONTINUE))
1571 continue;
1572
1573 /* Loop branch heuristics - predict an edge back to a
1574 loop's head as taken. */
1575 if (bb == loop->latch)
1576 {
1577 e = find_edge (loop->latch, loop->header);
1578 if (e)
1579 {
1580 header_found = 1;
1581 predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN);
1582 }
1583 }
1584
1585 /* Loop exit heuristics - predict as not taken an edge exiting the
1586 loop if the conditional has no successor in the loop header. */
1587 if (!header_found
1588 /* If we already used more reliable loop exit predictors, do not
1589 bother with PRED_LOOP_EXIT. */
1590 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1591 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS))
1592 {
1593 /* For a loop with many exits we don't want to predict all exits
1594 with a pretty large probability, because if all exits are
1595 considered in a row, the loop would be predicted to iterate
1596 almost never. The code to divide the probability by the number
1597 of exits is very rough. It should compute the number of exits
1598 taken in each path through the function (not the overall number
1599 of exits that might be a lot higher for loops with wide switch
1600 statements in them) and compute the n-th root.
1601
1602 We limit the minimal probability to 2% to avoid
1603 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction
1604 as this was causing a regression in the perl benchmark containing
1605 such a wide loop. */
1606
1607 int probability = ((REG_BR_PROB_BASE
1608 - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
1609 / n_exits);
1610 if (probability < HITRATE (2))
1611 probability = HITRATE (2);
1612 FOR_EACH_EDGE (e, ei, bb->succs)
1613 if (e->dest->index < NUM_FIXED_BLOCKS
1614 || !flow_bb_inside_loop_p (loop, e->dest))
1615 predict_edge (e, PRED_LOOP_EXIT, probability);
1616 }
1617 if (loop_bound_var)
1618 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
1619 loop_bound_code,
1620 tree_to_shwi (loop_bound_step));
1621 }
1622
1623 /* Free basic blocks from get_loop_body. */
1624 free (bbs);
1625 }
1626 }
1627
1628 /* Attempt to predict probabilities of BB outgoing edges using local
1629 properties. */
1630 static void
1631 bb_estimate_probability_locally (basic_block bb)
1632 {
1633 rtx_insn *last_insn = BB_END (bb);
1634 rtx cond;
1635
1636 if (! can_predict_insn_p (last_insn))
1637 return;
1638 cond = get_condition (last_insn, NULL, false, false);
1639 if (! cond)
1640 return;
1641
1642 /* Try "pointer heuristic."
1643 A comparison ptr == 0 is predicted as false.
1644 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
1645 if (COMPARISON_P (cond)
1646 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
1647 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
1648 {
1649 if (GET_CODE (cond) == EQ)
1650 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
1651 else if (GET_CODE (cond) == NE)
1652 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
1653 }
1654 else
1655
1656 /* Try "opcode heuristic."
1657 EQ tests are usually false and NE tests are usually true. Also,
1658 most quantities are positive, so we can make the appropriate guesses
1659 about signed comparisons against zero. */
1660 switch (GET_CODE (cond))
1661 {
1662 case CONST_INT:
1663 /* Unconditional branch. */
1664 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
1665 cond == const0_rtx ? NOT_TAKEN : TAKEN);
1666 break;
1667
1668 case EQ:
1669 case UNEQ:
1670 /* Floating point comparisons appear to behave in a very
1671 unpredictable way because of the special role of = tests in
1672 FP code. */
1673 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1674 ;
1675 /* Comparisons with 0 are often used for booleans and there is
1676 nothing useful to predict about them. */
1677 else if (XEXP (cond, 1) == const0_rtx
1678 || XEXP (cond, 0) == const0_rtx)
1679 ;
1680 else
1681 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
1682 break;
1683
1684 case NE:
1685 case LTGT:
1686 /* Floating point comparisons appear to behave in a very
1687 unpredictable way because of the special role of = tests in
1688 FP code. */
1689 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1690 ;
1691 /* Comparisons with 0 are often used for booleans and there is
1692 nothing useful to predict about them. */
1693 else if (XEXP (cond, 1) == const0_rtx
1694 || XEXP (cond, 0) == const0_rtx)
1695 ;
1696 else
1697 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
1698 break;
1699
1700 case ORDERED:
1701 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
1702 break;
1703
1704 case UNORDERED:
1705 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
1706 break;
1707
1708 case LE:
1709 case LT:
1710 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1711 || XEXP (cond, 1) == constm1_rtx)
1712 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
1713 break;
1714
1715 case GE:
1716 case GT:
1717 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1718 || XEXP (cond, 1) == constm1_rtx)
1719 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
1720 break;
1721
1722 default:
1723 break;
1724 }
1725 }
1726
1727 /* Set edge->probability for each successor edge of BB. */
1728 void
1729 guess_outgoing_edge_probabilities (basic_block bb)
1730 {
1731 bb_estimate_probability_locally (bb);
1732 combine_predictions_for_insn (BB_END (bb), bb);
1733 }
1734 \f
1735 static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor);
1736
1737 /* Helper function for expr_expected_value. */
1738
1739 static tree
1740 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
1741 tree op1, bitmap visited, enum br_predictor *predictor)
1742 {
1743 gimple *def;
1744
1745 if (predictor)
1746 *predictor = PRED_UNCONDITIONAL;
1747
1748 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1749 {
1750 if (TREE_CONSTANT (op0))
1751 return op0;
1752
1753 if (code != SSA_NAME)
1754 return NULL_TREE;
1755
1756 def = SSA_NAME_DEF_STMT (op0);
1757
1758 /* If we were already here, break the infinite cycle. */
1759 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
1760 return NULL;
1761
1762 if (gimple_code (def) == GIMPLE_PHI)
1763 {
1764 /* All the arguments of the PHI node must have the same expected
1765 constant value. */
1766 int i, n = gimple_phi_num_args (def);
1767 tree val = NULL, new_val;
1768
1769 for (i = 0; i < n; i++)
1770 {
1771 tree arg = PHI_ARG_DEF (def, i);
1772 enum br_predictor predictor2;
1773
1774 /* If this PHI has itself as an argument, we cannot
1775 determine the expected value of this argument. However,
1776 if we can find an expected constant value for the other
1777 PHI args then we can still be sure that this is
1778 likely a constant. So be optimistic and just
1779 continue with the next argument. */
1780 if (arg == PHI_RESULT (def))
1781 continue;
1782
1783 new_val = expr_expected_value (arg, visited, &predictor2);
1784
1785 /* It is difficult to combine value predictors. Simply assume
1786 that the later predictor is weaker and take its prediction. */
1787 if (predictor && *predictor < predictor2)
1788 *predictor = predictor2;
1789 if (!new_val)
1790 return NULL;
1791 if (!val)
1792 val = new_val;
1793 else if (!operand_equal_p (val, new_val, false))
1794 return NULL;
1795 }
1796 return val;
1797 }
1798 if (is_gimple_assign (def))
1799 {
1800 if (gimple_assign_lhs (def) != op0)
1801 return NULL;
1802
1803 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
1804 gimple_assign_rhs1 (def),
1805 gimple_assign_rhs_code (def),
1806 gimple_assign_rhs2 (def),
1807 visited, predictor);
1808 }
1809
1810 if (is_gimple_call (def))
1811 {
1812 tree decl = gimple_call_fndecl (def);
1813 if (!decl)
1814 {
1815 if (gimple_call_internal_p (def)
1816 && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
1817 {
1818 gcc_assert (gimple_call_num_args (def) == 3);
1819 tree val = gimple_call_arg (def, 0);
1820 if (TREE_CONSTANT (val))
1821 return val;
1822 if (predictor)
1823 {
1824 tree val2 = gimple_call_arg (def, 2);
1825 gcc_assert (TREE_CODE (val2) == INTEGER_CST
1826 && tree_fits_uhwi_p (val2)
1827 && tree_to_uhwi (val2) < END_PREDICTORS);
1828 *predictor = (enum br_predictor) tree_to_uhwi (val2);
1829 }
1830 return gimple_call_arg (def, 1);
1831 }
1832 return NULL;
1833 }
1834 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
1835 switch (DECL_FUNCTION_CODE (decl))
1836 {
1837 case BUILT_IN_EXPECT:
1838 {
1839 tree val;
1840 if (gimple_call_num_args (def) != 2)
1841 return NULL;
1842 val = gimple_call_arg (def, 0);
1843 if (TREE_CONSTANT (val))
1844 return val;
1845 if (predictor)
1846 *predictor = PRED_BUILTIN_EXPECT;
1847 return gimple_call_arg (def, 1);
1848 }
1849
1850 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
1851 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
1852 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
1853 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
1854 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
1855 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
1856 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
1857 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
1858 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
1859 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
1860 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
1861 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
1862 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
1863 /* Assume that any given atomic operation has low contention,
1864 and thus the compare-and-swap operation succeeds. */
1865 if (predictor)
1866 *predictor = PRED_COMPARE_AND_SWAP;
1867 return boolean_true_node;
1868 default:
1869 break;
1870 }
1871 }
1872
1873 return NULL;
1874 }
1875
1876 if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
1877 {
1878 tree res;
1879 enum br_predictor predictor2;
1880 op0 = expr_expected_value (op0, visited, predictor);
1881 if (!op0)
1882 return NULL;
1883 op1 = expr_expected_value (op1, visited, &predictor2);
1884 if (predictor && *predictor < predictor2)
1885 *predictor = predictor2;
1886 if (!op1)
1887 return NULL;
1888 res = fold_build2 (code, type, op0, op1);
1889 if (TREE_CONSTANT (res))
1890 return res;
1891 return NULL;
1892 }
1893 if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
1894 {
1895 tree res;
1896 op0 = expr_expected_value (op0, visited, predictor);
1897 if (!op0)
1898 return NULL;
1899 res = fold_build1 (code, type, op0);
1900 if (TREE_CONSTANT (res))
1901 return res;
1902 return NULL;
1903 }
1904 return NULL;
1905 }
1906
1907 /* Return the constant EXPR is likely to have at execution time, or NULL
1908 if unknown. The function is used by the builtin_expect branch predictor,
1909 so the evidence must come from this construct and possible additional
1910 constant folding.
1911
1912 We may want to implement a more involved value guess (such as value range
1913 propagation based prediction), but such tricks shall go to a new implementation. */
1914
1915 static tree
1916 expr_expected_value (tree expr, bitmap visited,
1917 enum br_predictor *predictor)
1918 {
1919 enum tree_code code;
1920 tree op0, op1;
1921
1922 if (TREE_CONSTANT (expr))
1923 {
1924 if (predictor)
1925 *predictor = PRED_UNCONDITIONAL;
1926 return expr;
1927 }
1928
1929 extract_ops_from_tree (expr, &code, &op0, &op1);
1930 return expr_expected_value_1 (TREE_TYPE (expr),
1931 op0, code, op1, visited, predictor);
1932 }
1933 \f
1934 /* Predict using the opcode of the last statement in the basic block. */
1935 static void
1936 tree_predict_by_opcode (basic_block bb)
1937 {
1938 gimple *stmt = last_stmt (bb);
1939 edge then_edge;
1940 tree op0, op1;
1941 tree type;
1942 tree val;
1943 enum tree_code cmp;
1944 bitmap visited;
1945 edge_iterator ei;
1946 enum br_predictor predictor;
1947
1948 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1949 return;
1950 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1951 if (then_edge->flags & EDGE_TRUE_VALUE)
1952 break;
1953 op0 = gimple_cond_lhs (stmt);
1954 op1 = gimple_cond_rhs (stmt);
1955 cmp = gimple_cond_code (stmt);
1956 type = TREE_TYPE (op0);
1957 visited = BITMAP_ALLOC (NULL);
1958 val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, visited,
1959 &predictor);
1960 BITMAP_FREE (visited);
1961 if (val && TREE_CODE (val) == INTEGER_CST)
1962 {
1963 if (predictor == PRED_BUILTIN_EXPECT)
1964 {
1965 int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
1966
1967 gcc_assert (percent >= 0 && percent <= 100);
1968 if (integer_zerop (val))
1969 percent = 100 - percent;
1970 predict_edge (then_edge, PRED_BUILTIN_EXPECT, HITRATE (percent));
1971 }
1972 else
1973 predict_edge (then_edge, predictor,
1974 integer_zerop (val) ? NOT_TAKEN : TAKEN);
1975 }
1976 /* Try "pointer heuristic."
1977 A comparison ptr == 0 is predicted as false.
1978 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
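/* As an illustration (hypothetical user code):

     if (p == NULL)
       handle_error ();

   Here the then branch is predicted as not taken, i.e. the call to
   handle_error () is considered unlikely.  */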
1979 if (POINTER_TYPE_P (type))
1980 {
1981 if (cmp == EQ_EXPR)
1982 predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
1983 else if (cmp == NE_EXPR)
1984 predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
1985 }
1986 else
1987
1988 /* Try "opcode heuristic."
1989 EQ tests are usually false and NE tests are usually true. Also,
1990 most quantities are positive, so we can make the appropriate guesses
1991 about signed comparisons against zero. */
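/* As an illustration (hypothetical user code), "if (n != 100)" is
   predicted as taken by the nonequal heuristic, while "if (count > 0)"
   is predicted as taken because most quantities are positive.  */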
1992 switch (cmp)
1993 {
1994 case EQ_EXPR:
1995 case UNEQ_EXPR:
1996 /* Floating point comparisons appear to behave in a very
1997 unpredictable way because of the special role of equality
1998 tests in FP code. */
1999 if (FLOAT_TYPE_P (type))
2000 ;
2001 /* Comparisons with 0 are often used for booleans and there is
2002 nothing useful to predict about them. */
2003 else if (integer_zerop (op0) || integer_zerop (op1))
2004 ;
2005 else
2006 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
2007 break;
2008
2009 case NE_EXPR:
2010 case LTGT_EXPR:
2011 /* Floating point comparisons appear to behave in a very
2012 unpredictable way because of the special role of equality
2013 tests in FP code. */
2014 if (FLOAT_TYPE_P (type))
2015 ;
2016 /* Comparisons with 0 are often used for booleans and there is
2017 nothing useful to predict about them. */
2018 else if (integer_zerop (op0)
2019 || integer_zerop (op1))
2020 ;
2021 else
2022 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
2023 break;
2024
2025 case ORDERED_EXPR:
2026 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
2027 break;
2028
2029 case UNORDERED_EXPR:
2030 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
2031 break;
2032
2033 case LE_EXPR:
2034 case LT_EXPR:
2035 if (integer_zerop (op1)
2036 || integer_onep (op1)
2037 || integer_all_onesp (op1)
2038 || real_zerop (op1)
2039 || real_onep (op1)
2040 || real_minus_onep (op1))
2041 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
2042 break;
2043
2044 case GE_EXPR:
2045 case GT_EXPR:
2046 if (integer_zerop (op1)
2047 || integer_onep (op1)
2048 || integer_all_onesp (op1)
2049 || real_zerop (op1)
2050 || real_onep (op1)
2051 || real_minus_onep (op1))
2052 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
2053 break;
2054
2055 default:
2056 break;
2057 }
2058 }
2059
2060 /* Try to guess whether the return value VAL represents an error code. */
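/* As an illustration, a return value of -1 yields PRED_NEGATIVE_RETURN
   with *PREDICTION set to NOT_TAKEN, a NULL pointer yields
   PRED_NULL_RETURN, and the constants 0 and 1 yield no prediction
   since they often represent booleans.  */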
2061
2062 static enum br_predictor
2063 return_prediction (tree val, enum prediction *prediction)
2064 {
2065 /* VOID. */
2066 if (!val)
2067 return PRED_NO_PREDICTION;
2068 /* Different heuristics for pointers and scalars. */
2069 if (POINTER_TYPE_P (TREE_TYPE (val)))
2070 {
2071 /* NULL is usually not returned. */
2072 if (integer_zerop (val))
2073 {
2074 *prediction = NOT_TAKEN;
2075 return PRED_NULL_RETURN;
2076 }
2077 }
2078 else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
2079 {
2080 /* Negative return values are often used to indicate
2081 errors. */
2082 if (TREE_CODE (val) == INTEGER_CST
2083 && tree_int_cst_sgn (val) < 0)
2084 {
2085 *prediction = NOT_TAKEN;
2086 return PRED_NEGATIVE_RETURN;
2087 }
2088 /* Constant return values seem to be commonly taken.
2089 Zero/one often represent booleans, so exclude them from the
2090 heuristics. */
2091 if (TREE_CONSTANT (val)
2092 && (!integer_zerop (val) && !integer_onep (val)))
2093 {
2094 *prediction = TAKEN;
2095 return PRED_CONST_RETURN;
2096 }
2097 }
2098 return PRED_NO_PREDICTION;
2099 }
2100
2101 /* Find the basic block with the return expression and look at the possible
2102 return values, trying to apply the RETURN_PREDICTION heuristics. */
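/* As an illustration (hypothetical user code), in

     int f (void)
     {
       if (bar ())
         return -1;
       return 0;
     }

   the merged return value is a PHI of -1 and 0; the path producing -1
   is predicted as not taken via PRED_NEGATIVE_RETURN, while the path
   producing 0 gets no prediction.  */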
2103 static void
2104 apply_return_prediction (void)
2105 {
2106 greturn *return_stmt = NULL;
2107 tree return_val;
2108 edge e;
2109 gphi *phi;
2110 int phi_num_args, i;
2111 enum br_predictor pred;
2112 enum prediction direction;
2113 edge_iterator ei;
2114
2115 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2116 {
2117 gimple *last = last_stmt (e->src);
2118 if (last
2119 && gimple_code (last) == GIMPLE_RETURN)
2120 {
2121 return_stmt = as_a <greturn *> (last);
2122 break;
2123 }
2124 }
2125 if (!e)
2126 return;
2127 return_val = gimple_return_retval (return_stmt);
2128 if (!return_val)
2129 return;
2130 if (TREE_CODE (return_val) != SSA_NAME
2131 || !SSA_NAME_DEF_STMT (return_val)
2132 || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
2133 return;
2134 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (return_val));
2135 phi_num_args = gimple_phi_num_args (phi);
2136 pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
2137
2138 /* Avoid the degenerate case where all return values from the function
2139 belong to the same category (i.e. they are all positive constants),
2140 so we can hardly say anything useful about them. */
2141 for (i = 1; i < phi_num_args; i++)
2142 if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
2143 break;
2144 if (i != phi_num_args)
2145 for (i = 0; i < phi_num_args; i++)
2146 {
2147 pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
2148 if (pred != PRED_NO_PREDICTION)
2149 predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
2150 direction);
2151 }
2152 }
2153
2154 /* Look for basic blocks that contain unlikely events
2155 (such as noreturn calls) and mark all paths leading to execution
2156 of these basic blocks as unlikely. */
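/* As an illustration (hypothetical user code), in

     if (fatal)
       abort ();

   every path leading to the abort () call is predicted as unlikely by
   PRED_NORETURN, provided the function also has a normal return edge.  */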
2157
2158 static void
2159 tree_bb_level_predictions (void)
2160 {
2161 basic_block bb;
2162 bool has_return_edges = false;
2163 edge e;
2164 edge_iterator ei;
2165
2166 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2167 if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
2168 {
2169 has_return_edges = true;
2170 break;
2171 }
2172
2173 apply_return_prediction ();
2174
2175 FOR_EACH_BB_FN (bb, cfun)
2176 {
2177 gimple_stmt_iterator gsi;
2178
2179 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2180 {
2181 gimple *stmt = gsi_stmt (gsi);
2182 tree decl;
2183
2184 if (is_gimple_call (stmt))
2185 {
2186 if ((gimple_call_flags (stmt) & ECF_NORETURN)
2187 && has_return_edges)
2188 predict_paths_leading_to (bb, PRED_NORETURN,
2189 NOT_TAKEN);
2190 decl = gimple_call_fndecl (stmt);
2191 if (decl
2192 && lookup_attribute ("cold",
2193 DECL_ATTRIBUTES (decl)))
2194 predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
2195 NOT_TAKEN);
2196 }
2197 else if (gimple_code (stmt) == GIMPLE_PREDICT)
2198 {
2199 predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
2200 gimple_predict_outcome (stmt));
2201 /* Keep GIMPLE_PREDICT around so early inlining will propagate
2202 hints to callers. */
2203 }
2204 }
2205 }
2206 }
2207
2208 /* Callback for hash_map::traverse, asserts that the pointer map is
2209 empty. */
2210
2211 bool
2212 assert_is_empty (const_basic_block const &, edge_prediction *const &value,
2213 void *)
2214 {
2215 gcc_assert (!value);
2216 return false;
2217 }
2218
2219 /* Predict branch probabilities and estimate profile for basic block BB. */
2220
2221 static void
2222 tree_estimate_probability_bb (basic_block bb)
2223 {
2224 edge e;
2225 edge_iterator ei;
2226 gimple *last;
2227
2228 FOR_EACH_EDGE (e, ei, bb->succs)
2229 {
2230 /* Predict edges to user labels with attributes. */
2231 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2232 {
2233 gimple_stmt_iterator gi;
2234 for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
2235 {
2236 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gi));
2237 tree decl;
2238
2239 if (!label_stmt)
2240 break;
2241 decl = gimple_label_label (label_stmt);
2242 if (DECL_ARTIFICIAL (decl))
2243 continue;
2244
2245 /* Finally, we have a user-defined label. */
2246 if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl)))
2247 predict_edge_def (e, PRED_COLD_LABEL, NOT_TAKEN);
2248 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (decl)))
2249 predict_edge_def (e, PRED_HOT_LABEL, TAKEN);
2250 }
2251 }
2252
2253 /* Predict early returns to be probable, as we've already taken
2254 care of error returns, and other cases are often used for
2255 fast paths through the function.
2256
2257 Since we've already removed the return statements, we are
2258 looking for CFG like:
2259
2260 if (conditional)
2261 {
2262 ..
2263 goto return_block
2264 }
2265 some other blocks
2266 return_block:
2267 return_stmt. */
2268 if (e->dest != bb->next_bb
2269 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2270 && single_succ_p (e->dest)
2271 && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2272 && (last = last_stmt (e->dest)) != NULL
2273 && gimple_code (last) == GIMPLE_RETURN)
2274 {
2275 edge e1;
2276 edge_iterator ei1;
2277
2278 if (single_succ_p (bb))
2279 {
2280 FOR_EACH_EDGE (e1, ei1, bb->preds)
2281 if (!predicted_by_p (e1->src, PRED_NULL_RETURN)
2282 && !predicted_by_p (e1->src, PRED_CONST_RETURN)
2283 && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN))
2284 predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2285 }
2286 else
2287 if (!predicted_by_p (e->src, PRED_NULL_RETURN)
2288 && !predicted_by_p (e->src, PRED_CONST_RETURN)
2289 && !predicted_by_p (e->src, PRED_NEGATIVE_RETURN))
2290 predict_edge_def (e, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2291 }
2292
2293 /* Look for a block we are guarding (i.e. we dominate it,
2294 but it doesn't postdominate us). */
2295 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
2296 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
2297 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
2298 {
2299 gimple_stmt_iterator bi;
2300
2301 /* The call heuristic claims that a guarded function call
2302 is improbable. This is because such calls are often used
2303 to signal exceptional situations such as printing error
2304 messages. */
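/* As an illustration (hypothetical user code), in

     if (err)
       report_failure ();

   the block containing report_failure () is dominated by the
   condition but does not postdominate it, so the edge into it is
   predicted as not taken.  */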
2305 for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
2306 gsi_next (&bi))
2307 {
2308 gimple *stmt = gsi_stmt (bi);
2309 if (is_gimple_call (stmt)
2310 /* Constant and pure calls are hardly used to signal
2311 something exceptional. */
2312 && gimple_has_side_effects (stmt))
2313 {
2314 predict_edge_def (e, PRED_CALL, NOT_TAKEN);
2315 break;
2316 }
2317 }
2318 }
2319 }
2320 tree_predict_by_opcode (bb);
2321 }
2322
2323 /* Predict branch probabilities and estimate profile of the tree CFG.
2324 This function can be called from the loop optimizers to recompute
2325 the profile information. */
2326
2327 void
2328 tree_estimate_probability (void)
2329 {
2330 basic_block bb;
2331
2332 add_noreturn_fake_exit_edges ();
2333 connect_infinite_loops_to_exit ();
2334 /* We use loop_niter_by_eval, which requires that the loops have
2335 preheaders. */
2336 create_preheaders (CP_SIMPLE_PREHEADERS);
2337 calculate_dominance_info (CDI_POST_DOMINATORS);
2338
2339 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
2340 tree_bb_level_predictions ();
2341 record_loop_exits ();
2342
2343 if (number_of_loops (cfun) > 1)
2344 predict_loops ();
2345
2346 FOR_EACH_BB_FN (bb, cfun)
2347 tree_estimate_probability_bb (bb);
2348
2349 FOR_EACH_BB_FN (bb, cfun)
2350 combine_predictions_for_bb (bb);
2351
2352 if (flag_checking)
2353 bb_predictions->traverse<void *, assert_is_empty> (NULL);
2354
2355 delete bb_predictions;
2356 bb_predictions = NULL;
2357
2358 estimate_bb_frequencies (false);
2359 free_dominance_info (CDI_POST_DOMINATORS);
2360 remove_fake_exit_edges ();
2361 }
2362 \f
2363 /* Predict with PRED the edges into CUR whose sources are not postdominated
2364 by BB, and recurse to all postdominators. */
2365
2366 static void
2367 predict_paths_for_bb (basic_block cur, basic_block bb,
2368 enum br_predictor pred,
2369 enum prediction taken,
2370 bitmap visited)
2371 {
2372 edge e;
2373 edge_iterator ei;
2374 basic_block son;
2375
2376 /* We are looking for all edges forming an edge cut induced by
2377 the set of all blocks postdominated by BB. */
2378 FOR_EACH_EDGE (e, ei, cur->preds)
2379 if (e->src->index >= NUM_FIXED_BLOCKS
2380 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
2381 {
2382 edge e2;
2383 edge_iterator ei2;
2384 bool found = false;
2385
2386 /* Ignore fake and EH edges; we predict them as not taken anyway. */
2387 if (e->flags & (EDGE_EH | EDGE_FAKE))
2388 continue;
2389 gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));
2390
2391 /* See if there is an edge from e->src that is not abnormal
2392 and does not lead to BB. */
2393 FOR_EACH_EDGE (e2, ei2, e->src->succs)
2394 if (e2 != e
2395 && !(e2->flags & (EDGE_EH | EDGE_FAKE))
2396 && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb))
2397 {
2398 found = true;
2399 break;
2400 }
2401
2402 /* If there is a non-abnormal path leaving e->src, predict the edge
2403 using the predictor. Otherwise we need to look for paths
2404 leading to e->src.
2405
2406 The second case may lead to an infinite loop when we are predicting
2407 regions that are only reachable by abnormal edges. We simply
2408 prevent visiting a given BB twice. */
2409 if (found)
2410 predict_edge_def (e, pred, taken);
2411 else if (bitmap_set_bit (visited, e->src->index))
2412 predict_paths_for_bb (e->src, e->src, pred, taken, visited);
2413 }
2414 for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
2415 son;
2416 son = next_dom_son (CDI_POST_DOMINATORS, son))
2417 predict_paths_for_bb (son, bb, pred, taken, visited);
2418 }
2419
2420 /* Set branch probabilities on all paths leading to BB according to
2421 predictor PRED and prediction TAKEN. */
2422
2423 static void
2424 predict_paths_leading_to (basic_block bb, enum br_predictor pred,
2425 enum prediction taken)
2426 {
2427 bitmap visited = BITMAP_ALLOC (NULL);
2428 predict_paths_for_bb (bb, bb, pred, taken, visited);
2429 BITMAP_FREE (visited);
2430 }
2431
2432 /* Like predict_paths_leading_to but take edge instead of basic block. */
2433
2434 static void
2435 predict_paths_leading_to_edge (edge e, enum br_predictor pred,
2436 enum prediction taken)
2437 {
2438 bool has_nonloop_edge = false;
2439 edge_iterator ei;
2440 edge e2;
2441
2442 basic_block bb = e->src;
2443 FOR_EACH_EDGE (e2, ei, bb->succs)
2444 if (e2->dest != e->src && e2->dest != e->dest
2445 && !(e2->flags & (EDGE_EH | EDGE_FAKE))
2446 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
2447 {
2448 has_nonloop_edge = true;
2449 break;
2450 }
2451 if (!has_nonloop_edge)
2452 {
2453 bitmap visited = BITMAP_ALLOC (NULL);
2454 predict_paths_for_bb (bb, bb, pred, taken, visited);
2455 BITMAP_FREE (visited);
2456 }
2457 else
2458 predict_edge_def (e, pred, taken);
2459 }
2460 \f
2461 /* This is used to carry information about basic blocks. It is
2462 attached to the AUX field of the standard CFG block. */
2463
2464 struct block_info
2465 {
2466 /* Estimated frequency of execution of basic_block. */
2467 sreal frequency;
2468
2469 /* To keep queue of basic blocks to process. */
2470 basic_block next;
2471
2472 /* Number of predecessors we need to visit first. */
2473 int npredecessors;
2474 };
2475
2476 /* Similar information for edges. */
2477 struct edge_prob_info
2478 {
2479 /* In case the edge is a loopback edge, the probability that the edge will
2480 be reached provided the header is. The estimated number of iterations of
2481 the loop can then be computed as 1 / (1 - back_edge_prob). */
2482 sreal back_edge_prob;
2483 /* True if the edge is a loopback edge in the natural loop. */
2484 unsigned int back_edge:1;
2485 };
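/* As an illustration, a loop whose back edge is taken with probability
   0.9 has back_edge_prob == 0.9, giving an estimated
   1 / (1 - 0.9) == 10 iterations per entry into the loop.  */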
2486
2487 #define BLOCK_INFO(B) ((block_info *) (B)->aux)
2488 #undef EDGE_INFO
2489 #define EDGE_INFO(E) ((edge_prob_info *) (E)->aux)
2490
2491 /* Helper function for estimate_bb_frequencies.
2492 Propagate the frequencies in blocks marked in
2493 TOVISIT, starting in HEAD. */
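/* As an illustration, if a loop header's single back edge carries
   back_edge_prob == 0.75, then when the enclosing region is processed
   the header's frequency becomes the incoming frequency divided by
   (1 - 0.75), i.e. four times the frequency of the loop entry.  */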
2494
2495 static void
2496 propagate_freq (basic_block head, bitmap tovisit)
2497 {
2498 basic_block bb;
2499 basic_block last;
2500 unsigned i;
2501 edge e;
2502 basic_block nextbb;
2503 bitmap_iterator bi;
2504
2505 /* For each basic block we need to visit, count the number of its
2506 predecessors that we need to visit first. */
2507 EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
2508 {
2509 edge_iterator ei;
2510 int count = 0;
2511
2512 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2513
2514 FOR_EACH_EDGE (e, ei, bb->preds)
2515 {
2516 bool visit = bitmap_bit_p (tovisit, e->src->index);
2517
2518 if (visit && !(e->flags & EDGE_DFS_BACK))
2519 count++;
2520 else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
2521 fprintf (dump_file,
2522 "Irreducible region hit, ignoring edge to %i->%i\n",
2523 e->src->index, bb->index);
2524 }
2525 BLOCK_INFO (bb)->npredecessors = count;
2526 /* When the function never returns, we will never process the exit block. */
2527 if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2528 bb->count = bb->frequency = 0;
2529 }
2530
2531 BLOCK_INFO (head)->frequency = 1;
2532 last = head;
2533 for (bb = head; bb; bb = nextbb)
2534 {
2535 edge_iterator ei;
2536 sreal cyclic_probability = 0;
2537 sreal frequency = 0;
2538
2539 nextbb = BLOCK_INFO (bb)->next;
2540 BLOCK_INFO (bb)->next = NULL;
2541
2542 /* Compute frequency of basic block. */
2543 if (bb != head)
2544 {
2545 if (flag_checking)
2546 FOR_EACH_EDGE (e, ei, bb->preds)
2547 gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
2548 || (e->flags & EDGE_DFS_BACK));
2549
2550 FOR_EACH_EDGE (e, ei, bb->preds)
2551 if (EDGE_INFO (e)->back_edge)
2552 {
2553 cyclic_probability += EDGE_INFO (e)->back_edge_prob;
2554 }
2555 else if (!(e->flags & EDGE_DFS_BACK))
2556 {
2557 /* frequency += (e->probability
2558 * BLOCK_INFO (e->src)->frequency /
2559 REG_BR_PROB_BASE); */
2560
2561 sreal tmp = e->probability;
2562 tmp *= BLOCK_INFO (e->src)->frequency;
2563 tmp *= real_inv_br_prob_base;
2564 frequency += tmp;
2565 }
2566
2567 if (cyclic_probability == 0)
2568 {
2569 BLOCK_INFO (bb)->frequency = frequency;
2570 }
2571 else
2572 {
2573 if (cyclic_probability > real_almost_one)
2574 cyclic_probability = real_almost_one;
2575
2576 /* BLOCK_INFO (bb)->frequency = frequency
2577 / (1 - cyclic_probability) */
2578
2579 cyclic_probability = sreal (1) - cyclic_probability;
2580 BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
2581 }
2582 }
2583
2584 bitmap_clear_bit (tovisit, bb->index);
2585
2586 e = find_edge (bb, head);
2587 if (e)
2588 {
2589 /* EDGE_INFO (e)->back_edge_prob
2590 = ((e->probability * BLOCK_INFO (bb)->frequency)
2591 / REG_BR_PROB_BASE); */
2592
2593 sreal tmp = e->probability;
2594 tmp *= BLOCK_INFO (bb)->frequency;
2595 EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
2596 }
2597
2598 /* Propagate to successor blocks. */
2599 FOR_EACH_EDGE (e, ei, bb->succs)
2600 if (!(e->flags & EDGE_DFS_BACK)
2601 && BLOCK_INFO (e->dest)->npredecessors)
2602 {
2603 BLOCK_INFO (e->dest)->npredecessors--;
2604 if (!BLOCK_INFO (e->dest)->npredecessors)
2605 {
2606 if (!nextbb)
2607 nextbb = e->dest;
2608 else
2609 BLOCK_INFO (last)->next = e->dest;
2610
2611 last = e->dest;
2612 }
2613 }
2614 }
2615 }
2616
2617 /* Estimate frequencies in loops at the same nest level. */
2618
2619 static void
2620 estimate_loops_at_level (struct loop *first_loop)
2621 {
2622 struct loop *loop;
2623
2624 for (loop = first_loop; loop; loop = loop->next)
2625 {
2626 edge e;
2627 basic_block *bbs;
2628 unsigned i;
2629 bitmap tovisit = BITMAP_ALLOC (NULL);
2630
2631 estimate_loops_at_level (loop->inner);
2632
2633 /* Find current loop back edge and mark it. */
2634 e = loop_latch_edge (loop);
2635 EDGE_INFO (e)->back_edge = 1;
2636
2637 bbs = get_loop_body (loop);
2638 for (i = 0; i < loop->num_nodes; i++)
2639 bitmap_set_bit (tovisit, bbs[i]->index);
2640 free (bbs);
2641 propagate_freq (loop->header, tovisit);
2642 BITMAP_FREE (tovisit);
2643 }
2644 }
2645
2646 /* Propagate frequencies through the structure of loops. */
2647
2648 static void
2649 estimate_loops (void)
2650 {
2651 bitmap tovisit = BITMAP_ALLOC (NULL);
2652 basic_block bb;
2653
2654 /* Start by estimating the frequencies in the loops. */
2655 if (number_of_loops (cfun) > 1)
2656 estimate_loops_at_level (current_loops->tree_root->inner);
2657
2658 /* Now propagate the frequencies through all the blocks. */
2659 FOR_ALL_BB_FN (bb, cfun)
2660 {
2661 bitmap_set_bit (tovisit, bb->index);
2662 }
2663 propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
2664 BITMAP_FREE (tovisit);
2665 }
2666
2667 /* Drop the profile for NODE to guessed, and update its frequency based on
2668 whether it is expected to be hot given the CALL_COUNT. */
2669
2670 static void
2671 drop_profile (struct cgraph_node *node, gcov_type call_count)
2672 {
2673 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
2674 /* In the case where this was called by another function with a
2675 dropped profile, call_count will be 0. Since there are no
2676 non-zero call counts to this function, we don't know for sure
2677 whether it is hot, and therefore it will be marked normal below. */
2678 bool hot = maybe_hot_count_p (NULL, call_count);
2679
2680 if (dump_file)
2681 fprintf (dump_file,
2682 "Dropping 0 profile for %s/%i. %s based on calls.\n",
2683 node->name (), node->order,
2684 hot ? "Function is hot" : "Function is normal");
2685 /* We only expect to miss profiles for functions that are reached
2686 via non-zero call edges in cases where the function may have
2687 been linked from another module or library (COMDATs and extern
2688 templates). See the comments below for handle_missing_profiles.
2689 Also, only warn in cases where the missing counts exceed the
2690 number of training runs. In certain cases with an execv followed
2691 by a no-return call the profile for the no-return call is not
2692 dumped and there can be a mismatch. */
2693 if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
2694 && call_count > profile_info->runs)
2695 {
2696 if (flag_profile_correction)
2697 {
2698 if (dump_file)
2699 fprintf (dump_file,
2700 "Missing counts for called function %s/%i\n",
2701 node->name (), node->order);
2702 }
2703 else
2704 warning (0, "Missing counts for called function %s/%i",
2705 node->name (), node->order);
2706 }
2707
2708 profile_status_for_fn (fn)
2709 = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
2710 node->frequency
2711 = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
2712 }
2713
2714 /* In the case of COMDAT routines, multiple object files will contain the same
2715 function and the linker will select one for the binary. In that case
2716 all the other copies from the profile-instrumented binary will be missing
2717 profile counts. Look for cases where this happened, due to non-zero
2718 call counts going to 0-count functions, and drop the profile to guessed
2719 so that we can use the estimated probabilities and avoid optimizing only
2720 for size.
2721
2722 The other case where the profile may be missing is when the routine
2723 is not going to be emitted to the object file, e.g. for "extern template"
2724 class methods. Those will be marked DECL_EXTERNAL. Emit a warning in
2725 all other cases of non-zero calls to 0-count functions. */
2726
2727 void
2728 handle_missing_profiles (void)
2729 {
2730 struct cgraph_node *node;
2731 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
2732 vec<struct cgraph_node *> worklist;
2733 worklist.create (64);
2734
2735 /* See if a 0-count function has non-0-count callers. In that case we
2736 lost some profile. Drop its function profile to PROFILE_GUESSED. */
2737 FOR_EACH_DEFINED_FUNCTION (node)
2738 {
2739 struct cgraph_edge *e;
2740 gcov_type call_count = 0;
2741 gcov_type max_tp_first_run = 0;
2742 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
2743
2744 if (node->count)
2745 continue;
2746 for (e = node->callers; e; e = e->next_caller)
2747 {
2748 call_count += e->count;
2749
2750 if (e->caller->tp_first_run > max_tp_first_run)
2751 max_tp_first_run = e->caller->tp_first_run;
2752 }
2753
2754 /* If the time profile is missing, assign the maximum that comes from
2755 the caller functions. */
2756 if (!node->tp_first_run && max_tp_first_run)
2757 node->tp_first_run = max_tp_first_run + 1;
2758
2759 if (call_count
2760 && fn && fn->cfg
2761 && (call_count * unlikely_count_fraction >= profile_info->runs))
2762 {
2763 drop_profile (node, call_count);
2764 worklist.safe_push (node);
2765 }
2766 }
2767
2768 /* Propagate the profile dropping to other 0-count COMDATs that are
2769 potentially called by COMDATs we already dropped the profile on. */
2770 while (worklist.length () > 0)
2771 {
2772 struct cgraph_edge *e;
2773
2774 node = worklist.pop ();
2775 for (e = node->callees; e; e = e->next_callee)
2776 {
2777 struct cgraph_node *callee = e->callee;
2778 struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);
2779
2780 if (callee->count > 0)
2781 continue;
2782 if (DECL_COMDAT (callee->decl) && fn && fn->cfg
2783 && profile_status_for_fn (fn) == PROFILE_READ)
2784 {
2785 drop_profile (callee, 0);
2786 worklist.safe_push (callee);
2787 }
2788 }
2789 }
2790 worklist.release ();
2791 }
2792
2793 /* Convert counts measured by profile driven feedback to frequencies.
2794 Return nonzero iff there was any nonzero execution count. */
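/* As an illustration, with a maximum block count of 1000 and
   BB_FREQ_MAX == 10000, a block executed 250 times is assigned
   frequency (250 * 10000 + 500) / 1000 == 2500.  */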
2795
2796 int
2797 counts_to_freqs (void)
2798 {
2799 gcov_type count_max, true_count_max = 0;
2800 basic_block bb;
2801
2802 /* Don't overwrite the estimated frequencies when the profile for
2803 the function is missing. We may drop this function to PROFILE_GUESSED
2804 later in drop_profile (). */
2805 if (!flag_auto_profile && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
2806 return 0;
2807
2808 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2809 true_count_max = MAX (bb->count, true_count_max);
2810
2811 count_max = MAX (true_count_max, 1);
2812 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2813 bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
2814
2815 return true_count_max;
2816 }
2817
2818 /* Return true if the function is likely to be expensive, so there is no
2819 point in optimizing the prologue or epilogue or doing inlining at the
2820 expense of code size growth. THRESHOLD is the limit on the number of
2821 instructions the function may execute on average and still not be considered expensive. */
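/* As an illustration, with an entry block frequency of 100 and
   THRESHOLD == 50 the limit is 5000; if the block frequencies summed
   over all active insns exceed that, the function executes more than
   50 insns on average per invocation and is considered expensive.  */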
2822
2823 bool
2824 expensive_function_p (int threshold)
2825 {
2826 unsigned int sum = 0;
2827 basic_block bb;
2828 unsigned int limit;
2829
2830 /* We cannot compute accurately for large thresholds due to scaled
2831 frequencies. */
2832 gcc_assert (threshold <= BB_FREQ_MAX);
2833
2834 /* Frequencies are out of range. This either means that the function
2835 contains an internal loop executing more than BB_FREQ_MAX times, or that
2836 profile feedback is available and the function has never been executed. */
2837 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency == 0)
2838 return true;
2839
2840 /* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
2841 limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
2842 FOR_EACH_BB_FN (bb, cfun)
2843 {
2844 rtx_insn *insn;
2845
2846 FOR_BB_INSNS (bb, insn)
2847 if (active_insn_p (insn))
2848 {
2849 sum += bb->frequency;
2850 if (sum > limit)
2851 return true;
2852 }
2853 }
2854
2855 return false;
2856 }
2857
2858 /* Estimate and propagate basic block frequencies using the given branch
2859 probabilities. If FORCE is true, the frequencies are used to estimate
2860 the counts even when there are already non-zero profile counts. */
2861
2862 void
2863 estimate_bb_frequencies (bool force)
2864 {
2865 basic_block bb;
2866 sreal freq_max;
2867
2868 if (force || profile_status_for_fn (cfun) != PROFILE_READ || !counts_to_freqs ())
2869 {
2870 static int real_values_initialized = 0;
2871
2872 if (!real_values_initialized)
2873 {
2874 real_values_initialized = 1;
2875 real_br_prob_base = REG_BR_PROB_BASE;
2876 real_bb_freq_max = BB_FREQ_MAX;
2877 real_one_half = sreal (1, -1);
2878 real_inv_br_prob_base = sreal (1) / real_br_prob_base;
2879 real_almost_one = sreal (1) - real_inv_br_prob_base;
2880 }
2881
2882 mark_dfs_back_edges ();
2883
2884 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
2885 REG_BR_PROB_BASE;
2886
2887 /* Set up block info for each basic block. */
2888 alloc_aux_for_blocks (sizeof (block_info));
2889 alloc_aux_for_edges (sizeof (edge_prob_info));
2890 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2891 {
2892 edge e;
2893 edge_iterator ei;
2894
2895 FOR_EACH_EDGE (e, ei, bb->succs)
2896 {
2897 EDGE_INFO (e)->back_edge_prob = e->probability;
2898 EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
2899 }
2900 }
2901
2902 /* First compute frequencies locally for each loop from innermost
2903 to outermost to examine frequencies for back edges. */
2904 estimate_loops ();
2905
2906 freq_max = 0;
2907 FOR_EACH_BB_FN (bb, cfun)
2908 if (freq_max < BLOCK_INFO (bb)->frequency)
2909 freq_max = BLOCK_INFO (bb)->frequency;
2910
2911 freq_max = real_bb_freq_max / freq_max;
2912 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2913 {
2914 sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
2915 bb->frequency = tmp.to_int ();
2916 }
2917
2918 free_aux_for_blocks ();
2919 free_aux_for_edges ();
2920 }
2921 compute_function_frequency ();
2922 }
2923
2924 /* Decide whether function is hot, cold or unlikely executed. */
2925 void
2926 compute_function_frequency (void)
2927 {
2928 basic_block bb;
2929 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2930
2931 if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
2932 || MAIN_NAME_P (DECL_NAME (current_function_decl)))
2933 node->only_called_at_startup = true;
2934 if (DECL_STATIC_DESTRUCTOR (current_function_decl))
2935 node->only_called_at_exit = true;
2936
2937 if (profile_status_for_fn (cfun) != PROFILE_READ)
2938 {
2939 int flags = flags_from_decl_or_type (current_function_decl);
2940 if (lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
2941 != NULL)
2942 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
2943 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
2944 != NULL)
2945 node->frequency = NODE_FREQUENCY_HOT;
2946 else if (flags & ECF_NORETURN)
2947 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2948 else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
2949 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2950 else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
2951 || DECL_STATIC_DESTRUCTOR (current_function_decl))
2952 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2953 return;
2954 }
2955
2956 /* Only try to drop the function into unlikely executed the first time;
2957 after inlining, roundoff errors may confuse us.
2958 The ipa-profile pass will drop functions called only from unlikely
2959 functions to unlikely, and that is most of what we care about. */
2960 if (!cfun->after_inlining)
2961 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
2962 FOR_EACH_BB_FN (bb, cfun)
2963 {
2964 if (maybe_hot_bb_p (cfun, bb))
2965 {
2966 node->frequency = NODE_FREQUENCY_HOT;
2967 return;
2968 }
2969 if (!probably_never_executed_bb_p (cfun, bb))
2970 node->frequency = NODE_FREQUENCY_NORMAL;
2971 }
2972 }
2973
2974 /* Build PREDICT_EXPR. */
2975 tree
2976 build_predict_expr (enum br_predictor predictor, enum prediction taken)
2977 {
2978 tree t = build1 (PREDICT_EXPR, void_type_node,
2979 build_int_cst (integer_type_node, predictor));
2980 SET_PREDICT_EXPR_OUTCOME (t, taken);
2981 return t;
2982 }
2983
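/* Return the name of PREDICTOR as used in the debugging dumps.  */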
2984 const char *
2985 predictor_name (enum br_predictor predictor)
2986 {
2987 return predictor_info[predictor].name;
2988 }
2989
2990 /* Predict branch probabilities and estimate profile of the tree CFG. */
2991
2992 namespace {
2993
2994 const pass_data pass_data_profile =
2995 {
2996 GIMPLE_PASS, /* type */
2997 "profile_estimate", /* name */
2998 OPTGROUP_NONE, /* optinfo_flags */
2999 TV_BRANCH_PROB, /* tv_id */
3000 PROP_cfg, /* properties_required */
3001 0, /* properties_provided */
3002 0, /* properties_destroyed */
3003 0, /* todo_flags_start */
3004 0, /* todo_flags_finish */
3005 };
3006
3007 class pass_profile : public gimple_opt_pass
3008 {
3009 public:
3010 pass_profile (gcc::context *ctxt)
3011 : gimple_opt_pass (pass_data_profile, ctxt)
3012 {}
3013
3014 /* opt_pass methods: */
3015 virtual bool gate (function *) { return flag_guess_branch_prob; }
3016 virtual unsigned int execute (function *);
3017
3018 }; // class pass_profile
3019
3020 unsigned int
3021 pass_profile::execute (function *fun)
3022 {
3023 unsigned nb_loops;
3024
3025 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
3026 return 0;
3027
3028 loop_optimizer_init (LOOPS_NORMAL);
3029 if (dump_file && (dump_flags & TDF_DETAILS))
3030 flow_loops_dump (dump_file, NULL, 0);
3031
3032 mark_irreducible_loops ();
3033
3034 nb_loops = number_of_loops (fun);
3035 if (nb_loops > 1)
3036 scev_initialize ();
3037
3038 tree_estimate_probability ();
3039
3040 if (nb_loops > 1)
3041 scev_finalize ();
3042
3043 loop_optimizer_finalize ();
3044 if (dump_file && (dump_flags & TDF_DETAILS))
3045 gimple_dump_cfg (dump_file, dump_flags);
3046 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
3047 profile_status_for_fn (fun) = PROFILE_GUESSED;
3048 return 0;
3049 }
3050
3051 } // anon namespace
3052
3053 gimple_opt_pass *
3054 make_pass_profile (gcc::context *ctxt)
3055 {
3056 return new pass_profile (ctxt);
3057 }
3058
3059 namespace {
3060
3061 const pass_data pass_data_strip_predict_hints =
3062 {
3063 GIMPLE_PASS, /* type */
3064 "*strip_predict_hints", /* name */
3065 OPTGROUP_NONE, /* optinfo_flags */
3066 TV_BRANCH_PROB, /* tv_id */
3067 PROP_cfg, /* properties_required */
3068 0, /* properties_provided */
3069 0, /* properties_destroyed */
3070 0, /* todo_flags_start */
3071 0, /* todo_flags_finish */
3072 };
3073
3074 class pass_strip_predict_hints : public gimple_opt_pass
3075 {
3076 public:
3077 pass_strip_predict_hints (gcc::context *ctxt)
3078 : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
3079 {}
3080
3081 /* opt_pass methods: */
3082 opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
3083 virtual unsigned int execute (function *);
3084
3085 }; // class pass_strip_predict_hints
3086
3087 /* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
3088 we no longer need. */
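/* As an illustration (hypothetical user code), "x = __builtin_expect (y, 1);"
   is replaced by the plain assignment "x = y;", while a builtin_expect
   call without an lhs is removed outright.  */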
3089 unsigned int
3090 pass_strip_predict_hints::execute (function *fun)
3091 {
3092 basic_block bb;
3093 gimple *ass_stmt;
3094 tree var;
3095
3096 FOR_EACH_BB_FN (bb, fun)
3097 {
3098 gimple_stmt_iterator bi;
3099 for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
3100 {
3101 gimple *stmt = gsi_stmt (bi);
3102
3103 if (gimple_code (stmt) == GIMPLE_PREDICT)
3104 {
3105 gsi_remove (&bi, true);
3106 continue;
3107 }
3108 else if (is_gimple_call (stmt))
3109 {
3110 tree fndecl = gimple_call_fndecl (stmt);
3111
3112 if ((fndecl
3113 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3114 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
3115 && gimple_call_num_args (stmt) == 2)
3116 || (gimple_call_internal_p (stmt)
3117 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
3118 {
3119 var = gimple_call_lhs (stmt);
3120 if (var)
3121 {
3122 ass_stmt
3123 = gimple_build_assign (var, gimple_call_arg (stmt, 0));
3124 gsi_replace (&bi, ass_stmt, true);
3125 }
3126 else
3127 {
3128 gsi_remove (&bi, true);
3129 continue;
3130 }
3131 }
3132 }
3133 gsi_next (&bi);
3134 }
3135 }
3136 return 0;
3137 }
3138
3139 } // anon namespace
3140
3141 gimple_opt_pass *
3142 make_pass_strip_predict_hints (gcc::context *ctxt)
3143 {
3144 return new pass_strip_predict_hints (ctxt);
3145 }
3146
3147 /* Rebuild function frequencies. Passes are in general expected to
3148 maintain the profile by hand; however, in some cases this is not possible:
3149 for example, when inlining several functions with loops, frequencies might
3150 run out of scale and thus need to be recomputed. */
3151
3152 void
3153 rebuild_frequencies (void)
3154 {
3155 timevar_push (TV_REBUILD_FREQUENCIES);
3156
3157 /* When the max bb count in the function is small, there is a higher
3158 chance that there were truncation errors in the integer scaling
3159 of counts by inlining and other optimizations. This could lead
3160 to incorrect classification of code as being cold when it isn't.
3161 In that case, force the estimation of bb counts/frequencies from the
3162 branch probabilities, rather than computing frequencies from counts,
3163 which may also lead to frequencies incorrectly reduced to 0. There
3164 is less precision in the probabilities, so we only do this for small
3165 max counts. */
3166 gcov_type count_max = 0;
3167 basic_block bb;
3168 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3169 count_max = MAX (bb->count, count_max);
3170
3171 if (profile_status_for_fn (cfun) == PROFILE_GUESSED
3172 || (!flag_auto_profile && profile_status_for_fn (cfun) == PROFILE_READ
3173 && count_max < REG_BR_PROB_BASE/10))
3174 {
3175 loop_optimizer_init (0);
3176 add_noreturn_fake_exit_edges ();
3177 mark_irreducible_loops ();
3178 connect_infinite_loops_to_exit ();
3179 estimate_bb_frequencies (true);
3180 remove_fake_exit_edges ();
3181 loop_optimizer_finalize ();
3182 }
3183 else if (profile_status_for_fn (cfun) == PROFILE_READ)
3184 counts_to_freqs ();
3185 else
3186 gcc_unreachable ();
3187 timevar_pop (TV_REBUILD_FREQUENCIES);
3188 }