1 /* Profile counter container type.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #ifndef GCC_PROFILE_COUNT_H
22 #define GCC_PROFILE_COUNT_H
23
24 struct function;
25 struct profile_count;
26 class sreal;
27
28 /* Quality of the profile count. Because gengtype does not support enums
29 inside of classes, this is in global namespace. */
30 enum profile_quality {
31 /* Uninitialized value. */
32 UNINITIALIZED_PROFILE,
33
34 /* Profile is based on static branch prediction heuristics and may
35 or may not match reality. It is local to the function and cannot be
36 compared inter-procedurally. Never used by probabilities (they are always
37 local). */
38 GUESSED_LOCAL,
39
40 /* Profile was read from feedback and was 0; we used local heuristics to
41 guess better. This is the case for functions not run during the train run.
42 Never used by probabilities. */
43 GUESSED_GLOBAL0,
44
45 /* Same as GUESSED_GLOBAL0 but the global count is an adjusted 0. */
46 GUESSED_GLOBAL0_ADJUSTED,
47
48 /* Profile is based on static branch prediction heuristics. It may or may
49 not reflect reality but it can be compared inter-procedurally
50 (for example, we inlined a function without profile feedback into a
51 function with feedback and propagated the counts from there).
52 Never used by probabilities. */
53 GUESSED,
54
55 /* Profile was determined by autofdo. */
56 AFDO,
57
58 /* Profile was originally based on feedback but it was adjusted
59 by code-duplicating optimizations. It may not precisely reflect the
60 particular code path. */
61 ADJUSTED,
62
63 /* Profile was read from profile feedback or determined by an accurate
64 static method. */
65 PRECISE
66 };
67
68 extern const char *profile_quality_as_string (enum profile_quality);
69 extern bool parse_profile_quality (const char *value,
70 profile_quality *quality);
71
72 /* The base value for branch probability notes and edge probabilities. */
73 #define REG_BR_PROB_BASE 10000
74
75 #define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
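/* Editorial worked example (illustrative, not part of the header): RDIV is
   integer division rounded to nearest rather than truncated, e.g.

     RDIV (7, 2)  == (7 + 1) / 2  == 4    (plain 7 / 2 would give 3)
     RDIV (10, 4) == (10 + 2) / 4 == 3

   This rounding is used throughout the fixed-point arithmetic below.  */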
76
77 bool slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res);
78
79 /* Compute *RES = (A*B + C/2)/C, capping the result, and return false if overflow happened. */
80
81 inline bool
82 safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
83 {
84 #if (GCC_VERSION >= 5000)
85 uint64_t tmp;
86 if (!__builtin_mul_overflow (a, b, &tmp)
87 && !__builtin_add_overflow (tmp, c/2, &tmp))
88 {
89 *res = tmp / c;
90 return true;
91 }
92 if (c == 1)
93 {
94 *res = (uint64_t) -1;
95 return false;
96 }
97 #else
98 if (a < ((uint64_t)1 << 31)
99 && b < ((uint64_t)1 << 31)
100 && c < ((uint64_t)1 << 31))
101 {
102 *res = (a * b + (c / 2)) / c;
103 return true;
104 }
105 #endif
106 return slow_safe_scale_64bit (a, b, c, res);
107 }
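/* Editorial sketch (illustrative, not part of GCC): safe_scale_64bit
   computes *RES = (A * B + C / 2) / C without letting the intermediate
   product overflow; on overflow the result is capped and false is returned.
   A caller rescaling a counter by NUM/DEN might look like:

     uint64_t scaled;
     safe_scale_64bit (count, num, den, &scaled);
     // scaled ~= count * num / den, rounded to nearest; capped on overflow

   profile_count::apply_scale and profile_probability::apply_scale below
   follow this pattern and additionally cap to their own maximum values.  */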
108
109 /* Data type to hold probabilities. It implements fixed-point arithmetic
110 with capping, so a probability is always in the range [0,1]; scaling that
111 requires values greater than 1 must be represented otherwise.
112
113 In addition to the actual value, the quality of the profile is tracked and
114 propagated through all operations. The special value UNINITIALIZED_PROFILE
115 is used for probabilities that have not been determined yet (for example
116 because of -fno-guess-branch-probability).
117
118 Typically probabilities are derived from profile feedback (via
119 probability_in_gcov_type), autoFDO, or guessed statically, and then
120 propagated throughout the compilation.
121
122 Named probabilities are available:
123 - never (0 probability)
124 - guessed_never
125 - very_unlikely (1/2000 probability)
126 - unlikely (1/5 probability)
127 - even (1/2 probability)
128 - likely (4/5 probability)
129 - very_likely (1999/2000 probability)
130 - guessed_always
131 - always
132
133 Named probabilities except for never/always are assumed to be statically
134 guessed and thus not necessarily accurate. The difference between never
135 and guessed_never is that the former should be used only when a well
136 behaved program will very likely not execute the "never" path, for
137 example a path leading to an abort () call or into exception handling.
138
139 The always and guessed_always probabilities are symmetric.
140
141 For legacy code we support conversion to/from REG_BR_PROB_BASE based
142 fixed-point integer arithmetic. Once the code is converted to branch
143 probabilities, these conversions will probably go away because they are
144 lossy. */
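/* Editorial sketch (illustrative only, not part of the header): typical ways
   a pass might build and combine probabilities with the class declared
   below.  The variable names are hypothetical.

     // From raw gcov counters, e.g. a branch taken 90 times out of 120.
     profile_probability taken
       = profile_probability::probability_in_gcov_type (90, 120);

     // Statically guessed values and simple algebra.
     profile_probability p = profile_probability::likely ();
     profile_probability not_taken = taken.invert ();   // 1 - taken
     profile_probability both = taken * p;              // capped fixed point

     // Lossy round trip through the legacy REG_BR_PROB_BASE representation.
     int legacy = taken.to_reg_br_prob_base ();
     profile_probability back
       = profile_probability::from_reg_br_prob_base (legacy);  */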
145
146 class GTY((user)) profile_probability
147 {
148 static const int n_bits = 29;
149 /* We can technically use ((uint32_t) 1 << (n_bits - 1)) - 2 but that
150 will lead to harder multiplication sequences. */
151 static const uint32_t max_probability = (uint32_t) 1 << (n_bits - 2);
152 static const uint32_t uninitialized_probability
153 = ((uint32_t) 1 << (n_bits - 1)) - 1;
154
155 uint32_t m_val : 29;
156 enum profile_quality m_quality : 3;
157
158 friend struct profile_count;
159 public:
160 profile_probability (): m_val (uninitialized_probability),
161 m_quality (GUESSED)
162 {}
163
164 profile_probability (uint32_t val, profile_quality quality):
165 m_val (val), m_quality (quality)
166 {}
167
168 /* Named probabilities. */
169 static profile_probability never ()
170 {
171 profile_probability ret;
172 ret.m_val = 0;
173 ret.m_quality = PRECISE;
174 return ret;
175 }
176
177 static profile_probability guessed_never ()
178 {
179 profile_probability ret;
180 ret.m_val = 0;
181 ret.m_quality = GUESSED;
182 return ret;
183 }
184
185 static profile_probability very_unlikely ()
186 {
187 /* Be consistent with PROB_VERY_UNLIKELY in predict.h. */
188 profile_probability r = guessed_always () / 2000;
189 r.m_val--;
190 return r;
191 }
192
193 static profile_probability unlikely ()
194 {
195 /* Be consistent with PROB_UNLIKELY in predict.h. */
196 profile_probability r = guessed_always () / 5;
197 r.m_val--;
198 return r;
199 }
200
201 static profile_probability even ()
202 {
203 return guessed_always () / 2;
204 }
205
206 static profile_probability very_likely ()
207 {
208 return always () - very_unlikely ();
209 }
210
211 static profile_probability likely ()
212 {
213 return always () - unlikely ();
214 }
215
216 static profile_probability guessed_always ()
217 {
218 profile_probability ret;
219 ret.m_val = max_probability;
220 ret.m_quality = GUESSED;
221 return ret;
222 }
223
224 static profile_probability always ()
225 {
226 profile_probability ret;
227 ret.m_val = max_probability;
228 ret.m_quality = PRECISE;
229 return ret;
230 }
231
232 /* Probabilities which have not been initialized. Either because
233 initialization did not happen yet or because the profile is unknown. */
234 static profile_probability uninitialized ()
235 {
236 profile_probability c;
237 c.m_val = uninitialized_probability;
238 c.m_quality = GUESSED;
239 return c;
240 }
241
242 /* Return true if value has been initialized. */
243 bool initialized_p () const
244 {
245 return m_val != uninitialized_probability;
246 }
247
248 /* Return true if value can be trusted. */
249 bool reliable_p () const
250 {
251 return m_quality >= ADJUSTED;
252 }
253
254 /* Conversion from and to REG_BR_PROB_BASE fixed-point integer arithmetic.
255 This is mostly to support legacy code and should go away. */
256 static profile_probability from_reg_br_prob_base (int v)
257 {
258 profile_probability ret;
259 gcc_checking_assert (v >= 0 && v <= REG_BR_PROB_BASE);
260 ret.m_val = RDIV (v * (uint64_t) max_probability, REG_BR_PROB_BASE);
261 ret.m_quality = GUESSED;
262 return ret;
263 }
264
265 /* Return THIS with quality set to ADJUSTED. */
266 profile_probability adjusted () const
267 {
268 profile_probability ret = *this;
269 if (!initialized_p ())
270 return *this;
271 ret.m_quality = ADJUSTED;
272 return ret;
273 }
274
275 int to_reg_br_prob_base () const
276 {
277 gcc_checking_assert (initialized_p ());
278 return RDIV (m_val * (uint64_t) REG_BR_PROB_BASE, max_probability);
279 }
280
281 /* Conversion to and from RTL representation of profile probabilities. */
282 static profile_probability from_reg_br_prob_note (int v)
283 {
284 profile_probability ret;
285 ret.m_val = ((unsigned int)v) / 8;
286 ret.m_quality = (enum profile_quality)(v & 7);
287 return ret;
288 }
289
290 int to_reg_br_prob_note () const
291 {
292 gcc_checking_assert (initialized_p ());
293 int ret = m_val * 8 + m_quality;
294 gcc_checking_assert (from_reg_br_prob_note (ret) == *this);
295 return ret;
296 }
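/* Editorial note (illustrative): the REG_BR_PROB note encoding above packs
   the 29-bit value and the 3-bit quality into a single int as
   VALUE * 8 + QUALITY, i.e. the quality occupies the low 3 bits.

     profile_probability p = profile_probability::always ();  // PRECISE
     int note = p.to_reg_br_prob_note ();  // max_probability * 8 + PRECISE
     profile_probability q
       = profile_probability::from_reg_br_prob_note (note);   // q == p  */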
297
298 /* Return VAL1/VAL2. */
299 static profile_probability probability_in_gcov_type
300 (gcov_type val1, gcov_type val2)
301 {
302 profile_probability ret;
303 gcc_checking_assert (val1 >= 0 && val2 > 0);
304 if (val1 > val2)
305 ret.m_val = max_probability;
306 else
307 {
308 uint64_t tmp;
309 safe_scale_64bit (val1, max_probability, val2, &tmp);
310 gcc_checking_assert (tmp <= max_probability);
311 ret.m_val = tmp;
312 }
313 ret.m_quality = PRECISE;
314 return ret;
315 }
316
317 /* Basic operations. */
318 bool operator== (const profile_probability &other) const
319 {
320 return m_val == other.m_val && m_quality == other.m_quality;
321 }
322
323 profile_probability operator+ (const profile_probability &other) const
324 {
325 if (other == never ())
326 return *this;
327 if (*this == never ())
328 return other;
329 if (!initialized_p () || !other.initialized_p ())
330 return uninitialized ();
331
332 profile_probability ret;
333 ret.m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
334 ret.m_quality = MIN (m_quality, other.m_quality);
335 return ret;
336 }
337
338 profile_probability &operator+= (const profile_probability &other)
339 {
340 if (other == never ())
341 return *this;
342 if (*this == never ())
343 {
344 *this = other;
345 return *this;
346 }
347 if (!initialized_p () || !other.initialized_p ())
348 return *this = uninitialized ();
349 else
350 {
351 m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
352 m_quality = MIN (m_quality, other.m_quality);
353 }
354 return *this;
355 }
356
357 profile_probability operator- (const profile_probability &other) const
358 {
359 if (*this == never ()
360 || other == never ())
361 return *this;
362 if (!initialized_p () || !other.initialized_p ())
363 return uninitialized ();
364 profile_probability ret;
365 ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
366 ret.m_quality = MIN (m_quality, other.m_quality);
367 return ret;
368 }
369
370 profile_probability &operator-= (const profile_probability &other)
371 {
372 if (*this == never ()
373 || other == never ())
374 return *this;
375 if (!initialized_p () || !other.initialized_p ())
376 return *this = uninitialized ();
377 else
378 {
379 m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
380 m_quality = MIN (m_quality, other.m_quality);
381 }
382 return *this;
383 }
384
385 profile_probability operator* (const profile_probability &other) const
386 {
387 if (*this == never ()
388 || other == never ())
389 return never ();
390 if (!initialized_p () || !other.initialized_p ())
391 return uninitialized ();
392 profile_probability ret;
393 ret.m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
394 ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
395 return ret;
396 }
397
398 profile_probability &operator*= (const profile_probability &other)
399 {
400 if (*this == never ()
401 || other == never ())
402 return *this = never ();
403 if (!initialized_p () || !other.initialized_p ())
404 return *this = uninitialized ();
405 else
406 {
407 m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
408 m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
409 }
410 return *this;
411 }
412
413 profile_probability operator/ (const profile_probability &other) const
414 {
415 if (*this == never ())
416 return never ();
417 if (!initialized_p () || !other.initialized_p ())
418 return uninitialized ();
419 profile_probability ret;
420 /* If we get probability above 1, mark it as unreliable and return 1. */
421 if (m_val >= other.m_val)
422 {
423 ret.m_val = max_probability;
424 ret.m_quality = MIN (MIN (m_quality, other.m_quality),
425 GUESSED);
426 return ret;
427 }
428 else if (!m_val)
429 ret.m_val = 0;
430 else
431 {
432 gcc_checking_assert (other.m_val);
433 ret.m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
434 other.m_val),
435 max_probability);
436 }
437 ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
438 return ret;
439 }
440
441 profile_probability &operator/= (const profile_probability &other)
442 {
443 if (*this == never ())
444 return *this = never ();
445 if (!initialized_p () || !other.initialized_p ())
446 return *this = uninitialized ();
447 else
448 {
449 /* If we get probability above 1, mark it as unreliable
450 and return 1. */
451 if (m_val > other.m_val)
452 {
453 m_val = max_probability;
454 m_quality = MIN (MIN (m_quality, other.m_quality),
455 GUESSED);
456 return *this;
457 }
458 else if (!m_val)
459 ;
460 else
461 {
462 gcc_checking_assert (other.m_val);
463 m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
464 other.m_val),
465 max_probability);
466 }
467 m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
468 }
469 return *this;
470 }
471
472 /* Split *THIS (ORIG) probability into 2 probabilities, such that
473 the returned one (FIRST) is *THIS * CPROB and *THIS is
474 adjusted (SECOND) so that FIRST + FIRST.invert () * SECOND
475 == ORIG. This is useful e.g. when splitting a conditional
476 branch like:
477 if (cond)
478 goto lab; // ORIG probability
479 into
480 if (cond1)
481 goto lab; // FIRST = ORIG * CPROB probability
482 if (cond2)
483 goto lab; // SECOND probability
484 such that the overall probability of jumping to lab remains
485 the same. CPROB gives the relative probability between the
486 branches. */
487 profile_probability split (const profile_probability &cprob)
488 {
489 profile_probability ret = *this * cprob;
490 /* The following is equivalent to:
491 *this = cprob.invert () * *this / ret.invert ();
492 Avoid scaling when overall outcome is supposed to be always.
493 Without knowing that one is inverse of other, the result would be
494 conservative. */
495 if (!(*this == always ()))
496 *this = (*this - ret) / ret.invert ();
497 return ret;
498 }
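/* Editorial sketch (illustrative): splitting the probability of
     if (a && b) goto lab;      // probability P
   into two separate conditional jumps.  The names below are hypothetical;
   CPROB is the relative probability that the first condition triggers the
   jump.

     profile_probability p = profile_probability::likely ();
     profile_probability cprob = profile_probability::even ();
     profile_probability first = p.split (cprob);  // FIRST = P * CPROB
     profile_probability second = p;               // p was updated in place
     // FIRST + FIRST.invert () * SECOND reconstructs the original P.  */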
499
500 gcov_type apply (gcov_type val) const
501 {
502 if (*this == uninitialized ())
503 return val / 2;
504 return RDIV (val * m_val, max_probability);
505 }
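/* Editorial examples (illustrative) of apply () on a gcov_type count;
   the count is scaled by m_val / max_probability with rounding:

     profile_probability::always ().apply (100)         // == 100
     profile_probability::even ().apply (100)           // == 50
     profile_probability::uninitialized ().apply (100)  // == 50 (val / 2)  */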
506
507 /* Return 1-*THIS. */
508 profile_probability invert () const
509 {
510 return always() - *this;
511 }
512
513 /* Return THIS with quality dropped to GUESSED. */
514 profile_probability guessed () const
515 {
516 profile_probability ret = *this;
517 ret.m_quality = GUESSED;
518 return ret;
519 }
520
521 /* Return THIS with quality dropped to AFDO. */
522 profile_probability afdo () const
523 {
524 profile_probability ret = *this;
525 ret.m_quality = AFDO;
526 return ret;
527 }
528
529 /* Return *THIS * NUM / DEN. */
530 profile_probability apply_scale (int64_t num, int64_t den) const
531 {
532 if (*this == never ())
533 return *this;
534 if (!initialized_p ())
535 return uninitialized ();
536 profile_probability ret;
537 uint64_t tmp;
538 safe_scale_64bit (m_val, num, den, &tmp);
539 ret.m_val = MIN (tmp, max_probability);
540 ret.m_quality = MIN (m_quality, ADJUSTED);
541 return ret;
542 }
543
544 /* Return true when the probability of the edge is reliable.
545
546 The profile guessing code is good at predicting the branch outcome (i.e.
547 taken/not taken), which is predicted correctly slightly over 75% of the
548 time. It is, however, notoriously poor at predicting the probability
549 itself. In general the profile appears a lot flatter (with probabilities
550 closer to 50%) than reality, so it is a bad idea to use it to drive
551 optimizations such as those disabling dynamic branch prediction for well
552 predictable branches.
553
554 There are two exceptions - edges leading to noreturn calls and edges
555 predicted by the number-of-iterations heuristics are predicted well. This
556 predicate should be able to distinguish those, but at the moment it simply
557 checks for the noreturn heuristic, which is the only one giving a
558 probability over 99% or below 1%. In the future we might want to propagate
559 reliability information across the CFG if it proves useful in multiple places. */
560 bool probably_reliable_p () const
561 {
562 if (m_quality >= ADJUSTED)
563 return true;
564 if (!initialized_p ())
565 return false;
566 return m_val < max_probability / 100
567 || m_val > max_probability - max_probability / 100;
568 }
569
570 /* Return false if profile_probability is bogus. */
571 bool verify () const
572 {
573 gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
574 if (m_val == uninitialized_probability)
575 return m_quality == GUESSED;
576 else if (m_quality < GUESSED)
577 return false;
578 return m_val <= max_probability;
579 }
580
581 /* Comparisons are three-state and conservative. False is returned if
582 the inequality cannot be decided. */
583 bool operator< (const profile_probability &other) const
584 {
585 return initialized_p () && other.initialized_p () && m_val < other.m_val;
586 }
587
588 bool operator> (const profile_probability &other) const
589 {
590 return initialized_p () && other.initialized_p () && m_val > other.m_val;
591 }
592
593 bool operator<= (const profile_probability &other) const
594 {
595 return initialized_p () && other.initialized_p () && m_val <= other.m_val;
596 }
597
598 bool operator>= (const profile_probability &other) const
599 {
600 return initialized_p () && other.initialized_p () && m_val >= other.m_val;
601 }
602
603 profile_probability operator* (int64_t num) const
604 {
605 return apply_scale (num, 1);
606 }
607
608 profile_probability operator*= (int64_t num)
609 {
610 *this = apply_scale (num, 1);
611 return *this;
612 }
613
614 profile_probability operator/ (int64_t den) const
615 {
616 return apply_scale (1, den);
617 }
618
619 profile_probability operator/= (int64_t den)
620 {
621 *this = apply_scale (1, den);
622 return *this;
623 }
624
625 /* Get the value of the probability. */
626 uint32_t value () const { return m_val; }
627
628 /* Get the quality of the probability. */
629 enum profile_quality quality () const { return m_quality; }
630
631 /* Output THIS to F. */
632 void dump (FILE *f) const;
633
634 /* Output THIS to BUFFER. */
635 void dump (char *buffer) const;
636
637 /* Print THIS to stderr. */
638 void debug () const;
639
640 /* Return true if THIS is known to differ significantly from OTHER. */
641 bool differs_from_p (profile_probability other) const;
642
643 /* Return true if the difference is greater than 50%. */
644 bool differs_lot_from_p (profile_probability other) const;
645
646 /* An event happens COUNT1 times with *THIS probability and COUNT2 times
647 with OTHER probability. Return the probability that either *THIS or
648 OTHER happens. */
649 profile_probability combine_with_count (profile_count count1,
650 profile_probability other,
651 profile_count count2) const;
652
653 /* Return probability as sreal. */
654 sreal to_sreal () const;
655 /* LTO streaming support. */
656 static profile_probability stream_in (class lto_input_block *);
657 void stream_out (struct output_block *);
658 void stream_out (struct lto_output_stream *);
659 };
660
661 /* Main data type to hold profile counters in GCC. Profile counts originate
662 either from profile feedback, static profile estimation, or both. We do not
663 perform whole-program profile propagation and thus profile estimation
664 counters are often local to the function, while counters from profile
665 feedback (or special cases of profile estimation) can be used inter-procedurally.
666
667 There are 3 basic types of counters:
668 1) local counters, which are the result of intra-procedural static
669 profile estimation.
670 2) ipa counters, which are the result of profile feedback or a special
671 case of static profile estimation (such as in function main).
672 3) counters which count as 0 inter-procedurally (because the given
673 function was never run in the train run) but hold a local static
674 profile estimate.
675
676 Counters of type 1 and 3 cannot be mixed with counters of a different
677 type within an operation (because the whole function should use one type
678 of counter), with the exception that a global zero mixes in most
679 operations where the outcome is well defined.
680
681 To take a local counter and use it inter-procedurally, use the ipa member
682 function, which strips information irrelevant at the inter-procedural level.
683
684 Counters are 61-bit integers representing the number of executions during
685 the train run or a normalized frequency within the function.
686
687 As the profile is maintained during the compilation, many adjustments are
688 made. Not all transformations can be made precisely, most importantly
689 when code is being duplicated. It may also happen that part of the CFG has
690 known profile counts while other parts do not - for example when LTO
691 optimizing a partly profiled program or when the profile was lost due to COMDAT merging.
692
693 For this reason profile_count tracks more information than
694 just an unsigned integer and it is also prepared for profile mismatches.
695 The API of this data type represents operations that are natural
696 on profile counts - sum, difference, and operations with scales and
697 probabilities. All operations are safe in that counts never become
698 negative and results become uninitialized if any of the parameters is
699 uninitialized.
700
701 All comparisons are three-state, as is the handling of probabilities.
702 Thus a < b is not equivalent to !(a >= b).
703
704 The following pre-defined counts are available:
705
706 profile_count::zero () for code that is known to execute zero times at
707 runtime (this can be detected statically, e.g. for paths leading to
708 abort ());
709 profile_count::one () for code that is known to execute once (such as
710 the main () function);
711 profile_count::uninitialized () for unknown execution count.
712
713 */
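/* Editorial sketch (illustrative only, not part of the header): typical use
   of the profile_count type declared below.  The variable names are
   hypothetical.

     // A count read from feedback (quality defaults to PRECISE).
     profile_count hot = profile_count::from_gcov_type (1000);

     // A locally guessed count; its ipa () variant is uninitialized, so it
     // must not be compared with counts from other functions directly.
     profile_count guess = profile_count::from_gcov_type (50, GUESSED_LOCAL);
     if (guess.ipa ().initialized_p ())
       ;  // only then is cross-function comparison meaningful

     // Scaling after duplicating a block drops the quality to ADJUSTED.
     profile_count half = hot.apply_scale (1, 2);  */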
714
715 struct GTY(()) profile_count
716 {
717 public:
718 /* Use 61 bits to hold basic block counters; the underlying storage
719 should be at least 64 bits wide. Although a counter cannot be negative,
720 the signed gcov_type is used when converting to and from coverage data. */
721
722 static const int n_bits = 61;
723 static const uint64_t max_count = ((uint64_t) 1 << n_bits) - 2;
724 private:
725 static const uint64_t uninitialized_count = ((uint64_t) 1 << n_bits) - 1;
726
727 #if defined (__arm__) && (__GNUC__ >= 6 && __GNUC__ <= 8)
728 /* Work-around for PR88469. A bug in the gcc-6/7/8 PCS layout code
729 incorrectly detects the alignment of a structure where the only
730 64-bit aligned object is a bit-field. We force the alignment of
731 the entire field to mitigate this. */
732 #define UINT64_BIT_FIELD_ALIGN __attribute__ ((aligned(8)))
733 #else
734 #define UINT64_BIT_FIELD_ALIGN
735 #endif
736 uint64_t UINT64_BIT_FIELD_ALIGN m_val : n_bits;
737 #undef UINT64_BIT_FIELD_ALIGN
738 enum profile_quality m_quality : 3;
739 public:
740
741 /* Return true if both values can meaningfully appear in a single function
742 body. All counters in a function are either local or global; otherwise
743 operations between them are not well defined. */
744 bool compatible_p (const profile_count other) const
745 {
746 if (!initialized_p () || !other.initialized_p ())
747 return true;
748 if (*this == zero ()
749 || other == zero ())
750 return true;
751 /* Do not allow nonzero global profile together with local guesses
752 that are globally0. */
753 if (ipa ().nonzero_p ()
754 && !(other.ipa () == other))
755 return false;
756 if (other.ipa ().nonzero_p ()
757 && !(ipa () == *this))
758 return false;
759
760 return ipa_p () == other.ipa_p ();
761 }
762
763 /* Used for counters which are expected to be never executed. */
764 static profile_count zero ()
765 {
766 return from_gcov_type (0);
767 }
768
769 static profile_count adjusted_zero ()
770 {
771 profile_count c;
772 c.m_val = 0;
773 c.m_quality = ADJUSTED;
774 return c;
775 }
776
777 static profile_count guessed_zero ()
778 {
779 profile_count c;
780 c.m_val = 0;
781 c.m_quality = GUESSED;
782 return c;
783 }
784
785 static profile_count one ()
786 {
787 return from_gcov_type (1);
788 }
789
790 /* Value of counters which have not been initialized. Either because
791 initialization did not happen yet or because the profile is unknown. */
792 static profile_count uninitialized ()
793 {
794 profile_count c;
795 c.m_val = uninitialized_count;
796 c.m_quality = GUESSED_LOCAL;
797 return c;
798 }
799
800 /* Conversion to gcov_type is lossy. */
801 gcov_type to_gcov_type () const
802 {
803 gcc_checking_assert (initialized_p ());
804 return m_val;
805 }
806
807 /* Return true if value has been initialized. */
808 bool initialized_p () const
809 {
810 return m_val != uninitialized_count;
811 }
812
813 /* Return true if value can be trusted. */
814 bool reliable_p () const
815 {
816 return m_quality >= ADJUSTED;
817 }
818
819 /* Return true if the value can be used inter-procedurally. */
820 bool ipa_p () const
821 {
822 return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
823 }
824
825 /* Return true if quality of profile is precise. */
826 bool precise_p () const
827 {
828 return m_quality == PRECISE;
829 }
830
831 /* Get the value of the count. */
832 uint64_t value () const { return m_val; }
833
834 /* Get the quality of the count. */
835 enum profile_quality quality () const { return m_quality; }
836
837 /* When merging basic blocks, the two different profile counts are unified.
838 Return true if this can be done without losing information about the
839 profile. The only case we care about here is when the first BB contains
840 something that makes it terminate in a way not visible in the CFG. */
841 bool ok_for_merging (profile_count other) const
842 {
843 if (m_quality < ADJUSTED
844 || other.m_quality < ADJUSTED)
845 return true;
846 return !(other < *this);
847 }
848
849 /* When merging two BBs with different counts, pick common count that looks
850 most representative. */
851 profile_count merge (profile_count other) const
852 {
853 if (*this == other || !other.initialized_p ()
854 || m_quality > other.m_quality)
855 return *this;
856 if (other.m_quality > m_quality
857 || other > *this)
858 return other;
859 return *this;
860 }
861
862 /* Basic operations. */
863 bool operator== (const profile_count &other) const
864 {
865 return m_val == other.m_val && m_quality == other.m_quality;
866 }
867
868 profile_count operator+ (const profile_count &other) const
869 {
870 if (other == zero ())
871 return *this;
872 if (*this == zero ())
873 return other;
874 if (!initialized_p () || !other.initialized_p ())
875 return uninitialized ();
876
877 profile_count ret;
878 gcc_checking_assert (compatible_p (other));
879 ret.m_val = m_val + other.m_val;
880 ret.m_quality = MIN (m_quality, other.m_quality);
881 return ret;
882 }
883
884 profile_count &operator+= (const profile_count &other)
885 {
886 if (other == zero ())
887 return *this;
888 if (*this == zero ())
889 {
890 *this = other;
891 return *this;
892 }
893 if (!initialized_p () || !other.initialized_p ())
894 return *this = uninitialized ();
895 else
896 {
897 gcc_checking_assert (compatible_p (other));
898 m_val += other.m_val;
899 m_quality = MIN (m_quality, other.m_quality);
900 }
901 return *this;
902 }
903
904 profile_count operator- (const profile_count &other) const
905 {
906 if (*this == zero () || other == zero ())
907 return *this;
908 if (!initialized_p () || !other.initialized_p ())
909 return uninitialized ();
910 gcc_checking_assert (compatible_p (other));
911 profile_count ret;
912 ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
913 ret.m_quality = MIN (m_quality, other.m_quality);
914 return ret;
915 }
916
917 profile_count &operator-= (const profile_count &other)
918 {
919 if (*this == zero () || other == zero ())
920 return *this;
921 if (!initialized_p () || !other.initialized_p ())
922 return *this = uninitialized ();
923 else
924 {
925 gcc_checking_assert (compatible_p (other));
926 m_val = m_val >= other.m_val ? m_val - other.m_val: 0;
927 m_quality = MIN (m_quality, other.m_quality);
928 }
929 return *this;
930 }
931
932 /* Return false if profile_count is bogus. */
933 bool verify () const
934 {
935 gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
936 return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
937 }
938
939 /* Comparisons are three-state and conservative. False is returned if
940 the inequality cannot be decided. */
941 bool operator< (const profile_count &other) const
942 {
943 if (!initialized_p () || !other.initialized_p ())
944 return false;
945 if (*this == zero ())
946 return !(other == zero ());
947 if (other == zero ())
948 return false;
949 gcc_checking_assert (compatible_p (other));
950 return m_val < other.m_val;
951 }
952
953 bool operator> (const profile_count &other) const
954 {
955 if (!initialized_p () || !other.initialized_p ())
956 return false;
957 if (*this == zero ())
958 return false;
959 if (other == zero ())
960 return !(*this == zero ());
961 gcc_checking_assert (compatible_p (other));
962 return initialized_p () && other.initialized_p () && m_val > other.m_val;
963 }
964
965 bool operator< (const gcov_type other) const
966 {
967 gcc_checking_assert (ipa_p ());
968 gcc_checking_assert (other >= 0);
969 return ipa ().initialized_p () && ipa ().m_val < (uint64_t) other;
970 }
971
972 bool operator> (const gcov_type other) const
973 {
974 gcc_checking_assert (ipa_p ());
975 gcc_checking_assert (other >= 0);
976 return ipa ().initialized_p () && ipa ().m_val > (uint64_t) other;
977 }
978
979 bool operator<= (const profile_count &other) const
980 {
981 if (!initialized_p () || !other.initialized_p ())
982 return false;
983 if (*this == zero ())
984 return true;
985 if (other == zero ())
986 return (*this == zero ());
987 gcc_checking_assert (compatible_p (other));
988 return m_val <= other.m_val;
989 }
990
991 bool operator>= (const profile_count &other) const
992 {
993 if (!initialized_p () || !other.initialized_p ())
994 return false;
995 if (other == zero ())
996 return true;
997 if (*this == zero ())
998 return (other == zero ());
999 gcc_checking_assert (compatible_p (other));
1000 return m_val >= other.m_val;
1001 }
1002
1003 bool operator<= (const gcov_type other) const
1004 {
1005 gcc_checking_assert (ipa_p ());
1006 gcc_checking_assert (other >= 0);
1007 return ipa ().initialized_p () && ipa ().m_val <= (uint64_t) other;
1008 }
1009
1010 bool operator>= (const gcov_type other) const
1011 {
1012 gcc_checking_assert (ipa_p ());
1013 gcc_checking_assert (other >= 0);
1014 return ipa ().initialized_p () && ipa ().m_val >= (uint64_t) other;
1015 }
1016
1017 profile_count operator* (int64_t num) const
1018 {
1019 return apply_scale (num, 1);
1020 }
1021
1022 profile_count operator*= (int64_t num)
1023 {
1024 *this = apply_scale (num, 1);
1025 return *this;
1026 }
1027
1028 profile_count operator/ (int64_t den) const
1029 {
1030 return apply_scale (1, den);
1031 }
1032
1033 profile_count operator/= (int64_t den)
1034 {
1035 *this = apply_scale (1, den);
1036 return *this;
1037 }
1038
1039 /* Return true when the value is not zero and can be used for scaling.
1040 This is different from *this > 0 because the latter requires the counter
1041 to be IPA. */
1042 bool nonzero_p () const
1043 {
1044 return initialized_p () && m_val != 0;
1045 }
1046
1047 /* Make counter forcibly nonzero. */
1048 profile_count force_nonzero () const
1049 {
1050 if (!initialized_p ())
1051 return *this;
1052 profile_count ret = *this;
1053 if (ret.m_val == 0)
1054 {
1055 ret.m_val = 1;
1056 ret.m_quality = MIN (m_quality, ADJUSTED);
1057 }
1058 return ret;
1059 }
1060
1061 profile_count max (profile_count other) const
1062 {
1063 profile_count val = *this;
1064
1065 /* Always prefer nonzero IPA counts over local counts. */
1066 if (ipa ().nonzero_p () || other.ipa ().nonzero_p ())
1067 {
1068 val = ipa ();
1069 other = other.ipa ();
1070 }
1071 if (!initialized_p ())
1072 return other;
1073 if (!other.initialized_p ())
1074 return *this;
1075 if (*this == zero ())
1076 return other;
1077 if (other == zero ())
1078 return *this;
1079 gcc_checking_assert (compatible_p (other));
1080 if (val.m_val < other.m_val || (m_val == other.m_val
1081 && val.m_quality < other.m_quality))
1082 return other;
1083 return *this;
1084 }
1085
1086 /* PROB is a probability in scale 0...REG_BR_PROB_BASE. Scale counter
1087 accordingly. */
1088 profile_count apply_probability (int prob) const
1089 {
1090 gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
1091 if (m_val == 0)
1092 return *this;
1093 if (!initialized_p ())
1094 return uninitialized ();
1095 profile_count ret;
1096 ret.m_val = RDIV (m_val * prob, REG_BR_PROB_BASE);
1097 ret.m_quality = MIN (m_quality, ADJUSTED);
1098 return ret;
1099 }
1100
1101 /* Scale counter according to PROB. */
1102 profile_count apply_probability (profile_probability prob) const
1103 {
1104 if (*this == zero ())
1105 return *this;
1106 if (prob == profile_probability::never ())
1107 return zero ();
1108 if (!initialized_p ())
1109 return uninitialized ();
1110 profile_count ret;
1111 uint64_t tmp;
1112 safe_scale_64bit (m_val, prob.m_val, profile_probability::max_probability,
1113 &tmp);
1114 ret.m_val = tmp;
1115 ret.m_quality = MIN (m_quality, prob.m_quality);
1116 return ret;
1117 }
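/* Editorial sketch (illustrative): distributing a block count over its
   outgoing edges.  BB_COUNT and TAKEN below are hypothetical names.

     profile_count bb_count = profile_count::from_gcov_type (200);
     profile_probability taken = profile_probability::likely ();
     profile_count edge_count = bb_count.apply_probability (taken);
     profile_count fallthru = bb_count - edge_count;

   The resulting quality is the minimum of the two qualities, so a guessed
   probability also makes the resulting edge count guessed.  */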
1118
1119 /* Return *THIS * NUM / DEN. */
1120 profile_count apply_scale (int64_t num, int64_t den) const
1121 {
1122 if (m_val == 0)
1123 return *this;
1124 if (!initialized_p ())
1125 return uninitialized ();
1126 profile_count ret;
1127 uint64_t tmp;
1128
1129 gcc_checking_assert (num >= 0 && den > 0);
1130 safe_scale_64bit (m_val, num, den, &tmp);
1131 ret.m_val = MIN (tmp, max_count);
1132 ret.m_quality = MIN (m_quality, ADJUSTED);
1133 return ret;
1134 }
1135
1136 profile_count apply_scale (profile_count num, profile_count den) const
1137 {
1138 if (*this == zero ())
1139 return *this;
1140 if (num == zero ())
1141 return num;
1142 if (!initialized_p () || !num.initialized_p () || !den.initialized_p ())
1143 return uninitialized ();
1144 if (num == den)
1145 return *this;
1146 gcc_checking_assert (den.m_val);
1147
1148 profile_count ret;
1149 uint64_t val;
1150 safe_scale_64bit (m_val, num.m_val, den.m_val, &val);
1151 ret.m_val = MIN (val, max_count);
1152 ret.m_quality = MIN (MIN (MIN (m_quality, ADJUSTED),
1153 num.m_quality), den.m_quality);
1154 /* Be sure that ret is not local if num is global.
1155 Also ensure that ret is not global0 when num is global. */
1156 if (num.ipa_p ())
1157 ret.m_quality = MAX (ret.m_quality,
1158 num == num.ipa () ? GUESSED : num.m_quality);
1159 return ret;
1160 }
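/* Editorial sketch (illustrative): the count/count overload above is the
   natural one for scaling a callee profile into a call site, e.g. during
   inlining.  All names below are hypothetical; NUM is the count of the call
   site and DEN the entry count of the callee.

     profile_count scaled
       = callee_bb_count.apply_scale (call_site_count, callee_entry_count);

   When NUM == DEN the count is returned unchanged, so scaling by an
   identical profile does not degrade its quality.  */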
1161
1162 /* Return THIS with quality dropped to GUESSED_LOCAL. */
1163 profile_count guessed_local () const
1164 {
1165 profile_count ret = *this;
1166 if (!initialized_p ())
1167 return *this;
1168 ret.m_quality = GUESSED_LOCAL;
1169 return ret;
1170 }
1171
1172 /* We know that profile is globally 0 but keep local profile if present. */
1173 profile_count global0 () const
1174 {
1175 profile_count ret = *this;
1176 if (!initialized_p ())
1177 return *this;
1178 ret.m_quality = GUESSED_GLOBAL0;
1179 return ret;
1180 }
1181
1182 /* We know that profile is globally adjusted 0 but keep local profile
1183 if present. */
1184 profile_count global0adjusted () const
1185 {
1186 profile_count ret = *this;
1187 if (!initialized_p ())
1188 return *this;
1189 ret.m_quality = GUESSED_GLOBAL0_ADJUSTED;
1190 return ret;
1191 }
1192
1193 /* Return THIS with quality dropped to GUESSED. */
1194 profile_count guessed () const
1195 {
1196 profile_count ret = *this;
1197 ret.m_quality = MIN (ret.m_quality, GUESSED);
1198 return ret;
1199 }
1200
1201 /* Return variant of profile count which is always safe to compare
1202 across functions. */
1203 profile_count ipa () const
1204 {
1205 if (m_quality > GUESSED_GLOBAL0_ADJUSTED)
1206 return *this;
1207 if (m_quality == GUESSED_GLOBAL0)
1208 return zero ();
1209 if (m_quality == GUESSED_GLOBAL0_ADJUSTED)
1210 return adjusted_zero ();
1211 return uninitialized ();
1212 }
1213
1214 /* Return THIS with quality dropped to AFDO. */
1215 profile_count afdo () const
1216 {
1217 profile_count ret = *this;
1218 ret.m_quality = AFDO;
1219 return ret;
1220 }
1221
1222 /* Return probability of event with counter THIS within event with counter
1223 OVERALL. */
1224 profile_probability probability_in (const profile_count overall) const
1225 {
1226 if (*this == zero ()
1227 && !(overall == zero ()))
1228 return profile_probability::never ();
1229 if (!initialized_p () || !overall.initialized_p ()
1230 || !overall.m_val)
1231 return profile_probability::uninitialized ();
1232 if (*this == overall && m_quality == PRECISE)
1233 return profile_probability::always ();
1234 profile_probability ret;
1235 gcc_checking_assert (compatible_p (overall));
1236
1237 if (overall.m_val < m_val)
1238 {
1239 ret.m_val = profile_probability::max_probability;
1240 ret.m_quality = GUESSED;
1241 return ret;
1242 }
1243 else
1244 ret.m_val = RDIV (m_val * profile_probability::max_probability,
1245 overall.m_val);
1246 ret.m_quality = MIN (MAX (MIN (m_quality, overall.m_quality),
1247 GUESSED), ADJUSTED);
1248 return ret;
1249 }
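/* Editorial sketch (illustrative): recovering an edge probability from
   counts, e.g. for a conditional jump.  EDGE_COUNT and BB_COUNT are
   hypothetical names.

     profile_count bb_count = profile_count::from_gcov_type (400);
     profile_count edge_count = profile_count::from_gcov_type (100);
     profile_probability prob = edge_count.probability_in (bb_count);
     // prob is roughly 1/4; its quality is capped at ADJUSTED unless the
     // counts are equal and PRECISE, in which case always () is returned.  */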
1250
1251 int to_frequency (struct function *fun) const;
1252 int to_cgraph_frequency (profile_count entry_bb_count) const;
1253 sreal to_sreal_scale (profile_count in, bool *known = NULL) const;
1254
1255 /* Output THIS to F. */
1256 void dump (FILE *f) const;
1257
1258 /* Output THIS to BUFFER. */
1259 void dump (char *buffer) const;
1260
1261 /* Print THIS to stderr. */
1262 void debug () const;
1263
1264 /* Return true if THIS is known to differ significantly from OTHER. */
1265 bool differs_from_p (profile_count other) const;
1266
1267 /* We want to scale the profile across a function boundary from NUM to DEN.
1268 Take care of the corner case when NUM and DEN are zeros of incompatible
1269 kinds. */
1270 static void adjust_for_ipa_scaling (profile_count *num, profile_count *den);
1271
1272 /* THIS is a count of a bb which is known to be executed IPA times.
1273 Combine this information into the bb counter. This means returning IPA
1274 if it is nonzero, not changing anything if IPA is uninitialized,
1275 and, if IPA is zero, turning THIS into the corresponding local profile
1276 with global0. */
1277 profile_count combine_with_ipa_count (profile_count ipa);
1278
1279 /* Same as combine_with_ipa_count but inside function with count IPA2. */
1280 profile_count combine_with_ipa_count_within
1281 (profile_count ipa, profile_count ipa2);
1282
1283 /* The profiling runtime uses gcov_type, which is usually a 64-bit integer.
1284 Conversions back and forth are used to read the coverage counts and get
1285 them into the internal representation. */
1286 static profile_count from_gcov_type (gcov_type v,
1287 profile_quality quality = PRECISE);
1288
1289 /* LTO streaming support. */
1290 static profile_count stream_in (class lto_input_block *);
1291 void stream_out (struct output_block *);
1292 void stream_out (struct lto_output_stream *);
1293 };
1294 #endif