/* Profile counter container type.
   Copyright (C) 2017-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "profile-count.h"
#include "options.h"
#include "tree.h"
#include "basic-block.h"
#include "function.h"
#include "cfg.h"
#include "gimple.h"
#include "data-streamer.h"
#include "cgraph.h"
#include "wide-int.h"
#include "sreal.h"

/* Names from profile_quality enum values.  */

const char *profile_quality_names[] =
{
  "uninitialized",
  "guessed_local",
  "guessed_global0",
  "guessed_global0adjusted",
  "guessed",
  "afdo",
  "adjusted",
  "precise"
};

/* Get a string describing QUALITY.  */

const char *
profile_quality_as_string (enum profile_quality quality)
{
  return profile_quality_names[quality];
}

/* Parse VALUE as a profile quality name; store it in *QUALITY and return
   true if it is valid.  */

bool
parse_profile_quality (const char *value, profile_quality *quality)
{
  for (unsigned i = 0; i < ARRAY_SIZE (profile_quality_names); i++)
    if (strcmp (profile_quality_names[i], value) == 0)
      {
        *quality = (profile_quality) i;
        return true;
      }

  return false;
}

/* Display names from profile_quality enum values; the order must match the
   enum and profile_quality_names above.  */

const char *profile_quality_display_names[] =
{
  NULL,
  "estimated locally",
  "estimated locally, globally 0",
  "estimated locally, globally 0 adjusted",
  "guessed",
  "auto FDO",
  "adjusted",
  "precise"
};

/* Dump THIS to F.  */

void
profile_count::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    fprintf (f, "%" PRId64 " (%s)", m_val,
             profile_quality_display_names[m_quality]);
}

/* Dump THIS to stderr.  */

void
profile_count::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */

bool
profile_count::differs_from_p (profile_count other) const
{
  gcc_checking_assert (compatible_p (other));
  if (!initialized_p () || !other.initialized_p ())
    return false;
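  /* Of the two unsigned subtractions below, the one that would go negative
     wraps around to a huge value, so together they test whether the absolute
     difference is below 100.  */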
  if ((uint64_t)m_val - (uint64_t)other.m_val < 100
      || (uint64_t)other.m_val - (uint64_t)m_val < 100)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}

/* Stream THIS from IB.  */

profile_count
profile_count::stream_in (struct lto_input_block *ib)
{
  profile_count ret;
  ret.m_val = streamer_read_gcov_count (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct output_block *ob)
{
  streamer_write_gcov_count (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct lto_output_stream *ob)
{
  streamer_write_gcov_count_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Dump THIS to F.  */

void
profile_probability::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    {
      /* Distinguish between 0.00 as a roundoff error and an actual 0.
         Similarly for 1.  */
      if (m_val == 0)
        fprintf (f, "never");
      else if (m_val == max_probability)
        fprintf (f, "always");
      else
        fprintf (f, "%3.1f%%", (double)m_val * 100 / max_probability);
      if (m_quality == ADJUSTED)
        fprintf (f, " (adjusted)");
      else if (m_quality == AFDO)
        fprintf (f, " (auto FDO)");
      else if (m_quality == GUESSED)
        fprintf (f, " (guessed)");
    }
}

/* Dump THIS to stderr.  */

void
profile_probability::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */

bool
profile_probability::differs_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
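  /* As in profile_count::differs_from_p, the unsigned subtraction that would
     go negative wraps around, so the pair of checks tolerates an absolute
     difference below 0.1% of the probability range.  */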
  if ((uint64_t)m_val - (uint64_t)other.m_val < max_probability / 1000
      || (uint64_t)other.m_val - (uint64_t)m_val < max_probability / 1000)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}

/* Return true if THIS differs significantly from OTHER.  */

bool
profile_probability::differs_lot_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
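  /* Consider the probabilities to differ a lot when they are more than half
     of the probability range apart.  */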
  uint32_t d = m_val > other.m_val ? m_val - other.m_val : other.m_val - m_val;
  return d > max_probability / 2;
}

/* Stream THIS from IB.  */

profile_probability
profile_probability::stream_in (struct lto_input_block *ib)
{
  profile_probability ret;
  ret.m_val = streamer_read_uhwi (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct output_block *ob)
{
  streamer_write_uhwi (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct lto_output_stream *ob)
{
  streamer_write_uhwi_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Compute RES = (A*B + C/2) / C, capping the result, and return false if an
   overflow happened.  */

bool
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
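  /* Use a 128-bit intermediate so that A*B cannot overflow before the
     division by C.  */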
  FIXED_WIDE_INT (128) tmp = a;
  wi::overflow_type overflow;
  tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
  gcc_checking_assert (!overflow);
  if (wi::fits_uhwi_p (tmp))
    {
      *res = tmp.to_uhwi ();
      return true;
    }
  *res = (uint64_t) -1;
  return false;
}

/* Return count as frequency within FUN scaled in range 0 to REG_FREQ_MAX.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_frequency (struct function *fun) const
{
  if (!initialized_p ())
    return BB_FREQ_MAX;
  if (*this == zero ())
    return 0;
  gcc_assert (REG_BR_PROB_BASE == BB_FREQ_MAX
              && fun->cfg->count_max.initialized_p ());
  profile_probability prob = probability_in (fun->cfg->count_max);
  if (!prob.initialized_p ())
    return REG_BR_PROB_BASE;
  return prob.to_reg_br_prob_base ();
}

/* Return count as frequency within FUN scaled in range 0 to CGRAPH_FREQ_MAX,
   where CGRAPH_FREQ_BASE means that the count equals the entry block count.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_cgraph_frequency (profile_count entry_bb_count) const
{
  if (!initialized_p () || !entry_bb_count.initialized_p ())
    return CGRAPH_FREQ_BASE;
  if (*this == zero ())
    return 0;
  gcc_checking_assert (entry_bb_count.initialized_p ());
  uint64_t scale;
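  /* Guard against a zero entry count: the denominator is clamped to 1 and the
     numerator biased by one so the scaled result stays meaningful instead of
     dividing by zero.  */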
  if (!safe_scale_64bit (!entry_bb_count.m_val ? m_val + 1 : m_val,
                         CGRAPH_FREQ_BASE, MAX (1, entry_bb_count.m_val), &scale))
    return CGRAPH_FREQ_MAX;
  return MIN (scale, CGRAPH_FREQ_MAX);
}

/* Return THIS/IN as sreal value.  */

sreal
profile_count::to_sreal_scale (profile_count in, bool *known) const
{
  if (!initialized_p () || !in.initialized_p ())
    {
      if (known)
        *known = false;
      return 1;
    }
  if (known)
    *known = true;
  if (*this == zero ())
    return 0;

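  /* The denominator is zero; avoid dividing by zero and return a large scale
     for a nonzero numerator instead.  */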
  if (!in.m_val)
    {
      if (!m_val)
        return 1;
      return m_val * 4;
    }
  return (sreal)m_val / (sreal)in.m_val;
}

/* We want to scale profile across function boundary from NUM to DEN.
   Take care of the corner case when DEN is zero.  We still want to behave
   sanely here, which means
   - scale to profile_count::zero () if NUM is profile_count::zero ()
   - do not affect anything if NUM == DEN
   - preserve counter value but adjust quality in other cases.  */

void
profile_count::adjust_for_ipa_scaling (profile_count *num,
                                       profile_count *den)
{
  /* Scaling is no-op if NUM and DEN are the same.  */
  if (*num == *den)
    return;
  /* Scaling to zero is always zero.  */
  if (*num == zero ())
    return;
  /* If den is non-zero we are safe.  */
  if (den->force_nonzero () == *den)
    return;
  /* Force both to non-zero so we do not push profiles to 0 when
     both num == 0 and den == 0.  */
  *den = den->force_nonzero ();
  *num = num->force_nonzero ();
}

/* THIS is a count of a bb which is known to be executed IPA times.
   Combine this information into the bb counter.  This means returning IPA
   if it is nonzero, not changing anything if IPA is uninitialized,
   and, if IPA is zero, turning THIS into the corresponding local profile
   with global0 quality.  */

profile_count
profile_count::combine_with_ipa_count (profile_count ipa)
{
  ipa = ipa.ipa ();
  if (ipa.nonzero_p ())
    return ipa;
  if (!ipa.initialized_p () || *this == zero ())
    return *this;
  if (ipa == zero ())
    return this->global0 ();
  return this->global0adjusted ();
}

/* The profiling runtime uses gcov_type, which is usually a 64-bit integer.
   Conversions back and forth are used to read the coverage and get it
   into the internal representation.  */

profile_count
profile_count::from_gcov_type (gcov_type v, profile_quality quality)
{
  profile_count ret;
  gcc_checking_assert (v >= 0);
  if (dump_file && v >= (gcov_type)max_count)
    fprintf (dump_file,
             "Capping gcov count %" PRId64 " to max_count %" PRId64 "\n",
             (int64_t) v, (int64_t) max_count);
  ret.m_val = MIN (v, (gcov_type)max_count);
  ret.m_quality = quality;
  return ret;
}

/* COUNT1 times the event happens with *THIS probability, COUNT2 times it
   happens with OTHER probability.  Return the combined probability,
   weighting *THIS by COUNT1 and OTHER by COUNT2.  */

profile_probability
profile_probability::combine_with_count (profile_count count1,
                                         profile_probability other,
                                         profile_count count2) const
{
  /* If the probabilities are the same, we are done.
     If the counts are nonzero, we can distribute accordingly.  In the
     remaining cases just average the values and hope for the best.  */
  if (*this == other || count1 == count2
      || (count2 == profile_count::zero ()
          && !(count1 == profile_count::zero ())))
    return *this;
  if (count1 == profile_count::zero () && !(count2 == profile_count::zero ()))
    return other;
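  /* Weighted average: the result is
     *THIS * c1/(c1+c2) + OTHER * c2/(c1+c2),
     with the weights computed via probability_in.  */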
  else if (count1.nonzero_p () || count2.nonzero_p ())
    return *this * count1.probability_in (count1 + count2)
           + other * count2.probability_in (count1 + count2);
  else
    return *this * even () + other * even ();
}