/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Cached node/edge growths.  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;

/* The context cache remembers estimated time/size and hints for a given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  ipa_cached_call_context ctx;
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    ctx.release ();
  }
};

/* At the moment we implement a primitive single-entry LRU cache.  */
class node_context_summary
{
public:
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};

/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
    *node_context_cache = NULL;
/* Statistics about the context cache effectiveness.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;

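/* A minimal, self-contained sketch of the single-entry scheme above.
   It is illustrative only: "context", "cache_entry" and "compute" are
   hypothetical stand-ins for ipa_call_context, node_context_cache_entry
   and the real estimation routine, not GCC interfaces.

     struct context { int key; };
     struct cache_entry { bool valid; context ctx; int value; };

     int
     lookup (cache_entry &e, const context &c, int (*compute) (context))
     {
       if (e.valid && e.ctx.key == c.key)
	 return e.value;		// Hit: reuse the cached estimate.
       e.valid = true;			// Miss: evict the old entry.
       e.ctx = c;
       return e.value = compute (c);	// Recompute and cache.
     }

   Exactly one entry is kept per function, so a lookup with a different
   context always evicts the previous one; that is the "single entry LRU"
   behavior used by do_estimate_edge_time below.  */
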
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}

/* Allocate edge growth caches.  */

void
initialize_growth_caches ()
{
  edge_growth_cache
    = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
  node_context_cache
    = new fast_function_summary<node_context_summary *, va_heap> (symtab);
  edge_growth_cache->disable_duplication_hook ();
  node_context_cache->disable_insertion_hook ();
  node_context_cache->disable_duplication_hook ();
}

/* Free growth caches.  */

void
free_growth_caches (void)
{
  delete edge_growth_cache;
  delete node_context_cache;
  edge_growth_cache = NULL;
  node_context_cache = NULL;
  if (dump_file)
    fprintf (dump_file, "node context cache: %li hits, %li misses,"
	     " %li initializations\n",
	     node_context_cache_hit, node_context_cache_miss,
	     node_context_cache_clear);
  node_context_cache_hit = 0;
  node_context_cache_miss = 0;
  node_context_cache_clear = 0;
}

/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
  int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;

  if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (cross_module_call_p (edge))
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that, however,
     when the probability that the caller will call the callee is low,
     since it may hurt optimization of the caller's hot path.  */
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	  = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}

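/* A minimal sketch (plain functions, not the real entry fields) of the
   encoding used by the edge growth cache above: sizes are stored biased
   away from zero and hints biased by one, so that a zero field always
   means "nothing cached yet":

     int encode_size (int size) { return size + (size >= 0); }
     int decode_size (int enc)  { return enc - (enc > 0); }

   Size 0 is stored as 1 and size -3 stays -3; both round-trip exactly,
   and an encoded size can never be 0.  The matching decodes are the
   "size - (size > 0)" and "hints - 1" in do_estimate_edge_size and
   do_estimate_edge_hints below.  */
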
/* Reset cache for NODE.
   This must be done each time NODE body is modified.  */
void
reset_node_cache (struct cgraph_node *node)
{
  if (node_context_cache)
    node_context_cache->remove (node);
}

/* Remove EDGE from caches once it has been inlined.  */
void
ipa_remove_from_growth_caches (struct cgraph_edge *edge)
{
  if (node_context_cache)
    node_context_cache->remove (edge->callee);
  if (edge_growth_cache)
    edge_growth_cache->remove (edge);
}

/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      gcc_checking_assert (size);
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* The early inliner runs without caching; go ahead and do the dirty
     work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  ctx.estimate_size_and_time (&estimates, false, false);
  return estimates.size;
}


/* Estimate the inline hints for EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      ipa_hints hints = edge_growth_cache->get (edge)->hints;
      gcc_checking_assert (hints);
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* The early inliner runs without caching; go ahead and do the dirty
     work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  ctx.estimate_size_and_time (&estimates, false, true);
  ipa_hints hints = estimates.hints | simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE, which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  ipa_size_summary *s = ipa_size_summaries->get (node);
  if (!es->predicate || *es->predicate != false)
    {
      int size = s->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return s->size;
}


struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
  int cap;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}

/* Return estimated savings for eliminating offline copy of NODE by inlining
   it everywhere.  */

static int
offline_size (struct cgraph_node *node, ipa_size_summary *info)
{
  if (!DECL_EXTERNAL (node->decl))
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	return info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	{
	  int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
	  return (info->size * (100 - prob) + 50) / 100;
	}
    }
  return 0;
}

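/* For example, with a sharing probability of 20 and info->size of 30,
   the COMDAT estimate above is (30 * (100 - 20) + 50) / 100 = 24,
   i.e. 80% of the size rounded to the nearest integer.  */
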
/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self-recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure not to
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    d.growth -= offline_size (node, info);

  return d.growth;
}

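/* For instance, if NODE has size 40 and the summed edge growths are 10,
   a self-recursive NODE reports growth 40 (the clamp above), while a
   non-recursive, fully inlinable NODE whose offline copy saves 35
   reports 10 - 35 = -25: inlining it everywhere shrinks the program.  */
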
/* Worker for growth_positive_p.  Accumulate into *GROWTH a conservative
   lower bound for the growth caused by NODE's callers and return true
   once the bound exceeds OFFLINE (or inlining is impossible); examine at
   most *N edges with no cached growth before giving up.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}


/* Decide if growth of NODE is positive.  This is cheaper than calculating
   the actual growth.  If the edge growth of KNOWN_EDGE is known,
   it is passed in EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
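
/* A sketch of the bounding strategy above, with hypothetical numbers:
   if eliminating NODE's offline copy saves 100 units and every scanned
   caller contributes a lower bound of 30, then after four callers
   min_growth reaches 120 > 100 and growth_positive_p returns true
   without computing any exact edge growth.  Only when these cheap
   bounds stay inconclusive does it fall back to the exact
   do_estimate_growth_1 walk, capped at the offline savings.  */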