/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Cached node/edge growths.  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;

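/* Cached sizes and hints are stored in biased form: do_estimate_edge_time
   below records SIZE + 1 and HINTS + 1, so that a zero field always means
   "nothing cached yet"; do_estimate_edge_size and do_estimate_edge_hints
   subtract the bias back out before returning.  */
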
/* The context cache remembers estimated time/size and hints for a given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  ipa_call_context ctx;
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    ctx.release ();
  }
};

/* At the moment we implement a primitive single-entry LRU cache.  */
class node_context_summary
{
public:
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};

/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectiveness.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;

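/* Since each node caches only a single context, a lookup with a different
   context simply overwrites the previous entry; the miss counter below
   tracks such replacements, while the clear counter tracks first-time
   initializations (see do_estimate_edge_time).  */
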
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}

/* Allocate edge growth caches.  */

void
initialize_growth_caches ()
{
  edge_growth_cache
    = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
  node_context_cache
    = new fast_function_summary<node_context_summary *, va_heap> (symtab);
}

/* Free growth caches.  */

void
free_growth_caches (void)
{
  delete edge_growth_cache;
  delete node_context_cache;
  edge_growth_cache = NULL;
  node_context_cache = NULL;
  if (dump_file)
    fprintf (dump_file, "node context cache: %li hits, %li misses,"
	     " %li initializations\n",
	     node_context_cache_hit, node_context_cache_miss,
	     node_context_cache_clear);
  node_context_cache_hit = 0;
  node_context_cache_miss = 0;
  node_context_cache_clear = 0;
}

/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
  int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;

  /* Caller and callee sit in the same strongly connected component of the
     callgraph, yet the edge itself is not directly recursive.  */
  if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  /* The call crosses LTO translation-unit boundaries.  */
  if (callee->lto_file_data && edge->caller->lto_file_data
      && edge->caller->lto_file_data != callee->lto_file_data
      && !callee->merged_comdat && !callee->icf_merged)
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, es->param);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	}
      else
	{
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release (true);
	  ctx.estimate_size_and_time (&size, &min_size,
				      &time, &nonspec_time, &hints);
	  e->entry.size = size;
	  e->entry.time = time;
	  e->entry.nonspec_time = nonspec_time;
	  e->entry.hints = hints;
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    ctx.estimate_size_and_time (&size, &min_size,
				&time, &nonspec_time, &hints);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  ctx.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	   = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Apply the +1 bias described above so a zero field marks an
	 uninitialized entry.  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  return time;
}

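/* The fast path lives in the estimate_edge_time wrapper (ipa-inline.h),
   which under this scheme reduces to something like:

     if (edge_growth_cache != NULL
	 && (entry = edge_growth_cache->get (edge)) != NULL
	 && entry->size != 0)
       return entry->time;
     return do_estimate_edge_time (edge);

   (a sketch only; see ipa-inline.h for the exact code).  */
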
/* Reset cache for NODE.
   This must be done each time NODE body is modified.  */
void
reset_node_cache (struct cgraph_node *node)
{
  if (node_context_cache)
    node_context_cache->remove (node);
}

/* Remove EDGE from caches once it was inlined.  */
void
ipa_remove_from_growth_caches (struct cgraph_edge *edge)
{
  if (node_context_cache)
    node_context_cache->remove (edge->callee);
  if (edge_growth_cache)
    edge_growth_cache->remove (edge);
}

/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      gcc_checking_assert (size);
      /* Undo the +1 bias of the cached value.  */
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (&size, NULL, NULL, NULL, NULL);
  ctx.release ();
  return size;
}

/* Estimate the hints for the caller when inlining EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache->get (edge)->hints;
      gcc_checking_assert (hints);
      /* Undo the +1 bias of the cached value.  */
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (NULL, NULL, NULL, NULL, &hints);
  ctx.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  ipa_size_summary *s = ipa_size_summaries->get (node);
  /* A false predicate means the call never executes, so inlining it
     cannot change the size.  */
  if (!es->predicate || *es->predicate != false)
    {
      int size = s->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return s->size;
}

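/* estimate_edge_growth (see ipa-inline.h) is roughly the estimated size of
   the callee body specialized for this call site minus the size of the call
   statement that inlining removes, so the sum above approximates the size
   of NODE with EDGE inlined.  */
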
struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
  /* Stop the walk once growth exceeds this bound.  */
  int cap;
};

/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}

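/* When do_estimate_growth_1 runs with a finite cap (as it does from
   growth_positive_p), the caller only needs to know whether growth can
   exceed the cap, so the walk stops as soon as the answer is settled:
   on the first uninlinable or self-recursive edge, or once accumulated
   growth passes the cap.  estimate_growth passes INT_MAX and therefore
   always collects the full sum.  */
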
/* Return estimated savings for eliminating offline copy of NODE by inlining
   it everywhere.  */

static int
offline_size (struct cgraph_node *node, ipa_size_summary *info)
{
  if (!DECL_EXTERNAL (node->decl))
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	return info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	return (info->size
		* (100 - param_comdat_sharing_probability)
		+ 50) / 100;
    }
  return 0;
}
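
/* For instance, assuming the default --param comdat-sharing-probability
   of 20, a 100-byte COMDAT body is credited with (100 * 80 + 50) / 100
   == 80 bytes of expected savings; the + 50 rounds to nearest.  */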

/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    d.growth -= offline_size (node, info);

  return d.growth;
}

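/* For example, a static function of size 30 with two callers whose edge
   growths are 20 each gives d.growth = 40; subtracting the 30-byte offline
   copy that becomes removable yields an overall unit growth of 10.  */
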
/* Subroutine of growth_positive_p.  Walk the callers of NODE (and,
   recursively, of its aliases), accumulating into *GROWTH a conservative
   estimate of the growth caused by inlining into them; edges without a
   cached size are estimated as MIN_SIZE minus the call statement size,
   up to *N such edges.  Return true if growth is known to exceed OFFLINE
   or the offline copy cannot be removed anyway; return false if the
   estimate stayed below OFFLINE or the *N budget ran out.  KNOWN_EDGE is
   skipped because the caller accounts for it separately.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}

/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Examine at most ten edges whose growth is not cached before falling
     back to the full computation below.  */
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* The quick checks were inconclusive; compute the real growth, stopping
     as soon as it exceeds the offline savings.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}