/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Cached node/edge growths.  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;

/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  ipa_call_context ctx;
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    ctx.release ();
  }
};

/* At the moment we implement a primitive single-entry LRU cache.  */
class node_context_summary
{
public:
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};

/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectiveness.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;

/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or, usually, overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    /* Keep an existing final error.  */
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}

/* Allocate edge growth caches.  */

void
initialize_growth_caches ()
{
  edge_growth_cache
    = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
  node_context_cache
    = new fast_function_summary<node_context_summary *, va_heap> (symtab);
}

/* Free growth caches.  */

void
free_growth_caches (void)
{
  delete edge_growth_cache;
  delete node_context_cache;
  edge_growth_cache = NULL;
  node_context_cache = NULL;
  if (dump_file)
    fprintf (dump_file, "node context cache: %li hits, %li misses,"
	     " %li initializations\n",
	     node_context_cache_hit, node_context_cache_miss,
	     node_context_cache_clear);
  node_context_cache_hit = 0;
  node_context_cache_miss = 0;
  node_context_cache_clear = 0;
}

/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
  int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;

  if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (cross_module_call_p (edge))
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  auto_vec<tree, 32> known_vals;
  auto_vec<ipa_polymorphic_call_context, 32> known_contexts;
  auto_vec<ipa_agg_value_set, 32> known_aggs;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, es->param);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      sreal chk_time, chk_nonspec_time;
	      int chk_size, chk_min_size;

	      ipa_hints chk_hints;
	      ctx.estimate_size_and_time (&chk_size, &chk_min_size,
					  &chk_time, &chk_nonspec_time,
					  &chk_hints);
	      gcc_assert (chk_size == size && chk_time == time
			  && chk_nonspec_time == nonspec_time
			  && chk_hints == hints);
	    }
	}
      else
	{
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release (true);
	  ctx.estimate_size_and_time (&size, &min_size,
				      &time, &nonspec_time, &hints);
	  e->entry.size = size;
	  e->entry.time = time;
	  e->entry.nonspec_time = nonspec_time;
	  e->entry.hints = hints;
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    ctx.estimate_size_and_time (&size, &min_size,
				&time, &nonspec_time, &hints);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     the probability that the caller will call the callee is low, however,
     since it may hurt optimization of the caller's hot path.  */
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  ctx.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	   = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;
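
      /* A size of zero marks an empty cache entry, so bias the stored
	 size by one (SIZE is known to be non-negative at this point);
	 readers undo this with size - (size > 0).  Hints are stored
	 biased by one for the same reason.  */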
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}

/* Reset cache for NODE.
   This must be done each time NODE's body is modified.  */
void
reset_node_cache (struct cgraph_node *node)
{
  if (node_context_cache)
    node_context_cache->remove (node);
}

/* Remove EDGE from caches once it has been inlined.  */
void
ipa_remove_from_growth_caches (struct cgraph_edge *edge)
{
  if (node_context_cache)
    node_context_cache->remove (edge->callee);
  if (edge_growth_cache)
    edge_growth_cache->remove (edge);
}

/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  auto_vec<tree, 32> known_vals;
  auto_vec<ipa_polymorphic_call_context, 32> known_contexts;
  auto_vec<ipa_agg_value_set, 32> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      gcc_checking_assert (size);
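      /* The cached value is biased by one so that zero can mean "unset";
	 see do_estimate_edge_time.  */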
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (&size, NULL, NULL, NULL, NULL);
  ctx.release ();
  return size;
}


/* Return inline hints for EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  auto_vec<tree, 32> known_vals;
  auto_vec<ipa_polymorphic_call_context, 32> known_contexts;
  auto_vec<ipa_agg_value_set, 32> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache->get (edge)->hints;
      gcc_checking_assert (hints);
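      /* Cached hints are stored biased by one; see do_estimate_edge_time.  */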
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (NULL, NULL, NULL, NULL, &hints);
  ctx.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE, which should be an
   edge to either NODE or to a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  ipa_size_summary *s = ipa_size_summaries->get (node);
  if (!es->predicate || *es->predicate != false)
    {
      int size = s->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return s->size;
}


struct growth_data
{
  /* Node whose growth is being computed.  */
  struct cgraph_node *node;
  /* There is a self-recursive call to NODE.  */
  bool self_recursive;
  /* Some caller is not able to inline NODE.  */
  bool uninlinable;
  /* Size growth accumulated over callers.  */
  int growth;
  /* Give up once GROWTH exceeds this cap (INT_MAX in estimate_growth,
     the offline savings in growth_positive_p).  */
  int cap;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}

/* Return estimated savings for eliminating offline copy of NODE by inlining
   it everywhere.  */

static int
offline_size (struct cgraph_node *node, ipa_size_summary *info)
{
  if (!DECL_EXTERNAL (node->decl))
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	return info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	{
	  int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
	  /* Scale the size by the chance that the copy is unshared;
	     the +50 rounds to the nearest percent.  */
	  return (info->size * (100 - prob) + 50) / 100;
	}
    }
  return 0;
}

/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self-recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    d.growth -= offline_size (node, info);

  return d.growth;
}

/* Worker for growth_positive_p.  Scan callers of NODE (and, recursively,
   of its aliases), accumulating a conservative growth estimate in *GROWTH;
   KNOWN_EDGE is skipped since the caller accounted for it already.  *N
   limits how many edges lacking a cached growth are examined; once it is
   exhausted, return false (result inconclusive).  Return true as soon as
   growth is known to be positive: the offline copy of NODE cannot be
   removed, some edge can never be inlined, or *GROWTH exceeds the OFFLINE
   savings.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Use the callee's minimal size as a lower bound for the growth
	     of this edge.  */
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}


/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed in EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge *known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  int n = 10;
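
  /* Cheap pass first: scan the callers using cached edge growths where
     available and the lower bound MIN_SIZE - call_stmt_size otherwise,
     giving up after 10 uncached edges.  */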
  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;
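
  /* The cheap pass was inconclusive; compute the real growth, aborting
     the walk as soon as it exceeds the offline savings.  */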
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}