/* gcc/ipa-inline-analysis.c  */
/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
99c67f24 21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
9ef16211 24#include "backend.h"
99c67f24 25#include "tree.h"
9ef16211 26#include "gimple.h"
7c29e30e 27#include "alloc-pool.h"
28#include "tree-pass.h"
b9a58fc5 29#include "ssa.h"
30#include "tree-streamer.h"
31#include "cgraph.h"
32#include "diagnostic.h"
33#include "fold-const.h"
34#include "print-tree.h"
35#include "tree-inline.h"
36#include "gimple-pretty-print.h"
37#include "params.h"
38#include "cfganal.h"
39#include "gimple-iterator.h"
40#include "tree-cfg.h"
41#include "tree-ssa-loop-niter.h"
42#include "tree-ssa-loop.h"
43#include "symbol-summary.h"
44#include "ipa-prop.h"
45#include "ipa-fnsummary.h"
46#include "ipa-inline.h"
47#include "cfgloop.h"
48#include "tree-scalar-evolution.h"
49#include "ipa-utils.h"
b9a58fc5 50#include "cfgexpand.h"
51#include "gimplify.h"
cbe8bda8 52
b9a58fc5 53/* Cached node/edge growths. */
bc4e1286 54call_summary<edge_growth_cache_entry *> *edge_growth_cache = NULL;
99c67f24 55
b9a58fc5 56/* Give initial reasons why inlining would fail on EDGE. This gets either
57 nullified or usually overwritten by more precise reasons later. */
20da2013 58
b9a58fc5 59void
60initialize_inline_failed (struct cgraph_edge *e)
20da2013 61{
b9a58fc5 62 struct cgraph_node *callee = e->callee;
20da2013 63
b9a58fc5 64 if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
65 && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
66 ;
67 else if (e->indirect_unknown_callee)
68 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
69 else if (!callee->definition)
70 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
71 else if (callee->local.redefined_extern_inline)
72 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
e062e35c 73 else
b9a58fc5 74 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
75 gcc_checking_assert (!e->call_stmt_cannot_inline_p
76 || cgraph_inline_failed_type (e->inline_failed)
77 == CIF_FINAL_ERROR);
a41f2a28 78}
79
80
b9a58fc5 81/* Free growth caches. */
a226c368 82
6331b6fa 83void
b9a58fc5 84free_growth_caches (void)
6331b6fa 85{
bc4e1286 86 delete edge_growth_cache;
87 edge_growth_cache = NULL;
a41f2a28 88}
89
3172b7bf 90/* Return hints derrived from EDGE. */
b9a58fc5 91
3172b7bf 92int
93simple_edge_hints (struct cgraph_edge *edge)
94{
95 int hints = 0;
96 struct cgraph_node *to = (edge->caller->global.inlined_to
e876d531 97 ? edge->caller->global.inlined_to : edge->caller);
57e20c4a 98 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
f445cfda 99 int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
100 int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
101
102 if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
3172b7bf 103 hints |= INLINE_HINT_same_scc;
104
57e20c4a 105 if (callee->lto_file_data && edge->caller->lto_file_data
106 && edge->caller->lto_file_data != callee->lto_file_data
7d38b7bc 107 && !callee->merged_comdat && !callee->icf_merged)
3172b7bf 108 hints |= INLINE_HINT_cross_module;
109
110 return hints;
111}
112
a41f2a28 113/* Estimate the time cost for the caller when inlining EDGE.
114 Only to be called via estimate_edge_time, that handles the
115 caching mechanism.
116
117 When caching, also update the cache entry. Compute both time and
118 size, since we always need both metrics eventually. */
119
2e211986 120sreal
a41f2a28 121do_estimate_edge_time (struct cgraph_edge *edge)
122{
e062e35c 123 sreal time, nonspec_time;
a41f2a28 124 int size;
1297cbcd 125 ipa_hints hints;
20da2013 126 struct cgraph_node *callee;
e062e35c 127 clause_t clause, nonspec_clause;
f1f41a6c 128 vec<tree> known_vals;
245ab191 129 vec<ipa_polymorphic_call_context> known_contexts;
f1f41a6c 130 vec<ipa_agg_jump_function_p> known_aggs;
2e966e2a 131 class ipa_call_summary *es = ipa_call_summaries->get (edge);
db197f90 132 int min_size;
a41f2a28 133
415d1b9a 134 callee = edge->callee->ultimate_alias_target ();
20da2013 135
a41f2a28 136 gcc_checking_assert (edge->inline_failed);
20da2013 137 evaluate_properties_for_edge (edge, true,
e062e35c 138 &clause, &nonspec_clause, &known_vals,
139 &known_contexts, &known_aggs);
140 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
141 known_contexts, known_aggs, &size, &min_size,
142 &time, &nonspec_time, &hints, es->param);
3072aa32 143
144 /* When we have profile feedback, we can quite safely identify hot
145 edges and for those we disable size limits. Don't do that when
146 probability that caller will call the callee is low however, since it
147 may hurt optimization of the caller's hot path. */
151b9ff5 148 if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
149 && (edge->count.ipa ().apply_scale (2, 1)
3072aa32 150 > (edge->caller->global.inlined_to
151b9ff5 151 ? edge->caller->global.inlined_to->count.ipa ()
152 : edge->caller->count.ipa ())))
3072aa32 153 hints |= INLINE_HINT_known_hot;
154
f1f41a6c 155 known_vals.release ();
245ab191 156 known_contexts.release ();
f1f41a6c 157 known_aggs.release ();
3172b7bf 158 gcc_checking_assert (size >= 0);
159 gcc_checking_assert (time >= 0);
a41f2a28 160
161 /* When caching, update the cache entry. */
bc4e1286 162 if (edge_growth_cache != NULL)
a41f2a28 163 {
b53d4f56 164 ipa_fn_summaries->get_create (edge->callee)->min_size = min_size;
bc4e1286 165 edge_growth_cache_entry *entry
166 = edge_growth_cache->get_create (edge);
167 entry->time = time;
168 entry->nonspec_time = nonspec_time;
a41f2a28 169
bc4e1286 170 entry->size = size + (size >= 0);
3172b7bf 171 hints |= simple_edge_hints (edge);
bc4e1286 172 entry->hints = hints + 1;
a41f2a28 173 }
3172b7bf 174 return time;
a41f2a28 175}
176
177
6c2c7775 178/* Return estimated callee growth after inlining EDGE.
a41f2a28 179 Only to be called via estimate_edge_size. */
180
181int
6c2c7775 182do_estimate_edge_size (struct cgraph_edge *edge)
a41f2a28 183{
184 int size;
82626cb0 185 struct cgraph_node *callee;
e062e35c 186 clause_t clause, nonspec_clause;
f1f41a6c 187 vec<tree> known_vals;
245ab191 188 vec<ipa_polymorphic_call_context> known_contexts;
f1f41a6c 189 vec<ipa_agg_jump_function_p> known_aggs;
a41f2a28 190
191 /* When we do caching, use do_estimate_edge_time to populate the entry. */
192
bc4e1286 193 if (edge_growth_cache != NULL)
a41f2a28 194 {
195 do_estimate_edge_time (edge);
bc4e1286 196 size = edge_growth_cache->get (edge)->size;
a41f2a28 197 gcc_checking_assert (size);
198 return size - (size > 0);
199 }
20da2013 200
415d1b9a 201 callee = edge->callee->ultimate_alias_target ();
a41f2a28 202
203 /* Early inliner runs without caching, go ahead and do the dirty work. */
204 gcc_checking_assert (edge->inline_failed);
20da2013 205 evaluate_properties_for_edge (edge, true,
e062e35c 206 &clause, &nonspec_clause,
207 &known_vals, &known_contexts,
a4f60e55 208 &known_aggs);
e062e35c 209 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
210 known_contexts, known_aggs, &size, NULL, NULL,
211 NULL, NULL, vNULL);
f1f41a6c 212 known_vals.release ();
245ab191 213 known_contexts.release ();
f1f41a6c 214 known_aggs.release ();
6c2c7775 215 return size;
99c67f24 216}
217
218
eb7c606e 219/* Estimate the growth of the caller when inlining EDGE.
220 Only to be called via estimate_edge_size. */
221
1297cbcd 222ipa_hints
eb7c606e 223do_estimate_edge_hints (struct cgraph_edge *edge)
224{
1297cbcd 225 ipa_hints hints;
eb7c606e 226 struct cgraph_node *callee;
e062e35c 227 clause_t clause, nonspec_clause;
f1f41a6c 228 vec<tree> known_vals;
245ab191 229 vec<ipa_polymorphic_call_context> known_contexts;
f1f41a6c 230 vec<ipa_agg_jump_function_p> known_aggs;
eb7c606e 231
232 /* When we do caching, use do_estimate_edge_time to populate the entry. */
233
bc4e1286 234 if (edge_growth_cache != NULL)
eb7c606e 235 {
236 do_estimate_edge_time (edge);
bc4e1286 237 hints = edge_growth_cache->get (edge)->hints;
eb7c606e 238 gcc_checking_assert (hints);
239 return hints - 1;
240 }
241
415d1b9a 242 callee = edge->callee->ultimate_alias_target ();
eb7c606e 243
244 /* Early inliner runs without caching, go ahead and do the dirty work. */
245 gcc_checking_assert (edge->inline_failed);
246 evaluate_properties_for_edge (edge, true,
e062e35c 247 &clause, &nonspec_clause,
248 &known_vals, &known_contexts,
eb7c606e 249 &known_aggs);
e062e35c 250 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
251 known_contexts, known_aggs, NULL, NULL,
252 NULL, NULL, &hints, vNULL);
f1f41a6c 253 known_vals.release ();
245ab191 254 known_contexts.release ();
f1f41a6c 255 known_aggs.release ();
3172b7bf 256 hints |= simple_edge_hints (edge);
eb7c606e 257 return hints;
258}
259
99c67f24 260/* Estimate the size of NODE after inlining EDGE which should be an
261 edge to either NODE or a call inlined into NODE. */
262
263int
264estimate_size_after_inlining (struct cgraph_node *node,
c7b2cc59 265 struct cgraph_edge *edge)
99c67f24 266{
2e966e2a 267 class ipa_call_summary *es = ipa_call_summaries->get (edge);
d2c2513e 268 ipa_fn_summary *s = ipa_fn_summaries->get (node);
b3def0e7 269 if (!es->predicate || *es->predicate != false)
905aa3bd 270 {
b53d4f56 271 int size = s->size + estimate_edge_growth (edge);
905aa3bd 272 gcc_assert (size >= 0);
273 return size;
274 }
b53d4f56 275 return s->size;
99c67f24 276}
277
278
/* Accumulator passed to do_estimate_growth_1 while walking all callers
   of a node (and its aliases).  */
struct growth_data
{
  /* Node whose inlining into all callers is being estimated.  */
  struct cgraph_node *node;
  /* Set when a (self-)recursive edge was seen.  */
  bool self_recursive;
  /* Set when some caller cannot inline the node.  */
  bool uninlinable;
  /* Running unit-growth estimate.  */
  int growth;
};
99c67f24 286
82626cb0 287
288/* Worker for do_estimate_growth. Collect growth for all callers. */
289
290static bool
291do_estimate_growth_1 (struct cgraph_node *node, void *data)
99c67f24 292{
99c67f24 293 struct cgraph_edge *e;
82626cb0 294 struct growth_data *d = (struct growth_data *) data;
99c67f24 295
99c67f24 296 for (e = node->callers; e; e = e->next_caller)
297 {
4869c23f 298 gcc_checking_assert (e->inline_failed);
299
5f4e4f36 300 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
301 || !opt_for_fn (e->caller->decl, optimize))
6a044047 302 {
303 d->uninlinable = true;
304 continue;
305 }
306
9dcc8702 307 if (e->recursive_p ())
308 {
309 d->self_recursive = true;
310 continue;
311 }
82626cb0 312 d->growth += estimate_edge_growth (e);
4869c23f 313 }
82626cb0 314 return false;
315}
316
317
318/* Estimate the growth caused by inlining NODE into all callees. */
319
320int
bc42c20c 321estimate_growth (struct cgraph_node *node)
82626cb0 322{
6a044047 323 struct growth_data d = { node, false, false, 0 };
2e966e2a 324 class ipa_fn_summary *info = ipa_fn_summaries->get (node);
82626cb0 325
6a044047 326 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
4869c23f 327
328 /* For self recursive functions the growth estimation really should be
329 infinity. We don't want to return very large values because the growth
330 plays various roles in badness computation fractions. Be sure to not
331 return zero or negative growths. */
82626cb0 332 if (d.self_recursive)
333 d.growth = d.growth < info->size ? info->size : d.growth;
6a044047 334 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
3172b7bf 335 ;
4869c23f 336 else
337 {
415d1b9a 338 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
82626cb0 339 d.growth -= info->size;
fb3c587e 340 /* COMDAT functions are very often not shared across multiple units
e876d531 341 since they come from various template instantiations.
342 Take this into account. */
aca3df3b 343 else if (DECL_COMDAT (node->decl)
415d1b9a 344 && node->can_remove_if_no_direct_calls_p ())
82626cb0 345 d.growth -= (info->size
fb3c587e 346 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
347 + 50) / 100;
99c67f24 348 }
99c67f24 349
82626cb0 350 return d.growth;
99c67f24 351}
352
6a044047 353/* Verify if there are fewer than MAX_CALLERS. */
354
355static bool
356check_callers (cgraph_node *node, int *max_callers)
357{
358 ipa_ref *ref;
359
b12b920e 360 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
361 return true;
362
6a044047 363 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
364 {
365 (*max_callers)--;
366 if (!*max_callers
367 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
368 return true;
369 }
370
371 FOR_EACH_ALIAS (node, ref)
372 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
373 return true;
374
375 return false;
376}
377
c7b2cc59 378
db197f90 379/* Make cheap estimation if growth of NODE is likely positive knowing
380 EDGE_GROWTH of one particular edge.
381 We assume that most of other edges will have similar growth
382 and skip computation if there are too many callers. */
383
384bool
6a044047 385growth_likely_positive (struct cgraph_node *node,
386 int edge_growth)
db197f90 387{
388 int max_callers;
db197f90 389 struct cgraph_edge *e;
390 gcc_checking_assert (edge_growth > 0);
391
b12b920e 392 /* First quickly check if NODE is removable at all. */
e88fecaf 393 if (DECL_EXTERNAL (node->decl))
394 return true;
b12b920e 395 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
396 || node->address_taken)
db197f90 397 return true;
b12b920e 398
d2c2513e 399 max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;
db197f90 400
401 for (e = node->callers; e; e = e->next_caller)
402 {
403 max_callers--;
6a044047 404 if (!max_callers
405 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
db197f90 406 return true;
407 }
6a044047 408
409 ipa_ref *ref;
410 FOR_EACH_ALIAS (node, ref)
411 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
412 return true;
413
b12b920e 414 /* Unlike for functions called once, we play unsafe with
415 COMDATs. We can allow that since we know functions
416 in consideration are small (and thus risk is small) and
417 moreover grow estimates already accounts that COMDAT
418 functions may or may not disappear when eliminated from
419 current unit. With good probability making aggressive
420 choice in all units is going to make overall program
421 smaller. */
422 if (DECL_COMDAT (node->decl))
423 {
424 if (!node->can_remove_if_no_direct_calls_p ())
425 return true;
426 }
427 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
428 return true;
429
db197f90 430 return estimate_growth (node) > 0;
431}