/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Cached node/edge growths, indexed by edge uid.  */
vec<edge_growth_cache_entry> edge_growth_cache;
static struct cgraph_edge_hook_list *edge_removal_hook_holder;


/* Give initial reasons why inlining would fail on EDGE.  These get
   either nullified or usually overwritten by more precise reasons
   later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->local.redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}


/* Keep edge cache consistent across edge removal.  */

static void
inline_edge_removal_hook (struct cgraph_edge *edge,
			  void *data ATTRIBUTE_UNUSED)
{
  reset_edge_growth_cache (edge);
}


/* Initialize growth caches.  */

void
initialize_growth_caches (void)
{
  if (!edge_removal_hook_holder)
    edge_removal_hook_holder =
      symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
  if (symtab->edges_max_uid)
    edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
}


/* Free growth caches.  */

void
free_growth_caches (void)
{
  if (edge_removal_hook_holder)
    {
      symtab->remove_edge_removal_hook (edge_removal_hook_holder);
      edge_removal_hook_holder = NULL;
    }
  edge_growth_cache.release ();
}

/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->global.inlined_to
			    ? edge->caller->global.inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
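  /* Flag edges that stay within one strongly connected component of
     the callgraph, i.e. within a cycle of mutually recursive
     functions, so the inliner can treat them more conservatively.  */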
  if (ipa_fn_summaries->get (to)->scc_no
      && ipa_fn_summaries->get (to)->scc_no
	 == ipa_fn_summaries->get (callee)->scc_no
      && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (callee->lto_file_data && edge->caller->lto_file_data
      && edge->caller->lto_file_data != callee->lto_file_data
      && !callee->merged_comdat && !callee->icf_merged)
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, &min_size,
			       &time, &nonspec_time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     the probability that the caller will call the callee is low,
     however, since it may hurt optimization of the caller's hot
     path.  */
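  /* "Hot enough" here means the edge's profile count exceeds half of
     the caller's total count: count * 2 > caller count.  */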
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->global.inlined_to
	     ? edge->caller->global.inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache.exists ())
    {
      ipa_fn_summaries->get (edge->callee)->min_size = min_size;
      if ((int) edge_growth_cache.length () <= edge->uid)
	edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
      edge_growth_cache[edge->uid].time = time;
      edge_growth_cache[edge->uid].nonspec_time = nonspec_time;

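      /* Cache entries are biased so that a stored value of zero means
	 "unset": SIZE is known to be non-negative here, so it is
	 stored as SIZE + 1, and HINTS as HINTS + 1.  The readers in
	 do_estimate_edge_size and do_estimate_edge_hints undo the
	 bias.  */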
      edge_growth_cache[edge->uid].size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      edge_growth_cache[edge->uid].hints = hints + 1;
    }
  return time;
}


/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache[edge->uid].size;
      gcc_checking_assert (size);
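      /* Undo the +1 bias that do_estimate_edge_time applied when it
	 stored the entry.  */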
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* The early inliner runs without caching; go ahead and do the dirty
     work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, NULL, NULL,
			       NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}


/* Return estimated hints for inlining EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache[edge->uid].hints;
      gcc_checking_assert (hints);
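      /* Undo the +1 bias applied when the entry was cached.  */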
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* The early inliner runs without caching; go ahead and do the dirty
     work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, NULL, NULL,
			       NULL, NULL, &hints, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE, which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
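  /* If the call's predicate is known to be false, the call never
     executes in the known context and will be optimized away, so
     inlining EDGE does not change NODE's size.  */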
  if (!es->predicate || *es->predicate != false)
    {
      int size = ipa_fn_summaries->get (node)->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return ipa_fn_summaries->get (node)->size;
}


struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
};


/* Worker for estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
    }
  return false;
}


/* Estimate the growth caused by inlining NODE into all of its callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);

  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self-recursive functions the growth estimate really should be
     infinity.  We don't want to return very large values because
     growth plays various roles in fractions of the badness
     computation.  Be sure not to return zero or negative growth.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
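      /* The deduction below is INFO->size scaled by the probability
	 that the local COMDAT copy actually goes away, i.e. by
	 100 - PARAM_COMDAT_SHARING_PROBABILITY percent, rounded to
	 the nearest unit.  */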
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}

/* Worker for growth_likely_positive.  Return true if NODE cannot be
   removed, or if walking its callers (and the callers of its aliases)
   exhausts the *MAX_CALLERS budget.  */

static bool
check_callers (cgraph_node *node, int *max_callers)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      (*max_callers)--;
      if (!*max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
      return true;

  return false;
}


/* Cheaply estimate whether the growth of NODE is likely positive,
   knowing the EDGE_GROWTH of one particular edge.
   We assume that most other edges will have similar growth, and skip
   the exact computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

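  /* Cap the walk: once more than roughly SIZE * 4 / EDGE_GROWTH
     callers have been seen, the combined growth would dwarf the
     savings from removing the offline copy of NODE, so the growth is
     surely positive.  */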
  max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play it unsafe with COMDATs.
     We can allow that because we know the functions under
     consideration are small (so the risk is small) and, moreover, the
     growth estimate already accounts for the possibility that COMDAT
     functions may or may not disappear when eliminated from the
     current unit.  With good probability, making the aggressive
     choice in all units makes the overall program smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  return estimate_growth (node) > 0;
}