/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cilk.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Cached node/edge growths.  */
vec<edge_growth_cache_entry> edge_growth_cache;
static struct cgraph_edge_hook_list *edge_removal_hook_holder;


/* Give the initial reasons why inlining would fail on EDGE.  This gets
   either nullified or, more usually, overwritten by a more precise
   reason later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->local.redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
                       || cgraph_inline_failed_type (e->inline_failed)
                          == CIF_FINAL_ERROR);
}


/* Keep edge cache consistent across edge removal.  */

static void
inline_edge_removal_hook (struct cgraph_edge *edge,
                          void *data ATTRIBUTE_UNUSED)
{
  reset_edge_growth_cache (edge);
}


/* Initialize growth caches.  */

void
initialize_growth_caches (void)
{
  if (!edge_removal_hook_holder)
    edge_removal_hook_holder =
      symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
  if (symtab->edges_max_uid)
    edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
}


/* Free growth caches.  */

void
free_growth_caches (void)
{
  if (edge_removal_hook_holder)
    symtab->remove_edge_removal_hook (edge_removal_hook_holder);
  edge_growth_cache.release ();
}

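/* A minimal usage sketch (hypothetical driver, modeled on how the
   inliner brackets its pass with these calls):

     initialize_growth_caches ();
     ... for each candidate edge E, weigh estimate_edge_time (E)
         against estimate_edge_size (E) ...
     free_growth_caches ();

   Between the two calls, repeated estimate_edge_* queries on the same
   edge are served from edge_growth_cache instead of being recomputed,
   and the removal hook keeps the cache valid as edges disappear.  */
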
/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->global.inlined_to
                            ? edge->caller->global.inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  if (ipa_fn_summaries->get (to)->scc_no
      && ipa_fn_summaries->get (to)->scc_no
         == ipa_fn_summaries->get (callee)->scc_no
      && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (callee->lto_file_data && edge->caller->lto_file_data
      && edge->caller->lto_file_data != callee->lto_file_data
      && !callee->merged_comdat && !callee->icf_merged)
    hints |= INLINE_HINT_cross_module;

  return hints;
}

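/* Consumer sketch (hypothetical, not from the original source): hints
   form a bitmask, so callers test individual INLINE_HINT_* bits, e.g.

     int hints = simple_edge_hints (edge);
     if (hints & INLINE_HINT_same_scc)
       ... caller and callee sit in the same strongly connected
           component; inlining within a cycle rarely pays off ...
     if (hints & INLINE_HINT_cross_module)
       ... the call crosses LTO unit boundaries ...  */
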
/* Estimate the time cost for the caller when inlining EDGE.  Only to
   be called via estimate_edge_time, which handles the caching
   mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
                                &clause, &nonspec_clause, &known_vals,
                                &known_contexts, &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
                               known_contexts, known_aggs, &size, &min_size,
                               &time, &nonspec_time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     the probability that the caller will call the callee is low,
     however, since it may hurt optimization of the caller's hot
     path.  */
  if (edge->count && edge->maybe_hot_p ()
      && (edge->count * 2
          > (edge->caller->global.inlined_to
             ? edge->caller->global.inlined_to->count : edge->caller->count)))
    hints |= INLINE_HINT_known_hot;

  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache.exists ())
    {
      ipa_fn_summaries->get (edge->callee)->min_size = min_size;
      if ((int) edge_growth_cache.length () <= edge->uid)
        edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
      edge_growth_cache[edge->uid].time = time;
      edge_growth_cache[edge->uid].nonspec_time = nonspec_time;

      edge_growth_cache[edge->uid].size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      edge_growth_cache[edge->uid].hints = hints + 1;
    }
  return time;
}

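/* Note on the cache encoding above (explanatory, not from the original
   source): a zero cache entry must mean "not cached", so stored values
   are biased away from zero.  For instance, a computed size of 0 is
   stored as 0 + (0 >= 0) == 1 and hints of 0 are stored as 0 + 1 == 1;
   do_estimate_edge_size and do_estimate_edge_hints below undo the bias
   with "size - (size > 0)" and "hints - 1" respectively.  */
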
/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache[edge->uid].size;
      gcc_checking_assert (size);
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
                                &clause, &nonspec_clause,
                                &known_vals, &known_contexts,
                                &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
                               known_contexts, known_aggs, &size, NULL, NULL,
                               NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}


/* Return the estimated hints for inlining EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache[edge->uid].hints;
      gcc_checking_assert (hints);
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
                                &clause, &nonspec_clause,
                                &known_vals, &known_contexts,
                                &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
                               known_contexts, known_aggs, NULL, NULL,
                               NULL, NULL, &hints, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE, which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
                              struct cgraph_edge *edge)
{
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
  if (!es->predicate || *es->predicate != false)
    {
      int size = ipa_fn_summaries->get (node)->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return ipa_fn_summaries->get (node)->size;
}

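/* Worked example (illustrative numbers only): if NODE currently has
   size 120 and estimate_edge_growth (EDGE) yields 15, the estimated
   size after inlining is 135.  When the call site's predicate is
   known to be false, the call never executes, so NODE's size is
   returned unchanged.  */
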
struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        {
          d->uninlinable = true;
          continue;
        }

      if (e->recursive_p ())
        {
          d->self_recursive = true;
          continue;
        }
      d->growth += estimate_edge_growth (e);
    }
  return false;
}


/* Estimate the growth caused by inlining NODE into all of its
   callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);

  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self-recursive functions the growth estimation really should
     be infinity.  We don't want to return very large values because
     the growth value enters various fractions of the badness
     computation.  Be sure not to return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
        d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple
         units since they come from various template instantiations.
         Take this into account.  */
      else if (DECL_COMDAT (node->decl)
               && node->can_remove_if_no_direct_calls_p ())
        d.growth -= (info->size
                     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
                     + 50) / 100;
    }

  return d.growth;
}

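/* Worked example for the COMDAT adjustment above (illustrative
   numbers, not from the source): with info->size == 100 and
   PARAM_COMDAT_SHARING_PROBABILITY == 20, the expression subtracts
   (100 * (100 - 20) + 50) / 100 == 80, i.e. the body is expected to
   vanish from this unit with 80% probability; the "+ 50" rounds the
   integer division to nearest.  */
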
/* Verify that there are fewer than *MAX_CALLERS callers, counting
   across aliases; return true when the limit is exceeded or when some
   caller cannot be inlined away.  */

static bool
check_callers (cgraph_node *node, int *max_callers)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      (*max_callers)--;
      if (!*max_callers
          || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
    }

  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
      return true;

  return false;
}


/* Cheaply estimate whether the growth of NODE is likely positive,
   knowing EDGE_GROWTH of one particular edge.
   We assume that most of the other edges will have similar growth
   and skip the full computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
                        int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;

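  /* Rationale for the cutoff above (explanatory note with illustrative
     numbers): if every caller grows by roughly EDGE_GROWTH, then once
     callers * edge_growth exceeds about four times the body size, the
     total growth almost certainly outweighs removing the single body.
     E.g. with size == 40 and edge_growth == 10 we stop counting after
     40 * 4 / 10 + 2 == 18 callers.  */
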
  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
          || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
    }

  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play it unsafe with COMDATs.
     We can allow that because we know the functions under
     consideration are small (and thus the risk is small) and,
     moreover, the growth estimate already accounts for the fact that
     COMDAT functions may or may not disappear when eliminated from
     the current unit.  With good probability, making the aggressive
     choice in all units makes the overall program smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
        return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  return estimate_growth (node) > 0;
}