/* gcc/ipa-inline-analysis.c */
1 /* Analysis used by inlining decision heuristics.
2 Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "alloc-pool.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "tree-streamer.h"
31 #include "cgraph.h"
32 #include "diagnostic.h"
33 #include "fold-const.h"
34 #include "print-tree.h"
35 #include "tree-inline.h"
36 #include "gimple-pretty-print.h"
37 #include "params.h"
38 #include "cfganal.h"
39 #include "gimple-iterator.h"
40 #include "tree-cfg.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-ssa-loop.h"
43 #include "symbol-summary.h"
44 #include "ipa-prop.h"
45 #include "ipa-fnsummary.h"
46 #include "ipa-inline.h"
47 #include "cfgloop.h"
48 #include "tree-scalar-evolution.h"
49 #include "ipa-utils.h"
50 #include "cfgexpand.h"
51 #include "gimplify.h"
52
53 /* Cached node/edge growths. */
54 call_summary<edge_growth_cache_entry *> *edge_growth_cache = NULL;
55
56 /* Give initial reasons why inlining would fail on EDGE. This gets either
57 nullified or usually overwritten by more precise reasons later. */
58
59 void
60 initialize_inline_failed (struct cgraph_edge *e)
61 {
62 struct cgraph_node *callee = e->callee;
63
64 if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
65 && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
66 ;
67 else if (e->indirect_unknown_callee)
68 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
69 else if (!callee->definition)
70 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
71 else if (callee->redefined_extern_inline)
72 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
73 else
74 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
75 gcc_checking_assert (!e->call_stmt_cannot_inline_p
76 || cgraph_inline_failed_type (e->inline_failed)
77 == CIF_FINAL_ERROR);
78 }
79
80
81 /* Free growth caches. */
82
83 void
84 free_growth_caches (void)
85 {
86 delete edge_growth_cache;
87 edge_growth_cache = NULL;
88 }
89
/* Return hints derived from EDGE.   */
91
92 int
93 simple_edge_hints (struct cgraph_edge *edge)
94 {
95 int hints = 0;
96 struct cgraph_node *to = (edge->caller->inlined_to
97 ? edge->caller->inlined_to : edge->caller);
98 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
99 int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
100 int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
101
102 if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
103 hints |= INLINE_HINT_same_scc;
104
105 if (callee->lto_file_data && edge->caller->lto_file_data
106 && edge->caller->lto_file_data != callee->lto_file_data
107 && !callee->merged_comdat && !callee->icf_merged)
108 hints |= INLINE_HINT_cross_module;
109
110 return hints;
111 }
112
113 /* Estimate the time cost for the caller when inlining EDGE.
114 Only to be called via estimate_edge_time, that handles the
115 caching mechanism.
116
117 When caching, also update the cache entry. Compute both time and
118 size, since we always need both metrics eventually. */
119
sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size;

  /* Aliases contribute no code of their own; estimate the real target.  */
  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Collect what is known about the call context: satisfied predicate
     clauses, constant argument values, polymorphic call contexts and
     aggregate contents passed by reference.  */
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, &min_size,
			       &time, &nonspec_time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  /* The vectors were allocated by evaluate_properties_for_edge; release
     them now that the estimates are computed.  */
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	= min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size and hints are cached biased by one so that a stored zero
	 means "not computed yet"; do_estimate_edge_size and
	 do_estimate_edge_hints undo the bias on read.  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  return time;
}
177
178
179 /* Return estimated callee growth after inlining EDGE.
180 Only to be called via estimate_edge_size. */
181
int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      /* A zero entry would mean "not yet computed", which cannot happen
	 right after do_estimate_edge_time filled it in.  */
      gcc_checking_assert (size);
      /* The cached value is biased by one; undo the bias.  */
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  /* Only the size output is needed here; time/hints outputs are NULL.  */
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, &size, NULL, NULL,
			       NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}
218
219
/* Return inline hints for EDGE.
   Only to be called via estimate_edge_hints.  */
222
ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache->get (edge)->hints;
      /* A zero entry would mean "not yet computed", which cannot happen
	 right after do_estimate_edge_time filled it in.  */
      gcc_checking_assert (hints);
      /* The cached value is biased by one; undo the bias.  */
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  /* Only the hints output is needed here; size/time outputs are NULL.  */
  estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
			       known_contexts, known_aggs, NULL, NULL,
			       NULL, NULL, &hints, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}
260
261 /* Estimate the size of NODE after inlining EDGE which should be an
262 edge to either NODE or a call inlined into NODE. */
263
264 int
265 estimate_size_after_inlining (struct cgraph_node *node,
266 struct cgraph_edge *edge)
267 {
268 class ipa_call_summary *es = ipa_call_summaries->get (edge);
269 ipa_size_summary *s = ipa_size_summaries->get (node);
270 if (!es->predicate || *es->predicate != false)
271 {
272 int size = s->size + estimate_edge_growth (edge);
273 gcc_assert (size >= 0);
274 return size;
275 }
276 return s->size;
277 }
278
279
280 struct growth_data
281 {
282 struct cgraph_node *node;
283 bool self_recursive;
284 bool uninlinable;
285 int growth;
286 };
287
288
289 /* Worker for do_estimate_growth. Collect growth for all callers. */
290
291 static bool
292 do_estimate_growth_1 (struct cgraph_node *node, void *data)
293 {
294 struct cgraph_edge *e;
295 struct growth_data *d = (struct growth_data *) data;
296
297 for (e = node->callers; e; e = e->next_caller)
298 {
299 gcc_checking_assert (e->inline_failed);
300
301 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
302 || !opt_for_fn (e->caller->decl, optimize))
303 {
304 d->uninlinable = true;
305 continue;
306 }
307
308 if (e->recursive_p ())
309 {
310 d->self_recursive = true;
311 continue;
312 }
313 d->growth += estimate_edge_growth (e);
314 }
315 return false;
316 }
317
318
319 /* Estimate the growth caused by inlining NODE into all callees. */
320
int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  class ipa_size_summary *info = ipa_size_summaries->get (node);

  /* Sum per-edge growths over all callers of NODE and of its aliases.  */
  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      /* If inlining all calls makes the offline copy dead, its body
	 no longer counts against the growth.  */
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	/* Subtract the expected removable fraction of the body,
	   rounded to nearest (+ 50 before the division by 100).  */
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}
353
354 /* Verify if there are fewer than MAX_CALLERS. */
355
356 static bool
357 check_callers (cgraph_node *node, int *max_callers)
358 {
359 ipa_ref *ref;
360
361 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
362 return true;
363
364 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
365 {
366 (*max_callers)--;
367 if (!*max_callers
368 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
369 return true;
370 }
371
372 FOR_EACH_ALIAS (node, ref)
373 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
374 return true;
375
376 return false;
377 }
378
379
380 /* Make cheap estimation if growth of NODE is likely positive knowing
381 EDGE_GROWTH of one particular edge.
382 We assume that most of other edges will have similar growth
383 and skip computation if there are too many callers. */
384
bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  /* Budget of callers we are willing to examine before declaring the
     growth positive: roughly size * 4 / edge_growth, i.e. give up when
     the combined growth of that many similar edges would exceed four
     times the body size saved by removing NODE.  */
  max_callers = ipa_size_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  /* Callers of NODE's aliases also keep its body alive; check them
     against the same remaining budget.  */
  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play unsafe with
     COMDATs.  We can allow that since we know functions
     in consideration are small (and thus risk is small) and
     moreover grow estimates already accounts that COMDAT
     functions may or may not disappear when eliminated from
     current unit.  With good probability making aggressive
     choice in all units is going to make overall program
     smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  /* Few enough callers; fall back to the precise estimate.  */
  return estimate_growth (node) > 0;
}
432 }