/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "sreal.h"
#include "ipa-cp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"
#include "attribs.h"

/* Cached node/edge growths.  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;

/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  ipa_cached_call_context ctx;
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    ctx.release ();
  }
};

/* At the moment we implement a primitive single entry LRU cache.  */
class node_context_summary
{
public:
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};

/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectiveness.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;

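/* In effect the cache keeps, for every callee, exactly one
   (call context, estimates) pair: a lookup either matches that single
   entry or recomputes the estimates and overwrites it.  A minimal sketch
   of the idea, with purely illustrative types rather than the real GCC
   summary API:

     struct one_entry_cache
     {
       key_type key;       // last context asked about
       value_type value;   // estimates computed for that context
       bool valid;

       bool lookup (const key_type &k, value_type *v)
       {
         if (valid && k == key)
           {
             *v = value;   // hit: reuse previous estimates
             return true;
           }
         return false;     // miss: caller recomputes and calls store ()
       }
       void store (const key_type &k, const value_type &v)
       { key = k; value = v; valid = true; }
     };  */
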
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
                       || cgraph_inline_failed_type (e->inline_failed)
                            == CIF_FINAL_ERROR);
}

/* Allocate edge growth caches.  */

void
initialize_growth_caches ()
{
  edge_growth_cache
    = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
  node_context_cache
    = new fast_function_summary<node_context_summary *, va_heap> (symtab);
  edge_growth_cache->disable_duplication_hook ();
  node_context_cache->disable_insertion_hook ();
  node_context_cache->disable_duplication_hook ();
}

/* Free growth caches.  */

void
free_growth_caches (void)
{
  delete edge_growth_cache;
  delete node_context_cache;
  edge_growth_cache = NULL;
  node_context_cache = NULL;
  if (dump_file)
    fprintf (dump_file, "node context cache: %li hits, %li misses,"
             " %li initializations\n",
             node_context_cache_hit, node_context_cache_miss,
             node_context_cache_clear);
  node_context_cache_hit = 0;
  node_context_cache_miss = 0;
  node_context_cache_clear = 0;
}

/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->inlined_to
                            ? edge->caller->inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
  int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;

  if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (cross_module_call_p (edge))
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
                                &avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
        {
          node_context_cache_hit++;
          size = e->entry.size;
          time = e->entry.time;
          nonspec_time = e->entry.nonspec_time;
          hints = e->entry.hints;
          if (flag_checking
              && !opt_for_fn (callee->decl, flag_profile_partial_training)
              && !callee->count.ipa_p ())
            {
              ipa_call_estimates chk_estimates;
              ctx.estimate_size_and_time (&chk_estimates);
              gcc_assert (chk_estimates.size == size
                          && chk_estimates.time == time
                          && chk_estimates.nonspecialized_time == nonspec_time
                          && chk_estimates.hints == hints);
            }
        }
      else
        {
          if (e->entry.ctx.exists_p ())
            node_context_cache_miss++;
          else
            node_context_cache_clear++;
          e->entry.ctx.release ();
          ipa_call_estimates estimates;
          ctx.estimate_size_and_time (&estimates);
          size = estimates.size;
          e->entry.size = size;
          time = estimates.time;
          e->entry.time = time;
          nonspec_time = estimates.nonspecialized_time;
          e->entry.nonspec_time = nonspec_time;
          hints = estimates.hints;
          e->entry.hints = hints;
          e->entry.ctx.duplicate_from (ctx);
        }
    }
  else
    {
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or a function attribute, we can quite
     safely identify hot edges and for those we disable size limits.  Don't
     do that, however, when the probability that the caller will call the
     callee is low, since it may hurt optimization of the caller's hot
     path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
       && (edge->count.ipa () * 2
           > (edge->caller->inlined_to
              ? edge->caller->inlined_to->count.ipa ()
              : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
          != NULL
          && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
          != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
        ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
          = min_size;
      edge_growth_cache_entry *entry
        = edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}

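/* Note on the encoding used by the edge growth cache above (and decoded
   again in do_estimate_edge_size and do_estimate_edge_hints): an entry
   whose fields are still zero means "no estimate computed yet", so the
   cached values are shifted by one to keep zero unambiguous.  Sizes are
   stored as SIZE + (SIZE >= 0) and read back as SIZE - (SIZE > 0); hints
   are stored as HINTS + 1 and read back as HINTS - 1.  A minimal
   standalone sketch of the same trick, with illustrative names only:

     int slot = 0;                       // 0 == nothing cached yet
     void put (int size) { slot = size + (size >= 0); }
     bool get (int *size)
     {
       if (!slot)
         return false;                   // no cached estimate
       *size = slot - (slot > 0);        // undo the shift
       return true;
     }  */
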
/* Reset cache for NODE.
   This must be done each time NODE body is modified.  */
void
reset_node_cache (struct cgraph_node *node)
{
  if (node_context_cache)
    node_context_cache->remove (node);
}

/* Remove EDGE from caches once it was inlined.  */
void
ipa_remove_from_growth_caches (struct cgraph_edge *edge)
{
  if (node_context_cache)
    node_context_cache->remove (edge->callee);
  if (edge_growth_cache)
    edge_growth_cache->remove (edge);
}

/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      gcc_checking_assert (size);
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
                                &avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  ctx.estimate_size_and_time (&estimates, false, false);
  return estimates.size;
}


/* Estimate hints for the caller when inlining EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      ipa_hints hints = edge_growth_cache->get (edge)->hints;
      gcc_checking_assert (hints);
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
                                &avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  ctx.estimate_size_and_time (&estimates, false, true);
  ipa_hints hints = estimates.hints | simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
                              struct cgraph_edge *edge)
{
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  ipa_size_summary *s = ipa_size_summaries->get (node);
  if (!es->predicate || *es->predicate != false)
    {
      int size = s->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return s->size;
}


/* Data used by do_estimate_growth_1 to accumulate the estimated growth
   caused by inlining NODE into all of its callers.  */

struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
  int cap;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
          || !opt_for_fn (e->caller->decl, optimize))
        {
          d->uninlinable = true;
          if (d->cap < INT_MAX)
            return true;
          continue;
        }

      if (e->recursive_p ())
        {
          d->self_recursive = true;
          if (d->cap < INT_MAX)
            return true;
          continue;
        }
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
        return true;
    }
  return false;
}

/* Return estimated savings for eliminating offline copy of NODE by inlining
   it everywhere.  */

static int
offline_size (struct cgraph_node *node, ipa_size_summary *info)
{
  if (!DECL_EXTERNAL (node->decl))
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
        return info->size;
      /* COMDAT functions are very often not shared across multiple units
         since they come from various template instantiations.
         Take this into account.  */
      else if (DECL_COMDAT (node->decl)
               && node->can_remove_if_no_direct_calls_p ())
        {
          int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
          return (info->size * (100 - prob) + 50) / 100;
        }
    }
  return 0;
}

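/* For example, with a sharing probability of 20 (the actual value comes
   from the comdat-sharing-probability parameter; 20 is used here purely
   for illustration), a COMDAT function of size 100 is credited with
   (100 * (100 - 20) + 50) / 100 == 80 units of expected savings.  The
   "+ 50" makes the integer division round to the nearest unit instead of
   truncating.  */
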
/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    d.growth -= offline_size (node, info);

  return d.growth;
}

/* Worker for growth_positive_p.  Walk callers of NODE (and of its aliases),
   adding a lower bound on the growth of each call to *GROWTH and
   decrementing *N for every edge whose growth is not cached yet; KNOWN_EDGE
   is skipped since its growth is accounted for by the caller.  Return true
   if the growth is already known to exceed OFFLINE, or if a positive growth
   cannot be cheaply ruled out (NODE is not removable, a caller cannot be
   inlined, or a summary is missing).  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
               int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
        continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
      if (edge_growth_cache != NULL
          && (entry = edge_growth_cache->get (e)) != NULL
          && entry->size != 0)
        *growth += entry->size - (entry->size > 0);
      else
        {
          class ipa_call_summary *es = ipa_call_summaries->get (e);
          if (!es)
            return true;
          *growth += min_size - es->call_stmt_size;
          if (--(*n) < 0)
            return false;
        }
      if (*growth > offline)
        return true;
    }

  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
                         offline, min_size, known_edge))
        return true;

  return false;
}

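/* Both check_callers above and growth_positive_p below use the same cheap
   lower bound for edges whose growth has not been cached yet: inlining NODE
   into a caller adds at least NODE's MIN_SIZE and removes the call
   statement itself, hence MIN_SIZE - call_stmt_size.  Only a bounded number
   of such uncached edges (ten in total, counted down via N) is examined
   before the fast path gives up and the full, capped growth estimate is
   computed instead.  */
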
/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If the growth of KNOWN_EDGE is already known, it is
   passed in EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
                   struct cgraph_edge *known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
      if (e == known_edge)
        continue;
      if (edge_growth_cache != NULL
          && (entry = edge_growth_cache->get (e)) != NULL
          && entry->size != 0)
        min_growth += entry->size - (entry->size > 0);
      else
        {
          class ipa_call_summary *es = ipa_call_summaries->get (e);
          if (!es)
            return true;
          min_growth += min_size - es->call_stmt_size;
          if (--n <= 0)
            break;
        }
      if (min_growth > offline)
        return true;
    }

  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
                         &min_growth, &n, offline, min_size, known_edge))
        return true;

  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}