/* Inlining decision heuristics.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows us to check that a particular inlining is
     allowed by the limits specified by the user (allowed function growth
     and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       A simple local inlining pass inlining callees into the current
       function.  This pass makes no use of whole unit analysis and thus it
       can make only very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into
       SSA form just before this pass and optimized subsequently.  As a
       result, the callees of the function seen by the early inliner have
       already been optimized, and the results of early inlining add a lot
       of optimization opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate the
       abstraction penalty in C++ code (via a combination of inlining and
       early optimization) and thus improve the quality of analysis done by
       the real IPA optimizers.

       Because of its lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It does however do very
       simple speculative inlining, allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this
       case the optimizations performed later are very likely to eliminate
       the cost.

     pass_ipa_inline

       This is the real inliner, able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow
       doing so.

       This heuristic is not very good at inlining recursive calls.
       Recursive calls can be inlined with results similar to loop
       unrolling.  To do so, a special purpose recursive inliner is executed
       on the function when a recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modifications of the callgraph,
       so functions may become unreachable during the process.  Also
       functions declared as extern inline or virtual functions are removed,
       since after inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */

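/* Editorial sketch (not part of the original source): the greedy step of
   pass_ipa_inline described above can be pictured as

     heap = all inlinable edges, keyed by edge_badness ();
     while (!heap->empty ())
       {
         edge = heap->extract_min ();
         if (can_inline_edge_p (edge, true)
             && want_inline_small_function_p (edge, true))
           inline the call, then update keys of affected edges;
       }

   The real driver (inline_small_functions, later in this file) adds lazy
   badness re-checks, recursive-inlining handling and limit bookkeeping.  */
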
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "params.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"

typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static profile_count max_count;
static profile_count spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  ipa_fn_summary *info, *what_info, *outer_info = ipa_fn_summaries->get (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = ipa_fn_summaries->get (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  what_info = ipa_fn_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large I/O data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

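/* Editorial example (not part of the original source): with the default
   of 100 for --param large-function-growth, a largest body of 2000 insns
   on the inline chain gives

     limit = 2000 + 2000 * 100 / 100 = 4000

   so an estimated post-inline size above 4000 insns (and above --param
   large-function-insns) fails with CIF_LARGE_FUNCTION_GROWTH_LIMIT.  The
   stack check scales stack_size_limit the same way using
   --param stack-frame-growth.  */
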
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s -> %s, %s\n",
               e->caller->dump_name (),
               e->callee->dump_name (),
               cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
           || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
          && e->caller->lto_file_data
          && e->callee->ultimate_alias_target ()->lto_file_data)
        {
          fprintf (dump_file, "  LTO objects: %s, %s\n",
                   e->caller->lto_file_data->file_name,
                   e->callee->ultimate_alias_target ()->lto_file_data->file_name);
        }
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
        cl_target_option_print_diff
          (dump_file, 2, target_opts_for_fn (e->caller->decl),
           target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
        cl_optimization_print_diff
          (dump_file, 2, opts_for_fn (e->caller->decl),
           opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}

/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  /* Don't care if sanitizer is disabled.  */
  if (!(flag_sanitize & SANITIZE_ADDRESS))
    return true;

  if (!caller || !callee)
    return true;

  return !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (caller)) ==
         !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (callee));
}

/* Used for flags where it is safe to inline when caller's value is
   greater than callee's.  */
#define check_maybe_up(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag \
    && (!always_inline \
        || opts_for_fn (caller->decl)->x_##flag \
           < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when caller's value is
   smaller than callee's.  */
#define check_maybe_down(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag \
    && (!always_inline \
        || opts_for_fn (caller->decl)->x_##flag \
           > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where exact match is needed for correctness.  */
#define check_match(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag)

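/* Editorial example (not part of the original source): the expansion of
   check_maybe_up (flag_errno_math) is

     opts_for_fn (caller->decl)->x_flag_errno_math
       != opts_for_fn (callee->decl)->x_flag_errno_math
     && (!always_inline
         || opts_for_fn (caller->decl)->x_flag_errno_math
            < opts_for_fn (callee->decl)->x_flag_errno_math)

   i.e. for an always_inline callee a mismatch only blocks inlining when
   the caller's value is smaller (caller built with -fno-math-errno,
   callee without); any mismatch blocks otherwise.  */
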
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
                   bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
        report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = e->caller->global.inlined_to
                        ? e->caller->global.inlined_to : e->caller;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of the FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
           && DECL_FUNCTION_PERSONALITY (callee->decl)
           && (DECL_FUNCTION_PERSONALITY (caller->decl)
               != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
                                                callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (!ipa_fn_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
           && !disregard_limits
           && !lookup_attribute ("flatten",
                                 DECL_ATTRIBUTES (caller->decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
        (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
         && lookup_attribute ("always_inline",
                              DECL_ATTRIBUTES (callee->decl)));
      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
         below and inlined across optimization boundaries.
         Enabling the checks below breaks several packages by refusing
         to inline library always_inline functions.  See PR65873.
         Disable the check for early inlining for now until a better
         solution is found.  */
      if (always_inline && early)
        ;
      /* There are some options that change IL semantics which means
         we cannot inline in these cases for correctness reasons.
         Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
               || check_match (flag_trapv)
               /* When caller or callee does FP math, be sure FP codegen
                  flags are compatible.  */
               || ((caller_info->fp_expressions && callee_info->fp_expressions)
                   && (check_maybe_up (flag_rounding_math)
                       || check_maybe_up (flag_trapping_math)
                       || check_maybe_down (flag_unsafe_math_optimizations)
                       || check_maybe_down (flag_finite_math_only)
                       || check_maybe_up (flag_signaling_nans)
                       || check_maybe_down (flag_cx_limited_range)
                       || check_maybe_up (flag_signed_zeros)
                       || check_maybe_down (flag_associative_math)
                       || check_maybe_down (flag_reciprocal_math)
                       || check_maybe_down (flag_fp_int_builtin_inexact)
                       /* Strictly speaking only when the callee contains
                          function calls that may end up setting errno.  */
                       || check_maybe_up (flag_errno_math)))
               /* We do not want code compiled with exceptions to be
                  brought into a non-EH function unless we know that the
                  callee does not throw.
                  This is tracked by DECL_FUNCTION_PERSONALITY.  */
               || (check_maybe_up (flag_non_call_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               || (check_maybe_up (flag_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               /* When devirtualization is disabled for the callee, it is
                  not safe to inline it as we possibly mangled the type
                  info.  Allow early inlining of always inlines.  */
               || (!early && check_maybe_down (flag_devirtualize)))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
        ;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
               && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If explicit optimize attributes are not used, the mismatch is
         caused by different command line options used to build different
         units.  Do not care about COMDAT functions - those are intended to
         be optimized with the optimization flags of the module they are
         used in.  Also do not care about mixing up size/speed optimization
         when DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
                && !lookup_attribute ("optimize",
                                      DECL_ATTRIBUTES (caller->decl)))
               || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
        ;
      /* If the mismatch is caused by merging two LTO units with different
         optimization flags we want to be a bit nicer.  However never inline
         if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
               || !opt_for_fn (caller->decl, optimize))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If the callee is optimized for size and the caller is not, allow
         inlining if the code shrinks or we are within the
         MAX_INLINE_INSNS_SINGLE limit and the callee is inline (and thus
         likely a unified comdat).  This will allow the caller to run
         faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
          if (growth > 0
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
                                    MAX_INLINE_INSNS_AUTO)))
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
      /* If the callee is more aggressively optimized for performance than
         the caller, we generally want to inline only cheap (runtime-wise)
         functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               < opt_for_fn (caller->decl, optimize_size)
               || (opt_for_fn (callee->decl, optimize)
                   > opt_for_fn (caller->decl, optimize)))
        {
          if (estimate_edge_time (e)
              >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* The early inliner might get called at WPA stage when an IPA pass adds
     a new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner yet).  We don't have a CIF code for
     this case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
        fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true, false, true))
    return false;
  return true;
}

/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}

/* Return true if we are interested in inlining the small function E.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that, before the profile summary, all
     hot paths' IR looks exactly the same as in the profiled binary.  As a
     result, in the early inliner we disregard the size limit and inline
     those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
        ;
      else if (!e->maybe_hot_p ()
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s->%s, "
                     "call is cold and code would grow by %i\n",
                     e->caller->dump_name (),
                     callee->dump_name (),
                     growth);
          want_inline = false;
        }
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s->%s, "
                     "growth %i exceeds --param early-inlining-insns\n",
                     e->caller->dump_name (),
                     callee->dump_name (),
                     growth);
          want_inline = false;
        }
      else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s->%s, "
                     "growth %i exceeds --param early-inlining-insns "
                     "divided by number of calls\n",
                     e->caller->dump_name (),
                     callee->dump_name (),
                     growth);
          want_inline = false;
        }
    }
  return want_inline;
}

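/* Editorial example (not part of the original source): assuming
   --param early-inlining-insns is 14 (a historical default; the exact
   value varies between releases), a callee growing the caller by 4 insns
   and itself making 3 non-builtin calls is rejected above because
   growth * (n + 1) = 4 * 4 = 16 > 14, while the same callee with only
   2 such calls passes: 4 * 3 = 12 <= 14.  */
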
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct cgraph_edge *edge,
                             sreal uninlined_call_time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  if (edge->count > profile_count::zero ()
      && caller->count > profile_count::zero ())
    uninlined_call_time *= (sreal)edge->count.to_gcov_type ()
                           / caller->count.to_gcov_type ();
  if (edge->frequency)
    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  sreal caller_time = ipa_fn_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
                           sreal time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);
  sreal caller_time = ipa_fn_summaries->get (caller)->time;

  if (edge->count > profile_count::zero ()
      && caller->count > profile_count::zero ())
    time *= (sreal)edge->count.to_gcov_type () / caller->count.to_gcov_type ();
  if (edge->frequency)
    time *= cgraph_freq_base_rec * edge->frequency;
  else
    time = time >> 11;

  /* This calculation should match the one in ipa-inline-analysis.c
     (estimate_edge_size_and_time).  */
  time -= (sreal) edge->frequency
          * ipa_call_summaries->get (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}

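/* Editorial example (not part of the original source): with
   CGRAPH_FREQ_BASE of 1000, an edge of frequency 500 (the call runs on
   about half of the caller's invocations) scales the callee time by
   500/1000 in both helpers above; compute_inlined_call_time additionally
   subtracts the saved call overhead (call_stmt_time) at the same scale
   before adding the caller's own time.  */
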
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal unspec_time;
  sreal spec_time = estimate_edge_time (e, &unspec_time);
  sreal time = compute_uninlined_call_time (e, unspec_time);
  sreal inlined_time = compute_inlined_call_time (e, spec_time);

  if (time - inlined_time
      > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP))
        * percent_rec)
    return true;
  return false;
}

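/* Editorial example (not part of the original source): assuming
   --param inline-min-speedup is 10 (value picked for illustration),
   an uninlined combined time of 200 against an inlined time of 185
   saves 15, which does not exceed 200 * 10 / 100 = 20, so big_speedup_p
   returns false; an inlined time below 180 would qualify.  */
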
/* Return true if we are interested in inlining the small function E.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do a fast and conservative check whether the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
            && (!e->count.initialized_p () || !e->maybe_hot_p ()))
           && ipa_fn_summaries->get (callee)->min_size
              - ipa_call_summaries->get (e)->call_stmt_size
              > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl)
            || e->count > profile_count::zero ())
           && ipa_fn_summaries->get (callee)->min_size
              - ipa_call_summaries->get (e)->call_stmt_size
              > 16 * MAX_INLINE_INSNS_SINGLE)
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
                          ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
                          : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      ipa_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
        ;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggest that inlining the given function is very
         profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= MAX_INLINE_INSNS_SINGLE
               && ((!big_speedup
                    && !(hints & (INLINE_HINT_indirect_call
                                  | INLINE_HINT_known_hot
                                  | INLINE_HINT_loop_iterations
                                  | INLINE_HINT_array_index
                                  | INLINE_HINT_loop_stride)))
                   || growth >= MAX_INLINE_INSNS_SINGLE * 16))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_NOT_DECLARED_INLINED;
              want_inline = false;
            }
        }
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared
         inline.  Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest
         that inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !big_speedup
               && !(hints & INLINE_HINT_known_hot)
               && growth >= ((hints & (INLINE_HINT_indirect_call
                                       | INLINE_HINT_loop_iterations
                                       | INLINE_HINT_array_index
                                       | INLINE_HINT_loop_stride))
                             ? MAX (MAX_INLINE_INSNS_AUTO,
                                    MAX_INLINE_INSNS_SINGLE)
                             : MAX_INLINE_INSNS_AUTO))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
              want_inline = false;
            }
        }
      /* If the call is cold, do not inline when the function body would
         grow.  */
      else if (!e->maybe_hot_p ()
               && (growth >= MAX_INLINE_INSNS_SINGLE
                   || growth_likely_positive (callee, growth)))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlined into itself
   or when function A is being inlined into another inlined copy of
   function A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
                                   bool peeling,
                                   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (outer_node->count == profile_count::zero ())
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!caller_freq)
    {
      reason = "function is inlined and unlikely";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
                                         / max_depth);
      int i;
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count > profile_count::zero ()
          && edge->count > profile_count::zero ()
          && (edge->count.to_gcov_type () * CGRAPH_FREQ_BASE
              / outer_node->count.to_gcov_type ()
              >= max_prob))
        {
          reason = "profile of recursive call is too large";
          want_inline = false;
        }
      if (max_count == profile_count::zero ()
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
              >= max_prob))
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame
     setup, actually slowing down functions whose recursion tree is wide
     rather than deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count > profile_count::zero ()
          && (edge->count.to_gcov_type () * 100
              / outer_node->count.to_gcov_type ()
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "profile of recursive call is too small";
          want_inline = false;
        }
      else if (max_count == profile_count::zero ()
               && (edge->frequency * 100 / caller_freq
                   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}

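/* Editorial example (not part of the original source): with
   CGRAPH_FREQ_BASE of 1000 and max_depth of 8 the peeling bound above is

     max_prob = 1000 - (1000 + 8 - 1) / 8 = 1000 - 125 = 875

   i.e. a recursion probability of at most 87.5% at depth 1; each deeper
   level squares the bound (max_prob * max_prob / 1000), keeping the
   expected number of recursions near max_depth.  */
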
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
        return true;
      if (!can_inline_edge_p (e, true))
        return true;
      if (e->recursive_p ())
        return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
        *(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}

/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
                                         true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}

/* A cost model driving the inlining heuristics in a way such that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the nodes affected are
   recomputed so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth;
  sreal edge_time, unspec_edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  ipa_hints hints;
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge, &unspec_edge_time);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  /* Check that inlined time is better, but tolerate some roundoff
     issues.  */
  gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
               edge->caller->dump_name (),
               edge->callee->dump_name ());
      fprintf (dump_file, "      size growth %i, time %f unspec %f ",
               growth,
               edge_time.to_double (),
               unspec_edge_time.to_double ());
      ipa_dump_hints (dump_file, hints);
      if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
        fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
                 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
        fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available, compute badness as:

                 time_saved * caller_count
     goodness = -------------------------------------------------
                 growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
           || caller->count > profile_count::zero ())
    {
      sreal numerator, denominator;
      int overall_growth;
      sreal inlined_time = compute_inlined_call_time (edge, edge_time);

      numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
                   - inlined_time);
      if (numerator == 0)
        numerator = ((sreal) 1 >> 8);
      if (caller->count > profile_count::zero ())
        numerator *= caller->count.to_gcov_type ();
      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
        numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

         inline_caller ()
           {
             do_fast_job...
             if (need_more_work)
               noninline_callee ();
           }

         Without penalizing this case, we usually inline noninline_callee
         into the inline_caller because overall_growth is small, preventing
         further inlining of inline_caller.

         Penalize only callgraph edges to functions with small overall
         growth ...  */
      if (growth > overall_growth
          /* ... and having only one caller which is not inlined ... */
          && callee_info->single_caller
          && !edge->caller->global.inlined_to
          /* ... and edges executed only conditionally ... */
          && edge->frequency < CGRAPH_FREQ_BASE
          /* ... consider case where callee is not inline but caller
             is ... */
          && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
               && DECL_DECLARED_INLINE_P (caller->decl))
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
                  && edge->frequency
                     < CGRAPH_FREQ_BASE
                       * PARAM_VALUE
                          (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
                  /* ... and do not overwrite user specified hints.  */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
        {
          struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
          int caller_growth = caller_info->growth;

          /* Only apply the penalty when the caller looks like an inline
             candidate and it is not called once.  */
          if (!caller_info->single_caller && overall_growth < caller_growth
              && caller_info->inlinable
              && caller_info->size
                 < (DECL_DECLARED_INLINE_P (caller->decl)
                    ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
            {
              if (dump)
                fprintf (dump_file,
                         "     Wrapper penalty.  Increasing growth %i to %i\n",
                         overall_growth, caller_growth);
              overall_growth = caller_growth;
            }
        }
      if (overall_growth > 0)
        {
          /* Strongly prefer functions with few callers that can be inlined
             fully.  The square root here leads to smaller binaries on
             average.  Watch however for extreme cases and return to a
             linear function when growth is large.  */
          if (overall_growth < 256)
            overall_growth *= overall_growth;
          else
            overall_growth += 256 * 256 - 256;
          denominator *= overall_growth;
        }
      denominator *= inlined_time;

      badness = - numerator / denominator;

      if (dump)
        {
          fprintf (dump_file,
                   "      %f: guessed profile. frequency %f, count %" PRId64
                   " caller count %" PRId64
                   " time w/o inlining %f, time with inlining %f"
                   " overall growth %i (current) %i (original)"
                   " %i (compensated)\n",
                   badness.to_double (),
                   (double)edge->frequency / CGRAPH_FREQ_BASE,
                   edge->count.initialized_p () ? edge->count.to_gcov_type () : -1,
                   caller->count.initialized_p () ? caller->count.to_gcov_type () : -1,
                   compute_uninlined_call_time (edge,
                                                unspec_edge_time).to_double (),
                   compute_inlined_call_time (edge, edge_time).to_double (),
                   estimate_growth (callee),
                   callee_info->growth, overall_growth);
        }
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness = badness >> nest;
      else
        badness = badness << nest;
      if (dump)
        fprintf (dump_file, "      %f: no profile. nest %i\n",
                 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
                | INLINE_HINT_loop_iterations
                | INLINE_HINT_array_index
                | INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}

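/* Editorial example (not part of the original source): for an edge
   saving 2 time units in a caller executed 1000 times, with edge growth
   10, callee overall growth 20 (squared to 400 since it is below 256)
   and inlined time 50, the profile branch above computes

     badness = - (2 * 1000) / (10 * 400 * 50) = -0.01

   and a hint such as loop_iterations then shifts this two bits to
   -0.04, moving the edge towards the front of the heap (the smallest
   badness is inlined first).  */
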
9b8051b4 | 1217 | /* Recompute badness of EDGE and update its key in HEAP if needed. */ |
4c0f7679 | 1218 | static inline void |
4a910049 | 1219 | update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge) |
9b8051b4 | 1220 | { |
f0e1509b | 1221 | sreal badness = edge_badness (edge, false); |
9b8051b4 JH |
1222 | if (edge->aux) |
1223 | { | |
4a910049 ML |
1224 | edge_heap_node_t *n = (edge_heap_node_t *) edge->aux; |
1225 | gcc_checking_assert (n->get_data () == edge); | |
9b8051b4 | 1226 | |
6d4ab5f8 JH |
1227 | /* fibonacci_heap::replace_key does busy updating of the |
1228 | heap that is unnecesarily expensive. | |
1229 | We do lazy increases: after extracting minimum if the key | |
1230 | turns out to be out of date, it is re-inserted into heap | |
1231 | with correct value. */ | |
4a910049 | 1232 | if (badness < n->get_key ()) |
9b8051b4 | 1233 | { |
4c0f7679 JH |
1234 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1235 | { | |
1236 | fprintf (dump_file, | |
464d0118 ML |
1237 | " decreasing badness %s -> %s, %f to %f\n", |
1238 | edge->caller->dump_name (), | |
1239 | edge->callee->dump_name (), | |
6d4ab5f8 JH |
1240 | n->get_key ().to_double (), |
1241 | badness.to_double ()); | |
4c0f7679 | 1242 | } |
4a910049 | 1243 | heap->decrease_key (n, badness); |
9b8051b4 JH |
1244 | } |
1245 | } | |
1246 | else | |
4c0f7679 JH |
1247 | { |
1248 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1249 | { | |
1250 | fprintf (dump_file, | |
464d0118 ML |
1251 | " enqueuing call %s -> %s, badness %f\n", |
1252 | edge->caller->dump_name (), | |
1253 | edge->callee->dump_name (), | |
6d4ab5f8 | 1254 | badness.to_double ()); |
4c0f7679 | 1255 | } |
4a910049 | 1256 | edge->aux = heap->insert (badness, edge); |
4c0f7679 | 1257 | } |
9b8051b4 JH |
1258 | } |
1259 | ||
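/* The decrease-only discipline above, reduced to a standalone sketch; the
   Heap/Node template parameters are stand-ins, not real GCC interfaces.
   Decreases are applied eagerly because a fibonacci heap supports
   decrease_key in O(1) amortized time; increases are left stale and fixed
   up at extract_min time (see the revalidation step in
   inline_small_functions below).  */
#if 0
template <typename Heap, typename Node, typename Edge>
static void
lazy_update_key (Heap *heap, Edge *edge, double new_key)
{
  if (edge->aux)
    {
      Node *n = static_cast <Node *> (edge->aux);
      if (new_key < n->get_key ())
	heap->decrease_key (n, new_key);
      /* new_key > n->get_key (): deliberately do nothing; the stale
	 (too small) key is detected when the edge is extracted.  */
    }
  else
    edge->aux = heap->insert (new_key, edge);
}
#endif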
40fda55b JH |
1260 | |
1261 | /* NODE was inlined. | 
1262 | All caller edges need to be reset because | 
1263 | size estimates change. Similarly callees need to be reset | 
1264 | because a better context may be known. */ | 
1265 | ||
1266 | static void | |
1267 | reset_edge_caches (struct cgraph_node *node) | |
1268 | { | |
1269 | struct cgraph_edge *edge; | |
1270 | struct cgraph_edge *e = node->callees; | |
1271 | struct cgraph_node *where = node; | |
e55637b7 | 1272 | struct ipa_ref *ref; |
40fda55b JH |
1273 | |
1274 | if (where->global.inlined_to) | |
1275 | where = where->global.inlined_to; | |
1276 | ||
40fda55b JH |
1277 | for (edge = where->callers; edge; edge = edge->next_caller) |
1278 | if (edge->inline_failed) | |
1279 | reset_edge_growth_cache (edge); | |
e55637b7 ML |
1280 | |
1281 | FOR_EACH_ALIAS (where, ref) | |
1282 | reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring)); | |
40fda55b JH |
1283 | |
1284 | if (!e) | |
1285 | return; | |
1286 | ||
1287 | while (true) | |
1288 | if (!e->inline_failed && e->callee->callees) | |
1289 | e = e->callee->callees; | |
1290 | else | |
1291 | { | |
1292 | if (e->inline_failed) | |
1293 | reset_edge_growth_cache (e); | |
1294 | if (e->next_callee) | |
1295 | e = e->next_callee; | |
1296 | else | |
1297 | { | |
1298 | do | |
1299 | { | |
1300 | if (e->caller == node) | |
1301 | return; | |
1302 | e = e->caller->callers; | |
1303 | } | |
1304 | while (!e->next_callee); | |
1305 | e = e->next_callee; | |
1306 | } | |
1307 | } | |
1308 | } | |
1309 | ||
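/* The loop above is an iterative pre-order walk over the tree of inline
   clones using only the first-child (callees), next-sibling (next_callee)
   and parent (caller) links, so no explicit stack is needed.  The same
   pattern on a generic first-child/next-sibling tree, as a standalone
   sketch with made-up types:  */
#if 0
struct tree_node
{
  tree_node *first_child;   /* analogue of node->callees */
  tree_node *next_sibling;  /* analogue of edge->next_callee */
  tree_node *parent;        /* analogue of edge->caller */
};

static void
walk_preorder (tree_node *root, void (*visit) (tree_node *))
{
  tree_node *n = root->first_child;
  if (!n)
    return;
  while (true)
    {
      visit (n);
      if (n->first_child)
	n = n->first_child;
      else if (n->next_sibling)
	n = n->next_sibling;
      else
	{
	  /* Climb up until a pending sibling is found; stop once we
	     are back at the root.  */
	  do
	    {
	      n = n->parent;
	      if (n == root)
		return;
	    }
	  while (!n->next_sibling);
	  n = n->next_sibling;
	}
    }
}
#endif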
1310 | /* Recompute HEAP nodes for each caller of NODE. | 
1311 | UPDATED_NODES tracks nodes we already visited, to avoid redundant work. | 
1312 | When CHECK_INLINABILITY_FOR is set, re-check for the specified edge that | 
1313 | it is inlinable. Otherwise check all edges. */ | 
670cd5c5 JH |
1314 | |
1315 | static void | |
4a910049 | 1316 | update_caller_keys (edge_heap_t *heap, struct cgraph_node *node, |
40fda55b JH |
1317 | bitmap updated_nodes, |
1318 | struct cgraph_edge *check_inlinability_for) | 
670cd5c5 JH |
1319 | { |
1320 | struct cgraph_edge *edge; | |
e55637b7 | 1321 | struct ipa_ref *ref; |
670cd5c5 | 1322 | |
0bceb671 | 1323 | if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable) |
670cd5c5 JH |
1324 | || node->global.inlined_to) |
1325 | return; | |
fcaa4ca4 | 1326 | if (!bitmap_set_bit (updated_nodes, node->uid)) |
670cd5c5 | 1327 | return; |
670cd5c5 | 1328 | |
e55637b7 ML |
1329 | FOR_EACH_ALIAS (node, ref) |
1330 | { | |
1331 | struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring); | |
1332 | update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for); | |
1333 | } | |
39e2db00 | 1334 | |
cdc029b9 | 1335 | for (edge = node->callers; edge; edge = edge->next_caller) |
4c0f7679 JH |
1336 | if (edge->inline_failed) |
1337 | { | |
40fda55b JH |
1338 | if (!check_inlinability_for | 
1339 | || check_inlinability_for == edge) | 
f10d1a74 | 1340 | { |
40fda55b JH |
1341 | if (can_inline_edge_p (edge, false) |
1342 | && want_inline_small_function_p (edge, false)) | |
1343 | update_edge_key (heap, edge); | |
1344 | else if (edge->aux) | |
1345 | { | |
1346 | report_inline_failed_reason (edge); | |
4a910049 | 1347 | heap->delete_node ((edge_heap_node_t *) edge->aux); |
40fda55b JH |
1348 | edge->aux = NULL; |
1349 | } | |
f10d1a74 | 1350 | } |
40fda55b JH |
1351 | else if (edge->aux) |
1352 | update_edge_key (heap, edge); | |
4c0f7679 | 1353 | } |
9b8051b4 JH |
1354 | } |
1355 | ||
40fda55b | 1356 | /* Recompute HEAP nodes for each uninlined call in NODE. |
9b8051b4 JH |
1357 | This is used when we know that edge badnesses are only going to increase | 
1358 | (we introduced a new call site) and thus all we need is to insert the newly | 
1359 | created edges into the heap. */ | 
1360 | ||
1361 | static void | |
4a910049 | 1362 | update_callee_keys (edge_heap_t *heap, struct cgraph_node *node, |
9b8051b4 JH |
1363 | bitmap updated_nodes) |
1364 | { | |
1365 | struct cgraph_edge *e = node->callees; | |
09a2806f | 1366 | |
9b8051b4 JH |
1367 | if (!e) |
1368 | return; | |
1369 | while (true) | |
1370 | if (!e->inline_failed && e->callee->callees) | |
1371 | e = e->callee->callees; | |
1372 | else | |
670cd5c5 | 1373 | { |
a5b1779f JH |
1374 | enum availability avail; |
1375 | struct cgraph_node *callee; | |
58696ce5 JH |
1376 | /* We do not reset the callee growth cache here. Since we added a new call, | 
1377 | growth could only have increased and consequently the badness metric | 
1378 | does not need updating. */ | 
9b8051b4 | 1379 | if (e->inline_failed |
e6007a27 | 1380 | && (callee = e->callee->ultimate_alias_target (&avail, e->caller)) |
0bceb671 | 1381 | && ipa_fn_summaries->get (callee)->inlinable |
aaae719d | 1382 | && avail >= AVAIL_AVAILABLE |
a5b1779f | 1383 | && !bitmap_bit_p (updated_nodes, callee->uid)) |
670cd5c5 | 1384 | { |
40fda55b JH |
1385 | if (can_inline_edge_p (e, false) |
1386 | && want_inline_small_function_p (e, false)) | |
1387 | update_edge_key (heap, e); | |
1388 | else if (e->aux) | |
1389 | { | |
1390 | report_inline_failed_reason (e); | |
4a910049 | 1391 | heap->delete_node ((edge_heap_node_t *) e->aux); |
40fda55b JH |
1392 | e->aux = NULL; |
1393 | } | |
9b8051b4 JH |
1394 | } |
1395 | if (e->next_callee) | |
1396 | e = e->next_callee; | |
1397 | else | |
1398 | { | |
1399 | do | |
1ce18dc8 | 1400 | { |
9b8051b4 JH |
1401 | if (e->caller == node) |
1402 | return; | |
1403 | e = e->caller->callers; | |
1ce18dc8 | 1404 | } |
9b8051b4 JH |
1405 | while (!e->next_callee); |
1406 | e = e->next_callee; | |
670cd5c5 | 1407 | } |
670cd5c5 JH |
1408 | } |
1409 | } | |
1410 | ||
670cd5c5 | 1411 | /* Enqueue all recursive calls from NODE into the priority queue, ordered by | 
0fa2e4df | 1412 | how much we want to recursively inline the call. */ | 
670cd5c5 | 1413 | |
ca31b95f JH |
1414 | static void |
1415 | lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where, | |
4a910049 | 1416 | edge_heap_t *heap) |
ca31b95f JH |
1417 | { |
1418 | struct cgraph_edge *e; | |
a5b1779f JH |
1419 | enum availability avail; |
1420 | ||
ca31b95f | 1421 | for (e = where->callees; e; e = e->next_callee) |
a5b1779f | 1422 | if (e->callee == node |
e6007a27 | 1423 | || (e->callee->ultimate_alias_target (&avail, e->caller) == node |
d52f5295 | 1424 | && avail > AVAIL_INTERPOSABLE)) |
ca31b95f | 1425 | { |
c5a4444c | 1426 | /* When profile feedback is available, prioritize by expected number |
09a2806f | 1427 | of calls. */ |
3995f3a2 JH |
1428 | heap->insert (!(max_count > 0) || !e->count.initialized_p () ? -e->frequency |
1429 | : -(e->count.to_gcov_type () | |
1430 | / ((max_count.to_gcov_type () + (1<<24) - 1) | |
1431 | / (1<<24))), | |
4a910049 | 1432 | e); |
ca31b95f JH |
1433 | } |
1434 | for (e = where->callees; e; e = e->next_callee) | |
1435 | if (!e->inline_failed) | |
670cd5c5 | 1436 | lookup_recursive_calls (node, e->callee, heap); |
ca31b95f JH |
1437 | } |
1438 | ||
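/* The heap key above is a negated priority: without profile data it is just
   -frequency; with profile data the raw counts are first scaled so the
   hottest edge maps to roughly 2^24, keeping keys in a safe integer range.
   A worked example with made-up numbers: if max_count is 400,000,000 the
   divisor is ceil (400000000 / 2^24) = 24, so an edge executed 6,000,000
   times gets key -(6000000 / 24) = -250000 and is extracted before a colder
   edge with count 1,200,000 and key -50000.  */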
1439 | /* Decide on recursive inlining: in case the function has recursive calls, | 
3e293154 | 1440 | inline until the body size reaches the given limit. If any new indirect edges | 
e56f5f3e JJ |
1441 | are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES |
1442 | is NULL. */ | |
670cd5c5 JH |
1443 | |
1444 | static bool | |
4c0f7679 | 1445 | recursive_inlining (struct cgraph_edge *edge, |
d52f5295 | 1446 | vec<cgraph_edge *> *new_edges) |
ca31b95f JH |
1447 | { |
1448 | int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO); | |
d1704358 | 1449 | edge_heap_t heap (sreal::min ()); |
d7d1d041 | 1450 | struct cgraph_node *node; |
ca31b95f | 1451 | struct cgraph_edge *e; |
4c0f7679 | 1452 | struct cgraph_node *master_clone = NULL, *next; |
ca31b95f JH |
1453 | int depth = 0; |
1454 | int n = 0; | |
1455 | ||
d7d1d041 RG |
1456 | node = edge->caller; |
1457 | if (node->global.inlined_to) | |
1458 | node = node->global.inlined_to; | |
1459 | ||
67348ccc | 1460 | if (DECL_DECLARED_INLINE_P (node->decl)) |
4c0f7679 | 1461 | limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE); |
ca31b95f JH |
1462 | |
1463 | /* Make sure that the function is small enough to be considered for inlining. */ | 
4c0f7679 | 1464 | if (estimate_size_after_inlining (node, edge) >= limit) |
670cd5c5 | 1465 | return false; |
4a910049 ML |
1466 | lookup_recursive_calls (node, node, &heap); |
1467 | if (heap.empty ()) | |
1468 | return false; | |
ca31b95f JH |
1469 | |
1470 | if (dump_file) | |
b8698a0f | 1471 | fprintf (dump_file, |
670cd5c5 | 1472 | " Performing recursive inlining on %s\n", |
fec39fa6 | 1473 | node->name ()); |
ca31b95f | 1474 | |
ca31b95f | 1475 | /* Do the inlining and update the list of recursive calls during the process. */ | 
4a910049 | 1476 | while (!heap.empty ()) |
ca31b95f | 1477 | { |
4a910049 | 1478 | struct cgraph_edge *curr = heap.extract_min (); |
6ced940d | 1479 | struct cgraph_node *cnode, *dest = curr->callee; |
d7d1d041 | 1480 | |
4c0f7679 JH |
1481 | if (!can_inline_edge_p (curr, true)) |
1482 | continue; | |
1483 | ||
6ced940d JH |
1484 | /* MASTER_CLONE is produced in the case we have already started modifying | 
1485 | the function. Be sure to redirect the edge to the original body before | 
1486 | estimating growth; otherwise we will be seeing growth after inlining | 
1487 | the already modified body. */ | 
1488 | if (master_clone) | |
1489 | { | |
3dafb85c ML |
1490 | curr->redirect_callee (master_clone); |
1491 | reset_edge_growth_cache (curr); | |
6ced940d JH |
1492 | } |
1493 | ||
1494 | if (estimate_size_after_inlining (node, curr) > limit) | |
1495 | { | |
3dafb85c | 1496 | curr->redirect_callee (dest); |
6ced940d JH |
1497 | reset_edge_growth_cache (curr); |
1498 | break; | |
1499 | } | |
1500 | ||
c5a4444c JH |
1501 | depth = 1; |
1502 | for (cnode = curr->caller; | |
1503 | cnode->global.inlined_to; cnode = cnode->callers->caller) | |
67348ccc | 1504 | if (node->decl |
d52f5295 | 1505 | == curr->callee->ultimate_alias_target ()->decl) |
f791d333 | 1506 | depth++; |
c5a4444c | 1507 | |
4c0f7679 | 1508 | if (!want_inline_self_recursive_call_p (curr, node, false, depth)) |
6ced940d | 1509 | { |
3dafb85c | 1510 | curr->redirect_callee (dest); |
6ced940d JH |
1511 | reset_edge_growth_cache (curr); |
1512 | continue; | |
1513 | } | |
ca31b95f | 1514 | |
670cd5c5 | 1515 | if (dump_file) |
c5a4444c | 1516 | { |
b8698a0f | 1517 | fprintf (dump_file, |
c5a4444c | 1518 | " Inlining call of depth %i", depth); |
3995f3a2 | 1519 | if (node->count > profile_count::zero ()) |
c5a4444c JH |
1520 | { |
1521 | fprintf (dump_file, " called approx. %.2f times per call", | |
3995f3a2 JH |
1522 | (double)curr->count.to_gcov_type () |
1523 | / node->count.to_gcov_type ()); | |
c5a4444c JH |
1524 | } |
1525 | fprintf (dump_file, "\n"); | |
1526 | } | |
4c0f7679 JH |
1527 | if (!master_clone) |
1528 | { | |
1529 | /* We need the original clone to copy around. */ | 
d52f5295 ML |
1530 | master_clone = node->create_clone (node->decl, node->count, |
1531 | CGRAPH_FREQ_BASE, false, vNULL, | |
1532 | true, NULL, NULL); | |
4c0f7679 JH |
1533 | for (e = master_clone->callees; e; e = e->next_callee) |
1534 | if (!e->inline_failed) | |
bd936951 | 1535 | clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE); |
3dafb85c | 1536 | curr->redirect_callee (master_clone); |
6ced940d | 1537 | reset_edge_growth_cache (curr); |
4c0f7679 JH |
1538 | } |
1539 | ||
c170d40f | 1540 | inline_call (curr, false, new_edges, &overall_size, true); |
4a910049 | 1541 | lookup_recursive_calls (node, curr->callee, &heap); |
ca31b95f JH |
1542 | n++; |
1543 | } | |
4c0f7679 | 1544 | |
4a910049 | 1545 | if (!heap.empty () && dump_file) |
c5a4444c | 1546 | fprintf (dump_file, " Recursive inlining growth limit met.\n"); |
4c0f7679 JH |
1547 | |
1548 | if (!master_clone) | |
1549 | return false; | |
1550 | ||
ca31b95f | 1551 | if (dump_file) |
b8698a0f | 1552 | fprintf (dump_file, |
4c0f7679 | 1553 | "\n Inlined %i times, " |
ffb77fd6 | 1554 | "body grown from size %i to %i, time %f to %f\n", n, |
0bceb671 JH |
1555 | ipa_fn_summaries->get (master_clone)->size, |
1556 | ipa_fn_summaries->get (node)->size, | |
1557 | ipa_fn_summaries->get (master_clone)->time.to_double (), | |
1558 | ipa_fn_summaries->get (node)->time.to_double ()); | |
ca31b95f JH |
1559 | |
1560 | /* Remove the master clone we used for inlining. We rely on the fact that | 
1561 | clones inlined into the master clone get queued just before the master | 
1562 | clone, so we don't need recursion. */ | 
3dafb85c | 1563 | for (node = symtab->first_function (); node != master_clone; |
96fc428c JH |
1564 | node = next) |
1565 | { | |
3dafb85c | 1566 | next = symtab->next_function (node); |
96fc428c | 1567 | if (node->global.inlined_to == master_clone) |
d52f5295 | 1568 | node->remove (); |
96fc428c | 1569 | } |
d52f5295 | 1570 | master_clone->remove (); |
4c0f7679 | 1571 | return true; |
ca31b95f JH |
1572 | } |
1573 | ||
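/* An illustration with a made-up self-recursive function: given

     int fact (int n) { return n <= 1 ? 1 : n * fact (n - 1); }

   the loop above redirects the hottest recursive call to MASTER_CLONE (an
   unmodified snapshot of fact), inlines it, picks up the new recursive call
   that appears inside the inlined body, and repeats, roughly producing

     int fact (int n)
     {
       if (n <= 1) return 1;
       int t = (n - 1 <= 1) ? 1 : (n - 1) * fact_clone (n - 2);
       return n * t;
     }

   (fact_clone being a hypothetical name for the master copy) until the size
   limit or the depth heuristic stops it; the clones inlined into the master
   copy are then removed again.  */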
09a2806f | 1574 | |
88512ba0 | 1575 | /* Given the whole-compilation-unit size estimate INSNS, compute how large | 
b7c27d51 | 1576 | we can allow the unit to grow. */ | 
09a2806f | 1577 | |
b7c27d51 JH |
1578 | static int |
1579 | compute_max_insns (int insns) | |
1580 | { | |
1581 | int max_insns = insns; | |
1582 | if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS)) | |
1583 | max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS); | |
1584 | ||
a9243bfc | 1585 | return ((int64_t) max_insns |
3e2a6e7b | 1586 | * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100); |
b7c27d51 JH |
1587 | } |
1588 | ||
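/* A worked example with illustrative parameter values (the real defaults
   are version-dependent): with PARAM_LARGE_UNIT_INSNS = 10000 and
   PARAM_INLINE_UNIT_GROWTH = 20, a unit of 4000 insns is first rounded up
   to the large-unit floor, giving max_insns = 10000 * (100 + 20) / 100
   = 12000, while a unit of 50000 insns may grow to 50000 * 120 / 100
   = 60000.  The int64_t cast guards the multiplication against overflow
   before the final division.  */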
09a2806f | 1589 | |
3e293154 | 1590 | /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */ |
09a2806f | 1591 | |
3e293154 | 1592 | static void |
4a910049 | 1593 | add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges) |
3e293154 | 1594 | { |
9771b263 | 1595 | while (new_edges.length () > 0) |
3e293154 | 1596 | { |
9771b263 | 1597 | struct cgraph_edge *edge = new_edges.pop (); |
3e293154 MJ |
1598 | |
1599 | gcc_assert (!edge->aux); | |
a5b1779f | 1600 | if (edge->inline_failed |
4c0f7679 JH |
1601 | && can_inline_edge_p (edge, true) |
1602 | && want_inline_small_function_p (edge, true)) | |
4a910049 | 1603 | edge->aux = heap->insert (edge_badness (edge, false), edge); |
3e293154 MJ |
1604 | } |
1605 | } | |
1606 | ||
042ae7d2 JH |
1607 | /* Remove edge E from the heap passed in DATA. */ | 
1608 | ||
1609 | static void | |
1610 | heap_edge_removal_hook (struct cgraph_edge *e, void *data) | |
1611 | { | |
1612 | if (e->aux) | |
1613 | { | |
4a910049 | 1614 | ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux); |
042ae7d2 JH |
1615 | e->aux = NULL; |
1616 | } | |
1617 | } | |
3e293154 | 1618 | |
09ce3660 JH |
1619 | /* Return true if speculation of edge E seems useful. |
1620 | If ANTICIPATE_INLINING is true, be conservative and hope that E | |
1621 | may get inlined. */ | |
1622 | ||
1623 | bool | |
1624 | speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining) | |
1625 | { | |
1626 | enum availability avail; | |
e6007a27 JH |
1627 | struct cgraph_node *target = e->callee->ultimate_alias_target (&avail, |
1628 | e->caller); | |
09ce3660 JH |
1629 | struct cgraph_edge *direct, *indirect; |
1630 | struct ipa_ref *ref; | |
1631 | ||
1632 | gcc_assert (e->speculative && !e->indirect_unknown_callee); | |
1633 | ||
3dafb85c | 1634 | if (!e->maybe_hot_p ()) |
09ce3660 JH |
1635 | return false; |
1636 | ||
1637 | /* See if IP optimizations found something potentially useful about the | |
1638 | function. For now we look only for CONST/PURE flags. Almost everything | |
1639 | else we propagate is useless. */ | |
1640 | if (avail >= AVAIL_AVAILABLE) | |
1641 | { | |
67348ccc | 1642 | int ecf_flags = flags_from_decl_or_type (target->decl); |
09ce3660 JH |
1643 | if (ecf_flags & ECF_CONST) |
1644 | { | |
3dafb85c | 1645 | e->speculative_call_info (direct, indirect, ref); |
09ce3660 JH |
1646 | if (!(indirect->indirect_info->ecf_flags & ECF_CONST)) |
1647 | return true; | |
1648 | } | |
1649 | else if (ecf_flags & ECF_PURE) | |
1650 | { | |
3dafb85c | 1651 | e->speculative_call_info (direct, indirect, ref); |
09ce3660 JH |
1652 | if (!(indirect->indirect_info->ecf_flags & ECF_PURE)) |
1653 | return true; | |
1654 | } | |
1655 | } | |
1656 | /* If we did not manage to inline the function nor redirect | 
1657 | to an ipa-cp clone (these are recognized by the local flag being set), | 
1658 | speculation is probably pointless unless the hardware is missing an | 
1659 | indirect call predictor. */ | 
1660 | if (!anticipate_inlining && e->inline_failed && !target->local.local) | |
1661 | return false; | |
1662 | /* For overwritable targets there is not much to do. */ | |
1663 | if (e->inline_failed && !can_inline_edge_p (e, false, true)) | |
1664 | return false; | |
1665 | /* OK, speculation seems interesting. */ | |
1666 | return true; | |
1667 | } | |
1668 | ||
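/* The shape of a speculative call that this predicate judges, as a
   hypothetical source-level sketch (the real transformation happens on the
   callgraph and gimple level):

     ret = obj->fn (obj);             // one indirect edge

   becomes, after speculation binds the likely target:

     if (obj->fn == likely_target)    // ref to likely_target
       ret = likely_target (obj);     // direct edge, may be inlined
     else
       ret = obj->fn (obj);           // indirect edge kept as fallback

   Speculation pays off when the direct edge is inlined, or when the direct
   callee is const/pure while the fallback is not, since the guarded direct
   call then becomes visible to CSE/DCE.  */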
1669 | /* We know that EDGE is not going to be inlined. | |
1670 | See if we can remove speculation. */ | |
1671 | ||
1672 | static void | |
4a910049 | 1673 | resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge) |
09ce3660 JH |
1674 | { |
1675 | if (edge->speculative && !speculation_useful_p (edge, false)) | |
1676 | { | |
1677 | struct cgraph_node *node = edge->caller; | |
1678 | struct cgraph_node *where = node->global.inlined_to | |
1679 | ? node->global.inlined_to : node; | |
0e3de1d4 | 1680 | auto_bitmap updated_nodes; |
09ce3660 | 1681 | |
e86a910f | 1682 | spec_rem += edge->count; |
3dafb85c | 1683 | edge->resolve_speculation (); |
09ce3660 | 1684 | reset_edge_caches (where); |
0bceb671 | 1685 | ipa_update_overall_fn_summary (where); |
09ce3660 JH |
1686 | update_caller_keys (edge_heap, where, |
1687 | updated_nodes, NULL); | |
d0b66480 JH |
1688 | update_callee_keys (edge_heap, where, |
1689 | updated_nodes); | |
09ce3660 JH |
1690 | } |
1691 | } | |
1692 | ||
bb1e543c JH |
1693 | /* Return true if NODE should be accounted for in the overall size estimate. | 
1694 | Skip all nodes optimized for size so we can measure the growth of the hot | 
1695 | part of the program regardless of the padding. */ | 
1696 | ||
1697 | bool | |
1698 | inline_account_function_p (struct cgraph_node *node) | |
1699 | { | |
1700 | return (!DECL_EXTERNAL (node->decl) | |
1701 | && !opt_for_fn (node->decl, optimize_size) | |
1702 | && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED); | |
1703 | } | |
1704 | ||
41f669d8 JH |
1705 | /* Count the number of callers of NODE and store it into DATA (which | 
1706 | points to an int). Worker for cgraph_for_node_and_aliases. */ | 
1707 | ||
1708 | static bool | |
1709 | sum_callers (struct cgraph_node *node, void *data) | |
1710 | { | |
1711 | struct cgraph_edge *e; | |
1712 | int *num_calls = (int *)data; | |
1713 | ||
1714 | for (e = node->callers; e; e = e->next_caller) | |
1715 | (*num_calls)++; | |
1716 | return false; | |
1717 | } | |
1718 | ||
ca31b95f | 1719 | /* We use a greedy algorithm for inlining of small functions: | 
09a2806f JH |
1720 | All inline candidates are put into a prioritized heap ordered by | 
1721 | increasing badness. | 
ca31b95f | 1722 | |
09a2806f | 1723 | The inlining of small functions is bounded by unit growth parameters. */ |
ca31b95f JH |
1724 | |
1725 | static void | |
4c0f7679 | 1726 | inline_small_functions (void) |
ca31b95f JH |
1727 | { |
1728 | struct cgraph_node *node; | |
670cd5c5 | 1729 | struct cgraph_edge *edge; |
f0e1509b | 1730 | edge_heap_t edge_heap (sreal::min ()); |
0e3de1d4 | 1731 | auto_bitmap updated_nodes; |
85057983 | 1732 | int min_size, max_size; |
d52f5295 | 1733 | auto_vec<cgraph_edge *> new_indirect_edges; |
09a2806f | 1734 | int initial_size = 0; |
3dafb85c | 1735 | struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count); |
042ae7d2 | 1736 | struct cgraph_edge_hook_list *edge_removal_hook_holder; |
2bf86c84 | 1737 | new_indirect_edges.create (8); |
670cd5c5 | 1738 | |
042ae7d2 | 1739 | edge_removal_hook_holder |
4a910049 | 1740 | = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap); |
042ae7d2 | 1741 | |
1a3118e9 JH |
1742 | /* Compute overall unit size and other global parameters used by badness |
1743 | metrics. */ | |
ca31b95f | 1744 | |
3995f3a2 | 1745 | max_count = profile_count::uninitialized (); |
68cc8feb JH |
1746 | ipa_reduced_postorder (order, true, true, NULL); |
1747 | free (order); | |
1a3118e9 | 1748 | |
c47d0034 JH |
1749 | FOR_EACH_DEFINED_FUNCTION (node) |
1750 | if (!node->global.inlined_to) | |
e7f23018 | 1751 | { |
bb1e543c JH |
1752 | if (!node->alias && node->analyzed |
1753 | && (node->has_gimple_body_p () || node->thunk.thunk_p)) | |
a5b1779f | 1754 | { |
0bceb671 | 1755 | struct ipa_fn_summary *info = ipa_fn_summaries->get (node); |
67348ccc | 1756 | struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux; |
ca31b95f | 1757 | |
5e750dc6 JH |
1758 | /* Do not account external functions, they will be optimized out | 
1759 | if not inlined. Also only count the non-cold portion of the program. */ | 
bb1e543c | 1760 | if (inline_account_function_p (node)) |
a5b1779f | 1761 | initial_size += info->size; |
d59171da | 1762 | info->growth = estimate_growth (node); |
41f669d8 JH |
1763 | |
1764 | int num_calls = 0; | |
1765 | node->call_for_symbol_and_aliases (sum_callers, &num_calls, | |
1766 | true); | |
1767 | if (num_calls == 1) | |
1768 | info->single_caller = true; | |
bf3f6510 JH |
1769 | if (dfs && dfs->next_cycle) |
1770 | { | |
1771 | struct cgraph_node *n2; | |
1772 | int id = dfs->scc_no + 1; | |
1773 | for (n2 = node; n2; | |
67348ccc | 1774 | n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle) |
bf3f6510 | 1775 | { |
0bceb671 | 1776 | struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2); |
bf3f6510 JH |
1777 | if (info2->scc_no) |
1778 | break; | |
1779 | info2->scc_no = id; | |
1780 | } | |
1781 | } | |
a5b1779f | 1782 | } |
09a2806f | 1783 | |
e7f23018 | 1784 | for (edge = node->callers; edge; edge = edge->next_caller) |
3995f3a2 | 1785 | if (!(max_count >= edge->count)) |
632b4f8e | 1786 | max_count = edge->count; |
e7f23018 | 1787 | } |
b48ccf0d JH |
1788 | ipa_free_postorder_info (); |
1789 | initialize_growth_caches (); | |
1790 | ||
1791 | if (dump_file) | |
1792 | fprintf (dump_file, | |
1793 | "\nDeciding on inlining of small functions. Starting with size %i.\n", | |
1794 | initial_size); | |
b7c27d51 | 1795 | |
8a8dccb2 | 1796 | overall_size = initial_size; |
85057983 JH |
1797 | max_size = compute_max_insns (overall_size); |
1798 | min_size = overall_size; | |
1a3118e9 | 1799 | |
a99be3c9 | 1800 | /* Populate the heap with all edges we might inline. */ |
1a3118e9 | 1801 | |
c47d0034 | 1802 | FOR_EACH_DEFINED_FUNCTION (node) |
09ce3660 JH |
1803 | { |
1804 | bool update = false; | |
bcda57c1 | 1805 | struct cgraph_edge *next = NULL; |
c1eed5a1 | 1806 | bool has_speculative = false; |
1a3118e9 | 1807 | |
09ce3660 | 1808 | if (dump_file) |
464d0118 | 1809 | fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ()); |
09ce3660 JH |
1810 | |
1811 | for (edge = node->callees; edge; edge = next) | |
1812 | { | |
1813 | next = edge->next_callee; | |
1a3118e9 | 1814 | if (edge->inline_failed |
09ce3660 | 1815 | && !edge->aux |
1a3118e9 JH |
1816 | && can_inline_edge_p (edge, true) |
1817 | && want_inline_small_function_p (edge, true)) | 
1819 | { | |
1820 | gcc_assert (!edge->aux); | |
4a910049 | 1821 | update_edge_key (&edge_heap, edge); |
1a3118e9 | 1822 | } |
c1eed5a1 JH |
1823 | if (edge->speculative) |
1824 | has_speculative = true; | |
1825 | } | |
1826 | if (has_speculative) | |
1827 | for (edge = node->callees; edge; edge = next) | |
1828 | if (edge->speculative && !speculation_useful_p (edge, | |
1829 | edge->aux != NULL)) | |
09ce3660 | 1830 | { |
3dafb85c | 1831 | edge->resolve_speculation (); |
09ce3660 JH |
1832 | update = true; |
1833 | } | |
09ce3660 JH |
1834 | if (update) |
1835 | { | |
1836 | struct cgraph_node *where = node->global.inlined_to | |
1837 | ? node->global.inlined_to : node; | |
0bceb671 | 1838 | ipa_update_overall_fn_summary (where); |
09ce3660 | 1839 | reset_edge_caches (where); |
4a910049 | 1840 | update_caller_keys (&edge_heap, where, |
09ce3660 | 1841 | updated_nodes, NULL); |
2001028a JH |
1842 | update_callee_keys (&edge_heap, where, |
1843 | updated_nodes); | |
09ce3660 JH |
1844 | bitmap_clear (updated_nodes); |
1845 | } | |
1846 | } | |
1a3118e9 | 1847 | |
09a2806f | 1848 | gcc_assert (in_lto_p |
3995f3a2 | 1849 | || !(max_count > 0) |
09a2806f | 1850 | || (profile_info && flag_branch_probabilities)); |
b7c27d51 | 1851 | |
4a910049 | 1852 | while (!edge_heap.empty ()) |
ca31b95f | 1853 | { |
85057983 | 1854 | int old_size = overall_size; |
1ce18dc8 | 1855 | struct cgraph_node *where, *callee; |
f0e1509b ML |
1856 | sreal badness = edge_heap.min_key (); |
1857 | sreal current_badness; | |
1ce18dc8 | 1858 | int growth; |
670cd5c5 | 1859 | |
4a910049 | 1860 | edge = edge_heap.extract_min (); |
1ce18dc8 JH |
1861 | gcc_assert (edge->aux); |
1862 | edge->aux = NULL; | |
9de6f6c3 | 1863 | if (!edge->inline_failed || !edge->callee->analyzed) |
1ce18dc8 | 1864 | continue; |
cdc029b9 | 1865 | |
b2b29377 | 1866 | #if CHECKING_P |
6d4ab5f8 JH |
1867 | /* Make sure that the caches are kept consistent. */ | 
1868 | sreal cached_badness = edge_badness (edge, false); | |
0d92b555 JH |
1869 | |
1870 | int old_size_est = estimate_edge_size (edge); | |
ab38481c | 1871 | sreal old_time_est = estimate_edge_time (edge); |
0d92b555 JH |
1872 | int old_hints_est = estimate_edge_hints (edge); |
1873 | ||
40fda55b | 1874 | reset_edge_growth_cache (edge); |
0d92b555 JH |
1875 | gcc_assert (old_size_est == estimate_edge_size (edge)); |
1876 | gcc_assert (old_time_est == estimate_edge_time (edge)); | |
52d36202 JH |
1877 | /* FIXME: |
1878 | ||
1879 | gcc_assert (old_hints_est == estimate_edge_hints (edge)); | |
1880 | ||
1881 | fails with profile feedback because some hints depend on the | 
1882 | maybe_hot_edge_p predicate, and because the callee gets inlined into | 
1883 | other calls, the edge may become cold. | 
1884 | This ought to be fixed by computing relative probabilities | 
1885 | for the given invocation, but that will be better done once the whole | 
1886 | code is converted to sreals. Disable for now and revert to the "wrong" | 
1887 | value so the enable/disable checking paths agree. */ | 
1888 | edge_growth_cache[edge->uid].hints = old_hints_est + 1; | |
40fda55b | 1889 | |
cdc029b9 | 1890 | /* When updating the edge costs, we only decrease badness in the keys. |
09a2806f JH |
1891 | Increases of badness are handled lazily; when we see a key with an | 
1892 | out-of-date value on it, we re-insert it now. */ | 
4c0f7679 | 1893 | current_badness = edge_badness (edge, false); |
3995f3a2 | 1894 | gcc_assert (cached_badness == current_badness); |
2001028a | 1895 | gcc_assert (current_badness >= badness); |
6d4ab5f8 JH |
1896 | #else |
1897 | current_badness = edge_badness (edge, false); | |
1898 | #endif | |
cdc029b9 JH |
1899 | if (current_badness != badness) |
1900 | { | |
d75de25b | 1901 | if (edge_heap.min () && current_badness > edge_heap.min_key ()) |
6d4ab5f8 JH |
1902 | { |
1903 | edge->aux = edge_heap.insert (current_badness, edge); | |
1904 | continue; | |
1905 | } | |
1906 | else | |
1907 | badness = current_badness; | |
cdc029b9 | 1908 | } |
4c0f7679 JH |
1909 | |
1910 | if (!can_inline_edge_p (edge, true)) | |
09ce3660 | 1911 | { |
4a910049 | 1912 | resolve_noninline_speculation (&edge_heap, edge); |
09ce3660 JH |
1913 | continue; |
1914 | } | |
cdc029b9 | 1915 | |
d52f5295 | 1916 | callee = edge->callee->ultimate_alias_target (); |
03dfc36d | 1917 | growth = estimate_edge_growth (edge); |
ca31b95f | 1918 | if (dump_file) |
ca31b95f | 1919 | { |
b8698a0f | 1920 | fprintf (dump_file, |
464d0118 ML |
1921 | "\nConsidering %s with %i size\n", |
1922 | callee->dump_name (), | |
0bceb671 | 1923 | ipa_fn_summaries->get (callee)->size); |
b8698a0f | 1924 | fprintf (dump_file, |
464d0118 | 1925 | " to be inlined into %s in %s:%i\n" |
6d4ab5f8 | 1926 | " Estimated badness is %f, frequency %.2f.\n", |
464d0118 | 1927 | edge->caller->dump_name (), |
9e145afd | 1928 | edge->call_stmt |
355fe088 | 1929 | && (LOCATION_LOCUS (gimple_location ((const gimple *) |
1b7706c8 JJ |
1930 | edge->call_stmt)) |
1931 | > BUILTINS_LOCATION) | |
355fe088 | 1932 | ? gimple_filename ((const gimple *) edge->call_stmt) |
9e145afd N |
1933 | : "unknown", |
1934 | edge->call_stmt | |
355fe088 | 1935 | ? gimple_lineno ((const gimple *) edge->call_stmt) |
9e145afd | 1936 | : -1, |
6d4ab5f8 | 1937 | badness.to_double (), |
45a80bb9 | 1938 | edge->frequency / (double)CGRAPH_FREQ_BASE); |
3995f3a2 JH |
1939 | if (edge->count.initialized_p ()) |
1940 | { | |
1941 | fprintf (dump_file, " Called "); | |
1942 | edge->count.dump (dump_file); | |
1943 | fprintf (dump_file, " times\n"); | 
1944 | } | |
1ce18dc8 | 1945 | if (dump_flags & TDF_DETAILS) |
4c0f7679 | 1946 | edge_badness (edge, true); |
ca31b95f JH |
1947 | } |
1948 | ||
4c0f7679 | 1949 | if (overall_size + growth > max_size |
67348ccc | 1950 | && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)) |
670cd5c5 | 1951 | { |
4c0f7679 JH |
1952 | edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT; |
1953 | report_inline_failed_reason (edge); | |
4a910049 | 1954 | resolve_noninline_speculation (&edge_heap, edge); |
670cd5c5 JH |
1955 | continue; |
1956 | } | |
4c0f7679 JH |
1957 | |
1958 | if (!want_inline_small_function_p (edge, true)) | |
09ce3660 | 1959 | { |
4a910049 | 1960 | resolve_noninline_speculation (&edge_heap, edge); |
09ce3660 JH |
1961 | continue; |
1962 | } | |
09a2806f | 1963 | |
7ba03e5e JL |
1964 | /* Heuristics for inlining small functions work poorly for | 
1965 | recursive calls where we produce an effect similar to loop unrolling. | 
1966 | When inlining such an edge seems profitable, leave the decision to | 
09a2806f | 1967 | the specific recursive inliner. */ | 
3dafb85c | 1968 | if (edge->recursive_p ()) |
670cd5c5 JH |
1969 | { |
1970 | where = edge->caller; | |
1971 | if (where->global.inlined_to) | |
1972 | where = where->global.inlined_to; | |
4c0f7679 | 1973 | if (!recursive_inlining (edge, |
2bf86c84 JH |
1974 | opt_for_fn (edge->caller->decl, |
1975 | flag_indirect_inlining) | |
4c0f7679 | 1976 | ? &new_indirect_edges : NULL)) |
d7d1d041 RG |
1977 | { |
1978 | edge->inline_failed = CIF_RECURSIVE_INLINING; | |
4a910049 | 1979 | resolve_noninline_speculation (&edge_heap, edge); |
d7d1d041 RG |
1980 | continue; |
1981 | } | |
40fda55b | 1982 | reset_edge_caches (where); |
09a2806f JH |
1983 | /* Recursive inliner inlines all recursive calls of the function |
1984 | at once. Consequently we need to update all callee keys. */ | |
2bf86c84 | 1985 | if (opt_for_fn (edge->caller->decl, flag_indirect_inlining)) |
4a910049 ML |
1986 | add_new_edges_to_heap (&edge_heap, new_indirect_edges); |
1987 | update_callee_keys (&edge_heap, where, updated_nodes); | |
09ce3660 | 1988 | bitmap_clear (updated_nodes); |
670cd5c5 JH |
1989 | } |
1990 | else | |
1991 | { | |
4c0f7679 JH |
1992 | struct cgraph_node *outer_node = NULL; |
1993 | int depth = 0; | |
1994 | ||
7ba03e5e JL |
1995 | /* Consider the case where a self-recursive function A is inlined | 
1996 | into B. This is a desired optimization in some cases, since it | 
1997 | leads to an effect similar to loop peeling and we might completely | 
1998 | optimize out the recursive call. However, we must be extra | 
1999 | selective. */ | 
4c0f7679 JH |
2000 | |
2001 | where = edge->caller; | |
2002 | while (where->global.inlined_to) | |
670cd5c5 | 2003 | { |
67348ccc | 2004 | if (where->decl == callee->decl) |
4c0f7679 JH |
2005 | outer_node = where, depth++; |
2006 | where = where->callers->caller; | |
2007 | } | |
2008 | if (outer_node | |
2009 | && !want_inline_self_recursive_call_p (edge, outer_node, | |
2010 | true, depth)) | |
2011 | { | |
2012 | edge->inline_failed | |
67348ccc | 2013 | = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl) |
4c0f7679 | 2014 | ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED); |
4a910049 | 2015 | resolve_noninline_speculation (&edge_heap, edge); |
670cd5c5 JH |
2016 | continue; |
2017 | } | |
4c0f7679 JH |
2018 | else if (depth && dump_file) |
2019 | fprintf (dump_file, " Peeling recursion with depth %i\n", depth); | |
2020 | ||
9b8051b4 | 2021 | gcc_checking_assert (!callee->global.inlined_to); |
c170d40f | 2022 | inline_call (edge, true, &new_indirect_edges, &overall_size, true); |
2bf86c84 | 2023 | add_new_edges_to_heap (&edge_heap, new_indirect_edges); |
f8e2a1ed | 2024 | |
1f6f9079 | 2025 | reset_edge_caches (edge->callee); |
40fda55b | 2026 | |
4a910049 | 2027 | update_callee_keys (&edge_heap, where, updated_nodes); |
670cd5c5 JH |
2028 | } |
2029 | where = edge->caller; | |
2030 | if (where->global.inlined_to) | |
2031 | where = where->global.inlined_to; | |
2032 | ||
2033 | /* Our profitability metric can depend on local properties | 
2034 | such as the number of inlinable calls and the size of the function body. | 
2035 | After inlining these properties might change for the function we | 
2036 | inlined into (since its body size changed) and for the functions | 
2037 | called by the function we inlined (since the number of their inlinable | 
2038 | callers might change). */ | 
4a910049 | 2039 | update_caller_keys (&edge_heap, where, updated_nodes, NULL); |
208e5afa JH |
2040 | /* Offline copy count has possibly changed, recompute if profile is |
2041 | available. */ | |
3995f3a2 | 2042 | if (max_count > profile_count::zero ()) |
208e5afa JH |
2043 | { |
2044 | struct cgraph_node *n = cgraph_node::get (edge->callee->decl); | |
2045 | if (n != edge->callee && n->analyzed) | |
2046 | update_callee_keys (&edge_heap, n, updated_nodes); | |
2047 | } | |
670cd5c5 | 2048 | bitmap_clear (updated_nodes); |
ca31b95f | 2049 | |
670cd5c5 | 2050 | if (dump_file) |
50fe876d | 2051 | { |
b8698a0f | 2052 | fprintf (dump_file, |
4adaad64 | 2053 | " Inlined %s into %s which now has time %f and size %i, " |
85057983 | 2054 | "net change of %+i.\n", |
4adaad64 | 2055 | edge->callee->name (), |
fec39fa6 | 2056 | edge->caller->name (), |
0bceb671 JH |
2057 | ipa_fn_summaries->get (edge->caller)->time.to_double (), |
2058 | ipa_fn_summaries->get (edge->caller)->size, | |
85057983 | 2059 | overall_size - old_size); |
50fe876d | 2060 | } |
85057983 | 2061 | if (min_size > overall_size) |
b7c27d51 | 2062 | { |
85057983 JH |
2063 | min_size = overall_size; |
2064 | max_size = compute_max_insns (min_size); | |
b7c27d51 JH |
2065 | |
2066 | if (dump_file) | |
85057983 | 2067 | fprintf (dump_file, "New minimal size reached: %i\n", min_size); |
b7c27d51 | 2068 | } |
ca31b95f | 2069 | } |
3e293154 | 2070 | |
632b4f8e | 2071 | free_growth_caches (); |
09a2806f JH |
2072 | if (dump_file) |
2073 | fprintf (dump_file, | |
2074 | "Unit growth for small function inlining: %i->%i (%i%%)\n", | |
632b4f8e JH |
2075 | initial_size, overall_size, |
2076 | initial_size ? overall_size * 100 / (initial_size) - 100: 0); | |
3dafb85c | 2077 | symtab->remove_edge_removal_hook (edge_removal_hook_holder); |
ca31b95f JH |
2078 | } |
2079 | ||
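/* The extraction side of the lazy-key protocol used by the loop above, as a
   standalone sketch (Heap/Edge are stand-ins and compute_badness is
   hypothetical; see update_edge_key for the insertion side): an edge whose
   key went stale while queued is re-inserted with its current badness
   instead of being processed out of order.  */
#if 0
template <typename Heap, typename Edge>
static Edge *
pop_valid_edge (Heap *heap)
{
  while (!heap->empty ())
    {
      double key = heap->min_key ();
      Edge *edge = heap->extract_min ();
      double current = compute_badness (edge);  /* hypothetical */
      /* Keys only go stale in the increasing direction, so an edge can
	 only be extracted too early, never too late.  */
      if (current > key && heap->min () && current > heap->min_key ())
	{
	  edge->aux = heap->insert (current, edge);
	  continue;
	}
      return edge;
    }
  return 0;
}
#endif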
09a2806f JH |
2080 | /* Flatten NODE. Performed both during early inlining and |
2081 | at IPA inlining time. */ | |
af961c7f RG |
2082 | |
2083 | static void | |
632b4f8e | 2084 | flatten_function (struct cgraph_node *node, bool early) |
af961c7f RG |
2085 | { |
2086 | struct cgraph_edge *e; | |
2087 | ||
2088 | /* We shouldn't be called recursively when we are being processed. */ | |
67348ccc | 2089 | gcc_assert (node->aux == NULL); |
af961c7f | 2090 | |
67348ccc | 2091 | node->aux = (void *) node; |
af961c7f RG |
2092 | |
2093 | for (e = node->callees; e; e = e->next_callee) | |
2094 | { | |
2095 | struct cgraph_node *orig_callee; | |
d52f5295 | 2096 | struct cgraph_node *callee = e->callee->ultimate_alias_target (); |
af961c7f | 2097 | |
af961c7f | 2098 | /* Have we hit a cycle? It is time to give up. */ | 
67348ccc | 2099 | if (callee->aux) |
af961c7f RG |
2100 | { |
2101 | if (dump_file) | |
2102 | fprintf (dump_file, | |
2103 | "Not inlining %s into %s to avoid cycle.\n", | |
2a72a953 DM |
2104 | xstrdup_for_dump (callee->name ()), |
2105 | xstrdup_for_dump (e->caller->name ())); | |
af961c7f RG |
2106 | e->inline_failed = CIF_RECURSIVE_INLINING; |
2107 | continue; | |
2108 | } | |
2109 | ||
2110 | /* When the edge is already inlined, we just need to recurse into | |
2111 | it in order to fully flatten the leaves. */ | |
2112 | if (!e->inline_failed) | |
2113 | { | |
a5b1779f | 2114 | flatten_function (callee, early); |
af961c7f RG |
2115 | continue; |
2116 | } | |
2117 | ||
4c0f7679 JH |
2118 | /* The flatten attribute needs to be processed during late inlining. For | 
2119 | extra code quality, however, we do flattening during early optimization | 
2120 | too. */ | 
632b4f8e | 2121 | if (!early |
4c0f7679 JH |
2122 | ? !can_inline_edge_p (e, true) |
2123 | : !can_early_inline_edge_p (e)) | |
2124 | continue; | |
2125 | ||
3dafb85c | 2126 | if (e->recursive_p ()) |
af961c7f RG |
2127 | { |
2128 | if (dump_file) | |
2129 | fprintf (dump_file, "Not inlining: recursive call.\n"); | |
2130 | continue; | |
2131 | } | |
2132 | ||
67348ccc DM |
2133 | if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl)) |
2134 | != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl))) | |
59e0c6b7 RG |
2135 | { |
2136 | if (dump_file) | |
2137 | fprintf (dump_file, "Not inlining: SSA form does not match.\n"); | |
2138 | continue; | |
2139 | } | |
2140 | ||
af961c7f RG |
2141 | /* Inline the edge and flatten the inline clone. Avoid |
2142 | recursing through the original node if the node was cloned. */ | |
2143 | if (dump_file) | |
2144 | fprintf (dump_file, " Inlining %s into %s.\n", | |
2a72a953 DM |
2145 | xstrdup_for_dump (callee->name ()), |
2146 | xstrdup_for_dump (e->caller->name ())); | |
a5b1779f | 2147 | orig_callee = callee; |
c170d40f | 2148 | inline_call (e, true, NULL, NULL, false); |
af961c7f | 2149 | if (e->callee != orig_callee) |
67348ccc | 2150 | orig_callee->aux = (void *) node; |
632b4f8e | 2151 | flatten_function (e->callee, early); |
af961c7f | 2152 | if (e->callee != orig_callee) |
67348ccc | 2153 | orig_callee->aux = NULL; |
af961c7f RG |
2154 | } |
2155 | ||
67348ccc | 2156 | node->aux = NULL; |
c170d40f | 2157 | if (!node->global.inlined_to) | 
0bceb671 | 2158 | ipa_update_overall_fn_summary (node); |
af961c7f RG |
2159 | } |
2160 | ||
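/* Source-level view of what this function implements: the flatten attribute
   asks for the whole call tree of a function to be inlined into its body,
   cycles and uninlinable edges excepted.  A hypothetical use (frame,
   read_header and read_payload are made-up names):  */
#if 0
__attribute__ ((flatten))
static void
decode_frame (struct frame *f)
{
  read_header (f);   /* inlined, and its callees recursively */
  read_payload (f);  /* inlined as well, unless a cycle or an
			uninlinable edge stops the walk */
}
#endif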
a81b0a3d JH |
2161 | /* Inline NODE to all callers. Worker for cgraph_for_node_and_aliases. |
2162 | DATA points to number of calls originally found so we avoid infinite | |
2163 | recursion. */ | |
2164 | ||
2165 | static bool | |
bddead15 RB |
2166 | inline_to_all_callers_1 (struct cgraph_node *node, void *data, |
2167 | hash_set<cgraph_node *> *callers) | |
a81b0a3d JH |
2168 | { |
2169 | int *num_calls = (int *)data; | |
1bbb87c4 JH |
2170 | bool callee_removed = false; |
2171 | ||
a81b0a3d JH |
2172 | while (node->callers && !node->global.inlined_to) |
2173 | { | |
2174 | struct cgraph_node *caller = node->callers->caller; | |
2175 | ||
1af8bfe5 JH |
2176 | if (!can_inline_edge_p (node->callers, true) |
2177 | || node->callers->recursive_p ()) | |
2178 | { | |
2179 | if (dump_file) | |
2180 | fprintf (dump_file, "Uninlinable call found; giving up.\n"); | |
2181 | *num_calls = 0; | |
2182 | return false; | |
2183 | } | |
2184 | ||
a81b0a3d JH |
2185 | if (dump_file) |
2186 | { | |
2187 | fprintf (dump_file, | |
2188 | "\nInlining %s size %i.\n", | |
fec39fa6 | 2189 | node->name (), |
0bceb671 | 2190 | ipa_fn_summaries->get (node)->size); |
a81b0a3d JH |
2191 | fprintf (dump_file, |
2192 | " Called once from %s %i insns.\n", | |
fec39fa6 | 2193 | node->callers->caller->name (), |
0bceb671 | 2194 | ipa_fn_summaries->get (node->callers->caller)->size); |
a81b0a3d JH |
2195 | } |
2196 | ||
bddead15 RB |
2197 | /* Remember which callers we inlined to, delaying updating the |
2198 | overall summary. */ | |
2199 | callers->add (node->callers->caller); | |
2200 | inline_call (node->callers, true, NULL, NULL, false, &callee_removed); | |
a81b0a3d JH |
2201 | if (dump_file) |
2202 | fprintf (dump_file, | |
2203 | " Inlined into %s which now has %i size\n", | |
fec39fa6 | 2204 | caller->name (), |
0bceb671 | 2205 | ipa_fn_summaries->get (caller)->size); |
a81b0a3d JH |
2206 | if (!(*num_calls)--) |
2207 | { | |
2208 | if (dump_file) | |
2209 | fprintf (dump_file, "New calls found; giving up.\n"); | |
1bbb87c4 | 2210 | return callee_removed; |
a81b0a3d | 2211 | } |
1bbb87c4 JH |
2212 | if (callee_removed) |
2213 | return true; | |
a81b0a3d JH |
2214 | } |
2215 | return false; | |
2216 | } | |
2217 | ||
bddead15 RB |
2218 | /* Wrapper around inline_to_all_callers_1 doing delayed overall summary |
2219 | update. */ | |
2220 | ||
2221 | static bool | |
2222 | inline_to_all_callers (struct cgraph_node *node, void *data) | |
2223 | { | |
2224 | hash_set<cgraph_node *> callers; | |
2225 | bool res = inline_to_all_callers_1 (node, data, &callers); | |
2226 | /* Perform the delayed update of the overall summary of all callers | |
2227 | processed. This avoids quadratic behavior in the cases where | |
2228 | we have a lot of calls to the same function. */ | |
2229 | for (hash_set<cgraph_node *>::iterator i = callers.begin (); | |
2230 | i != callers.end (); ++i) | |
0bceb671 | 2231 | ipa_update_overall_fn_summary (*i); |
bddead15 RB |
2232 | return res; |
2233 | } | |
2234 | ||
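/* The batching idea above, standalone: instead of recomputing an expensive
   per-caller summary after every single inline (quadratic when one function
   has many callers), remember the touched callers in a set and recompute
   each summary once at the end.  A sketch with standard containers
   (update_summary and the node type are hypothetical):  */
#if 0
#include <unordered_set>
#include <vector>

struct node;
void update_summary (node *);  /* hypothetical, expensive recomputation */

static void
inline_into_callers (node *callee, const std::vector <node *> &callers)
{
  std::unordered_set <node *> touched;
  for (node *caller : callers)
    {
      /* ... inline CALLEE into CALLER here ... */
      touched.insert (caller);
    }
  /* One summary recomputation per distinct caller, not per call site.  */
  for (node *caller : touched)
    update_summary (caller);
}
#endif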
e86a910f JH |
2235 | /* Output overall time estimate. */ |
2236 | static void | |
2237 | dump_overall_stats (void) | |
2238 | { | |
ab38481c | 2239 | sreal sum_weighted = 0, sum = 0; |
e86a910f JH |
2240 | struct cgraph_node *node; |
2241 | ||
2242 | FOR_EACH_DEFINED_FUNCTION (node) | |
2243 | if (!node->global.inlined_to | |
2244 | && !node->alias) | |
2245 | { | |
0bceb671 | 2246 | sreal time = ipa_fn_summaries->get (node)->time; |
e86a910f | 2247 | sum += time; |
3995f3a2 JH |
2248 | if (node->count.initialized_p ()) |
2249 | sum_weighted += time * node->count.to_gcov_type (); | |
e86a910f JH |
2250 | } |
2251 | fprintf (dump_file, "Overall time estimate: " | |
ab38481c JH |
2252 | "%f weighted by profile: " |
2253 | "%f\n", sum.to_double (), sum_weighted.to_double ()); | |
e86a910f JH |
2254 | } |
2255 | ||
2256 | /* Output some useful stats about inlining. */ | |
2257 | ||
2258 | static void | |
2259 | dump_inline_stats (void) | |
2260 | { | |
a9243bfc RB |
2261 | int64_t inlined_cnt = 0, inlined_indir_cnt = 0; |
2262 | int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0; | |
2263 | int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0; | |
2264 | int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0; | |
2265 | int64_t inlined_speculative = 0, inlined_speculative_ply = 0; | |
2266 | int64_t indirect_poly_cnt = 0, indirect_cnt = 0; | |
2267 | int64_t reason[CIF_N_REASONS][3]; | |
e86a910f JH |
2268 | int i; |
2269 | struct cgraph_node *node; | |
2270 | ||
2271 | memset (reason, 0, sizeof (reason)); | |
2272 | FOR_EACH_DEFINED_FUNCTION (node) | |
2273 | { | |
2274 | struct cgraph_edge *e; | |
2275 | for (e = node->callees; e; e = e->next_callee) | |
2276 | { | |
2277 | if (e->inline_failed) | |
2278 | { | |
3995f3a2 JH |
2279 | if (e->count.initialized_p ()) |
2280 | reason[(int) e->inline_failed][0] += e->count.to_gcov_type (); | |
e86a910f JH |
2281 | reason[(int) e->inline_failed][1] += e->frequency; |
2282 | reason[(int) e->inline_failed][2] ++; | |
3995f3a2 JH |
2283 | if (DECL_VIRTUAL_P (e->callee->decl) |
2284 | && e->count.initialized_p ()) | |
e86a910f JH |
2285 | { |
2286 | if (e->indirect_inlining_edge) | |
3995f3a2 | 2287 | noninlined_virt_indir_cnt += e->count.to_gcov_type (); |
e86a910f | 2288 | else |
3995f3a2 | 2289 | noninlined_virt_cnt += e->count.to_gcov_type (); |
e86a910f | 2290 | } |
3995f3a2 | 2291 | else if (e->count.initialized_p ()) |
e86a910f JH |
2292 | { |
2293 | if (e->indirect_inlining_edge) | |
3995f3a2 | 2294 | noninlined_indir_cnt += e->count.to_gcov_type (); |
e86a910f | 2295 | else |
3995f3a2 | 2296 | noninlined_cnt += e->count.to_gcov_type (); |
e86a910f JH |
2297 | } |
2298 | } | |
3995f3a2 | 2299 | else if (e->count.initialized_p ()) |
e86a910f JH |
2300 | { |
2301 | if (e->speculative) | |
2302 | { | |
2303 | if (DECL_VIRTUAL_P (e->callee->decl)) | |
3995f3a2 | 2304 | inlined_speculative_ply += e->count.to_gcov_type (); |
e86a910f | 2305 | else |
3995f3a2 | 2306 | inlined_speculative += e->count.to_gcov_type (); |
e86a910f JH |
2307 | } |
2308 | else if (DECL_VIRTUAL_P (e->callee->decl)) | |
2309 | { | |
2310 | if (e->indirect_inlining_edge) | |
3995f3a2 | 2311 | inlined_virt_indir_cnt += e->count.to_gcov_type (); |
e86a910f | 2312 | else |
3995f3a2 | 2313 | inlined_virt_cnt += e->count.to_gcov_type (); |
e86a910f JH |
2314 | } |
2315 | else | |
2316 | { | |
2317 | if (e->indirect_inlining_edge) | |
3995f3a2 | 2318 | inlined_indir_cnt += e->count.to_gcov_type (); |
e86a910f | 2319 | else |
3995f3a2 | 2320 | inlined_cnt += e->count.to_gcov_type (); |
e86a910f JH |
2321 | } |
2322 | } | |
2323 | } | |
2324 | for (e = node->indirect_calls; e; e = e->next_callee) | |
3995f3a2 JH |
2325 | if (e->indirect_info->polymorphic |
2326 | && e->count.initialized_p ()) | 
2327 | indirect_poly_cnt += e->count.to_gcov_type (); | |
2328 | else if (e->count.initialized_p ()) | |
2329 | indirect_cnt += e->count.to_gcov_type (); | |
e86a910f | 2330 | } |
3995f3a2 | 2331 | if (max_count.initialized_p ()) |
e86a910f JH |
2332 | { |
2333 | fprintf (dump_file, | |
16998094 JM |
2334 | "Inlined %" PRId64 " + speculative " |
2335 | "%" PRId64 " + speculative polymorphic " | |
2336 | "%" PRId64 " + previously indirect " | |
2337 | "%" PRId64 " + virtual " | |
2338 | "%" PRId64 " + virtual and previously indirect " | |
2339 | "%" PRId64 "\n" "Not inlined " | |
2340 | "%" PRId64 " + previously indirect " | |
2341 | "%" PRId64 " + virtual " | |
2342 | "%" PRId64 " + virtual and previously indirect " | |
2343 | "%" PRId64 " + still indirect " | 
2344 | "%" PRId64 " + still indirect polymorphic " | |
2345 | "%" PRId64 "\n", inlined_cnt, | |
e86a910f JH |
2346 | inlined_speculative, inlined_speculative_ply, |
2347 | inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt, | |
2348 | noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt, | |
2349 | noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt); | |
3995f3a2 JH |
2350 | fprintf (dump_file, "Removed speculations "); |
2351 | spec_rem.dump (dump_file); | |
2352 | fprintf (dump_file, "\n"); | |
e86a910f JH |
2353 | } |
2354 | dump_overall_stats (); | |
2355 | fprintf (dump_file, "\nWhy inlining failed?\n"); | |
2356 | for (i = 0; i < CIF_N_REASONS; i++) | |
2357 | if (reason[i][2]) | |
16998094 | 2358 | fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64 " count\n", | 
e86a910f JH |
2359 | cgraph_inline_failed_string ((cgraph_inline_failed_t) i), |
2360 | (int) reason[i][2], (int) reason[i][1], reason[i][0]); | |
2361 | } | |
2362 | ||
ca31b95f JH |
2363 | /* Decide on the inlining. We do so in topological order to avoid | 
2364 | the expense of updating data structures. */ | 
2365 | ||
c2924966 | 2366 | static unsigned int |
4c0f7679 | 2367 | ipa_inline (void) |
ca31b95f JH |
2368 | { |
2369 | struct cgraph_node *node; | |
2370 | int nnodes; | |
b591a8b7 | 2371 | struct cgraph_node **order; |
ca31b95f | 2372 | int i; |
09ce3660 | 2373 | int cold; |
8a41354f JH |
2374 | bool remove_functions = false; |
2375 | ||
2376 | if (!optimize) | |
2377 | return 0; | |
ca31b95f | 2378 | |
6d4ab5f8 JH |
2379 | cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE; |
2380 | percent_rec = (sreal) 1 / (sreal) 100; | |
2381 | ||
3dafb85c | 2382 | order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count); |
b591a8b7 | 2383 | |
10a5dd5d | 2384 | if (dump_file) |
0bceb671 | 2385 | ipa_dump_fn_summaries (dump_file); |
670cd5c5 | 2386 | |
af8bca3c | 2387 | nnodes = ipa_reverse_postorder (order); |
ca31b95f | 2388 | |
65c70e6b | 2389 | FOR_EACH_FUNCTION (node) |
7ce7e4d4 JH |
2390 | { |
2391 | node->aux = 0; | |
2392 | ||
2393 | /* Recompute the default reasons for inlining because they may have | |
2394 | changed during merging. */ | |
2395 | if (in_lto_p) | |
2396 | { | |
2397 | for (cgraph_edge *e = node->callees; e; e = e->next_callee) | |
2398 | { | |
2399 | gcc_assert (e->inline_failed); | |
2400 | initialize_inline_failed (e); | |
2401 | } | |
2402 | for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee) | |
2403 | initialize_inline_failed (e); | |
2404 | } | |
2405 | } | |
ca31b95f JH |
2406 | |
2407 | if (dump_file) | |
af961c7f | 2408 | fprintf (dump_file, "\nFlattening functions:\n"); |
ca31b95f | 2409 | |
af961c7f RG |
2410 | /* In the first pass handle functions to be flattened. Do this with |
2411 | a priority so none of our later choices will make this impossible. */ | |
2412 | for (i = nnodes - 1; i >= 0; i--) | |
ca31b95f | 2413 | { |
af961c7f RG |
2414 | node = order[i]; |
2415 | ||
09a2806f | 2416 | /* Handle nodes to be flattened. |
af961c7f RG |
2417 | Ideally when processing callees we stop inlining at the |
2418 | entry of cycles, possibly cloning that entry point and | |
2419 | try to flatten itself turning it into a self-recursive | |
2420 | function. */ | |
2421 | if (lookup_attribute ("flatten", | |
67348ccc | 2422 | DECL_ATTRIBUTES (node->decl)) != NULL) |
f8e2a1ed | 2423 | { |
ca31b95f | 2424 | if (dump_file) |
b8698a0f | 2425 | fprintf (dump_file, |
fec39fa6 | 2426 | "Flattening %s\n", node->name ()); |
632b4f8e | 2427 | flatten_function (node, false); |
ca31b95f | 2428 | } |
ca31b95f | 2429 | } |
e86a910f JH |
2430 | if (dump_file) |
2431 | dump_overall_stats (); | |
ca31b95f | 2432 | |
4c0f7679 | 2433 | inline_small_functions (); |
e70670cf | 2434 | |
17e0fc92 JH |
2435 | gcc_assert (symtab->state == IPA_SSA); |
2436 | symtab->state = IPA_SSA_AFTER_INLINING; | |
2437 | /* Do first after-inlining removal. We want to remove all "stale" extern | |
2438 | inline functions and virtual functions so we really know what is called | |
2439 | once. */ | |
2440 | symtab->remove_unreachable_nodes (dump_file); | |
4c0f7679 | 2441 | free (order); |
ca31b95f | 2442 | |
100411f8 JH |
2443 | /* Inline functions with the property that after inlining into all callers | 
2444 | the code size will shrink because the out-of-line copy is eliminated. | 
2445 | We do this regardless of the callee size as long as function growth limits | 
2446 | are met. */ | 
09ce3660 JH |
2447 | if (dump_file) |
2448 | fprintf (dump_file, | |
17e0fc92 JH |
2449 | "\nDeciding on functions to be inlined into all callers and " |
2450 | "removing useless speculations:\n"); | 
09ce3660 JH |
2451 | |
2452 | /* Inlining one function called once has a good chance of preventing | 
2453 | the inlining of another function into the same callee. Ideally we should | 
2454 | work in priority order, but probably inlining hot functions first | 
2455 | is a good cut without the extra pain of maintaining the queue. | 
2456 | |
2457 | ??? this is not really fitting the bill perfectly: inlining a function | 
2458 | into a callee often leads to better optimization of the callee due to | 
2459 | increased context for optimization. | 
2460 | For example if the main() function calls a function that outputs help | 
2461 | and then a function that does the main optimization, we should inline | 
2462 | the second with priority even if both calls are cold by themselves. | 
2463 | |
2464 | We probably want to implement a new predicate replacing our use of | 
2465 | maybe_hot_edge, interpreted as maybe_hot_edge || callee is known | 
2466 | to be hot. */ | 
2467 | for (cold = 0; cold <= 1; cold++) | 
355866de | 2468 | { |
09ce3660 | 2469 | FOR_EACH_DEFINED_FUNCTION (node) |
ca31b95f | 2470 | { |
09ce3660 JH |
2471 | struct cgraph_edge *edge, *next; |
2472 | bool update = false; | 
2473 | ||
2474 | for (edge = node->callees; edge; edge = next) | |
ca31b95f | 2475 | { |
09ce3660 JH |
2476 | next = edge->next_callee; |
2477 | if (edge->speculative && !speculation_useful_p (edge, false)) | |
e3c7b49c | 2478 | { |
3dafb85c | 2479 | edge->resolve_speculation (); |
e86a910f | 2480 | spec_rem += edge->count; |
09ce3660 | 2481 | update = true; |
8a41354f | 2482 | remove_functions = true; |
09ce3660 JH |
2483 | } |
2484 | } | |
2485 | if (update) | |
2486 | { | |
2487 | struct cgraph_node *where = node->global.inlined_to | |
2488 | ? node->global.inlined_to : node; | |
09ce3660 | 2489 | reset_edge_caches (where); |
0bceb671 | 2490 | ipa_update_overall_fn_summary (where); |
09ce3660 | 2491 | } |
2bf86c84 | 2492 | if (want_inline_function_to_all_callers_p (node, cold)) |
09ce3660 JH |
2493 | { |
2494 | int num_calls = 0; | |
1ede94c5 JH |
2495 | node->call_for_symbol_and_aliases (sum_callers, &num_calls, |
2496 | true); | |
2497 | while (node->call_for_symbol_and_aliases | |
17e0fc92 | 2498 | (inline_to_all_callers, &num_calls, true)) |
1bbb87c4 | 2499 | ; |
f019b607 | 2500 | remove_functions = true; |
ca31b95f JH |
2501 | } |
2502 | } | |
2503 | } | |
2504 | ||
f8e2a1ed | 2505 | /* Free ipa-prop structures if they are no longer needed. */ |
5ee53a06 | 2506 | if (optimize) |
e33c6cd6 | 2507 | ipa_free_all_structures_after_iinln (); |
f8e2a1ed | 2508 | |
ca31b95f | 2509 | if (dump_file) |
e86a910f JH |
2510 | { |
2511 | fprintf (dump_file, | |
2512 | "\nInlined %i calls, eliminated %i functions\n\n", | |
2513 | ncalls_inlined, nfunctions_inlined); | |
2514 | dump_inline_stats (); | |
2515 | } | |
09a2806f | 2516 | |
898b8927 | 2517 | if (dump_file) |
0bceb671 | 2518 | ipa_dump_fn_summaries (dump_file); |
10a5dd5d JH |
2519 | /* In WPA we use inline summaries for partitioning process. */ |
2520 | if (!flag_wpa) | |
d2db2e6b | 2521 | ipa_free_fn_summary (); |
8a41354f | 2522 | return remove_functions ? TODO_remove_functions : 0; |
ca31b95f JH |
2523 | } |
2524 | ||
275b4baa RG |
2525 | /* Inline always-inline function calls in NODE. */ |
2526 | ||
2527 | static bool | |
4c0f7679 | 2528 | inline_always_inline_functions (struct cgraph_node *node) |
275b4baa RG |
2529 | { |
2530 | struct cgraph_edge *e; | |
2531 | bool inlined = false; | |
2532 | ||
2533 | for (e = node->callees; e; e = e->next_callee) | |
2534 | { | |
d52f5295 | 2535 | struct cgraph_node *callee = e->callee->ultimate_alias_target (); |
67348ccc | 2536 | if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)) |
275b4baa RG |
2537 | continue; |
2538 | ||
3dafb85c | 2539 | if (e->recursive_p ()) |
275b4baa RG |
2540 | { |
2541 | if (dump_file) | |
4c0f7679 | 2542 | fprintf (dump_file, " Not inlining recursive call to %s.\n", |
fec39fa6 | 2543 | e->callee->name ()); |
275b4baa RG |
2544 | e->inline_failed = CIF_RECURSIVE_INLINING; |
2545 | continue; | |
2546 | } | |
2547 | ||
4c0f7679 | 2548 | if (!can_early_inline_edge_p (e)) |
bef8491a ST |
2549 | { |
2550 | /* Set inlined to true if the callee is marked "always_inline" but | |
2551 | is not inlinable. This will allow flagging an error later in | |
2552 | expand_call_inline in tree-inline.c. */ | |
2553 | if (lookup_attribute ("always_inline", | |
67348ccc | 2554 | DECL_ATTRIBUTES (callee->decl)) != NULL) |
bef8491a ST |
2555 | inlined = true; |
2556 | continue; | |
2557 | } | |
275b4baa RG |
2558 | |
2559 | if (dump_file) | |
4c0f7679 | 2560 | fprintf (dump_file, " Inlining %s into %s (always_inline).\n", |
2a72a953 DM |
2561 | xstrdup_for_dump (e->callee->name ()), |
2562 | xstrdup_for_dump (e->caller->name ())); | |
c170d40f | 2563 | inline_call (e, true, NULL, NULL, false); |
275b4baa RG |
2564 | inlined = true; |
2565 | } | |
c170d40f | 2566 | if (inlined) |
0bceb671 | 2567 | ipa_update_overall_fn_summary (node); |
275b4baa RG |
2568 | |
2569 | return inlined; | |
2570 | } | |
2571 | ||
ca31b95f | 2572 | /* Decide on the inlining. We do so in topological order to avoid | 
af961c7f | 2573 | the expense of updating data structures. */ | 
ca31b95f | 2574 | |
static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!ipa_fn_summaries->get (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !opt_for_fn (node->decl, flag_inline_small_functions)
	  && !opt_for_fn (node->decl, flag_inline_functions))
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 callee->name ());

      if (!can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s.\n",
		 xstrdup_for_dump (callee->name ()),
		 xstrdup_for_dump (e->caller->name ()));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }

  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}
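
/* Illustrative sketch, not part of the original source: under the filter
   above, a callee such as

     static inline int add_one (int x) { return x + 1; }

   is considered because it is declared inline, while a plain static
   function is only considered when -finline-small-functions or
   -finline-functions is in effect for the caller.  */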

unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the data structures for the IPA inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner
     decides to inline a call to a function clone, because function clones
     don't have a parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_sum)
    return 0;

  if (flag_checking)
    node->verify ();
  node->remove_all_references ();

  /* Rebuild this reference because it doesn't depend on the function's
     body and it's required to pass cgraph_node verification.  */
  if (node->instrumented_version
      && !node->instrumentation_clone)
    node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always-inline functions will get less optimized, but at the
	 same time inlining a function that calls an always-inline
	 function into an always-inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of
	 inlining.  */
      || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
	  && lookup_attribute ("always_inline",
			       DECL_ATTRIBUTES (node->decl))))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file, "Flattening %s\n", node->name ());
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* If some always_inline functions were inlined, apply the changes.
	 This way we will not count always-inline functions against the
	 growth limits, and moreover we will inline calls from always
	 inlines that we skipped previously because of the conditional
	 above.  */
      if (inlined)
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  /* The optimize_inline_calls call above might have introduced new
	     statements that don't have inline parameters computed.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  ipa_update_overall_fn_summary (node);
	  inlined = false;
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  However, the
	     values are approximately right, so we only need to update the
	     edge info that might be cleared out for newly discovered
	     edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We have no summary for new bound store calls yet.  */
	      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);

	      if (edge->callee->decl
		  && !gimple_check_call_matching_types (
		       edge->call_stmt, edge->callee->decl, false))
		{
		  edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
		  edge->call_stmt_cannot_inline_p = true;
		}
	    }
	  if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
	    ipa_update_overall_fn_summary (node);
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }
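
  /* Illustrative sketch, not part of the original source: the "flatten"
     path above is triggered by a definition such as

       __attribute__ ((flatten)) int hot_path (void) { ... }

     for which every call in the body is inlined where possible,
     independently of the iterative small-function logic above.  */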

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}

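/* Illustrative sketch, not part of the original source: the iteration in
   early_inliner catches trivial cases of indirect inlining, e.g.

     static inline int inc (int x) { return x + 1; }
     static inline int apply (int (*fn) (int), int x) { return fn (x); }
     int use (int x) { return apply (inc, x); }

   The first round inlines apply into use, turning fn (x) into a direct
   call to inc; a later round can then inline inc as well, bounded by
   PARAM_EARLY_INLINER_MAX_ITERATIONS.  */
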
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function
   calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}

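/* Illustrative note, not part of the original source: the factory above
   is what the pass manager uses when instantiating the pipeline; the
   early inliner is referenced from passes.def as
   NEXT_PASS (pass_early_inline).  */
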
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      NULL, /* generate_summary */
		      NULL, /* write_summary */
		      NULL, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}
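
/* Illustrative note, not part of the original source: unlike the early
   pass, pass_ipa_inline only records inlining decisions at IPA time;
   bodies are rewritten later, per function, through the inline_transform
   hook wired in above, which is what allows the decisions to be applied
   across LTO partitions.  */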