/* Inlining decision heuristics.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, stack
     growth and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   The inliner itself is split into two passes:

   pass_early_inline

     Simple local inlining pass inlining callees into the current function.
     This pass makes no use of whole unit analysis and thus it can do only
     very simple decisions based on local properties.

     The strength of the pass is that it is run in topological order
     (reverse postorder) on the callgraph.  Functions are converted into SSA
     form just before this pass and optimized subsequently.  As a result, the
     callees of the function seen by the early inliner were already optimized
     and the results of early inlining add a lot of optimization opportunities
     for the local optimization.

     The pass handles the obvious inlining decisions within the compilation
     unit - inlining auto inline functions, inlining for size and
     flattening.

     The main strength of the pass is the ability to eliminate abstraction
     penalty in C++ code (via combination of inlining and early
     optimization) and thus improve quality of analysis done by real IPA
     optimizers.

     Because of lack of whole unit knowledge, the pass cannot really make
     good code size/performance tradeoffs.  It however does very simple
     speculative inlining allowing code size to grow by
     EARLY_INLINING_INSNS when callee is a leaf function.  In this case the
     optimizations performed later are very likely to eliminate the cost.

   pass_ipa_inline

     This is the real inliner able to handle inlining with whole program
     knowledge.  It performs the following steps:

     1) inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as inline limits allow doing so.

	This heuristics is not very good on inlining recursive calls.
	Recursive calls can be inlined with results similar to loop unrolling.
	To do so, a special purpose recursive inliner is executed on a
	function when a recursive edge is met as a viable candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining leads
	to devirtualization and other modification of the callgraph so
	functions may become unreachable during the process.  Also functions
	declared as extern inline or virtual functions are removed, since
	after inlining we no longer need the offline bodies.

     3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to reduction of code size by
	eliminating the need for an offline copy of the function.  */
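
/* Illustrative sketch (not from the sources): step 3 above is why a static
   helper with a single call site is almost always inlined.  Given

     static int helper (int x) { return x * 3; }
     int user (int y) { return helper (y) + 1; }

   inlining helper into user lets the unreachable-function cleanup drop the
   offline body of helper, so unit size shrinks even though inlining
   normally costs code size.  */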
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "stringpool.h"
typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static profile_count max_count;
static profile_count spec_rem;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the caller.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  ipa_size_summary *outer_info = ipa_size_summaries->get (to);

  /* Look for function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits based on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      ipa_size_summary *size_info = ipa_size_summaries->get (to);
      if (limit < size_info->self_size)
	limit = size_info->self_size;
      if (stack_size_limit < size_info->estimated_self_stack_size)
	stack_size_limit = size_info->estimated_self_stack_size;
      if (to->inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  ipa_fn_summary *what_info = ipa_fn_summaries->get (what);
  ipa_size_summary *what_size_info = ipa_size_summaries->get (what);

  if (limit < what_size_info->self_size)
    limit = what_size_info->self_size;

  limit += limit * param_large_function_growth / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= ipa_size_summaries->get (what)->size
      && newsize > param_large_function_insns
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * param_stack_frame_growth / 100);

  inlined_stack = (ipa_get_stack_frame_offset (to)
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
      && inlined_stack > param_large_stack_frame)
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
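
/* Worked example with illustrative (not necessarily default) parameter
   values: if the largest function on the inline stack has self_size 1000
   and --param large-function-growth is 100, then
   limit = 1000 + 1000 * 100 / 100 = 2000 units.  An inlining whose
   estimated result size exceeds both this limit and
   --param large-function-insns is rejected with
   CIF_LARGE_FUNCTION_GROWTH_LIMIT.  */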
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
		       "  not inlinable: %C -> %C, %s\n",
		       e->caller, e->callee,
		       cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
	   || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	  && e->caller->lto_file_data
	  && e->callee->ultimate_alias_target ()->lto_file_data)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			   "  LTO objects: %s, %s\n",
			   e->caller->lto_file_data->file_name,
			   e->callee->ultimate_alias_target ()
			     ->lto_file_data->file_name);
	}
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
	if (dump_file)
	  cl_target_option_print_diff
	    (dump_file, 2, target_opts_for_fn (e->caller->decl),
	     target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	if (dump_file)
	  cl_optimization_print_diff
	    (dump_file, 2, opts_for_fn (e->caller->decl),
	     opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}
/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  if (!caller || !callee)
    return true;

  /* Allow inlining always_inline functions into no_sanitize_address
     functions.  */
  if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
    return true;

  return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
	   == sanitize_flags_p (SANITIZE_ADDRESS, callee))
	  && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
	      == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
	  && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
	      == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
}
/* Used for flags where it is safe to inline when caller's value is
   greater than callee's.  */
#define check_maybe_up(flag) \
      (opts_for_fn (caller->decl)->x_##flag		\
       != opts_for_fn (callee->decl)->x_##flag		\
       && (!always_inline				\
	   || opts_for_fn (caller->decl)->x_##flag	\
	      < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when caller's value is
   smaller than callee's.  */
#define check_maybe_down(flag) \
      (opts_for_fn (caller->decl)->x_##flag		\
       != opts_for_fn (callee->decl)->x_##flag		\
       && (!always_inline				\
	   || opts_for_fn (caller->decl)->x_##flag	\
	      > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where exact match is needed for correctness.  */
#define check_match(flag) \
      (opts_for_fn (caller->decl)->x_##flag \
       != opts_for_fn (callee->decl)->x_##flag)
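
/* Usage sketch: with check_maybe_up (flag_errno_math), any difference in
   the flag counts as a mismatch for ordinary callees; for always_inline
   callees the difference is tolerated when the caller's value is the
   greater one (caller built with -ferrno-math, callee with
   -fno-errno-math), since the caller's stricter setting subsumes the
   callee's.  */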
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.  */
static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  if (!early && (!opt_for_fn (callee->decl, optimize)
		 || !opt_for_fn (caller->decl, optimize)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
						callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (ipa_fn_summaries->get (callee) == NULL
	   || !ipa_fn_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return inlining_insns_single limit for function N.  If HINT is true
   scale up the bound.  */

static int
inline_insns_single (cgraph_node *n, bool hint)
{
  if (opt_for_fn (n->decl, optimize) >= 3)
    {
      if (hint)
	return param_max_inline_insns_single
	       * param_inline_heuristics_hint_percent / 100;
      return param_max_inline_insns_single;
    }
  else
    {
      if (hint)
	return param_max_inline_insns_single_o2
	       * param_inline_heuristics_hint_percent_o2 / 100;
      return param_max_inline_insns_single_o2;
    }
}
/* Return inlining_insns_auto limit for function N.  If HINT is true
   scale up the bound.  */

static int
inline_insns_auto (cgraph_node *n, bool hint)
{
  int max_inline_insns_auto = opt_for_fn (n->decl, param_max_inline_insns_auto);
  if (hint)
    return max_inline_insns_auto * param_inline_heuristics_hint_percent / 100;
  return max_inline_insns_auto;
}
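
/* Example with illustrative values: if param_max_inline_insns_auto is 15
   and param_inline_heuristics_hint_percent is 1600, a hinted call site may
   grow by up to 15 * 1600 / 100 = 240 units, a 16x more generous bound
   than the unhinted one.  */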
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
			     bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;

  /* Check if caller growth allows the inlining.  */
  if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
      && !disregard_limits
      && !lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (caller->decl))
      && !caller_growth_limits (e))
    inlinable = false;
  else if (callee->externally_visible
	   && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && flag_live_patching == LIVE_PATCHING_INLINE_ONLY_STATIC)
    {
      e->inline_failed = CIF_EXTERN_LIVE_ONLY_STATIC;
      inlinable = false;
    }
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just tip of iceberg of handling
     optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
	     (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	      && lookup_attribute ("always_inline",
				   DECL_ATTRIBUTES (callee->decl)));
      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
	 below and inlined across optimization boundaries.
	 Enabling checks below breaks several packages by refusing
	 to inline library always_inline functions.  See PR65873.
	 Disable the check for early inlining for now until better solution
	 is found.  */
      if (always_inline && early)
	;
      /* There are some options that change IL semantics which means
	 we cannot inline in these cases for correctness reason.
	 Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
	       || check_match (flag_trapv)
	       || check_match (flag_pcc_struct_return)
	       /* When caller or callee does FP math, be sure FP codegen flags
		  are compatible.  */
	       || ((caller_info->fp_expressions && callee_info->fp_expressions)
		   && (check_maybe_up (flag_rounding_math)
		       || check_maybe_up (flag_trapping_math)
		       || check_maybe_down (flag_unsafe_math_optimizations)
		       || check_maybe_down (flag_finite_math_only)
		       || check_maybe_up (flag_signaling_nans)
		       || check_maybe_down (flag_cx_limited_range)
		       || check_maybe_up (flag_signed_zeros)
		       || check_maybe_down (flag_associative_math)
		       || check_maybe_down (flag_reciprocal_math)
		       || check_maybe_down (flag_fp_int_builtin_inexact)
		       /* Strictly speaking only when the callee contains
			  function calls that may end up setting errno.  */
		       || check_maybe_up (flag_errno_math)))
	       /* We do not want to make code compiled with exceptions to be
		  brought into a non-EH function unless we know that the callee
		  does not throw.
		  This is tracked by DECL_FUNCTION_PERSONALITY.  */
	       || (check_maybe_up (flag_non_call_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       || (check_maybe_up (flag_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       /* When devirtualization is disabled for callee, it is not safe
		  to inline it as we possibly mangled the type info.
		  Allow early inlining of always inlines.  */
	       || (!early && check_maybe_down (flag_devirtualize)))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
	;
      /* When user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
	       && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If explicit optimize attributes are not used, the mismatch is caused
	 by different command line options used to build different units.
	 Do not care about COMDAT functions - those are intended to be
	 optimized with the optimization flags of module they are used in.
	 Also do not care about mixing up size/speed optimization when
	 DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
		&& !lookup_attribute ("optimize",
				      DECL_ATTRIBUTES (caller->decl)))
	       || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	;
      /* If mismatch is caused by merging two LTO units with different
	 optimization flags we want to be a bit nicer.  However never inline
	 if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
	       || !opt_for_fn (caller->decl, optimize))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If callee is optimized for size and caller is not, allow inlining if
	 code shrinks or we are in param_max_inline_insns_single limit and
	 callee is inline (and thus likely an unified comdat).
	 This will allow caller to run faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       > opt_for_fn (caller->decl, optimize_size))
	{
	  int growth = estimate_edge_growth (e);
	  if (growth > param_max_inline_insns_size
	      && (!DECL_DECLARED_INLINE_P (callee->decl)
		  && growth >= MAX (inline_insns_single (caller, false),
				    inline_insns_auto (caller, false))))
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
      /* If callee is more aggressively optimized for performance than caller,
	 we generally want to inline only cheap (runtime wise) functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       < opt_for_fn (caller->decl, optimize_size)
	       || (opt_for_fn (callee->decl, optimize)
		   > opt_for_fn (caller->decl, optimize)))
	{
	  if (estimate_edge_time (e)
	      >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
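
/* Example of the FP-flag screening above (illustrative): a callee compiled
   with -funsafe-math-optimizations is not inlined into a caller compiled
   without it when both contain FP expressions, because
   check_maybe_down (flag_unsafe_math_optimizations) flags the mismatch and
   the edge fails with CIF_OPTIMIZATION_MISMATCH.  */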
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			 "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true, true)
      || !can_inline_edge_by_limits_p (e, true, false, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}
/* Return true if we are interested in inlining the small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before profile summary, all
     hot paths' IR look exactly the same as profiled binary.  As a result,
     in einliner, we will disregard size limit and inline those callsites
     that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      /* First take care of very large functions.  */
      int min_growth = estimate_min_edge_growth (e), growth = 0;
      int n;
      int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
				 ? param_early_inlining_insns
				 : param_early_inlining_insns_o2;

      if (min_growth > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "call is cold and code would grow "
			     "at least by %i\n",
			     e->caller, callee,
			     min_growth);
	  want_inline = false;
	}
      else
	growth = estimate_edge_growth (e);

      if (!want_inline || growth <= param_max_inline_insns_size)
	;
      else if (!e->maybe_hot_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "call is cold and code would grow by %i\n",
			     e->caller, callee,
			     growth);
	  want_inline = false;
	}
      else if (growth > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns%s\n",
			     e->caller, callee, growth,
			     opt_for_fn (e->caller->decl, optimize) >= 3
			     ? "" : "-O2");
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns%s "
			     "divided by number of calls\n",
			     e->caller, callee, growth,
			     opt_for_fn (e->caller->decl, optimize) >= 3
			     ? "" : "-O2");
	  want_inline = false;
	}
    }
  return want_inline;
}
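
/* Example of the growth * (n + 1) rule above (illustrative numbers): a
   callee whose inlining grows the caller by 8 units and which itself
   contains 2 non-builtin calls is charged 8 * 3 = 24 units, so with
   --param early-inlining-insns at 14 it is rejected even though its own
   growth fits the limit; each of its calls is a potential further
   early-inline site.  */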
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct cgraph_edge *edge,
			     sreal uninlined_call_time,
			     sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);

  if (freq > 0)
    uninlined_call_time *= freq;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  sreal caller_time = ipa_fn_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
			   sreal time,
			   sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);
  sreal caller_time = ipa_fn_summaries->get (caller)->time;

  if (freq > 0)
    time *= freq;
  else
    time = time >> 11;

  /* This calculation should match one in ipa-inline-analysis.c
     (estimate_edge_size_and_time).  */
  time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}
/* Determine time saved by inlining EDGE of frequency FREQ
   where callee's runtime w/o inlining is UNINLINED_TIME
   and with inlining is INLINED_TIME.  */

inline sreal
inlining_speedup (struct cgraph_edge *edge,
		  sreal freq,
		  sreal uninlined_time,
		  sreal inlined_time)
{
  sreal speedup = uninlined_time - inlined_time;
  /* Handling of call_time should match one in ipa-inline-fnsummary.c
     (estimate_edge_size_and_time).  */
  sreal call_time = ipa_call_summaries->get (edge)->call_stmt_time;

  if (freq > 0)
    {
      speedup = (speedup + call_time);
      if (freq != 1)
	speedup = speedup * freq;
    }
  else if (freq == 0)
    speedup = speedup >> 11;
  gcc_checking_assert (speedup >= 0);
  return speedup;
}
/* Return true if the speedup for inlining E is bigger than
   PARAM_MAX_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal unspec_time;
  sreal spec_time = estimate_edge_time (e, &unspec_time);
  sreal freq = e->sreal_frequency ();
  sreal time = compute_uninlined_call_time (e, unspec_time, freq);
  sreal inlined_time = compute_inlined_call_time (e, spec_time, freq);
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to
			 : e->caller);
  int limit = opt_for_fn (caller->decl, optimize) >= 3
	      ? param_inline_min_speedup
	      : param_inline_min_speedup_o2;

  if ((time - inlined_time) * 100 > time * limit)
    return true;
  return false;
}
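
/* Worked example (illustrative): if the caller + call runtime estimate is
   200 without inlining and 160 with inlining, the saving is
   (200 - 160) * 100 = 4000, which exceeds 200 * 15 = 3000, so with
   --param inline-min-speedup at 15 the edge counts as a big speedup.  */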
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  /* Allow this function to be called before can_inline_edge_p,
     since it's usually cheaper.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    want_inline = false;
  else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do fast and conservative check if the function can be a good
     inline candidate.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
	   && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
	   && ipa_fn_summaries->get (callee)->min_size
		- ipa_call_summaries->get (e)->call_stmt_size
	      > inline_insns_auto (e->caller, true))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl)
	    || e->count.ipa ().nonzero_p ())
	   && ipa_fn_summaries->get (callee)->min_size
		- ipa_call_summaries->get (e)->call_stmt_size
	      > inline_insns_single (e->caller, true))
    {
      if (opt_for_fn (e->caller->decl, optimize) >= 3)
	e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			    ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
			    : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      else
	e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			    ? CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT
			    : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      ipa_hints hints = estimate_edge_hints (e);
      bool apply_hints = (hints & (INLINE_HINT_indirect_call
				   | INLINE_HINT_known_hot
				   | INLINE_HINT_loop_iterations
				   | INLINE_HINT_loop_stride));

      if (growth <= param_max_inline_insns_size)
	;
      /* Apply param_max_inline_insns_single limit.  Do not do so when
	 hints suggests that inlining given function is very profitable.
	 Avoid computation of big_speedup_p when not necessary to change
	 outcome of decision.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= inline_insns_single (e->caller, apply_hints)
	       && (apply_hints
		   || growth >= inline_insns_single (e->caller, true)
		   || !big_speedup_p (e)))
	{
	  if (opt_for_fn (e->caller->decl, optimize) >= 3)
	    e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  else
	    e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !opt_for_fn (e->caller->decl, flag_inline_functions)
	       && growth >= param_max_inline_insns_small)
	{
	  /* growth_positive_p is expensive, always test it last.  */
	  if (growth >= inline_insns_single (e->caller, false)
	      || growth_positive_p (callee, e, growth))
	    {
	      e->inline_failed = CIF_NOT_DECLARED_INLINED;
	      want_inline = false;
	    }
	}
      /* Apply param_max_inline_insns_auto limit for functions not declared
	 inline.  Bypass the limit when speedup seems big.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= inline_insns_auto (e->caller, apply_hints)
	       && (apply_hints
		   || growth >= inline_insns_auto (e->caller, true)
		   || !big_speedup_p (e)))
	{
	  /* growth_positive_p is expensive, always test it last.  */
	  if (growth >= inline_insns_single (e->caller, false)
	      || growth_positive_p (callee, e, growth))
	    {
	      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	      want_inline = false;
	    }
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!e->maybe_hot_p ()
	       && (growth >= inline_insns_single (e->caller, false)
		   || growth_positive_p (callee, e, growth)))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
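
/* Note on the apply_hints path above: a hint such as
   INLINE_HINT_loop_iterations (inlining makes a loop's trip count known)
   switches the growth test from inline_insns_single (caller, false) to the
   hint-scaled inline_insns_single (caller, true), so substantially larger
   bodies are still accepted when the optimizers are expected to win the
   size back.  */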
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  sreal caller_freq = 1;
  int max_depth = param_max_inline_recursive_depth_auto;

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = param_max_inline_recursive_depth;

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }
  else if (outer_node->inlined_to
	   && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
    {
      reason = "caller frequency is 0";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of self recursive function into copy of itself within other
     function is transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make probability
     of actual call to the self recursive function very small.  Be sure that
     the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob;
      if (edge->sreal_frequency () >= max_prob * caller_freq)
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in hardware return predictor.

     Recursive inlining might however increase cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when
     probability of self recursion is low.

     Recursive inlining of self recursive call within loop also results in
     large loop depths that generally optimize badly.  We may want to throttle
     down inlining in those cases.  In particular this seems to happen in one
     of libstdc++ rb tree methods.  */
  else
    {
      if (edge->sreal_frequency () * 100
	  <= caller_freq
	     * param_min_inline_recursive_probability)
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, edge->call_stmt,
		     "   not inlining recursively: %s\n", reason);
  return want_inline;
}
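
/* Worked example of the peeling bound above (illustrative): with
   max_depth = 8, max_prob = 1 - 1/8 = 0.875 at depth 1 and is squared for
   each further level (0.766 at depth 2, and so on).  Capping the recursion
   probability p at 1 - 1/max_depth bounds the expected recursion count by
   1 / (1 - p) = max_depth, matching the comment in the function.  */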
/* Return true when NODE has uninlinable caller;
   set HAS_HOT_CALL if it has hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
	  || !opt_for_fn (e->caller->decl, optimize))
	return true;
      if (!can_inline_edge_p (e, true))
	return true;
      if (e->recursive_p ())
	return true;
      if (!can_inline_edge_by_limits_p (e, true))
	return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
	*(bool *)has_hot_call = true;
    }
  return false;
}
/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (growth_positive_p (node, NULL, INT_MIN) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
					 true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}
/* Return true if WHERE of SIZE is a possible candidate for wrapper heuristics
   in estimate_edge_badness.  */

static bool
wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
{
  return size < (DECL_DECLARED_INLINE_P (where->decl)
		 ? inline_insns_single (where, false)
		 : inline_insns_auto (where, false));
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth;
  sreal edge_time, unspec_edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  ipa_hints hints;
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge, &unspec_edge_time);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  /* Check that inlined time is better, but tolerate some roundoff issues.
     FIXME: When callee profile drops to 0 we account calls more.  This
     should be fixed by never doing that.  */
  gcc_checking_assert ((edge_time * 100
			- callee_info->time * 101).to_int () <= 0
			|| callee->count.ipa ().initialized_p ());
  gcc_checking_assert (growth <= ipa_size_summaries->get (callee)->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       edge->caller->dump_name (),
	       edge->callee->dump_name ());
      fprintf (dump_file, "      size growth %i, time %f unspec %f ",
	       growth,
	       edge_time.to_double (),
	       unspec_edge_time.to_double ());
      ipa_dump_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
	fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
		 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
	fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available. Compute badness as:

                 time_saved * caller_count
     goodness =  -------------------------------------------------
	         growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
	   || caller->count.ipa ().nonzero_p ())
    {
      sreal numerator, denominator;
      int overall_growth;
      sreal freq = edge->sreal_frequency ();

      numerator = inlining_speedup (edge, freq, unspec_edge_time, edge_time);
      if (numerator <= 0)
	numerator = ((sreal) 1 >> 8);
      if (caller->count.ipa ().nonzero_p ())
	numerator *= caller->count.ipa ().to_gcov_type ();
      else if (caller->count.ipa ().initialized_p ())
	numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

	 inline_caller ()
	   {
	     do_fast_job...
	     if (need_more_work)
	       noninline_callee ();
	   }
	 Without penalizing this case, we usually inline noninline_callee
	 into the inline_caller because overall_growth is small preventing
	 further inlining of inline_caller.

	 Penalize only callgraph edges to functions with small overall
	 growth ...
	*/
      if (growth > overall_growth
	  /* ... and having only one caller which is not inlined ... */
	  && callee_info->single_caller
	  && !edge->caller->inlined_to
	  /* ... and edges executed only conditionally ... */
	  && freq < 1
	  /* ... consider case where callee is not inline but caller is ... */
	  && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
	       && DECL_DECLARED_INLINE_P (caller->decl))
	      /* ... or when early optimizers decided to split and edge
		 frequency still indicates splitting is a win ... */
	      || (callee->split_part && !caller->split_part
		  && freq * 100 < param_partial_inlining_entry_probability
		  /* ... and do not overwrite user specified hints.  */
		  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
		      || DECL_DECLARED_INLINE_P (caller->decl)))))
	{
	  ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
	  int caller_growth = caller_info->growth;

	  /* Only apply the penalty when caller looks like inline candidate,
	     and it is not called once.  */
	  if (!caller_info->single_caller && overall_growth < caller_growth
	      && caller_info->inlinable
	      && wrapper_heuristics_may_apply
		   (caller, ipa_size_summaries->get (caller)->size))
	    {
	      if (dump)
		fprintf (dump_file,
			 "     Wrapper penalty. Increasing growth %i to %i\n",
			 overall_growth, caller_growth);
	      overall_growth = caller_growth;
	    }
	}
      if (overall_growth > 0)
	{
	  /* Strongly prefer functions with few callers that can be inlined
	     fully.  The square root here leads to smaller binaries at average.
	     Watch however for extreme cases and return to linear function
	     when growth is large.  */
	  if (overall_growth < 256)
	    overall_growth *= overall_growth;
	  else
	    overall_growth += 256 * 256 - 256;
	  denominator *= overall_growth;
	}
      denominator *= ipa_size_summaries->get (caller)->size + growth;

      badness = - numerator / denominator;

      if (dump)
	{
	  fprintf (dump_file,
		   "      %f: guessed profile. frequency %f, count %" PRId64
		   " caller count %" PRId64
		   " time saved %f"
		   " overall growth %i (current) %i (original)"
		   " %i (compensated)\n",
		   badness.to_double (),
		   freq.to_double (),
		   edge->count.ipa ().initialized_p ()
		   ? edge->count.ipa ().to_gcov_type () : -1,
		   caller->count.ipa ().initialized_p ()
		   ? caller->count.ipa ().to_gcov_type () : -1,
		   inlining_speedup (edge, freq, unspec_edge_time,
				     edge_time).to_double (),
		   estimate_growth (callee),
		   callee_info->growth, overall_growth);
	}
    }
  /* When function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness = badness >> nest;
      else
	badness = badness << nest;
      if (dump)
	fprintf (dump_file, "      %f: no profile. nest %i\n",
		 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
		| INLINE_HINT_loop_iterations
		| INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}
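
/* Worked example of the profile-based formula (illustrative numbers):
   time_saved = 2, caller_count = 1000, growth = 20, overall_growth = 40
   (squared to 1600 since it is below 256) and combined caller size 520
   give badness = - (2 * 1000) / (20 * 1600 * 520) ~= -0.00012; a second
   edge saving twice the time at the same costs gets a key twice as
   negative and is extracted from the heap first.  */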
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting minimum if the key
	 turns out to be out of date, it is re-inserted into heap
	 with correct value.  */
      if (badness < n->get_key ())
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s -> %s, %f to %f\n",
		       edge->caller->dump_name (),
		       edge->callee->dump_name (),
		       n->get_key ().to_double (),
		       badness.to_double ());
	    }
	  heap->decrease_key (n, badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s -> %s, badness %f\n",
		   edge->caller->dump_name (),
		   edge->callee->dump_name (),
		   badness.to_double ());
	}
      edge->aux = heap->insert (badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->inlined_to)
    where = where->inlined_to;

  reset_node_cache (where);

  if (edge_growth_cache != NULL)
    for (edge = where->callers; edge; edge = edge->next_caller)
      if (edge->inline_failed)
	edge_growth_cache->remove (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (edge_growth_cache != NULL && e->inline_failed)
	  edge_growth_cache->remove (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
      || node->inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false)
		&& can_inline_edge_by_limits_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		heap->delete_node ((edge_heap_node_t *) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced new call site) and thus all we need is to insert newly
   created edges into heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset callee growth cache here.  Since we added a new
	   call, growth should have just increased and consequently the
	   badness metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
	    && ipa_fn_summaries->get (callee) != NULL
	    && ipa_fn_summaries->get (callee)->inlinable
	    && avail >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false)
		&& can_inline_edge_by_limits_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		heap->delete_node ((edge_heap_node_t *) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (e->callee->ultimate_alias_target (&avail, e->caller) == node
	    && avail > AVAIL_INTERPOSABLE))
      heap->insert (-e->sreal_frequency (), e);
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive calls,
   inline until body size reaches the given argument.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge *> *new_edges)
{
  int limit = param_max_inline_insns_recursive_auto;
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->inlined_to)
    node = node->inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = param_max_inline_insns_recursive;

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);
  if (heap.empty ())
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n", node->dump_name ());

  /* Do the inlining and update list of recursive call during process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true)
	  || !can_inline_edge_by_limits_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect edge to the original body before
	 estimating growths otherwise we will be seeing growths after inlining
	 the already modified body.  */
      if (master_clone)
	{
	  curr->redirect_callee (master_clone);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  curr->redirect_callee (dest);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->inlined_to; cnode = cnode->callers->caller)
	if (node->decl
	    == curr->callee->ultimate_alias_target ()->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  curr->redirect_callee (dest);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count.nonzero_p () && curr->count.initialized_p ())
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count.to_gcov_type ()
		       / node->count.to_gcov_type ());
	    }
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need original clone to copy around.  */
	  master_clone = node->create_clone (node->decl, node->count,
	    false, vNULL, true, NULL, NULL);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	  curr->redirect_callee (master_clone);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      reset_node_cache (node);
      lookup_recursive_calls (node, curr->callee, &heap);
      n++;
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  if (!master_clone)
    return false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, edge->call_stmt,
		     "\n   Inlined %i times, "
		     "body grown from size %i to %i, time %f to %f\n", n,
		     ipa_size_summaries->get (master_clone)->size,
		     ipa_size_summaries->get (node)->size,
		     ipa_fn_summaries->get (master_clone)->time.to_double (),
		     ipa_fn_summaries->get (node)->time.to_double ());

  /* Remove master clone we used for inlining.  We rely that clones inlined
     into master clone gets queued just before master clone so we don't
     need recursion.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->inlined_to == master_clone)
	node->remove ();
    }
  master_clone->remove ();
  return true;
}
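
/* Illustrative effect (not from the sources): for a self-recursive
   function such as

     int sum (int *p, int n) { return n ? p[0] + sum (p + 1, n - 1) : 0; }

   the loop above keeps inlining the recursive edge into a master clone
   until the body reaches the limit, which behaves like unrolling the
   recursion several levels deep before the residual call remains.  */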
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < param_large_unit_insns)
    max_insns = param_large_unit_insns;

  return ((int64_t) max_insns
	  * (100 + param_inline_unit_growth) / 100);
}
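
/* Worked example (illustrative values): with a unit estimate of 10000
   insns and --param inline-unit-growth set to 40, the unit may grow to
   10000 * 140 / 100 = 14000 insns; units smaller than
   --param large-unit-insns are first rounded up to that size, so tiny
   units get proportionally more room.  */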
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      gcc_assert (edge->callee);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true)
	  && can_inline_edge_by_limits_p (edge, true))
	edge->aux = heap->insert (edge_badness (edge, false), edge);
    }
}
/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
}
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

bool
speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
{
  /* If we have already decided to inline the edge, it seems useful.  */
  if (!e->inline_failed)
    return true;

  enum availability avail;
  struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
								 e->caller);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!e->maybe_hot_p ())
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
	    return true;
	}
      else if (ecf_flags & ECF_PURE)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
	    return true;
	}
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (that are seen by having local flag set),
     it is probably pointless to inline it unless hardware is missing
     indirect call predictor.  */
  if (!anticipate_inlining && !target->local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (!can_inline_edge_p (e, false)
      || !can_inline_edge_by_limits_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
}
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

static void
resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
{
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->inlined_to
				  ? node->inlined_to : node;
      auto_bitmap updated_nodes;

      if (edge->count.ipa ().initialized_p ())
	spec_rem += edge->count.ipa ();
      edge->resolve_speculation ();
      reset_edge_caches (where);
      ipa_update_overall_fn_summary (where);
      update_caller_keys (edge_heap, where,
			  updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
			  updated_nodes);
    }
}
/* Return true if NODE should be accounted for overall size estimate.
   Skip all nodes optimized for size so we can measure the growth of hot
   part of program no matter of the padding.  */

bool
inline_account_function_p (struct cgraph_node *node)
{
  return (!DECL_EXTERNAL (node->decl)
	  && !opt_for_fn (node->decl, optimize_size)
	  && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
}
/* Count number of callers of NODE and store it into DATA (that
   points to int).  Worker for cgraph_for_node_and_aliases.  */

static bool
sum_callers (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  int *num_calls = (int *)data;

  for (e = node->callers; e; e = e->next_caller)
    (*num_calls)++;
  return false;
}
/* We only propagate across edges with non-interposable callee.  */

static bool
ignore_edge_p (struct cgraph_edge *e)
{
  enum availability avail;
  e->callee->function_or_virtual_thunk_symbol (&avail, e->caller);
  return (avail <= AVAIL_INTERPOSABLE);
}
/* We use greedy algorithm for inlining of small functions:
   All inline candidates are put into prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  edge_heap_t edge_heap (sreal::min ());
  auto_bitmap updated_nodes;
  int min_size, max_size;
  auto_vec<cgraph_edge *> new_indirect_edges;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
  struct cgraph_edge_hook_list *edge_removal_hook_holder;
  new_indirect_edges.create (8);

  edge_removal_hook_holder
    = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = profile_count::uninitialized ();
  ipa_reduced_postorder (order, true, ignore_edge_p);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->inlined_to)
      {
	if (!node->alias && node->analyzed
	    && (node->has_gimple_body_p () || node->thunk.thunk_p)
	    && opt_for_fn (node->decl, optimize))
	  {
	    class ipa_fn_summary *info = ipa_fn_summaries->get (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;

	    /* Do not account external functions, they will be optimized out
	       if not inlined.  Also only count the non-cold portion of program.  */
	    if (inline_account_function_p (node))
	      initial_size += ipa_size_summaries->get (node)->size;
	    info->growth = estimate_growth (node);

	    int num_calls = 0;
	    node->call_for_symbol_and_aliases (sum_callers, &num_calls,
					       true);
	    if (num_calls == 1)
	      info->single_caller = true;
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
		  if (opt_for_fn (n2->decl, optimize))
		    {
		      ipa_fn_summary *info2 = ipa_fn_summaries->get
			 (n2->inlined_to ? n2->inlined_to : n2);
		      if (info2->scc_no)
			break;
		      info2->scc_no = id;
		    }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  max_count = max_count.max (edge->count.ipa ());
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;
  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      bool update = false;
      struct cgraph_edge *next = NULL;
      bool has_speculative = false;

      if (!opt_for_fn (node->decl, optimize))
	continue;

      if (dump_file)
	fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());

      for (edge = node->callees; edge; edge = next)
	{
	  next = edge->next_callee;
	  if (edge->inline_failed
	      && !edge->aux
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && can_inline_edge_by_limits_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (&edge_heap, edge);
	    }
	  if (edge->speculative)
	    has_speculative = true;
	}
      if (has_speculative)
	for (edge = node->callees; edge; edge = next)
	  {
	    next = edge->next_callee;
	    if (edge->speculative
		&& !speculation_useful_p (edge, edge->aux != NULL))
	      {
		edge->resolve_speculation ();
		update = true;
	      }
	  }
      if (update)
	{
	  struct cgraph_node *where = node->inlined_to
				      ? node->inlined_to : node;
	  ipa_update_overall_fn_summary (where);
	  reset_edge_caches (where);
	  update_caller_keys (&edge_heap, where,
			      updated_nodes, NULL);
	  update_callee_keys (&edge_heap, where,
			      updated_nodes);
	  bitmap_clear (updated_nodes);
	}
    }

  gcc_assert (in_lto_p
	      || !(max_count > 0)
	      || (profile_info && flag_branch_probabilities));
  while (!edge_heap.empty ())
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      sreal badness = edge_heap.min_key ();
      sreal current_badness;
      int growth;

      edge = edge_heap.extract_min ();
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed || !edge->callee->analyzed)
	continue;

      /* Be sure that caches are maintained consistent.
	 This check is affected by scaling roundoff errors when compiling for
	 IPA, so we skip it in that case.  */
      if (flag_checking && !edge->callee->count.ipa_p ()
	  && (!max_count.initialized_p () || !max_count.nonzero_p ()))
	{
	  sreal cached_badness = edge_badness (edge, false);

	  int old_size_est = estimate_edge_size (edge);
	  sreal old_time_est = estimate_edge_time (edge);
	  int old_hints_est = estimate_edge_hints (edge);

	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (edge);
	  reset_node_cache (edge->caller->inlined_to
			    ? edge->caller->inlined_to
			    : edge->caller);
	  gcc_assert (old_size_est == estimate_edge_size (edge));
	  gcc_assert (old_time_est == estimate_edge_time (edge));
	  /* FIXME:

	     gcc_assert (old_hints_est == estimate_edge_hints (edge));

	     fails with profile feedback because some hints depend on the
	     maybe_hot_edge_p predicate, and because the callee gets inlined
	     into other calls, the edge may become cold.
	     This ought to be fixed by computing relative probabilities
	     for given invocation but that will be better done once whole
	     code is converted to sreals.  Disable for now and revert to "wrong"
	     value so enable/disable checking paths agree.  */
	  edge_growth_cache->get (edge)->hints = old_hints_est + 1;

	  /* When updating the edge costs, we only decrease badness in the keys.
	     Increases of badness are handled lazily; when we see a key with an
	     out-of-date value on it, we re-insert it now.  */
	  current_badness = edge_badness (edge, false);
	  gcc_assert (cached_badness == current_badness);
	  gcc_assert (current_badness >= badness);
	}
      else
	current_badness = edge_badness (edge, false);
      if (current_badness != badness)
	{
	  if (edge_heap.min () && current_badness > edge_heap.min_key ())
	    {
	      edge->aux = edge_heap.insert (current_badness, edge);
	      continue;
	    }
	  else
	    badness = current_badness;
	}

      if (!can_inline_edge_p (edge, true)
	  || !can_inline_edge_by_limits_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      callee = edge->callee->ultimate_alias_target ();
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   callee->dump_name (),
		   ipa_size_summaries->get (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated badness is %f, frequency %.2f.\n",
		   edge->caller->dump_name (),
		   edge->call_stmt
		   && (LOCATION_LOCUS (gimple_location ((const gimple *)
							edge->call_stmt))
		       > BUILTINS_LOCATION)
		   ? gimple_filename ((const gimple *) edge->call_stmt)
		   : "unknown",
		   edge->call_stmt
		   ? gimple_lineno ((const gimple *) edge->call_stmt)
		   : -1,
		   badness.to_double (),
		   edge->sreal_frequency ().to_double ());
	  if (edge->count.ipa ().initialized_p ())
	    {
	      fprintf (dump_file, " Called ");
	      edge->count.ipa ().dump (dump_file);
	      fprintf (dump_file, " times\n");
	    }
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}
      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do effects similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the special-purpose recursive inliner.  */
      if (edge->recursive_p ())
	{
	  where = edge->caller;
	  if (where->inlined_to)
	    where = where->inlined_to;
	  if (!recursive_inlining (edge,
				   opt_for_fn (edge->caller->decl,
					       flag_indirect_inlining)
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
	  update_callee_keys (&edge_heap, where, updated_nodes);
	  bitmap_clear (updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might completely
	     optimize out the recursive call.  However we must be extra
	     careful.  */
	  where = edge->caller;
	  while (where->inlined_to)
	    {
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
	  gcc_checking_assert (!callee->inlined_to);

	  int old_size = ipa_size_summaries->get (where)->size;
	  sreal old_time = ipa_fn_summaries->get (where)->time;

	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  reset_edge_caches (edge->callee);
	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);

	  /* If caller's size and time increased we do not need to update
	     all edges because badness is not going to decrease.  */
	  if (old_size <= ipa_size_summaries->get (where)->size
	      && old_time <= ipa_fn_summaries->get (where)->time
	      /* Wrapper penalty may be non-monotonous in this respect.
		 Fortunately it only affects small functions.  */
	      && !wrapper_heuristics_may_apply (where, old_size))
	    update_callee_keys (&edge_heap, edge->callee, updated_nodes);
	  else
	    update_callee_keys (&edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->inlined_to)
	where = where->inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and size of the function body.
	 After inlining these properties might change for the function we
	 inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (&edge_heap, where, updated_nodes, NULL);
      /* Offline copy count has possibly changed, recompute if profile is
	 available.  */
      struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
      if (n != edge->callee && n->analyzed && n->count.ipa ().initialized_p ())
	update_callee_keys (&edge_heap, n, updated_nodes);
      bitmap_clear (updated_nodes);
      if (dump_enabled_p ())
	{
	  ipa_fn_summary *s = ipa_fn_summaries->get (where);

	  /* dump_printf can't handle %+i.  */
	  char buf_net_change[100];
	  snprintf (buf_net_change, sizeof buf_net_change, "%+i",
		    overall_size - old_size);

	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
			   " Inlined %C into %C which now has time %f and "
			   "size %i, net change of %s.\n",
			   edge->callee, edge->caller,
			   s->time.to_double (),
			   ipa_size_summaries->get (edge->caller)->size,
			   buf_net_change);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }
  free_growth_caches ();
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "Unit growth for small function inlining: %i->%i (%i%%)\n",
		 initial_size, overall_size,
		 initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
  symtab->remove_edge_removal_hook (edge_removal_hook_holder);
}
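
/* Illustrative trace (hypothetical numbers) of the lazy badness updates in
   inline_small_functions above: suppose edges E1 and E2 are queued with
   badness 5 and 7 (lower is better) and inlining elsewhere raises E1's
   real badness to 9.  The stale key is only noticed when E1 reaches the
   top of the heap: its badness is recomputed there and, since 9 > 7 (the
   new minimum), E1 is re-inserted with key 9 instead of being inlined.
   Decisions thus still happen in (approximately) increasing badness order
   without eagerly rekeying the whole heap after every inlining.  */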
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early, bool update)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* We've hit a cycle?  Time to give up.  */
      if (callee->aux)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining %C into %C to avoid cycle.\n",
			     callee, e->caller);
	  if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
	    e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early, false);
	  continue;
	}

      /* Flatten attribute needs to be processed during late inlining.  For
	 extra code quality we however do flattening during early optimization,
	 too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	    && !can_inline_edge_by_limits_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 " Inlining %C into %C.\n",
			 callee, e->caller);
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early, false);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
  cgraph_node *where = node->inlined_to ? node->inlined_to : node;
  if (update && opt_for_fn (where->decl, optimize))
    ipa_update_overall_fn_summary (where);
}
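
/* Illustrative example (hypothetical code): given

       void helper1 (void);
       void helper2 (void) { helper1 (); }
       __attribute__ ((flatten)) void driver (void) { helper2 (); }

   flatten_function inlines helper2 into driver and then recurses to
   inline helper1 as well, stopping only at cycles, at calls that cannot
   be inlined, or at SSA form mismatches, as checked above.  */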
/* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
   DATA points to number of calls originally found so we avoid infinite
   recursion.  */

static bool
inline_to_all_callers_1 (struct cgraph_node *node, void *data,
			 hash_set<cgraph_node *> *callers)
{
  int *num_calls = (int *) data;
  bool callee_removed = false;

  while (node->callers && !node->inlined_to)
    {
      struct cgraph_node *caller = node->callers->caller;

      if (!can_inline_edge_p (node->callers, true)
	  || !can_inline_edge_by_limits_p (node->callers, true)
	  || node->callers->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "Uninlinable call found; giving up.\n");
	  *num_calls = 0;
	  return false;
	}

      if (dump_file)
	{
	  cgraph_node *ultimate = node->ultimate_alias_target ();
	  fprintf (dump_file,
		   "\nInlining %s size %i.\n",
		   ultimate->name (),
		   ipa_size_summaries->get (ultimate)->size);
	  fprintf (dump_file,
		   " Called once from %s %i insns.\n",
		   node->callers->caller->name (),
		   ipa_size_summaries->get (node->callers->caller)->size);
	}

      /* Remember which callers we inlined to, delaying updating the
	 overall summary.  */
      callers->add (node->callers->caller);
      inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has %i size\n",
		 caller->name (),
		 ipa_size_summaries->get (caller)->size);
      if (!(*num_calls)--)
	{
	  if (dump_file)
	    fprintf (dump_file, "New calls found; giving up.\n");
	  return callee_removed;
	}
      if (callee_removed)
	return true;
    }
  return false;
}
/* Wrapper around inline_to_all_callers_1 doing delayed overall summary
   update.  */

static bool
inline_to_all_callers (struct cgraph_node *node, void *data)
{
  hash_set<cgraph_node *> callers;
  bool res = inline_to_all_callers_1 (node, data, &callers);
  /* Perform the delayed update of the overall summary of all callers
     processed.  This avoids quadratic behavior in the cases where
     we have a lot of calls to the same function.  */
  for (hash_set<cgraph_node *>::iterator i = callers.begin ();
       i != callers.end (); ++i)
    ipa_update_overall_fn_summary ((*i)->inlined_to ? (*i)->inlined_to : *i);
  return res;
}
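
/* Illustrative note on the two-step structure above: when one caller
   contains many calls to NODE, each inline_call would otherwise be
   followed by a recomputation of that caller's whole summary, giving
   quadratic work in the number of calls.  Deferring the updates through
   the callers hash_set recomputes each affected summary exactly once.  */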
/* Output overall time estimate.  */
static void
dump_overall_stats (void)
{
  sreal sum_weighted = 0, sum = 0;
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->inlined_to
	&& !node->alias)
      {
	ipa_fn_summary *s = ipa_fn_summaries->get (node);
	sum += s->time;
	if (node->count.ipa ().initialized_p ())
	  sum_weighted += s->time * node->count.ipa ().to_gcov_type ();
      }
  fprintf (dump_file, "Overall time estimate: "
	   "%f weighted by profile: "
	   "%f\n", sum.to_double (), sum_weighted.to_double ());
}
/* Output some useful stats about inlining.  */

static void
dump_inline_stats (void)
{
  int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
  int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
  int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
  int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
  int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
  int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
  int64_t reason[CIF_N_REASONS][2];
  sreal reason_freq[CIF_N_REASONS];
  int i;
  struct cgraph_node *node;

  memset (reason, 0, sizeof (reason));
  for (i = 0; i < CIF_N_REASONS; i++)
    reason_freq[i] = 0;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed)
	    {
	      if (e->count.ipa ().initialized_p ())
		reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
	      reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
	      reason[(int) e->inline_failed][1] ++;
	      if (DECL_VIRTUAL_P (e->callee->decl)
		  && e->count.ipa ().initialized_p ())
		{
		  if (e->indirect_inlining_edge)
		    noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
		}
	      else if (e->count.ipa ().initialized_p ())
		{
		  if (e->indirect_inlining_edge)
		    noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    noninlined_cnt += e->count.ipa ().to_gcov_type ();
		}
	    }
	  else if (e->count.ipa ().initialized_p ())
	    {
	      if (e->speculative)
		{
		  if (DECL_VIRTUAL_P (e->callee->decl))
		    inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_speculative += e->count.ipa ().to_gcov_type ();
		}
	      else if (DECL_VIRTUAL_P (e->callee->decl))
		{
		  if (e->indirect_inlining_edge)
		    inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
		}
	      else
		{
		  if (e->indirect_inlining_edge)
		    inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_cnt += e->count.ipa ().to_gcov_type ();
		}
	    }
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	if (e->indirect_info->polymorphic
	    && e->count.ipa ().initialized_p ())
	  indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
	else if (e->count.ipa ().initialized_p ())
	  indirect_cnt += e->count.ipa ().to_gcov_type ();
    }
  if (max_count.initialized_p ())
    {
      fprintf (dump_file,
	       "Inlined %" PRId64 " + speculative "
	       "%" PRId64 " + speculative polymorphic "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 "\n" "Not inlined "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 " + still indirect "
	       "%" PRId64 " + still indirect polymorphic "
	       "%" PRId64 "\n", inlined_cnt,
	       inlined_speculative, inlined_speculative_ply,
	       inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
	       noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
	       noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
      fprintf (dump_file, "Removed speculations ");
      spec_rem.dump (dump_file);
      fprintf (dump_file, "\n");
    }
  dump_overall_stats ();
  fprintf (dump_file, "\nWhy inlining failed?\n");
  for (i = 0; i < CIF_N_REASONS; i++)
    if (reason[i][1])
      fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64 " count\n",
	       cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
	       (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
}
/* Called when node is removed.  */

static void
flatten_remove_node_hook (struct cgraph_node *node, void *data)
{
  if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
    return;

  hash_set<struct cgraph_node *> *removed
    = (hash_set<struct cgraph_node *> *) data;
  removed->add (node);
}
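
/* Illustrative sketch of the PR82801 situation handled by this hook:
   if two functions both carry the flatten attribute, e.g.

       __attribute__ ((flatten)) static void a (void) { ... }
       __attribute__ ((flatten)) static void b (void) { a (); }

   then flattening b may inline the only call to a, making a unreachable
   and removable.  Recording removed nodes here lets the flattening loop
   in ipa_inline skip them instead of touching freed callgraph nodes.  */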
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i, j;
  int cold;
  bool remove_functions = false;

  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);

  if (dump_file)
    ipa_dump_fn_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);
  spec_rem = profile_count::zero ();

  FOR_EACH_FUNCTION (node)
    {
      node->aux = 0;

      /* Recompute the default reasons for inlining because they may have
	 changed during merging.  */
      if (in_lto_p)
	{
	  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
	    {
	      gcc_assert (e->inline_failed);
	      initialize_inline_failed (e);
	    }
	  for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
	    initialize_inline_failed (e);
	}
    }

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* First shrink order array, so that it only contains nodes with
     flatten attribute.  */
  for (i = nnodes - 1, j = i; i >= 0; i--)
    {
      node = order[i];
      if (node->definition
	  && lookup_attribute ("flatten",
			       DECL_ATTRIBUTES (node->decl)) != NULL)
	order[j--] = order[i];
    }

  /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
     nodes with flatten attribute.  If there is more than one such
     node, we need to register a node removal hook, as flatten_function
     could remove other nodes with flatten attribute.  See PR82801.  */
  struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
  hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
  if (j < nnodes - 2)
    {
      flatten_removed_nodes = new hash_set<struct cgraph_node *>;
      node_removal_hook_holder
	= symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
					   flatten_removed_nodes);
    }

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i > j; i--)
    {
      node = order[i];
      if (flatten_removed_nodes
	  && flatten_removed_nodes->contains (node))
	continue;

      /* Handle nodes to be flattened.
	 Ideally, when processing callees we would stop inlining at the
	 entry of a cycle, possibly cloning that entry point and trying
	 to flatten it, turning it into a self-recursive function.  */
      if (dump_file)
	fprintf (dump_file, "Flattening %s\n", node->name ());
      flatten_function (node, false, true);
    }

  if (j < nnodes - 2)
    {
      symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
      delete flatten_removed_nodes;
    }
  free (order);

  if (dump_file)
    dump_overall_stats ();

  inline_small_functions ();
  gcc_assert (symtab->state == IPA_SSA);
  symtab->state = IPA_SSA_AFTER_INLINING;
  /* Do first after-inlining removal.  We want to remove all "stale" extern
     inline functions and virtual functions so we really know what is called
     once.  */
  symtab->remove_unreachable_nodes (dump_file);

  /* Inline functions with a property that after inlining into all callers the
     code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth limits
     are met.  */
  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on functions to be inlined into all callers and "
	     "removing useless speculations:\n");
  /* Inlining one function called once has a good chance of preventing
     inlining other functions into the same callee.  Ideally we should
     work in priority order, but probably inlining hot functions first
     is a good cut without the extra pain of maintaining the queue.

     ??? this is not really fitting the bill perfectly: inlining function
     into callee often leads to better optimization of callee due to
     increased context for optimization.
     For example if main() function calls a function that outputs help
     and then a function that does the main optimization, we should inline
     the second with priority even if both calls are cold by themselves.

     We probably want to implement a new predicate replacing our use of
     maybe_hot_edge interpreted as maybe_hot_edge || callee is known
     to be hot.  */
  for (cold = 0; cold <= 1; cold++)
    {
      FOR_EACH_DEFINED_FUNCTION (node)
	{
	  struct cgraph_edge *edge, *next;
	  bool update = false;

	  if (!opt_for_fn (node->decl, optimize)
	      || !opt_for_fn (node->decl, flag_inline_functions_called_once))
	    continue;

	  for (edge = node->callees; edge; edge = next)
	    {
	      next = edge->next_callee;
	      if (edge->speculative && !speculation_useful_p (edge, false))
		{
		  if (edge->count.ipa ().initialized_p ())
		    spec_rem += edge->count.ipa ();
		  edge->resolve_speculation ();
		  update = true;
		  remove_functions = true;
		}
	    }
	  if (update)
	    {
	      struct cgraph_node *where = node->inlined_to
					  ? node->inlined_to : node;
	      reset_edge_caches (where);
	      ipa_update_overall_fn_summary (where);
	    }
	  if (want_inline_function_to_all_callers_p (node, cold))
	    {
	      int num_calls = 0;
	      node->call_for_symbol_and_aliases (sum_callers, &num_calls,
						 true);
	      while (node->call_for_symbol_and_aliases
		       (inline_to_all_callers, &num_calls, true))
		;
	      remove_functions = true;
	    }
	}
    }
  /* Free ipa-prop structures if they are no longer needed.  */
  ipa_free_all_structures_after_iinln ();

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "\nInlined %i calls, eliminated %i functions\n\n",
		 ncalls_inlined, nfunctions_inlined);
  if (dump_file)
    dump_inline_stats ();

  if (dump_file)
    ipa_dump_fn_summaries (dump_file);
  return remove_functions ? TODO_remove_functions : 0;
}
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     " Not inlining recursive call to %C.\n",
			     e->callee);
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	{
	  /* Set inlined to true if the callee is marked "always_inline" but
	     is not inlinable.  This will allow flagging an error later in
	     expand_call_inline in tree-inline.c.  */
	  if (lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (callee->decl)) != NULL)
	    inlined = true;
	  continue;
	}

      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 " Inlining %C into %C (always_inline).\n",
			 e->callee, e->caller);
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}
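
/* Illustrative example (hypothetical code) for the function above:

       static inline int add (int a, int b)
	 __attribute__ ((always_inline));
       static inline int add (int a, int b) { return a + b; }
       int twice (int x) { return add (x, x); }

   always_inline sets DECL_DISREGARD_INLINE_LIMITS on add, so the call in
   twice is inlined here regardless of size limits.  If such a callee is
   not actually inlinable (for example because it calls setjmp), inlined
   is still set so expand_call_inline can emit an error later.  */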
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* We can encounter not-yet-analyzed functions during
	 early inlining on callgraphs with strongly
	 connected components.  */
      ipa_fn_summary *s = ipa_fn_summaries->get (callee);
      if (s == NULL || !s->inlinable || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !opt_for_fn (node->decl, flag_inline_small_functions)
	  && !opt_for_fn (node->decl, flag_inline_functions))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, e->call_stmt,
			 "Considering inline candidate %C.\n",
			 callee);

      if (!can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     " Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 " Inlining %C into %C.\n",
			 callee, e->caller);
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }

  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}
unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if datastructures for ipa-inliner are already computed.  This
     happens when some pass decides to construct new function and
     cgraph_add_new_function calls lowering passes and early optimization on
     it.  This can confuse us when the early inliner decides to inline a call
     to a function clone, because function clones don't have parameter lists
     in ipa-prop matching their signature.  */
  if (ipa_node_params_sum)
    return 0;

  if (flag_checking)
    node->verify ();
  node->remove_all_references ();

  /* Even when not optimizing or not inlining inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 function into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
	  && lookup_attribute ("always_inline",
			       DECL_ATTRIBUTES (node->decl))))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_enabled_p ())
	dump_printf (MSG_OPTIMIZED_LOCATIONS,
		     "Flattening %C\n", node);
      flatten_function (node, true, true);
      inlined = true;
    }
  else
    {
      /* If some always_inline functions were inlined, apply the changes.
	 This way we will not account always inline into growth limits and
	 moreover we will inline calls from always inlines that we skipped
	 previously because of conditional above.  */
      if (inlined)
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  /* optimize_inline_calls call above might have introduced new
	     statements that don't have inline parameters computed.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We can encounter not-yet-analyzed functions during
		 early inlining on callgraphs with strongly
		 connected components.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  ipa_update_overall_fn_summary (node);
	  inlined = false;
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < param_early_inliner_max_iterations
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of early inliner works as expected.  We however have
	     values approximately right and thus we only need to update edge
	     info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We have no summary for new bound store calls yet.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  if (iterations < param_early_inliner_max_iterations - 1)
	    ipa_update_overall_fn_summary (node);
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}
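
/* Illustrative example (hypothetical code) of why the incremental
   iteration above helps:

       static int value (void) { return 42; }
       static int dispatch (int (*fn) (void)) { return fn (); }
       int entry (void) { return dispatch (value); }

   the first iteration inlines dispatch into entry, turning fn () into a
   direct call to value; the next iteration can then inline value too.
   Between iterations only the per-edge size/time estimates are refreshed
   since, as the comments above note, the remaining summary values are
   approximately right already.  */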
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      NULL, /* generate_summary */
		      NULL, /* write_summary */
		      NULL, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}