1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 The implementation of the inliner is organized as follows:
24
25 inlining heuristics limits
26
27 can_inline_edge_p allows checking that a particular inlining is allowed
28 by the limits specified by the user (allowed function growth and so
29 on).
30
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
35
36 inlining heuristics
37
38 The inliner itself is split into two passes:
39
40 pass_early_inlining
41
42 A simple local inlining pass that inlines callees into the current function.
43 This pass makes no use of whole-unit analysis and thus it can make only
44 very simple decisions based on local properties.
45
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner were already optimized,
50 and the results of early inlining add a lot of optimization opportunities
51 for local optimization.
52
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
56
57 The main strength of the pass is the ability to eliminate abstraction
58 penalty in C++ code (via a combination of inlining and early
59 optimization) and thus improve the quality of analysis done by the real IPA
60 optimizers.
61
62 Because of the lack of whole-unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It does, however, perform very simple
64 speculative inlining, allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
67
68 pass_ipa_inline
69
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs the following steps:
72
73 1) inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
76
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special-purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
81
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
87
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to a reduction of code size by eliminating
90 the need for an offline copy of the function. */
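/* A simplified sketch (for illustration only, not the actual implementation)
   of the greedy small-function loop described in step 1 above; the real loop
   lives in inline_small_functions () below and additionally handles caches,
   recursive inlining, unit-growth budgets and newly discovered edges.

     edge_heap_t heap (sreal::min ());
     // seed HEAP with every inlinable edge, keyed by edge_badness ()
     while (!heap.empty ())
       {
         cgraph_edge *edge = heap.extract_min ();
         if (!can_inline_edge_p (edge, true)
             || !want_inline_small_function_p (edge, true))
           continue;
         // stop once the unit growth budget from compute_max_insns () is spent
         inline_call (edge, true, NULL, &overall_size, true);
         // re-key caller/callee edges whose badness changed
       }  */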
91
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "target.h"
97 #include "rtl.h"
98 #include "tree.h"
99 #include "gimple.h"
100 #include "alloc-pool.h"
101 #include "tree-pass.h"
102 #include "gimple-ssa.h"
103 #include "cgraph.h"
104 #include "lto-streamer.h"
105 #include "trans-mem.h"
106 #include "calls.h"
107 #include "tree-inline.h"
108 #include "params.h"
109 #include "profile.h"
110 #include "symbol-summary.h"
111 #include "tree-vrp.h"
112 #include "ipa-prop.h"
113 #include "ipa-fnsummary.h"
114 #include "ipa-inline.h"
115 #include "ipa-utils.h"
116 #include "sreal.h"
117 #include "auto-profile.h"
118 #include "builtins.h"
119 #include "fibonacci_heap.h"
120 #include "stringpool.h"
121 #include "attribs.h"
122 #include "asan.h"
123
124 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
125 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
126
127 /* Statistics we collect about inlining algorithm. */
128 static int overall_size;
129 static profile_count max_count;
130 static profile_count spec_rem;
131
132 /* Return false when inlining edge E would lead to violating
133 limits on function unit growth or stack usage growth.
134
135 The relative function body growth limit is present generally
136 to avoid problems with non-linear behavior of the compiler.
137 To allow inlining huge functions into a tiny wrapper, the limit
138 is always based on the bigger of the two functions considered.
139
140 For stack growth limits we always base the growth on the stack usage
141 of the callers. We want to prevent applications from segfaulting
142 on stack overflow when functions with huge stack frames get
143 inlined. */
144
145 static bool
146 caller_growth_limits (struct cgraph_edge *e)
147 {
148 struct cgraph_node *to = e->caller;
149 struct cgraph_node *what = e->callee->ultimate_alias_target ();
150 int newsize;
151 int limit = 0;
152 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
153 ipa_fn_summary *info, *what_info;
154 ipa_fn_summary *outer_info = ipa_fn_summaries->get_create (to);
155
156 /* Look for the function e->caller is inlined to. While doing
157 so, work out the largest function body on the way. As
158 described above, we want to base our function growth
159 limits on that, not on the self size of the
160 outer function and not on the self size of the inline code
161 we immediately inline to. This is the most relaxed
162 interpretation of the rule "do not grow large functions
163 too much in order to prevent the compiler from exploding". */
164 while (true)
165 {
166 info = ipa_fn_summaries->get_create (to);
167 if (limit < info->self_size)
168 limit = info->self_size;
169 if (stack_size_limit < info->estimated_self_stack_size)
170 stack_size_limit = info->estimated_self_stack_size;
171 if (to->global.inlined_to)
172 to = to->callers->caller;
173 else
174 break;
175 }
176
177 what_info = ipa_fn_summaries->get_create (what);
178
179 if (limit < what_info->self_size)
180 limit = what_info->self_size;
181
182 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
183
184 /* Check the size after inlining against the function limits. But allow
185 the function to shrink if it went over the limits by forced inlining. */
186 newsize = estimate_size_after_inlining (to, e);
187 if (newsize >= info->size
188 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
189 && newsize > limit)
190 {
191 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
192 return false;
193 }
194
195 if (!what_info->estimated_stack_size)
196 return true;
197
198 /* FIXME: Stack size limit often prevents inlining in Fortran programs
199 due to large i/o datastructures used by the Fortran front-end.
200 We ought to ignore this limit when we know that the edge is executed
201 on every invocation of the caller (i.e. its call statement dominates
202 exit block). We do not track this information, yet. */
203 stack_size_limit += ((gcov_type)stack_size_limit
204 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
205
206 inlined_stack = (outer_info->stack_frame_offset
207 + outer_info->estimated_self_stack_size
208 + what_info->estimated_stack_size);
209 /* Check new stack consumption with stack consumption at the place
210 stack is used. */
211 if (inlined_stack > stack_size_limit
212 /* If the function already has large stack usage from a sibling
213 inline call, we can inline, too.
214 This bit overoptimistically assumes that we are good at stack
215 packing. */
216 && inlined_stack > info->estimated_stack_size
217 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
218 {
219 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
220 return false;
221 }
222 return true;
223 }
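/* A worked illustration of the growth limit computed above, using made-up
   sizes and example parameter values (e.g. --param large-function-growth=100
   and --param large-function-insns=2700 are only illustrative settings here):

     largest self_size on the inline chain  = 900
     callee (what) self_size                = 400   -> limit starts at 900
     limit += 900 * 100 / 100                       -> limit = 1800

   The edge is rejected with CIF_LARGE_FUNCTION_GROWTH_LIMIT only if the
   estimated size after inlining does not shrink the function, exceeds 2700
   and also exceeds 1800; otherwise the growth check passes and only the
   stack-frame check can still reject the edge.  */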
224
225 /* Dump info about why inlining has failed. */
226
227 static void
228 report_inline_failed_reason (struct cgraph_edge *e)
229 {
230 if (dump_file)
231 {
232 fprintf (dump_file, " not inlinable: %s -> %s, %s\n",
233 e->caller->dump_name (),
234 e->callee->dump_name (),
235 cgraph_inline_failed_string (e->inline_failed));
236 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
237 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
238 && e->caller->lto_file_data
239 && e->callee->ultimate_alias_target ()->lto_file_data)
240 {
241 fprintf (dump_file, " LTO objects: %s, %s\n",
242 e->caller->lto_file_data->file_name,
243 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
244 }
245 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
246 cl_target_option_print_diff
247 (dump_file, 2, target_opts_for_fn (e->caller->decl),
248 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
249 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
250 cl_optimization_print_diff
251 (dump_file, 2, opts_for_fn (e->caller->decl),
252 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
253 }
254 }
255
256 /* Decide whether sanitizer-related attributes allow inlining. */
257
258 static bool
259 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
260 {
261 if (!caller || !callee)
262 return true;
263
264 return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
265 == sanitize_flags_p (SANITIZE_ADDRESS, callee))
266 && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
267 == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
268 && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
269 == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
270 }
271
272 /* Used for flags where it is safe to inline when caller's value is
273 greater than callee's. */
274 #define check_maybe_up(flag) \
275 (opts_for_fn (caller->decl)->x_##flag \
276 != opts_for_fn (callee->decl)->x_##flag \
277 && (!always_inline \
278 || opts_for_fn (caller->decl)->x_##flag \
279 < opts_for_fn (callee->decl)->x_##flag))
280 /* Used for flags where it is safe to inline when caller's value is
281 smaller than callee's. */
282 #define check_maybe_down(flag) \
283 (opts_for_fn (caller->decl)->x_##flag \
284 != opts_for_fn (callee->decl)->x_##flag \
285 && (!always_inline \
286 || opts_for_fn (caller->decl)->x_##flag \
287 > opts_for_fn (callee->decl)->x_##flag))
288 /* Used for flags where exact match is needed for correctness. */
289 #define check_match(flag) \
290 (opts_for_fn (caller->decl)->x_##flag \
291 != opts_for_fn (callee->decl)->x_##flag)
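/* For illustration (not part of the original source), check_maybe_up
   (flag_trapping_math) expands to roughly:

     (opts_for_fn (caller->decl)->x_flag_trapping_math
      != opts_for_fn (callee->decl)->x_flag_trapping_math
      && (!always_inline
          || opts_for_fn (caller->decl)->x_flag_trapping_math
             < opts_for_fn (callee->decl)->x_flag_trapping_math))

   i.e. it is true when the flag mismatch should block inlining: any
   difference blocks an ordinary call, while an always_inline call is only
   blocked when the caller's value is smaller than the callee's.  */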
292
293 /* Decide if we can inline the edge and possibly update
294 inline_failed reason.
295 We check whether inlining is possible at all and whether
296 caller growth limits allow doing so.
297
298 if REPORT is true, output reason to the dump file. */
299
300 static bool
301 can_inline_edge_p (struct cgraph_edge *e, bool report,
302 bool early = false)
303 {
304 gcc_checking_assert (e->inline_failed);
305
306 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
307 {
308 if (report)
309 report_inline_failed_reason (e);
310 return false;
311 }
312
313 bool inlinable = true;
314 enum availability avail;
315 cgraph_node *caller = e->caller->global.inlined_to
316 ? e->caller->global.inlined_to : e->caller;
317 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
318
319 if (!callee->definition)
320 {
321 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
322 inlinable = false;
323 }
324 if (!early && (!opt_for_fn (callee->decl, optimize)
325 || !opt_for_fn (caller->decl, optimize)))
326 {
327 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
328 inlinable = false;
329 }
330 else if (callee->calls_comdat_local)
331 {
332 e->inline_failed = CIF_USES_COMDAT_LOCAL;
333 inlinable = false;
334 }
335 else if (avail <= AVAIL_INTERPOSABLE)
336 {
337 e->inline_failed = CIF_OVERWRITABLE;
338 inlinable = false;
339 }
340 /* All edges with call_stmt_cannot_inline_p should have inline_failed
341 initialized to one of FINAL_ERROR reasons. */
342 else if (e->call_stmt_cannot_inline_p)
343 gcc_unreachable ();
344 /* Don't inline if the functions have different EH personalities. */
345 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
346 && DECL_FUNCTION_PERSONALITY (callee->decl)
347 && (DECL_FUNCTION_PERSONALITY (caller->decl)
348 != DECL_FUNCTION_PERSONALITY (callee->decl)))
349 {
350 e->inline_failed = CIF_EH_PERSONALITY;
351 inlinable = false;
352 }
353 /* TM pure functions should not be inlined into non-TM_pure
354 functions. */
355 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
356 {
357 e->inline_failed = CIF_UNSPECIFIED;
358 inlinable = false;
359 }
360 /* Check compatibility of target optimization options. */
361 else if (!targetm.target_option.can_inline_p (caller->decl,
362 callee->decl))
363 {
364 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
365 inlinable = false;
366 }
367 else if (!ipa_fn_summaries->get_create (callee)->inlinable)
368 {
369 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
370 inlinable = false;
371 }
372 /* Don't inline a function with mismatched sanitization attributes. */
373 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
374 {
375 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
376 inlinable = false;
377 }
378 if (!inlinable && report)
379 report_inline_failed_reason (e);
380 return inlinable;
381 }
382
383 /* Decide if we can inline the edge and possibly update
384 inline_failed reason.
385 We check whether inlining is possible at all and whether
386 caller growth limits allow doing so.
387
388 if REPORT is true, output reason to the dump file.
389
390 if DISREGARD_LIMITS is true, ignore size limits. */
391
392 static bool
393 can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
394 bool disregard_limits = false, bool early = false)
395 {
396 gcc_checking_assert (e->inline_failed);
397
398 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
399 {
400 if (report)
401 report_inline_failed_reason (e);
402 return false;
403 }
404
405 bool inlinable = true;
406 enum availability avail;
407 cgraph_node *caller = e->caller->global.inlined_to
408 ? e->caller->global.inlined_to : e->caller;
409 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
410 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
411 tree callee_tree
412 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
413 /* Check if caller growth allows the inlining. */
414 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
415 && !disregard_limits
416 && !lookup_attribute ("flatten",
417 DECL_ATTRIBUTES (caller->decl))
418 && !caller_growth_limits (e))
419 inlinable = false;
420 /* Don't inline a function with a higher optimization level than the
421 caller. FIXME: this is really just tip of iceberg of handling
422 optimization attribute. */
423 else if (caller_tree != callee_tree)
424 {
425 bool always_inline =
426 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
427 && lookup_attribute ("always_inline",
428 DECL_ATTRIBUTES (callee->decl)));
429 ipa_fn_summary *caller_info = ipa_fn_summaries->get_create (caller);
430 ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (callee);
431
432 /* Until GCC 4.9 we did not check the semantics-altering flags
433 below and inlined across the optimization boundary.
434 Enabling the checks below breaks several packages by refusing
435 to inline library always_inline functions. See PR65873.
436 Disable the check for early inlining for now until a better solution
437 is found. */
438 if (always_inline && early)
439 ;
440 /* There are some options that change IL semantics, which means
441 we cannot inline in these cases for correctness reasons,
442 not even for always_inline declared functions. */
443 else if (check_match (flag_wrapv)
444 || check_match (flag_trapv)
445 || check_match (flag_pcc_struct_return)
446 /* When caller or callee does FP math, be sure the FP codegen flags
447 are compatible. */
448 || ((caller_info->fp_expressions && callee_info->fp_expressions)
449 && (check_maybe_up (flag_rounding_math)
450 || check_maybe_up (flag_trapping_math)
451 || check_maybe_down (flag_unsafe_math_optimizations)
452 || check_maybe_down (flag_finite_math_only)
453 || check_maybe_up (flag_signaling_nans)
454 || check_maybe_down (flag_cx_limited_range)
455 || check_maybe_up (flag_signed_zeros)
456 || check_maybe_down (flag_associative_math)
457 || check_maybe_down (flag_reciprocal_math)
458 || check_maybe_down (flag_fp_int_builtin_inexact)
459 /* Strictly speaking only when the callee contains function
460 calls that may end up setting errno. */
461 || check_maybe_up (flag_errno_math)))
462 /* We do not want code compiled with exceptions to be
463 brought into a non-EH function unless we know that the callee
464 does not throw.
465 This is tracked by DECL_FUNCTION_PERSONALITY. */
466 || (check_maybe_up (flag_non_call_exceptions)
467 && DECL_FUNCTION_PERSONALITY (callee->decl))
468 || (check_maybe_up (flag_exceptions)
469 && DECL_FUNCTION_PERSONALITY (callee->decl))
470 /* When devirtualization is disabled for the callee, it is not safe
471 to inline it as we may have mangled the type info.
472 Allow early inlining of always inlines. */
473 || (!early && check_maybe_down (flag_devirtualize)))
474 {
475 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
476 inlinable = false;
477 }
478 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
479 else if (always_inline)
480 ;
481 /* When user added an attribute to the callee honor it. */
482 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
483 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
484 {
485 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
486 inlinable = false;
487 }
488 /* If explicit optimize attributes are not used, the mismatch is caused
489 by different command line options used to build different units.
490 Do not care about COMDAT functions - those are intended to be
491 optimized with the optimization flags of the module they are used in.
492 Also do not care about mixing up size/speed optimization when
493 DECL_DISREGARD_INLINE_LIMITS is set. */
494 else if ((callee->merged_comdat
495 && !lookup_attribute ("optimize",
496 DECL_ATTRIBUTES (caller->decl)))
497 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
498 ;
499 /* If the mismatch is caused by merging two LTO units with different
500 optimization flags we want to be a bit nicer. However never inline
501 if one of the functions is not optimized at all. */
502 else if (!opt_for_fn (callee->decl, optimize)
503 || !opt_for_fn (caller->decl, optimize))
504 {
505 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
506 inlinable = false;
507 }
508 /* If the callee is optimized for size and the caller is not, allow inlining if
509 code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
510 is inline (and thus likely a unified comdat). This will allow the caller
511 to run faster. */
512 else if (opt_for_fn (callee->decl, optimize_size)
513 > opt_for_fn (caller->decl, optimize_size))
514 {
515 int growth = estimate_edge_growth (e);
516 if (growth > 0
517 && (!DECL_DECLARED_INLINE_P (callee->decl)
518 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
519 MAX_INLINE_INSNS_AUTO)))
520 {
521 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
522 inlinable = false;
523 }
524 }
525 /* If callee is more aggressively optimized for performance than caller,
526 we generally want to inline only cheap (runtime wise) functions. */
527 else if (opt_for_fn (callee->decl, optimize_size)
528 < opt_for_fn (caller->decl, optimize_size)
529 || (opt_for_fn (callee->decl, optimize)
530 > opt_for_fn (caller->decl, optimize)))
531 {
532 if (estimate_edge_time (e)
533 >= 20 + ipa_call_summaries->get_create (e)->call_stmt_time)
534 {
535 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
536 inlinable = false;
537 }
538 }
539
540 }
541
542 if (!inlinable && report)
543 report_inline_failed_reason (e);
544 return inlinable;
545 }
546
547
548 /* Return true if the edge E is inlinable during early inlining. */
549
550 static bool
551 can_early_inline_edge_p (struct cgraph_edge *e)
552 {
553 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
554 /* The early inliner might get called at WPA stage when an IPA pass adds a new
555 function. In this case we cannot really do any early inlining
556 because function bodies are missing. */
557 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
558 return false;
559 if (!gimple_has_body_p (callee->decl))
560 {
561 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
562 return false;
563 }
564 /* In early inliner some of callees may not be in SSA form yet
565 (i.e. the callgraph is cyclic and we did not process
566 the callee by early inliner, yet). We don't have CIF code for this
567 case; later we will re-do the decision in the real inliner. */
568 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
569 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
570 {
571 if (dump_file)
572 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
573 return false;
574 }
575 if (!can_inline_edge_p (e, true, true)
576 || !can_inline_edge_by_limits_p (e, true, false, true))
577 return false;
578 return true;
579 }
580
581
582 /* Return number of calls in N. Ignore cheap builtins. */
583
584 static int
585 num_calls (struct cgraph_node *n)
586 {
587 struct cgraph_edge *e;
588 int num = 0;
589
590 for (e = n->callees; e; e = e->next_callee)
591 if (!is_inexpensive_builtin (e->callee->decl))
592 num++;
593 return num;
594 }
595
596
597 /* Return true if we are interested in inlining small function. */
598
599 static bool
600 want_early_inline_function_p (struct cgraph_edge *e)
601 {
602 bool want_inline = true;
603 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
604
605 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
606 ;
607 /* For AutoFDO, we need to make sure that before profile summary, all
608 hot paths' IR look exactly the same as profiled binary. As a result,
609 in einliner, we will disregard size limit and inline those callsites
610 that are:
611 * inlined in the profiled binary, and
612 * the cloned callee has enough samples to be considered "hot". */
613 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
614 ;
615 else if (!DECL_DECLARED_INLINE_P (callee->decl)
616 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
617 {
618 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
619 report_inline_failed_reason (e);
620 want_inline = false;
621 }
622 else
623 {
624 int growth = estimate_edge_growth (e);
625 int n;
626
627 if (growth <= 0)
628 ;
629 else if (!e->maybe_hot_p ()
630 && growth > 0)
631 {
632 if (dump_file)
633 fprintf (dump_file, " will not early inline: %s->%s, "
634 "call is cold and code would grow by %i\n",
635 e->caller->dump_name (),
636 callee->dump_name (),
637 growth);
638 want_inline = false;
639 }
640 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
641 {
642 if (dump_file)
643 fprintf (dump_file, " will not early inline: %s->%s, "
644 "growth %i exceeds --param early-inlining-insns\n",
645 e->caller->dump_name (),
646 callee->dump_name (),
647 growth);
648 want_inline = false;
649 }
650 else if ((n = num_calls (callee)) != 0
651 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
652 {
653 if (dump_file)
654 fprintf (dump_file, " will not early inline: %s->%s, "
655 "growth %i exceeds --param early-inlining-insns "
656 "divided by number of calls\n",
657 e->caller->dump_name (),
658 callee->dump_name (),
659 growth);
660 want_inline = false;
661 }
662 }
663 return want_inline;
664 }
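/* A small illustration of the last check above, with made-up numbers and an
   example value of --param early-inlining-insns=14 (not necessarily the
   default): if inlining the callee grows the caller by 4 instructions and the
   callee itself makes 3 non-builtin calls, the projected growth of flattening
   it fully is 4 * (3 + 1) = 16 > 14, so early inlining is refused even though
   the direct growth of 4 would have been acceptable on its own.  */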
665
666 /* Compute time of the edge->caller + edge->callee execution when inlining
667 does not happen. */
668
669 inline sreal
670 compute_uninlined_call_time (struct cgraph_edge *edge,
671 sreal uninlined_call_time)
672 {
673 cgraph_node *caller = (edge->caller->global.inlined_to
674 ? edge->caller->global.inlined_to
675 : edge->caller);
676
677 sreal freq = edge->sreal_frequency ();
678 if (freq > 0)
679 uninlined_call_time *= freq;
680 else
681 uninlined_call_time = uninlined_call_time >> 11;
682
683 sreal caller_time = ipa_fn_summaries->get_create (caller)->time;
684 return uninlined_call_time + caller_time;
685 }
686
687 /* Same as compute_uninlined_call_time but compute time when inlining
688 does happen. */
689
690 inline sreal
691 compute_inlined_call_time (struct cgraph_edge *edge,
692 sreal time)
693 {
694 cgraph_node *caller = (edge->caller->global.inlined_to
695 ? edge->caller->global.inlined_to
696 : edge->caller);
697 sreal caller_time = ipa_fn_summaries->get_create (caller)->time;
698
699 sreal freq = edge->sreal_frequency ();
700 if (freq > 0)
701 time *= freq;
702 else
703 time = time >> 11;
704
705 /* This calculation should match one in ipa-inline-analysis.c
706 (estimate_edge_size_and_time). */
707 time -= (sreal)ipa_call_summaries->get_create (edge)->call_stmt_time * freq;
708 time += caller_time;
709 if (time <= 0)
710 time = ((sreal) 1) >> 8;
711 gcc_checking_assert (time >= 0);
712 return time;
713 }
714
715 /* Return true if the speedup for inlining E is bigger than
716 PARAM_INLINE_MIN_SPEEDUP. */
717
718 static bool
719 big_speedup_p (struct cgraph_edge *e)
720 {
721 sreal unspec_time;
722 sreal spec_time = estimate_edge_time (e, &unspec_time);
723 sreal time = compute_uninlined_call_time (e, unspec_time);
724 sreal inlined_time = compute_inlined_call_time (e, spec_time);
725
726 if ((time - inlined_time) * 100
727 > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)))
728 return true;
729 return false;
730 }
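/* A worked example of the two call-time estimates and the speedup test above,
   with made-up numbers and an example --param inline-min-speedup=10:

     edge frequency            freq   = 2
     caller body time          caller = 100
     call time if not inlined  unspec = 30  -> uninlined = 30*2 + 100 = 160
     call time if inlined      spec   = 20
     call_stmt_time            stmt   = 1   -> inlined = (20 - 1)*2 + 100 = 138

     (160 - 138) * 100 = 2200  >  160 * 10 = 1600  -> big_speedup_p is true.  */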
731
732 /* Return true if we are interested in inlining small function.
733 When REPORT is true, report reason to dump file. */
734
735 static bool
736 want_inline_small_function_p (struct cgraph_edge *e, bool report)
737 {
738 bool want_inline = true;
739 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
740
741 /* Allow this function to be called before can_inline_edge_p,
742 since it's usually cheaper. */
743 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
744 want_inline = false;
745 else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
746 ;
747 else if (!DECL_DECLARED_INLINE_P (callee->decl)
748 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
749 {
750 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
751 want_inline = false;
752 }
753 /* Do a fast and conservative check whether the function can be a good
754 inline candidate. At the moment we allow inline hints to
755 promote non-inline functions to inline and we increase
756 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
757 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
758 && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
759 && ipa_fn_summaries->get_create (callee)->min_size
760 - ipa_call_summaries->get_create (e)->call_stmt_size
761 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
762 {
763 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
764 want_inline = false;
765 }
766 else if ((DECL_DECLARED_INLINE_P (callee->decl)
767 || e->count.ipa ().nonzero_p ())
768 && ipa_fn_summaries->get_create (callee)->min_size
769 - ipa_call_summaries->get_create (e)->call_stmt_size
770 > 16 * MAX_INLINE_INSNS_SINGLE)
771 {
772 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
773 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
774 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
775 want_inline = false;
776 }
777 else
778 {
779 int growth = estimate_edge_growth (e);
780 ipa_hints hints = estimate_edge_hints (e);
781 bool big_speedup = big_speedup_p (e);
782
783 if (growth <= 0)
784 ;
785 /* Apply MAX_INLINE_INSNS_SINGLE limit. Do not do so when
786 hints suggest that inlining the given function is very profitable. */
787 else if (DECL_DECLARED_INLINE_P (callee->decl)
788 && growth >= MAX_INLINE_INSNS_SINGLE
789 && ((!big_speedup
790 && !(hints & (INLINE_HINT_indirect_call
791 | INLINE_HINT_known_hot
792 | INLINE_HINT_loop_iterations
793 | INLINE_HINT_array_index
794 | INLINE_HINT_loop_stride)))
795 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
796 {
797 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
798 want_inline = false;
799 }
800 else if (!DECL_DECLARED_INLINE_P (callee->decl)
801 && !opt_for_fn (e->caller->decl, flag_inline_functions))
802 {
803 /* growth_likely_positive is expensive, always test it last. */
804 if (growth >= MAX_INLINE_INSNS_SINGLE
805 || growth_likely_positive (callee, growth))
806 {
807 e->inline_failed = CIF_NOT_DECLARED_INLINED;
808 want_inline = false;
809 }
810 }
811 /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
812 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
813 inlining the given function is very profitable. */
814 else if (!DECL_DECLARED_INLINE_P (callee->decl)
815 && !big_speedup
816 && !(hints & INLINE_HINT_known_hot)
817 && growth >= ((hints & (INLINE_HINT_indirect_call
818 | INLINE_HINT_loop_iterations
819 | INLINE_HINT_array_index
820 | INLINE_HINT_loop_stride))
821 ? MAX (MAX_INLINE_INSNS_AUTO,
822 MAX_INLINE_INSNS_SINGLE)
823 : MAX_INLINE_INSNS_AUTO))
824 {
825 /* growth_likely_positive is expensive, always test it last. */
826 if (growth >= MAX_INLINE_INSNS_SINGLE
827 || growth_likely_positive (callee, growth))
828 {
829 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
830 want_inline = false;
831 }
832 }
833 /* If call is cold, do not inline when function body would grow. */
834 else if (!e->maybe_hot_p ()
835 && (growth >= MAX_INLINE_INSNS_SINGLE
836 || growth_likely_positive (callee, growth)))
837 {
838 e->inline_failed = CIF_UNLIKELY_CALL;
839 want_inline = false;
840 }
841 }
842 if (!want_inline && report)
843 report_inline_failed_reason (e);
844 return want_inline;
845 }
846
847 /* EDGE is a self-recursive edge.
848 We handle two cases - when function A is inlined into itself
849 or when function A is being inlined into another inliner copy of function
850 A within function B.
851
852 In the first case OUTER_NODE points to the toplevel copy of A, while
853 in the second case OUTER_NODE points to the outermost copy of A in B.
854
855 In both cases we want to be extra selective since
856 inlining the call will just cause new recursive calls to appear. */
857
858 static bool
859 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
860 struct cgraph_node *outer_node,
861 bool peeling,
862 int depth)
863 {
864 char const *reason = NULL;
865 bool want_inline = true;
866 sreal caller_freq = 1;
867 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
868
869 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
870 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
871
872 if (!edge->maybe_hot_p ())
873 {
874 reason = "recursive call is cold";
875 want_inline = false;
876 }
877 else if (depth > max_depth)
878 {
879 reason = "--param max-inline-recursive-depth exceeded.";
880 want_inline = false;
881 }
882 else if (outer_node->global.inlined_to
883 && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
884 {
885 reason = "caller frequency is 0";
886 want_inline = false;
887 }
888
889 if (!want_inline)
890 ;
891 /* Inlining of self recursive function into copy of itself within other
892 function is transformation similar to loop peeling.
893
894 Peeling is profitable if we can inline enough copies to make probability
895 of actual call to the self recursive function very small. Be sure that
896 the probability of recursion is small.
897
898 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
899 This way the expected number of recursion is at most max_depth. */
900 else if (peeling)
901 {
902 sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
903 int i;
904 for (i = 1; i < depth; i++)
905 max_prob = max_prob * max_prob;
906 if (edge->sreal_frequency () >= max_prob * caller_freq)
907 {
908 reason = "frequency of recursive call is too large";
909 want_inline = false;
910 }
911 }
912 /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
913 recursion depth is large. We reduce function call overhead and increase
914 chances that things fit in hardware return predictor.
915
916 Recursive inlining might however increase cost of stack frame setup
917 actually slowing down functions whose recursion tree is wide rather than
918 deep.
919
920 Deciding reliably on when to do recursive inlining without profile feedback
921 is tricky. For now we disable recursive inlining when probability of self
922 recursion is low.
923
924 Recursive inlining of self recursive call within loop also results in
925 large loop depths that generally optimize badly. We may want to throttle
926 down inlining in those cases. In particular this seems to happen in one
927 of libstdc++ rb tree methods. */
928 else
929 {
930 if (edge->sreal_frequency () * 100
931 <= caller_freq
932 * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
933 {
934 reason = "frequency of recursive call is too small";
935 want_inline = false;
936 }
937 }
938 if (!want_inline && dump_file)
939 fprintf (dump_file, " not inlining recursively: %s\n", reason);
940 return want_inline;
941 }
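/* A numeric illustration of the peeling threshold above, assuming an example
   --param max-inline-recursive-depth-auto=8 (so max_prob starts at 7/8):

     depth 1: recursion allowed while edge freq < 0.875 * caller freq
     depth 2: threshold = 0.875^2            ~ 0.77 * caller freq
     depth 3: threshold = 0.77^2             ~ 0.59 * caller freq
     depth 4: threshold = 0.59^2             ~ 0.34 * caller freq

   Squaring the bound at each level makes deeper recursive inlining require an
   increasingly unlikely recursive call, which keeps the expected recursion
   depth bounded.  */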
942
943 /* Return true when NODE has an uninlinable caller;
944 set HAS_HOT_CALL if it has a hot call.
945 Worker for cgraph_for_node_and_aliases. */
946
947 static bool
948 check_callers (struct cgraph_node *node, void *has_hot_call)
949 {
950 struct cgraph_edge *e;
951 for (e = node->callers; e; e = e->next_caller)
952 {
953 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
954 || !opt_for_fn (e->caller->decl, optimize))
955 return true;
956 if (!can_inline_edge_p (e, true))
957 return true;
958 if (e->recursive_p ())
959 return true;
960 if (!can_inline_edge_by_limits_p (e, true))
961 return true;
962 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
963 *(bool *)has_hot_call = true;
964 }
965 return false;
966 }
967
968 /* If NODE has a caller, return true. */
969
970 static bool
971 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
972 {
973 if (node->callers)
974 return true;
975 return false;
976 }
977
978 /* Decide if inlining NODE would reduce unit size by eliminating
979 the offline copy of function.
980 When COLD is true the cold calls are considered, too. */
981
982 static bool
983 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
984 {
985 bool has_hot_call = false;
986
987 /* Aliases get inlined along with the function they alias. */
988 if (node->alias)
989 return false;
990 /* Already inlined? */
991 if (node->global.inlined_to)
992 return false;
993 /* Does it have callers? */
994 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
995 return false;
996 /* Inlining into all callers would increase size? */
997 if (estimate_growth (node) > 0)
998 return false;
999 /* All inlines must be possible. */
1000 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
1001 true))
1002 return false;
1003 if (!cold && !has_hot_call)
1004 return false;
1005 return true;
1006 }
1007
1008 /* A cost model driving the inlining heuristics in such a way that the edges with
1009 the smallest badness are inlined first. After each inlining is performed
1010 the costs of all caller edges of the nodes affected are recomputed so the
1011 metrics may accurately depend on values such as the number of inlinable callers
1012 of the function or the function body size. */
1013
1014 static sreal
1015 edge_badness (struct cgraph_edge *edge, bool dump)
1016 {
1017 sreal badness;
1018 int growth;
1019 sreal edge_time, unspec_edge_time;
1020 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1021 struct ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (callee);
1022 ipa_hints hints;
1023 cgraph_node *caller = (edge->caller->global.inlined_to
1024 ? edge->caller->global.inlined_to
1025 : edge->caller);
1026
1027 growth = estimate_edge_growth (edge);
1028 edge_time = estimate_edge_time (edge, &unspec_edge_time);
1029 hints = estimate_edge_hints (edge);
1030 gcc_checking_assert (edge_time >= 0);
1031 /* Check that inlined time is better, but tolerate some roundoff issues.
1032 FIXME: When callee profile drops to 0 we account calls more. This
1033 should be fixed by never doing that. */
1034 gcc_checking_assert ((edge_time * 100
1035 - callee_info->time * 101).to_int () <= 0
1036 || callee->count.ipa ().initialized_p ());
1037 gcc_checking_assert (growth <= callee_info->size);
1038
1039 if (dump)
1040 {
1041 fprintf (dump_file, " Badness calculation for %s -> %s\n",
1042 edge->caller->dump_name (),
1043 edge->callee->dump_name ());
1044 fprintf (dump_file, " size growth %i, time %f unspec %f ",
1045 growth,
1046 edge_time.to_double (),
1047 unspec_edge_time.to_double ());
1048 ipa_dump_hints (dump_file, hints);
1049 if (big_speedup_p (edge))
1050 fprintf (dump_file, " big_speedup");
1051 fprintf (dump_file, "\n");
1052 }
1053
1054 /* Always prefer inlining saving code size. */
1055 if (growth <= 0)
1056 {
1057 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1058 if (dump)
1059 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1060 growth);
1061 }
1062 /* Inlining into EXTERNAL functions is not going to change anything unless
1063 they are themselves inlined. */
1064 else if (DECL_EXTERNAL (caller->decl))
1065 {
1066 if (dump)
1067 fprintf (dump_file, " max: function is external\n");
1068 return sreal::max ();
1069 }
1070 /* When profile is available. Compute badness as:
1071
1072 time_saved * caller_count
1073 goodness = -------------------------------------------------
1074 growth_of_caller * overall_growth * combined_size
1075
1076 badness = - goodness
1077
1078 Again use a negative value to make calls with a profile appear hotter
1079 than calls without.
1080 */
1081 else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
1082 || caller->count.ipa ().nonzero_p ())
1083 {
1084 sreal numerator, denominator;
1085 int overall_growth;
1086 sreal inlined_time = compute_inlined_call_time (edge, edge_time);
1087
1088 numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
1089 - inlined_time);
1090 if (numerator <= 0)
1091 numerator = ((sreal) 1 >> 8);
1092 if (caller->count.ipa ().nonzero_p ())
1093 numerator *= caller->count.ipa ().to_gcov_type ();
1094 else if (caller->count.ipa ().initialized_p ())
1095 numerator = numerator >> 11;
1096 denominator = growth;
1097
1098 overall_growth = callee_info->growth;
1099
1100 /* Look for inliner wrappers of the form:
1101
1102 inline_caller ()
1103 {
1104 do_fast_job...
1105 if (need_more_work)
1106 noninline_callee ();
1107 }
1108 Without penalizing this case, we usually inline noninline_callee
1109 into the inline_caller because overall_growth is small, preventing
1110 further inlining of inline_caller.
1111
1112 Penalize only callgraph edges to functions with small overall
1113 growth ...
1114 */
1115 if (growth > overall_growth
1116 /* ... and having only one caller which is not inlined ... */
1117 && callee_info->single_caller
1118 && !edge->caller->global.inlined_to
1119 /* ... and edges executed only conditionally ... */
1120 && edge->sreal_frequency () < 1
1121 /* ... consider case where callee is not inline but caller is ... */
1122 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1123 && DECL_DECLARED_INLINE_P (caller->decl))
1124 /* ... or when early optimizers decided to split and edge
1125 frequency still indicates splitting is a win ... */
1126 || (callee->split_part && !caller->split_part
1127 && edge->sreal_frequency () * 100
1128 < PARAM_VALUE
1129 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
1130 /* ... and do not overwrite user specified hints. */
1131 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1132 || DECL_DECLARED_INLINE_P (caller->decl)))))
1133 {
1134 ipa_fn_summary *caller_info = ipa_fn_summaries->get_create (caller);
1135 int caller_growth = caller_info->growth;
1136
1137 /* Only apply the penalty when the caller looks like an inline candidate
1138 and it is not called once. */
1139 if (!caller_info->single_caller && overall_growth < caller_growth
1140 && caller_info->inlinable
1141 && caller_info->size
1142 < (DECL_DECLARED_INLINE_P (caller->decl)
1143 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1144 {
1145 if (dump)
1146 fprintf (dump_file,
1147 " Wrapper penalty. Increasing growth %i to %i\n",
1148 overall_growth, caller_growth);
1149 overall_growth = caller_growth;
1150 }
1151 }
1152 if (overall_growth > 0)
1153 {
1154 /* Strongly prefer functions with few callers that can be inlined
1155 fully. The square root here leads to smaller binaries on average.
1156 Watch however for extreme cases and return to a linear function
1157 when growth is large. */
1158 if (overall_growth < 256)
1159 overall_growth *= overall_growth;
1160 else
1161 overall_growth += 256 * 256 - 256;
1162 denominator *= overall_growth;
1163 }
1164 denominator *= inlined_time;
1165
1166 badness = - numerator / denominator;
1167
1168 if (dump)
1169 {
1170 fprintf (dump_file,
1171 " %f: guessed profile. frequency %f, count %" PRId64
1172 " caller count %" PRId64
1173 " time w/o inlining %f, time with inlining %f"
1174 " overall growth %i (current) %i (original)"
1175 " %i (compensated)\n",
1176 badness.to_double (),
1177 edge->sreal_frequency ().to_double (),
1178 edge->count.ipa ().initialized_p () ? edge->count.ipa ().to_gcov_type () : -1,
1179 caller->count.ipa ().initialized_p () ? caller->count.ipa ().to_gcov_type () : -1,
1180 compute_uninlined_call_time (edge,
1181 unspec_edge_time).to_double (),
1182 inlined_time.to_double (),
1183 estimate_growth (callee),
1184 callee_info->growth, overall_growth);
1185 }
1186 }
1187 /* When function local profile is not available or it does not give
1188 useful information (ie frequency is zero), base the cost on
1189 loop nest and overall size growth, so we optimize for overall number
1190 of functions fully inlined in program. */
1191 else
1192 {
1193 int nest = MIN (ipa_call_summaries->get_create (edge)->loop_depth, 8);
1194 badness = growth;
1195
1196 /* Decrease badness if call is nested. */
1197 if (badness > 0)
1198 badness = badness >> nest;
1199 else
1200 badness = badness << nest;
1201 if (dump)
1202 fprintf (dump_file, " %f: no profile. nest %i\n",
1203 badness.to_double (), nest);
1204 }
1205 gcc_checking_assert (badness != 0);
1206
1207 if (edge->recursive_p ())
1208 badness = badness.shift (badness > 0 ? 4 : -4);
1209 if ((hints & (INLINE_HINT_indirect_call
1210 | INLINE_HINT_loop_iterations
1211 | INLINE_HINT_array_index
1212 | INLINE_HINT_loop_stride))
1213 || callee_info->growth <= 0)
1214 badness = badness.shift (badness > 0 ? -2 : 2);
1215 if (hints & (INLINE_HINT_same_scc))
1216 badness = badness.shift (badness > 0 ? 3 : -3);
1217 else if (hints & (INLINE_HINT_in_scc))
1218 badness = badness.shift (badness > 0 ? 2 : -2);
1219 else if (hints & (INLINE_HINT_cross_module))
1220 badness = badness.shift (badness > 0 ? 1 : -1);
1221 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1222 badness = badness.shift (badness > 0 ? -4 : 4);
1223 else if ((hints & INLINE_HINT_declared_inline))
1224 badness = badness.shift (badness > 0 ? -3 : 3);
1225 if (dump)
1226 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1227 return badness;
1228 }
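/* A worked example of the guessed-profile badness formula above, with made-up
   numbers (no real profile counts, so the count scaling is skipped):

     time saved by inlining   = uninlined - inlined = 2
     edge growth              = 10
     callee overall growth    = 20  -> squared (being < 256) to 400
     inlined call time        = 50

     badness = - 2 / (10 * 400 * 50) = -0.00001

   A callee with growth 4, overall growth 4 (-> 16) and the same times gets
   badness = - 2 / (4 * 16 * 50) = -0.000625, i.e. a smaller key, so the heap
   (ordered by increasing badness) inlines it first.  */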
1229
1230 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1231 static inline void
1232 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1233 {
1234 sreal badness = edge_badness (edge, false);
1235 if (edge->aux)
1236 {
1237 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1238 gcc_checking_assert (n->get_data () == edge);
1239
1240 /* fibonacci_heap::replace_key does busy updating of the
1241 heap that is unnecessarily expensive.
1242 We do lazy increases: after extracting the minimum, if the key
1243 turns out to be out of date, it is re-inserted into the heap
1244 with the correct value. */
1245 if (badness < n->get_key ())
1246 {
1247 if (dump_file && (dump_flags & TDF_DETAILS))
1248 {
1249 fprintf (dump_file,
1250 " decreasing badness %s -> %s, %f to %f\n",
1251 edge->caller->dump_name (),
1252 edge->callee->dump_name (),
1253 n->get_key ().to_double (),
1254 badness.to_double ());
1255 }
1256 heap->decrease_key (n, badness);
1257 }
1258 }
1259 else
1260 {
1261 if (dump_file && (dump_flags & TDF_DETAILS))
1262 {
1263 fprintf (dump_file,
1264 " enqueuing call %s -> %s, badness %f\n",
1265 edge->caller->dump_name (),
1266 edge->callee->dump_name (),
1267 badness.to_double ());
1268 }
1269 edge->aux = heap->insert (badness, edge);
1270 }
1271 }
1272
1273
1274 /* NODE was inlined.
1275 All caller edges need to be reset because
1276 size estimates change. Similarly callees need resetting
1277 because better context may be known. */
1278
1279 static void
1280 reset_edge_caches (struct cgraph_node *node)
1281 {
1282 struct cgraph_edge *edge;
1283 struct cgraph_edge *e = node->callees;
1284 struct cgraph_node *where = node;
1285 struct ipa_ref *ref;
1286
1287 if (where->global.inlined_to)
1288 where = where->global.inlined_to;
1289
1290 if (edge_growth_cache != NULL)
1291 for (edge = where->callers; edge; edge = edge->next_caller)
1292 if (edge->inline_failed)
1293 edge_growth_cache->remove (edge);
1294
1295 FOR_EACH_ALIAS (where, ref)
1296 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1297
1298 if (!e)
1299 return;
1300
1301 while (true)
1302 if (!e->inline_failed && e->callee->callees)
1303 e = e->callee->callees;
1304 else
1305 {
1306 if (edge_growth_cache != NULL && e->inline_failed)
1307 edge_growth_cache->remove (e);
1308 if (e->next_callee)
1309 e = e->next_callee;
1310 else
1311 {
1312 do
1313 {
1314 if (e->caller == node)
1315 return;
1316 e = e->caller->callers;
1317 }
1318 while (!e->next_callee);
1319 e = e->next_callee;
1320 }
1321 }
1322 }
1323
1324 /* Recompute HEAP nodes for each caller of NODE.
1325 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1326 When CHECK_INLINABLITY_FOR is set, re-check only that the specified edge
1327 is inlinable. Otherwise check all edges. */
1328
1329 static void
1330 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1331 bitmap updated_nodes,
1332 struct cgraph_edge *check_inlinablity_for)
1333 {
1334 struct cgraph_edge *edge;
1335 struct ipa_ref *ref;
1336
1337 if ((!node->alias && !ipa_fn_summaries->get_create (node)->inlinable)
1338 || node->global.inlined_to)
1339 return;
1340 if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
1341 return;
1342
1343 FOR_EACH_ALIAS (node, ref)
1344 {
1345 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1346 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1347 }
1348
1349 for (edge = node->callers; edge; edge = edge->next_caller)
1350 if (edge->inline_failed)
1351 {
1352 if (!check_inlinablity_for
1353 || check_inlinablity_for == edge)
1354 {
1355 if (can_inline_edge_p (edge, false)
1356 && want_inline_small_function_p (edge, false)
1357 && can_inline_edge_by_limits_p (edge, false))
1358 update_edge_key (heap, edge);
1359 else if (edge->aux)
1360 {
1361 report_inline_failed_reason (edge);
1362 heap->delete_node ((edge_heap_node_t *) edge->aux);
1363 edge->aux = NULL;
1364 }
1365 }
1366 else if (edge->aux)
1367 update_edge_key (heap, edge);
1368 }
1369 }
1370
1371 /* Recompute HEAP nodes for each uninlined call in NODE.
1372 This is used when we know that edge badnesses are going only to increase
1373 (we introduced new call site) and thus all we need is to insert newly
1374 created edges into heap. */
1375
1376 static void
1377 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1378 bitmap updated_nodes)
1379 {
1380 struct cgraph_edge *e = node->callees;
1381
1382 if (!e)
1383 return;
1384 while (true)
1385 if (!e->inline_failed && e->callee->callees)
1386 e = e->callee->callees;
1387 else
1388 {
1389 enum availability avail;
1390 struct cgraph_node *callee;
1391 /* We do not reset the callee growth cache here. Since we added a new call,
1392 growth could only have increased and consequently the badness metric
1393 doesn't need updating. */
1394 if (e->inline_failed
1395 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1396 && ipa_fn_summaries->get_create (callee)->inlinable
1397 && avail >= AVAIL_AVAILABLE
1398 && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
1399 {
1400 if (can_inline_edge_p (e, false)
1401 && want_inline_small_function_p (e, false)
1402 && can_inline_edge_by_limits_p (e, false))
1403 update_edge_key (heap, e);
1404 else if (e->aux)
1405 {
1406 report_inline_failed_reason (e);
1407 heap->delete_node ((edge_heap_node_t *) e->aux);
1408 e->aux = NULL;
1409 }
1410 }
1411 if (e->next_callee)
1412 e = e->next_callee;
1413 else
1414 {
1415 do
1416 {
1417 if (e->caller == node)
1418 return;
1419 e = e->caller->callers;
1420 }
1421 while (!e->next_callee);
1422 e = e->next_callee;
1423 }
1424 }
1425 }
1426
1427 /* Enqueue all recursive calls from NODE into priority queue depending on
1428 how likely we want to recursively inline the call. */
1429
1430 static void
1431 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1432 edge_heap_t *heap)
1433 {
1434 struct cgraph_edge *e;
1435 enum availability avail;
1436
1437 for (e = where->callees; e; e = e->next_callee)
1438 if (e->callee == node
1439 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1440 && avail > AVAIL_INTERPOSABLE))
1441 heap->insert (-e->sreal_frequency (), e);
1442 for (e = where->callees; e; e = e->next_callee)
1443 if (!e->inline_failed)
1444 lookup_recursive_calls (node, e->callee, heap);
1445 }
1446
1447 /* Decide on recursive inlining: in case the function has recursive calls,
1448 inline until the body size reaches the given limit. If any new indirect edges
1449 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1450 is NULL. */
1451
1452 static bool
1453 recursive_inlining (struct cgraph_edge *edge,
1454 vec<cgraph_edge *> *new_edges)
1455 {
1456 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1457 edge_heap_t heap (sreal::min ());
1458 struct cgraph_node *node;
1459 struct cgraph_edge *e;
1460 struct cgraph_node *master_clone = NULL, *next;
1461 int depth = 0;
1462 int n = 0;
1463
1464 node = edge->caller;
1465 if (node->global.inlined_to)
1466 node = node->global.inlined_to;
1467
1468 if (DECL_DECLARED_INLINE_P (node->decl))
1469 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1470
1471 /* Make sure that function is small enough to be considered for inlining. */
1472 if (estimate_size_after_inlining (node, edge) >= limit)
1473 return false;
1474 lookup_recursive_calls (node, node, &heap);
1475 if (heap.empty ())
1476 return false;
1477
1478 if (dump_file)
1479 fprintf (dump_file,
1480 " Performing recursive inlining on %s\n",
1481 node->name ());
1482
1483 /* Do the inlining and update the list of recursive calls during the process. */
1484 while (!heap.empty ())
1485 {
1486 struct cgraph_edge *curr = heap.extract_min ();
1487 struct cgraph_node *cnode, *dest = curr->callee;
1488
1489 if (!can_inline_edge_p (curr, true)
1490 || !can_inline_edge_by_limits_p (curr, true))
1491 continue;
1492
1493 /* MASTER_CLONE is produced in the case we already started modifying
1494 the function. Be sure to redirect the edge to the original body before
1495 estimating growths, otherwise we will be seeing growths after inlining
1496 the already modified body. */
1497 if (master_clone)
1498 {
1499 curr->redirect_callee (master_clone);
1500 if (edge_growth_cache != NULL)
1501 edge_growth_cache->remove (curr);
1502 }
1503
1504 if (estimate_size_after_inlining (node, curr) > limit)
1505 {
1506 curr->redirect_callee (dest);
1507 if (edge_growth_cache != NULL)
1508 edge_growth_cache->remove (curr);
1509 break;
1510 }
1511
1512 depth = 1;
1513 for (cnode = curr->caller;
1514 cnode->global.inlined_to; cnode = cnode->callers->caller)
1515 if (node->decl
1516 == curr->callee->ultimate_alias_target ()->decl)
1517 depth++;
1518
1519 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1520 {
1521 curr->redirect_callee (dest);
1522 if (edge_growth_cache != NULL)
1523 edge_growth_cache->remove (curr);
1524 continue;
1525 }
1526
1527 if (dump_file)
1528 {
1529 fprintf (dump_file,
1530 " Inlining call of depth %i", depth);
1531 if (node->count.nonzero_p ())
1532 {
1533 fprintf (dump_file, " called approx. %.2f times per call",
1534 (double)curr->count.to_gcov_type ()
1535 / node->count.to_gcov_type ());
1536 }
1537 fprintf (dump_file, "\n");
1538 }
1539 if (!master_clone)
1540 {
1541 /* We need original clone to copy around. */
1542 master_clone = node->create_clone (node->decl, node->count,
1543 false, vNULL, true, NULL, NULL);
1544 for (e = master_clone->callees; e; e = e->next_callee)
1545 if (!e->inline_failed)
1546 clone_inlined_nodes (e, true, false, NULL);
1547 curr->redirect_callee (master_clone);
1548 if (edge_growth_cache != NULL)
1549 edge_growth_cache->remove (curr);
1550 }
1551
1552 inline_call (curr, false, new_edges, &overall_size, true);
1553 lookup_recursive_calls (node, curr->callee, &heap);
1554 n++;
1555 }
1556
1557 if (!heap.empty () && dump_file)
1558 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1559
1560 if (!master_clone)
1561 return false;
1562
1563 if (dump_file)
1564 fprintf (dump_file,
1565 "\n Inlined %i times, "
1566 "body grown from size %i to %i, time %f to %f\n", n,
1567 ipa_fn_summaries->get (master_clone)->size,
1568 ipa_fn_summaries->get (node)->size,
1569 ipa_fn_summaries->get (master_clone)->time.to_double (),
1570 ipa_fn_summaries->get (node)->time.to_double ());
1571
1572 /* Remove the master clone we used for inlining. We rely on the fact that
1573 clones inlined into the master clone get queued just before the master
1574 clone, so we don't need recursion. */
1575 for (node = symtab->first_function (); node != master_clone;
1576 node = next)
1577 {
1578 next = symtab->next_function (node);
1579 if (node->global.inlined_to == master_clone)
1580 node->remove ();
1581 }
1582 master_clone->remove ();
1583 return true;
1584 }
1585
1586
1587 /* Given whole compilation unit estimate of INSNS, compute how large we can
1588 allow the unit to grow. */
1589
1590 static int
1591 compute_max_insns (int insns)
1592 {
1593 int max_insns = insns;
1594 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1595 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1596
1597 return ((int64_t) max_insns
1598 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1599 }
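/* For example (using a made-up --param inline-unit-growth=20): a unit with
   50000 instructions may grow to 50000 * (100 + 20) / 100 = 60000 instructions
   through small-function inlining; very small units are first rounded up to
   PARAM_LARGE_UNIT_INSNS so that tiny programs still get a useful budget.  */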
1600
1601
1602 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1603
1604 static void
1605 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1606 {
1607 while (new_edges.length () > 0)
1608 {
1609 struct cgraph_edge *edge = new_edges.pop ();
1610
1611 gcc_assert (!edge->aux);
1612 if (edge->inline_failed
1613 && can_inline_edge_p (edge, true)
1614 && want_inline_small_function_p (edge, true)
1615 && can_inline_edge_by_limits_p (edge, true))
1616 edge->aux = heap->insert (edge_badness (edge, false), edge);
1617 }
1618 }
1619
1620 /* Remove EDGE from the fibheap. */
1621
1622 static void
1623 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1624 {
1625 if (e->aux)
1626 {
1627 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1628 e->aux = NULL;
1629 }
1630 }
1631
1632 /* Return true if speculation of edge E seems useful.
1633 If ANTICIPATE_INLINING is true, be conservative and hope that E
1634 may get inlined. */
1635
1636 bool
1637 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1638 {
1639 enum availability avail;
1640 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1641 e->caller);
1642 struct cgraph_edge *direct, *indirect;
1643 struct ipa_ref *ref;
1644
1645 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1646
1647 if (!e->maybe_hot_p ())
1648 return false;
1649
1650 /* See if IP optimizations found something potentially useful about the
1651 function. For now we look only for CONST/PURE flags. Almost everything
1652 else we propagate is useless. */
1653 if (avail >= AVAIL_AVAILABLE)
1654 {
1655 int ecf_flags = flags_from_decl_or_type (target->decl);
1656 if (ecf_flags & ECF_CONST)
1657 {
1658 e->speculative_call_info (direct, indirect, ref);
1659 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1660 return true;
1661 }
1662 else if (ecf_flags & ECF_PURE)
1663 {
1664 e->speculative_call_info (direct, indirect, ref);
1665 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1666 return true;
1667 }
1668 }
1669 /* If we did not manage to inline the function nor redirect it
1670 to an ipa-cp clone (recognized by the local flag being set),
1671 it is probably pointless to keep the speculation unless hardware
1672 is missing an indirect call predictor. */
1673 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1674 return false;
1675 /* For overwritable targets there is not much to do. */
1676 if (e->inline_failed
1677 && (!can_inline_edge_p (e, false)
1678 || !can_inline_edge_by_limits_p (e, false, true)))
1679 return false;
1680 /* OK, speculation seems interesting. */
1681 return true;
1682 }
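/* A sketch of the situation this decides about (illustrative only; hook and
   the surrounding code are made up):

     int (*hook) (int);          // usually points to one const function
     ... sum += hook (i); ...    // speculative direct edge added earlier

   Keeping the speculation is useful when the likely target is ECF_CONST or
   ECF_PURE and the indirect call is not already known to be so, because the
   guarded direct call can then be moved or CSEd even if it never gets
   inlined; otherwise an uninlined, non-local target brings little benefit
   beyond what the hardware indirect-branch predictor already provides.  */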
1683
1684 /* We know that EDGE is not going to be inlined.
1685 See if we can remove speculation. */
1686
1687 static void
1688 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1689 {
1690 if (edge->speculative && !speculation_useful_p (edge, false))
1691 {
1692 struct cgraph_node *node = edge->caller;
1693 struct cgraph_node *where = node->global.inlined_to
1694 ? node->global.inlined_to : node;
1695 auto_bitmap updated_nodes;
1696
1697 if (edge->count.ipa ().initialized_p ())
1698 spec_rem += edge->count.ipa ();
1699 edge->resolve_speculation ();
1700 reset_edge_caches (where);
1701 ipa_update_overall_fn_summary (where);
1702 update_caller_keys (edge_heap, where,
1703 updated_nodes, NULL);
1704 update_callee_keys (edge_heap, where,
1705 updated_nodes);
1706 }
1707 }
1708
1709 /* Return true if NODE should be accounted for in the overall size estimate.
1710 Skip all nodes optimized for size so we can measure the growth of the hot
1711 part of the program regardless of the padding. */
1712
1713 bool
1714 inline_account_function_p (struct cgraph_node *node)
1715 {
1716 return (!DECL_EXTERNAL (node->decl)
1717 && !opt_for_fn (node->decl, optimize_size)
1718 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1719 }
1720
1721 /* Count the number of callers of NODE and store it into DATA (which
1722 points to an int). Worker for cgraph_for_node_and_aliases. */
1723
1724 static bool
1725 sum_callers (struct cgraph_node *node, void *data)
1726 {
1727 struct cgraph_edge *e;
1728 int *num_calls = (int *)data;
1729
1730 for (e = node->callers; e; e = e->next_caller)
1731 (*num_calls)++;
1732 return false;
1733 }
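/* Typical use, as done in inline_small_functions below:

     int num_calls = 0;
     node->call_for_symbol_and_aliases (sum_callers, &num_calls, true);

   after which num_calls holds the number of callers of NODE and its
   aliases.  */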
1734
1735 /* We use a greedy algorithm for inlining of small functions:
1736 all inline candidates are put into a prioritized heap ordered by
1737 increasing badness.
1738
1739 The inlining of small functions is bounded by the unit growth parameters. */
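/* In outline (a simplified sketch of the loop in inline_small_functions
   below, not the exact code):

     populate the heap with all inlinable edges keyed by badness;
     while (heap not empty)
       {
         edge = extract edge with smallest badness;
         if (the cached badness is stale)
           re-insert the edge with the recomputed key and continue;
         if (limits allow and inlining still looks profitable)
           inline edge, then update keys of affected callers and callees;
       }  */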
1740
1741 static void
1742 inline_small_functions (void)
1743 {
1744 struct cgraph_node *node;
1745 struct cgraph_edge *edge;
1746 edge_heap_t edge_heap (sreal::min ());
1747 auto_bitmap updated_nodes;
1748 int min_size, max_size;
1749 auto_vec<cgraph_edge *> new_indirect_edges;
1750 int initial_size = 0;
1751 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1752 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1753 new_indirect_edges.create (8);
1754
1755 edge_removal_hook_holder
1756 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1757
1758 /* Compute overall unit size and other global parameters used by badness
1759 metrics. */
1760
1761 max_count = profile_count::uninitialized ();
1762 ipa_reduced_postorder (order, true, true, NULL);
1763 free (order);
1764
1765 FOR_EACH_DEFINED_FUNCTION (node)
1766 if (!node->global.inlined_to)
1767 {
1768 if (!node->alias && node->analyzed
1769 && (node->has_gimple_body_p () || node->thunk.thunk_p)
1770 && opt_for_fn (node->decl, optimize))
1771 {
1772 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
1773 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1774
1775 /* Do not account external functions; they will be optimized out
1776 if not inlined. Also only count the non-cold portion of the program. */
1777 if (inline_account_function_p (node))
1778 initial_size += info->size;
1779 info->growth = estimate_growth (node);
1780
1781 int num_calls = 0;
1782 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1783 true);
1784 if (num_calls == 1)
1785 info->single_caller = true;
1786 if (dfs && dfs->next_cycle)
1787 {
1788 struct cgraph_node *n2;
1789 int id = dfs->scc_no + 1;
1790 for (n2 = node; n2;
1791 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1792 if (opt_for_fn (n2->decl, optimize))
1793 {
1794 ipa_fn_summary *info2 = ipa_fn_summaries->get_create (n2);
1795 if (info2->scc_no)
1796 break;
1797 info2->scc_no = id;
1798 }
1799 }
1800 }
1801
1802 for (edge = node->callers; edge; edge = edge->next_caller)
1803 max_count = max_count.max (edge->count.ipa ());
1804 }
1805 ipa_free_postorder_info ();
1806 edge_growth_cache
1807 = new call_summary<edge_growth_cache_entry *> (symtab, false);
1808
1809 if (dump_file)
1810 fprintf (dump_file,
1811 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1812 initial_size);
1813
1814 overall_size = initial_size;
1815 max_size = compute_max_insns (overall_size);
1816 min_size = overall_size;
1817
1818 /* Populate the heap with all edges we might inline. */
1819
1820 FOR_EACH_DEFINED_FUNCTION (node)
1821 {
1822 bool update = false;
1823 struct cgraph_edge *next = NULL;
1824 bool has_speculative = false;
1825
1826 if (!opt_for_fn (node->decl, optimize))
1827 continue;
1828
1829 if (dump_file)
1830 fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
1831
1832 for (edge = node->callees; edge; edge = next)
1833 {
1834 next = edge->next_callee;
1835 if (edge->inline_failed
1836 && !edge->aux
1837 && can_inline_edge_p (edge, true)
1838 && want_inline_small_function_p (edge, true)
1839 && can_inline_edge_by_limits_p (edge, true)
1840 && edge->inline_failed)
1841 {
1842 gcc_assert (!edge->aux);
1843 update_edge_key (&edge_heap, edge);
1844 }
1845 if (edge->speculative)
1846 has_speculative = true;
1847 }
1848 if (has_speculative)
1849 for (edge = node->callees; edge; edge = next)
1850 if (edge->speculative && !speculation_useful_p (edge,
1851 edge->aux != NULL))
1852 {
1853 edge->resolve_speculation ();
1854 update = true;
1855 }
1856 if (update)
1857 {
1858 struct cgraph_node *where = node->global.inlined_to
1859 ? node->global.inlined_to : node;
1860 ipa_update_overall_fn_summary (where);
1861 reset_edge_caches (where);
1862 update_caller_keys (&edge_heap, where,
1863 updated_nodes, NULL);
1864 update_callee_keys (&edge_heap, where,
1865 updated_nodes);
1866 bitmap_clear (updated_nodes);
1867 }
1868 }
1869
1870 gcc_assert (in_lto_p
1871 || !(max_count > 0)
1872 || (profile_info && flag_branch_probabilities));
1873
1874 while (!edge_heap.empty ())
1875 {
1876 int old_size = overall_size;
1877 struct cgraph_node *where, *callee;
1878 sreal badness = edge_heap.min_key ();
1879 sreal current_badness;
1880 int growth;
1881
1882 edge = edge_heap.extract_min ();
1883 gcc_assert (edge->aux);
1884 edge->aux = NULL;
1885 if (!edge->inline_failed || !edge->callee->analyzed)
1886 continue;
1887
1888 #if CHECKING_P
1889 /* Be sure that the caches are kept consistent.
1890 This check is affected by scaling roundoff errors when compiling for
1891 IPA, thus we skip it in that case. */
1892 if (!edge->callee->count.ipa_p ()
1893 && (!max_count.initialized_p () || !max_count.nonzero_p ()))
1894 {
1895 sreal cached_badness = edge_badness (edge, false);
1896
1897 int old_size_est = estimate_edge_size (edge);
1898 sreal old_time_est = estimate_edge_time (edge);
1899 int old_hints_est = estimate_edge_hints (edge);
1900
1901 if (edge_growth_cache != NULL)
1902 edge_growth_cache->remove (edge);
1903 gcc_assert (old_size_est == estimate_edge_size (edge));
1904 gcc_assert (old_time_est == estimate_edge_time (edge));
1905 /* FIXME:
1906
1907 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1908
1909 fails with profile feedback because some hints depend on the
1910 maybe_hot_edge_p predicate, and because the callee gets inlined into
1911 other calls, the edge may become cold.
1912 This ought to be fixed by computing relative probabilities
1913 for the given invocation, but that will be better done once the whole
1914 code is converted to sreals. Disable for now and revert to the "wrong"
1915 value so the enable/disable checking paths agree. */
1916 edge_growth_cache->get (edge)->hints = old_hints_est + 1;
1917
1918 /* When updating the edge costs, we only decrease badness in the keys.
1919 Increases of badness are handled lazily; when we see a key with an
1920 out-of-date value on it, we re-insert it now. */
1921 current_badness = edge_badness (edge, false);
1922 gcc_assert (cached_badness == current_badness);
1923 gcc_assert (current_badness >= badness);
1924 }
1925 else
1926 current_badness = edge_badness (edge, false);
1927 #else
1928 current_badness = edge_badness (edge, false);
1929 #endif
1930 if (current_badness != badness)
1931 {
1932 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1933 {
1934 edge->aux = edge_heap.insert (current_badness, edge);
1935 continue;
1936 }
1937 else
1938 badness = current_badness;
1939 }
1940
1941 if (!can_inline_edge_p (edge, true)
1942 || !can_inline_edge_by_limits_p (edge, true))
1943 {
1944 resolve_noninline_speculation (&edge_heap, edge);
1945 continue;
1946 }
1947
1948 callee = edge->callee->ultimate_alias_target ();
1949 growth = estimate_edge_growth (edge);
1950 if (dump_file)
1951 {
1952 fprintf (dump_file,
1953 "\nConsidering %s with %i size\n",
1954 callee->dump_name (),
1955 ipa_fn_summaries->get_create (callee)->size);
1956 fprintf (dump_file,
1957 " to be inlined into %s in %s:%i\n"
1958 " Estimated badness is %f, frequency %.2f.\n",
1959 edge->caller->dump_name (),
1960 edge->call_stmt
1961 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1962 edge->call_stmt))
1963 > BUILTINS_LOCATION)
1964 ? gimple_filename ((const gimple *) edge->call_stmt)
1965 : "unknown",
1966 edge->call_stmt
1967 ? gimple_lineno ((const gimple *) edge->call_stmt)
1968 : -1,
1969 badness.to_double (),
1970 edge->sreal_frequency ().to_double ());
1971 if (edge->count.ipa ().initialized_p ())
1972 {
1973 fprintf (dump_file, " Called ");
1974 edge->count.ipa ().dump (dump_file);
1975 fprintf (dump_file, " times\n");
1976 }
1977 if (dump_flags & TDF_DETAILS)
1978 edge_badness (edge, true);
1979 }
1980
1981 if (overall_size + growth > max_size
1982 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1983 {
1984 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1985 report_inline_failed_reason (edge);
1986 resolve_noninline_speculation (&edge_heap, edge);
1987 continue;
1988 }
1989
1990 if (!want_inline_small_function_p (edge, true))
1991 {
1992 resolve_noninline_speculation (&edge_heap, edge);
1993 continue;
1994 }
1995
1996 /* Heuristics for inlining small functions work poorly for
1997 recursive calls, where we produce effects similar to loop unrolling.
1998 When inlining such an edge seems profitable, leave the decision to
1999 the special-purpose recursive inliner. */
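/* Illustrative example (user-level code, not from this file): for

     int sum (int *p, int n) { return n ? p[0] + sum (p + 1, n - 1) : 0; }

   inlining the recursive call into sum itself duplicates the body much like
   unrolling the equivalent loop, which is why the decision is deferred to
   recursive_inlining rather than handled by the small-function
   heuristics.  */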
2000 if (edge->recursive_p ())
2001 {
2002 where = edge->caller;
2003 if (where->global.inlined_to)
2004 where = where->global.inlined_to;
2005 if (!recursive_inlining (edge,
2006 opt_for_fn (edge->caller->decl,
2007 flag_indirect_inlining)
2008 ? &new_indirect_edges : NULL))
2009 {
2010 edge->inline_failed = CIF_RECURSIVE_INLINING;
2011 resolve_noninline_speculation (&edge_heap, edge);
2012 continue;
2013 }
2014 reset_edge_caches (where);
2015 /* Recursive inliner inlines all recursive calls of the function
2016 at once. Consequently we need to update all callee keys. */
2017 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
2018 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2019 update_callee_keys (&edge_heap, where, updated_nodes);
2020 bitmap_clear (updated_nodes);
2021 }
2022 else
2023 {
2024 struct cgraph_node *outer_node = NULL;
2025 int depth = 0;
2026
2027 /* Consider the case where the self-recursive function A is inlined
2028 into B. This is a desired optimization in some cases, since it
2029 leads to an effect similar to loop peeling and we might completely
2030 optimize out the recursive call. However, we must be extra
2031 selective. */
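/* E.g. (illustration): if B calls a self-recursive function A and A is
   inlined into B, one level of the recursion is peeled into B; inlining A
   again into that inlined copy peels another level, which is what the
   depth counter below keeps track of.  */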
2032
2033 where = edge->caller;
2034 while (where->global.inlined_to)
2035 {
2036 if (where->decl == callee->decl)
2037 outer_node = where, depth++;
2038 where = where->callers->caller;
2039 }
2040 if (outer_node
2041 && !want_inline_self_recursive_call_p (edge, outer_node,
2042 true, depth))
2043 {
2044 edge->inline_failed
2045 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2046 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2047 resolve_noninline_speculation (&edge_heap, edge);
2048 continue;
2049 }
2050 else if (depth && dump_file)
2051 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2052
2053 gcc_checking_assert (!callee->global.inlined_to);
2054 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2055 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2056
2057 reset_edge_caches (edge->callee);
2058
2059 update_callee_keys (&edge_heap, where, updated_nodes);
2060 }
2061 where = edge->caller;
2062 if (where->global.inlined_to)
2063 where = where->global.inlined_to;
2064
2065 /* Our profitability metric can depend on local properties
2066 such as the number of inlinable calls and the size of the function body.
2067 After inlining these properties might change for the function we
2068 inlined into (since its body size changed) and for the functions
2069 called by the function we inlined (since the number of their inlinable
2070 callers might change). */
2071 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2072 /* Offline copy count has possibly changed, recompute if profile is
2073 available. */
2074 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2075 if (n != edge->callee && n->analyzed && n->count.ipa ().initialized_p ())
2076 update_callee_keys (&edge_heap, n, updated_nodes);
2077 bitmap_clear (updated_nodes);
2078
2079 if (dump_file)
2080 {
2081 ipa_fn_summary *s = ipa_fn_summaries->get_create (edge->caller);
2082 fprintf (dump_file,
2083 " Inlined %s into %s which now has time %f and size %i, "
2084 "net change of %+i.\n",
2085 xstrdup_for_dump (edge->callee->name ()),
2086 xstrdup_for_dump (edge->caller->name ()),
2087 s->time.to_double (),
2088 s->size,
2089 overall_size - old_size);
2090 }
2091 if (min_size > overall_size)
2092 {
2093 min_size = overall_size;
2094 max_size = compute_max_insns (min_size);
2095
2096 if (dump_file)
2097 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2098 }
2099 }
2100
2101 free_growth_caches ();
2102 if (dump_file)
2103 fprintf (dump_file,
2104 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2105 initial_size, overall_size,
2106 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2107 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2108 }
2109
2110 /* Flatten NODE. Performed both during early inlining and
2111 at IPA inlining time. */
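/* For reference, the attribute this implements looks like (illustrative
   user-level code; setup and draw are made-up names):

     __attribute__ ((flatten)) void render (void)
     {
       setup ();   // setup, draw and everything they call get inlined
       draw ();    // here, as far as the checks below allow
     }

   i.e. every call in the body is inlined recursively, stopping at cycles,
   recursive calls and calls that cannot be inlined.  */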
2112
2113 static void
2114 flatten_function (struct cgraph_node *node, bool early)
2115 {
2116 struct cgraph_edge *e;
2117
2118 /* We shouldn't be called recursively when we are being processed. */
2119 gcc_assert (node->aux == NULL);
2120
2121 node->aux = (void *) node;
2122
2123 for (e = node->callees; e; e = e->next_callee)
2124 {
2125 struct cgraph_node *orig_callee;
2126 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2127
2128 /* We've hit a cycle? It is time to give up. */
2129 if (callee->aux)
2130 {
2131 if (dump_file)
2132 fprintf (dump_file,
2133 "Not inlining %s into %s to avoid cycle.\n",
2134 xstrdup_for_dump (callee->name ()),
2135 xstrdup_for_dump (e->caller->name ()));
2136 if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
2137 e->inline_failed = CIF_RECURSIVE_INLINING;
2138 continue;
2139 }
2140
2141 /* When the edge is already inlined, we just need to recurse into
2142 it in order to fully flatten the leaves. */
2143 if (!e->inline_failed)
2144 {
2145 flatten_function (callee, early);
2146 continue;
2147 }
2148
2149 /* The flatten attribute needs to be processed during late inlining.
2150 However, for extra code quality we do flattening during early
2151 optimization, too. */
2152 if (!early
2153 ? !can_inline_edge_p (e, true)
2154 && !can_inline_edge_by_limits_p (e, true)
2155 : !can_early_inline_edge_p (e))
2156 continue;
2157
2158 if (e->recursive_p ())
2159 {
2160 if (dump_file)
2161 fprintf (dump_file, "Not inlining: recursive call.\n");
2162 continue;
2163 }
2164
2165 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2166 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2167 {
2168 if (dump_file)
2169 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2170 continue;
2171 }
2172
2173 /* Inline the edge and flatten the inline clone. Avoid
2174 recursing through the original node if the node was cloned. */
2175 if (dump_file)
2176 fprintf (dump_file, " Inlining %s into %s.\n",
2177 xstrdup_for_dump (callee->name ()),
2178 xstrdup_for_dump (e->caller->name ()));
2179 orig_callee = callee;
2180 inline_call (e, true, NULL, NULL, false);
2181 if (e->callee != orig_callee)
2182 orig_callee->aux = (void *) node;
2183 flatten_function (e->callee, early);
2184 if (e->callee != orig_callee)
2185 orig_callee->aux = NULL;
2186 }
2187
2188 node->aux = NULL;
2189 if (!node->global.inlined_to)
2190 ipa_update_overall_fn_summary (node);
2191 }
2192
2193 /* Inline NODE to all callers. Worker for cgraph_for_node_and_aliases.
2194 DATA points to the number of calls originally found so we avoid
2195 infinite recursion. */
2196
2197 static bool
2198 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2199 hash_set<cgraph_node *> *callers)
2200 {
2201 int *num_calls = (int *)data;
2202 bool callee_removed = false;
2203
2204 while (node->callers && !node->global.inlined_to)
2205 {
2206 struct cgraph_node *caller = node->callers->caller;
2207
2208 if (!can_inline_edge_p (node->callers, true)
2209 || !can_inline_edge_by_limits_p (node->callers, true)
2210 || node->callers->recursive_p ())
2211 {
2212 if (dump_file)
2213 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2214 *num_calls = 0;
2215 return false;
2216 }
2217
2218 if (dump_file)
2219 {
2220 fprintf (dump_file,
2221 "\nInlining %s size %i.\n",
2222 node->name (),
2223 ipa_fn_summaries->get_create (node)->size);
2224 fprintf (dump_file,
2225 " Called once from %s %i insns.\n",
2226 node->callers->caller->name (),
2227 ipa_fn_summaries->get_create (node->callers->caller)->size);
2228 }
2229
2230 /* Remember which callers we inlined to, delaying updating the
2231 overall summary. */
2232 callers->add (node->callers->caller);
2233 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2234 if (dump_file)
2235 fprintf (dump_file,
2236 " Inlined into %s which now has %i size\n",
2237 caller->name (),
2238 ipa_fn_summaries->get_create (caller)->size);
2239 if (!(*num_calls)--)
2240 {
2241 if (dump_file)
2242 fprintf (dump_file, "New calls found; giving up.\n");
2243 return callee_removed;
2244 }
2245 if (callee_removed)
2246 return true;
2247 }
2248 return false;
2249 }
2250
2251 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2252 update. */
2253
2254 static bool
2255 inline_to_all_callers (struct cgraph_node *node, void *data)
2256 {
2257 hash_set<cgraph_node *> callers;
2258 bool res = inline_to_all_callers_1 (node, data, &callers);
2259 /* Perform the delayed update of the overall summary of all callers
2260 processed. This avoids quadratic behavior in the cases where
2261 we have a lot of calls to the same function. */
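/* E.g. with N calls to the same function, recomputing the caller's overall
   summary after every single inline would redo roughly the same update N
   times (quadratic work over all edges); collecting the callers in the
   hash_set and updating each one once afterwards keeps it linear.  */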
2262 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2263 i != callers.end (); ++i)
2264 ipa_update_overall_fn_summary (*i);
2265 return res;
2266 }
2267
2268 /* Output overall time estimate. */
2269 static void
2270 dump_overall_stats (void)
2271 {
2272 sreal sum_weighted = 0, sum = 0;
2273 struct cgraph_node *node;
2274
2275 FOR_EACH_DEFINED_FUNCTION (node)
2276 if (!node->global.inlined_to
2277 && !node->alias)
2278 {
2279 sreal time = ipa_fn_summaries->get_create (node)->time;
2280 sum += time;
2281 if (node->count.ipa ().initialized_p ())
2282 sum_weighted += time * node->count.ipa ().to_gcov_type ();
2283 }
2284 fprintf (dump_file, "Overall time estimate: "
2285 "%f weighted by profile: "
2286 "%f\n", sum.to_double (), sum_weighted.to_double ());
2287 }
2288
2289 /* Output some useful stats about inlining. */
2290
2291 static void
2292 dump_inline_stats (void)
2293 {
2294 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2295 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2296 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2297 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2298 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2299 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2300 int64_t reason[CIF_N_REASONS][2];
2301 sreal reason_freq[CIF_N_REASONS];
2302 int i;
2303 struct cgraph_node *node;
2304
2305 memset (reason, 0, sizeof (reason));
2306 for (i = 0; i < CIF_N_REASONS; i++)
2307 reason_freq[i] = 0;
2308 FOR_EACH_DEFINED_FUNCTION (node)
2309 {
2310 struct cgraph_edge *e;
2311 for (e = node->callees; e; e = e->next_callee)
2312 {
2313 if (e->inline_failed)
2314 {
2315 if (e->count.ipa ().initialized_p ())
2316 reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
2317 reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
2318 reason[(int) e->inline_failed][1] ++;
2319 if (DECL_VIRTUAL_P (e->callee->decl)
2320 && e->count.ipa ().initialized_p ())
2321 {
2322 if (e->indirect_inlining_edge)
2323 noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2324 else
2325 noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2326 }
2327 else if (e->count.ipa ().initialized_p ())
2328 {
2329 if (e->indirect_inlining_edge)
2330 noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2331 else
2332 noninlined_cnt += e->count.ipa ().to_gcov_type ();
2333 }
2334 }
2335 else if (e->count.ipa ().initialized_p ())
2336 {
2337 if (e->speculative)
2338 {
2339 if (DECL_VIRTUAL_P (e->callee->decl))
2340 inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
2341 else
2342 inlined_speculative += e->count.ipa ().to_gcov_type ();
2343 }
2344 else if (DECL_VIRTUAL_P (e->callee->decl))
2345 {
2346 if (e->indirect_inlining_edge)
2347 inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2348 else
2349 inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2350 }
2351 else
2352 {
2353 if (e->indirect_inlining_edge)
2354 inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2355 else
2356 inlined_cnt += e->count.ipa ().to_gcov_type ();
2357 }
2358 }
2359 }
2360 for (e = node->indirect_calls; e; e = e->next_callee)
2361 if (e->indirect_info->polymorphic
2362 && e->count.ipa ().initialized_p ())
2363 indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
2364 else if (e->count.ipa ().initialized_p ())
2365 indirect_cnt += e->count.ipa ().to_gcov_type ();
2366 }
2367 if (max_count.initialized_p ())
2368 {
2369 fprintf (dump_file,
2370 "Inlined %" PRId64 " + speculative "
2371 "%" PRId64 " + speculative polymorphic "
2372 "%" PRId64 " + previously indirect "
2373 "%" PRId64 " + virtual "
2374 "%" PRId64 " + virtual and previously indirect "
2375 "%" PRId64 "\n" "Not inlined "
2376 "%" PRId64 " + previously indirect "
2377 "%" PRId64 " + virtual "
2378 "%" PRId64 " + virtual and previously indirect "
2379 "%" PRId64 " + stil indirect "
2380 "%" PRId64 " + still indirect polymorphic "
2381 "%" PRId64 "\n", inlined_cnt,
2382 inlined_speculative, inlined_speculative_ply,
2383 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2384 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2385 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2386 fprintf (dump_file, "Removed speculations ");
2387 spec_rem.dump (dump_file);
2388 fprintf (dump_file, "\n");
2389 }
2390 dump_overall_stats ();
2391 fprintf (dump_file, "\nWhy inlining failed?\n");
2392 for (i = 0; i < CIF_N_REASONS; i++)
2393 if (reason[i][1])
2394 fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64" count\n",
2395 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2396 (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
2397 }
2398
2399 /* Called when node is removed. */
2400
2401 static void
2402 flatten_remove_node_hook (struct cgraph_node *node, void *data)
2403 {
2404 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
2405 return;
2406
2407 hash_set<struct cgraph_node *> *removed
2408 = (hash_set<struct cgraph_node *> *) data;
2409 removed->add (node);
2410 }
2411
2412 /* Decide on the inlining. We do so in topological order to avoid
2413 the expense of updating data structures. */
2414
2415 static unsigned int
2416 ipa_inline (void)
2417 {
2418 struct cgraph_node *node;
2419 int nnodes;
2420 struct cgraph_node **order;
2421 int i, j;
2422 int cold;
2423 bool remove_functions = false;
2424
2425 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2426
2427 if (dump_file)
2428 ipa_dump_fn_summaries (dump_file);
2429
2430 nnodes = ipa_reverse_postorder (order);
2431 spec_rem = profile_count::zero ();
2432
2433 FOR_EACH_FUNCTION (node)
2434 {
2435 node->aux = 0;
2436
2437 /* Recompute the default reasons for inlining because they may have
2438 changed during merging. */
2439 if (in_lto_p)
2440 {
2441 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2442 {
2443 gcc_assert (e->inline_failed);
2444 initialize_inline_failed (e);
2445 }
2446 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2447 initialize_inline_failed (e);
2448 }
2449 }
2450
2451 if (dump_file)
2452 fprintf (dump_file, "\nFlattening functions:\n");
2453
2454 /* First shrink the order array, so that it only contains nodes with
2455 the flatten attribute. */
2456 for (i = nnodes - 1, j = i; i >= 0; i--)
2457 {
2458 node = order[i];
2459 if (lookup_attribute ("flatten",
2460 DECL_ATTRIBUTES (node->decl)) != NULL)
2461 order[j--] = order[i];
2462 }
2463
2464 /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
2465 nodes with flatten attribute. If there is more than one such
2466 node, we need to register a node removal hook, as flatten_function
2467 could remove other nodes with flatten attribute. See PR82801. */
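/* A hypothetical layout triggering this (illustrative only; see PR82801
   for the original testcase):

     static void a (void) { ... }
     __attribute__ ((flatten)) static void b (void) { a (); }
     __attribute__ ((flatten)) void c (void) { b (); }

   flattening c can inline b everywhere and remove its offline copy, so the
   later walk over the remembered order[] entries must skip the removed
   node.  */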
2468 struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
2469 hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
2470 if (j < nnodes - 2)
2471 {
2472 flatten_removed_nodes = new hash_set<struct cgraph_node *>;
2473 node_removal_hook_holder
2474 = symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
2475 flatten_removed_nodes);
2476 }
2477
2478 /* In the first pass handle functions to be flattened. Do this with
2479 a priority so none of our later choices will make this impossible. */
2480 for (i = nnodes - 1; i > j; i--)
2481 {
2482 node = order[i];
2483 if (flatten_removed_nodes
2484 && flatten_removed_nodes->contains (node))
2485 continue;
2486
2487 /* Handle nodes to be flattened.
2488 Ideally, when processing callees we would stop inlining at the
2489 entry of cycles, possibly cloning that entry point and trying
2490 to flatten it, turning it into a self-recursive
2491 function. */
2492 if (dump_file)
2493 fprintf (dump_file, "Flattening %s\n", node->name ());
2494 flatten_function (node, false);
2495 }
2496
2497 if (j < nnodes - 2)
2498 {
2499 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
2500 delete flatten_removed_nodes;
2501 }
2502 free (order);
2503
2504 if (dump_file)
2505 dump_overall_stats ();
2506
2507 inline_small_functions ();
2508
2509 gcc_assert (symtab->state == IPA_SSA);
2510 symtab->state = IPA_SSA_AFTER_INLINING;
2511 /* Do first after-inlining removal. We want to remove all "stale" extern
2512 inline functions and virtual functions so we really know what is called
2513 once. */
2514 symtab->remove_unreachable_nodes (dump_file);
2515
2516 /* Inline functions with the property that after inlining into all callers
2517 the code size will shrink because the out-of-line copy is eliminated.
2518 We do this regardless of the callee size as long as function growth limits
2519 are met. */
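/* Illustrative example (user-level code): if

     static int scale (int x) { return 3 * x + 1; }

   is called from exactly one place, inlining that single call lets the
   out-of-line copy of scale be removed, so the unit shrinks (or at least
   does not grow) even though the call site itself gets bigger.  */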
2520 if (dump_file)
2521 fprintf (dump_file,
2522 "\nDeciding on functions to be inlined into all callers and "
2523 "removing useless speculations:\n");
2524
2525 /* Inlining one function called once has a good chance of preventing
2526 inlining of another function into the same callee. Ideally we should
2527 work in priority order, but probably inlining hot functions first
2528 is a good cut without the extra pain of maintaining the queue.
2529
2530 ??? This is not really fitting the bill perfectly: inlining a function
2531 into a callee often leads to better optimization of the callee due to
2532 increased context for optimization.
2533 For example, if the main() function calls a function that outputs help
2534 and then a function that does the main optimization, we should inline
2535 the second with priority even if both calls are cold by themselves.
2536
2537 We probably want to implement a new predicate replacing our use of
2538 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2539 to be hot. */
2540 for (cold = 0; cold <= 1; cold ++)
2541 {
2542 FOR_EACH_DEFINED_FUNCTION (node)
2543 {
2544 struct cgraph_edge *edge, *next;
2545 bool update = false;
2546
2547 if (!opt_for_fn (node->decl, optimize)
2548 || !opt_for_fn (node->decl, flag_inline_functions_called_once))
2549 continue;
2550
2551 for (edge = node->callees; edge; edge = next)
2552 {
2553 next = edge->next_callee;
2554 if (edge->speculative && !speculation_useful_p (edge, false))
2555 {
2556 if (edge->count.ipa ().initialized_p ())
2557 spec_rem += edge->count.ipa ();
2558 edge->resolve_speculation ();
2559 update = true;
2560 remove_functions = true;
2561 }
2562 }
2563 if (update)
2564 {
2565 struct cgraph_node *where = node->global.inlined_to
2566 ? node->global.inlined_to : node;
2567 reset_edge_caches (where);
2568 ipa_update_overall_fn_summary (where);
2569 }
2570 if (want_inline_function_to_all_callers_p (node, cold))
2571 {
2572 int num_calls = 0;
2573 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2574 true);
2575 while (node->call_for_symbol_and_aliases
2576 (inline_to_all_callers, &num_calls, true))
2577 ;
2578 remove_functions = true;
2579 }
2580 }
2581 }
2582
2583 /* Free ipa-prop structures if they are no longer needed. */
2584 ipa_free_all_structures_after_iinln ();
2585
2586 if (dump_file)
2587 {
2588 fprintf (dump_file,
2589 "\nInlined %i calls, eliminated %i functions\n\n",
2590 ncalls_inlined, nfunctions_inlined);
2591 dump_inline_stats ();
2592 }
2593
2594 if (dump_file)
2595 ipa_dump_fn_summaries (dump_file);
2596 return remove_functions ? TODO_remove_functions : 0;
2597 }
2598
2599 /* Inline always-inline function calls in NODE. */
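/* The calls handled here come from declarations such as (illustrative;
   clamp is a made-up name):

     static inline int clamp (int x) __attribute__ ((always_inline));

   Such calls must be inlined even at -O0; when an edge cannot be
   early-inlined, the failure is recorded here so that expand_call_inline
   in tree-inline.c can flag an error later (see the comment in the body
   below).  */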
2600
2601 static bool
2602 inline_always_inline_functions (struct cgraph_node *node)
2603 {
2604 struct cgraph_edge *e;
2605 bool inlined = false;
2606
2607 for (e = node->callees; e; e = e->next_callee)
2608 {
2609 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2610 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2611 continue;
2612
2613 if (e->recursive_p ())
2614 {
2615 if (dump_file)
2616 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2617 e->callee->name ());
2618 e->inline_failed = CIF_RECURSIVE_INLINING;
2619 continue;
2620 }
2621
2622 if (!can_early_inline_edge_p (e))
2623 {
2624 /* Set inlined to true if the callee is marked "always_inline" but
2625 is not inlinable. This will allow flagging an error later in
2626 expand_call_inline in tree-inline.c. */
2627 if (lookup_attribute ("always_inline",
2628 DECL_ATTRIBUTES (callee->decl)) != NULL)
2629 inlined = true;
2630 continue;
2631 }
2632
2633 if (dump_file)
2634 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2635 xstrdup_for_dump (e->callee->name ()),
2636 xstrdup_for_dump (e->caller->name ()));
2637 inline_call (e, true, NULL, NULL, false);
2638 inlined = true;
2639 }
2640 if (inlined)
2641 ipa_update_overall_fn_summary (node);
2642
2643 return inlined;
2644 }
2645
2646 /* Decide on the inlining. We do so in topological order to avoid
2647 the expense of updating data structures. */
2648
2649 static bool
2650 early_inline_small_functions (struct cgraph_node *node)
2651 {
2652 struct cgraph_edge *e;
2653 bool inlined = false;
2654
2655 for (e = node->callees; e; e = e->next_callee)
2656 {
2657 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2658 if (!ipa_fn_summaries->get_create (callee)->inlinable
2659 || !e->inline_failed)
2660 continue;
2661
2662 /* Do not consider functions not declared inline. */
2663 if (!DECL_DECLARED_INLINE_P (callee->decl)
2664 && !opt_for_fn (node->decl, flag_inline_small_functions)
2665 && !opt_for_fn (node->decl, flag_inline_functions))
2666 continue;
2667
2668 if (dump_file)
2669 fprintf (dump_file, "Considering inline candidate %s.\n",
2670 callee->name ());
2671
2672 if (!can_early_inline_edge_p (e))
2673 continue;
2674
2675 if (e->recursive_p ())
2676 {
2677 if (dump_file)
2678 fprintf (dump_file, " Not inlining: recursive call.\n");
2679 continue;
2680 }
2681
2682 if (!want_early_inline_function_p (e))
2683 continue;
2684
2685 if (dump_file)
2686 fprintf (dump_file, " Inlining %s into %s.\n",
2687 xstrdup_for_dump (callee->name ()),
2688 xstrdup_for_dump (e->caller->name ()));
2689 inline_call (e, true, NULL, NULL, false);
2690 inlined = true;
2691 }
2692
2693 if (inlined)
2694 ipa_update_overall_fn_summary (node);
2695
2696 return inlined;
2697 }
2698
2699 unsigned int
2700 early_inliner (function *fun)
2701 {
2702 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2703 struct cgraph_edge *edge;
2704 unsigned int todo = 0;
2705 int iterations = 0;
2706 bool inlined = false;
2707
2708 if (seen_error ())
2709 return 0;
2710
2711 /* Do nothing if the data structures for the ipa-inliner are already computed.
2712 This happens when some pass decides to construct a new function and
2713 cgraph_add_new_function calls lowering passes and early optimization on
2714 it. This may confuse us when the early inliner decides to inline a call
2715 to a function clone, because function clones don't have a parameter list
2716 in ipa-prop matching their signature. */
2717 if (ipa_node_params_sum)
2718 return 0;
2719
2720 if (flag_checking)
2721 node->verify ();
2722 node->remove_all_references ();
2723
2724 /* Even when not optimizing or not inlining, inline always-inline
2725 functions. */
2726 inlined = inline_always_inline_functions (node);
2727
2728 if (!optimize
2729 || flag_no_inline
2730 || !flag_early_inlining
2731 /* Never inline regular functions into always-inline functions
2732 during incremental inlining. This sucks, as functions calling
2733 always-inline functions will get less optimized, but at the
2734 same time inlining functions that call an always-inline
2735 function into an always-inline function might introduce
2736 cycles of edges to be always inlined in the callgraph.
2737
2738 We might want to be smarter and just avoid this type of inlining. */
2739 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2740 && lookup_attribute ("always_inline",
2741 DECL_ATTRIBUTES (node->decl))))
2742 ;
2743 else if (lookup_attribute ("flatten",
2744 DECL_ATTRIBUTES (node->decl)) != NULL)
2745 {
2746 /* When the function is marked to be flattened, recursively inline
2747 all calls in it. */
2748 if (dump_file)
2749 fprintf (dump_file,
2750 "Flattening %s\n", node->name ());
2751 flatten_function (node, true);
2752 inlined = true;
2753 }
2754 else
2755 {
2756 /* If some always_inline functions were inlined, apply the changes.
2757 This way we will not account always-inlines in the growth limits, and
2758 moreover we will inline calls from always-inlines that we skipped
2759 previously because of the conditional above. */
2760 if (inlined)
2761 {
2762 timevar_push (TV_INTEGRATION);
2763 todo |= optimize_inline_calls (current_function_decl);
2764 /* optimize_inline_calls call above might have introduced new
2765 statements that don't have inline parameters computed. */
2766 for (edge = node->callees; edge; edge = edge->next_callee)
2767 {
2768 ipa_call_summary *es = ipa_call_summaries->get_create (edge);
2769 es->call_stmt_size
2770 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2771 es->call_stmt_time
2772 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2773 }
2774 ipa_update_overall_fn_summary (node);
2775 inlined = false;
2776 timevar_pop (TV_INTEGRATION);
2777 }
2778 /* We iterate incremental inlining to get trivial cases of indirect
2779 inlining. */
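/* Illustrative case of the indirect inlining this iteration catches
   (user-level code; the names are made up):

     static int add1 (int x) { return x + 1; }
     static int apply (int (*fn) (int), int x) { return fn (x); }
     int foo (int x) { return apply (add1, x); }

   inlining apply into foo turns the indirect call fn (x) into a direct
   call to add1, which a subsequent iteration can then inline as well.  */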
2780 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2781 && early_inline_small_functions (node))
2782 {
2783 timevar_push (TV_INTEGRATION);
2784 todo |= optimize_inline_calls (current_function_decl);
2785
2786 /* Technically we ought to recompute inline parameters so the new
2787 iteration of the early inliner works as expected. However, the values
2788 we have are approximately right, and thus we only need to update edge
2789 info that might be cleared out for newly discovered edges. */
2790 for (edge = node->callees; edge; edge = edge->next_callee)
2791 {
2792 /* We have no summary for new bound store calls yet. */
2793 ipa_call_summary *es = ipa_call_summaries->get_create (edge);
2794 es->call_stmt_size
2795 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2796 es->call_stmt_time
2797 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2798
2799 if (edge->callee->decl
2800 && !gimple_check_call_matching_types (
2801 edge->call_stmt, edge->callee->decl, false))
2802 {
2803 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2804 edge->call_stmt_cannot_inline_p = true;
2805 }
2806 }
2807 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2808 ipa_update_overall_fn_summary (node);
2809 timevar_pop (TV_INTEGRATION);
2810 iterations++;
2811 inlined = false;
2812 }
2813 if (dump_file)
2814 fprintf (dump_file, "Iterations: %i\n", iterations);
2815 }
2816
2817 if (inlined)
2818 {
2819 timevar_push (TV_INTEGRATION);
2820 todo |= optimize_inline_calls (current_function_decl);
2821 timevar_pop (TV_INTEGRATION);
2822 }
2823
2824 fun->always_inline_functions_inlined = true;
2825
2826 return todo;
2827 }
2828
2829 /* Do inlining of small functions. Doing so early helps profiling and other
2830 passes to be somewhat more effective and avoids some code duplication in
2831 the later real inlining pass for testcases with very many function calls. */
2832
2833 namespace {
2834
2835 const pass_data pass_data_early_inline =
2836 {
2837 GIMPLE_PASS, /* type */
2838 "einline", /* name */
2839 OPTGROUP_INLINE, /* optinfo_flags */
2840 TV_EARLY_INLINING, /* tv_id */
2841 PROP_ssa, /* properties_required */
2842 0, /* properties_provided */
2843 0, /* properties_destroyed */
2844 0, /* todo_flags_start */
2845 0, /* todo_flags_finish */
2846 };
2847
2848 class pass_early_inline : public gimple_opt_pass
2849 {
2850 public:
2851 pass_early_inline (gcc::context *ctxt)
2852 : gimple_opt_pass (pass_data_early_inline, ctxt)
2853 {}
2854
2855 /* opt_pass methods: */
2856 virtual unsigned int execute (function *);
2857
2858 }; // class pass_early_inline
2859
2860 unsigned int
2861 pass_early_inline::execute (function *fun)
2862 {
2863 return early_inliner (fun);
2864 }
2865
2866 } // anon namespace
2867
2868 gimple_opt_pass *
2869 make_pass_early_inline (gcc::context *ctxt)
2870 {
2871 return new pass_early_inline (ctxt);
2872 }
2873
2874 namespace {
2875
2876 const pass_data pass_data_ipa_inline =
2877 {
2878 IPA_PASS, /* type */
2879 "inline", /* name */
2880 OPTGROUP_INLINE, /* optinfo_flags */
2881 TV_IPA_INLINING, /* tv_id */
2882 0, /* properties_required */
2883 0, /* properties_provided */
2884 0, /* properties_destroyed */
2885 0, /* todo_flags_start */
2886 ( TODO_dump_symtab ), /* todo_flags_finish */
2887 };
2888
2889 class pass_ipa_inline : public ipa_opt_pass_d
2890 {
2891 public:
2892 pass_ipa_inline (gcc::context *ctxt)
2893 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2894 NULL, /* generate_summary */
2895 NULL, /* write_summary */
2896 NULL, /* read_summary */
2897 NULL, /* write_optimization_summary */
2898 NULL, /* read_optimization_summary */
2899 NULL, /* stmt_fixup */
2900 0, /* function_transform_todo_flags_start */
2901 inline_transform, /* function_transform */
2902 NULL) /* variable_transform */
2903 {}
2904
2905 /* opt_pass methods: */
2906 virtual unsigned int execute (function *) { return ipa_inline (); }
2907
2908 }; // class pass_ipa_inline
2909
2910 } // anon namespace
2911
2912 ipa_opt_pass_d *
2913 make_pass_ipa_inline (gcc::context *ctxt)
2914 {
2915 return new pass_ipa_inline (ctxt);
2916 }