1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 The implementation of the inliner is organized as follows:
24
25 inlining heuristics limits
26
27 can_inline_edge_p allows checking that a particular inlining is allowed
28 by the limits specified by the user (allowed function growth and so
29 on).
30
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
35
36 inlining heuristics
37
38 The inliner itself is split into two passes:
39
40 pass_early_inlining
41
42 A simple local inlining pass inlining callees into the current function.
43 This pass makes no use of whole unit analysis and thus it can do only
44 very simple decisions based on local properties.
45
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner were already optimized
50 and the results of early inlining add a lot of optimization opportunities
51 for the local optimizers.
52
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
56
57 The main strength of the pass is the ability to eliminate abstraction
58 penalty in C++ code (via a combination of inlining and early
59 optimization) and thus improve the quality of analysis done by real IPA
60 optimizers.
61
62 Because of the lack of whole unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It however does very simple
64 speculative inlining allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
67
68 pass_ipa_inline
69
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs the following steps:
72
73 1) inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
76
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
81
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
87
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to a reduction of code size by eliminating
90 the need for an offline copy of the function. */
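/* Illustrative sketch, not taken from the pass documentation above: for a
   static function with a single call site, e.g.

     static int helper (int x) { return x * x; }
     int caller (int y) { return helper (y) + 1; }

   step 3 inlines helper into caller, and the unreachable function removal
   in step 2 then drops the offline body, so the unit usually shrinks.  */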
91
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "target.h"
97 #include "rtl.h"
98 #include "tree.h"
99 #include "gimple.h"
100 #include "alloc-pool.h"
101 #include "tree-pass.h"
102 #include "gimple-ssa.h"
103 #include "cgraph.h"
104 #include "lto-streamer.h"
105 #include "trans-mem.h"
106 #include "calls.h"
107 #include "tree-inline.h"
108 #include "params.h"
109 #include "profile.h"
110 #include "symbol-summary.h"
111 #include "tree-vrp.h"
112 #include "ipa-prop.h"
113 #include "ipa-fnsummary.h"
114 #include "ipa-inline.h"
115 #include "ipa-utils.h"
116 #include "sreal.h"
117 #include "auto-profile.h"
118 #include "builtins.h"
119 #include "fibonacci_heap.h"
120
121 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
122 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
123
124 /* Statistics we collect about inlining algorithm. */
125 static int overall_size;
126 static profile_count max_count;
127 static profile_count spec_rem;
128
129 /* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
130 static sreal cgraph_freq_base_rec, percent_rec;
131
132 /* Return false when inlining edge E would lead to violating
133 limits on function unit growth or stack usage growth.
134
135 The relative function body growth limit is present generally
136 to avoid problems with non-linear behavior of the compiler.
137 To allow inlining huge functions into a tiny wrapper, the limit
138 is always based on the bigger of the two functions considered.
139
140 For stack growth limits we always base the growth on the stack usage
141 of the callers. We want to prevent applications from segfaulting
142 on stack overflow when functions with huge stack frames get
143 inlined. */
144
145 static bool
146 caller_growth_limits (struct cgraph_edge *e)
147 {
148 struct cgraph_node *to = e->caller;
149 struct cgraph_node *what = e->callee->ultimate_alias_target ();
150 int newsize;
151 int limit = 0;
152 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
153 ipa_fn_summary *info, *what_info, *outer_info = ipa_fn_summaries->get (to);
154
155 /* Look for the function e->caller is inlined into. While doing
156 so work out the largest function body on the way. As
157 described above, we want to base our function growth
158 limits on that, not on the self size of the
159 outer function, nor on the self size of the inline code
160 we immediately inline into. This is the most relaxed
161 interpretation of the rule "do not grow large functions
162 too much in order to prevent the compiler from exploding". */
163 while (true)
164 {
165 info = ipa_fn_summaries->get (to);
166 if (limit < info->self_size)
167 limit = info->self_size;
168 if (stack_size_limit < info->estimated_self_stack_size)
169 stack_size_limit = info->estimated_self_stack_size;
170 if (to->global.inlined_to)
171 to = to->callers->caller;
172 else
173 break;
174 }
175
176 what_info = ipa_fn_summaries->get (what);
177
178 if (limit < what_info->self_size)
179 limit = what_info->self_size;
180
181 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
182
183 /* Check the size after inlining against the function limits. But allow
184 the function to shrink if it went over the limits by forced inlining. */
185 newsize = estimate_size_after_inlining (to, e);
186 if (newsize >= info->size
187 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
188 && newsize > limit)
189 {
190 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
191 return false;
192 }
193
194 if (!what_info->estimated_stack_size)
195 return true;
196
197 /* FIXME: The stack size limit often prevents inlining in Fortran programs
198 due to large I/O data structures used by the Fortran front end.
199 We ought to ignore this limit when we know that the edge is executed
200 on every invocation of the caller (i.e. its call statement dominates
201 the exit block). We do not track this information, yet. */
202 stack_size_limit += ((gcov_type)stack_size_limit
203 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
204
205 inlined_stack = (outer_info->stack_frame_offset
206 + outer_info->estimated_self_stack_size
207 + what_info->estimated_stack_size);
208 /* Check the new stack consumption against the stack consumption at the
209 place the stack is used. */
210 if (inlined_stack > stack_size_limit
211 /* If the function already has large stack usage from a sibling
212 inline call, we can inline, too.
213 This bit overoptimistically assumes that we are good at stack
214 packing. */
215 && inlined_stack > info->estimated_stack_size
216 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
217 {
218 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
219 return false;
220 }
221 return true;
222 }
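/* Worked example for caller_growth_limits, with illustrative numbers only:
   if the largest self_size seen on the inline chain (including the callee)
   is 1000 and --param large-function-growth is 100 (percent), then
   limit = 1000 + 1000 * 100 / 100 = 2000.  An inlining is rejected here
   only when the estimated new size does not shrink the function, exceeds
   --param large-function-insns and also exceeds this limit.  */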
223
224 /* Dump info about why inlining has failed. */
225
226 static void
227 report_inline_failed_reason (struct cgraph_edge *e)
228 {
229 if (dump_file)
230 {
231 fprintf (dump_file, " not inlinable: %s -> %s, %s\n",
232 e->caller->dump_name (),
233 e->callee->dump_name (),
234 cgraph_inline_failed_string (e->inline_failed));
235 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
236 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
237 && e->caller->lto_file_data
238 && e->callee->ultimate_alias_target ()->lto_file_data)
239 {
240 fprintf (dump_file, " LTO objects: %s, %s\n",
241 e->caller->lto_file_data->file_name,
242 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
243 }
244 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
245 cl_target_option_print_diff
246 (dump_file, 2, target_opts_for_fn (e->caller->decl),
247 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
248 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
249 cl_optimization_print_diff
250 (dump_file, 2, opts_for_fn (e->caller->decl),
251 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
252 }
253 }
254
255 /* Decide whether sanitizer-related attributes allow inlining. */
256
257 static bool
258 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
259 {
260 /* Don't care if the sanitizer is disabled. */
261 if (!(flag_sanitize & SANITIZE_ADDRESS))
262 return true;
263
264 if (!caller || !callee)
265 return true;
266
267 return !!lookup_attribute ("no_sanitize_address",
268 DECL_ATTRIBUTES (caller)) ==
269 !!lookup_attribute ("no_sanitize_address",
270 DECL_ATTRIBUTES (callee));
271 }
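/* Illustrative example, assuming -fsanitize=address is in effect:

     __attribute__ ((no_sanitize_address)) static void unpoisoned (void);
     void instrumented (void) { unpoisoned (); }

   Only one of the two functions carries no_sanitize_address, so the
   predicate above returns false and can_inline_edge_p later refuses the
   inline with CIF_ATTRIBUTE_MISMATCH.  */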
272
273 /* Used for flags where it is safe to inline when caller's value is
274 greater than callee's. */
275 #define check_maybe_up(flag) \
276 (opts_for_fn (caller->decl)->x_##flag \
277 != opts_for_fn (callee->decl)->x_##flag \
278 && (!always_inline \
279 || opts_for_fn (caller->decl)->x_##flag \
280 < opts_for_fn (callee->decl)->x_##flag))
281 /* Used for flags where it is safe to inline when caller's value is
282 smaller than callee's. */
283 #define check_maybe_down(flag) \
284 (opts_for_fn (caller->decl)->x_##flag \
285 != opts_for_fn (callee->decl)->x_##flag \
286 && (!always_inline \
287 || opts_for_fn (caller->decl)->x_##flag \
288 > opts_for_fn (callee->decl)->x_##flag))
289 /* Used for flags where exact match is needed for correctness. */
290 #define check_match(flag) \
291 (opts_for_fn (caller->decl)->x_##flag \
292 != opts_for_fn (callee->decl)->x_##flag)
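/* For illustration, check_maybe_up (flag_trapping_math) expands to

     (opts_for_fn (caller->decl)->x_flag_trapping_math
      != opts_for_fn (callee->decl)->x_flag_trapping_math
      && (!always_inline
          || opts_for_fn (caller->decl)->x_flag_trapping_math
             < opts_for_fn (callee->decl)->x_flag_trapping_math))

   so the check fires (and blocks the inlining) whenever the values differ,
   unless the callee is always_inline and the caller's value is not the
   smaller one.  check_match simply requires the two values to be equal.  */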
293
294 /* Decide if we can inline the edge and possibly update
295 the inline_failed reason.
296 We check whether inlining is possible at all and whether
297 caller growth limits allow doing so.
298
299 If REPORT is true, output the reason to the dump file.
300
301 If DISREGARD_LIMITS is true, ignore size limits. */
302
303 static bool
304 can_inline_edge_p (struct cgraph_edge *e, bool report,
305 bool disregard_limits = false, bool early = false)
306 {
307 gcc_checking_assert (e->inline_failed);
308
309 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
310 {
311 if (report)
312 report_inline_failed_reason (e);
313 return false;
314 }
315
316 bool inlinable = true;
317 enum availability avail;
318 cgraph_node *caller = e->caller->global.inlined_to
319 ? e->caller->global.inlined_to : e->caller;
320 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
321 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
322 tree callee_tree
323 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
324
325 if (!callee->definition)
326 {
327 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
328 inlinable = false;
329 }
330 else if (callee->calls_comdat_local)
331 {
332 e->inline_failed = CIF_USES_COMDAT_LOCAL;
333 inlinable = false;
334 }
335 else if (avail <= AVAIL_INTERPOSABLE)
336 {
337 e->inline_failed = CIF_OVERWRITABLE;
338 inlinable = false;
339 }
340 /* All edges with call_stmt_cannot_inline_p should have inline_failed
341 initialized to one of the FINAL_ERROR reasons. */
342 else if (e->call_stmt_cannot_inline_p)
343 gcc_unreachable ();
344 /* Don't inline if the functions have different EH personalities. */
345 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
346 && DECL_FUNCTION_PERSONALITY (callee->decl)
347 && (DECL_FUNCTION_PERSONALITY (caller->decl)
348 != DECL_FUNCTION_PERSONALITY (callee->decl)))
349 {
350 e->inline_failed = CIF_EH_PERSONALITY;
351 inlinable = false;
352 }
353 /* TM pure functions should not be inlined into non-TM_pure
354 functions. */
355 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
356 {
357 e->inline_failed = CIF_UNSPECIFIED;
358 inlinable = false;
359 }
360 /* Check compatibility of target optimization options. */
361 else if (!targetm.target_option.can_inline_p (caller->decl,
362 callee->decl))
363 {
364 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
365 inlinable = false;
366 }
367 else if (!ipa_fn_summaries->get (callee)->inlinable)
368 {
369 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
370 inlinable = false;
371 }
372 /* Don't inline a function with mismatched sanitization attributes. */
373 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
374 {
375 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
376 inlinable = false;
377 }
378 /* Check if caller growth allows the inlining. */
379 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
380 && !disregard_limits
381 && !lookup_attribute ("flatten",
382 DECL_ATTRIBUTES (caller->decl))
383 && !caller_growth_limits (e))
384 inlinable = false;
385 /* Don't inline a function with a higher optimization level than the
386 caller. FIXME: this is really just tip of iceberg of handling
387 optimization attribute. */
388 else if (caller_tree != callee_tree)
389 {
390 bool always_inline =
391 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
392 && lookup_attribute ("always_inline",
393 DECL_ATTRIBUTES (callee->decl)));
394 ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
395 ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
396
397 /* Until GCC 4.9 we did not check the semantics-altering flags
398 below and inlined across optimization boundaries.
399 Enabling the checks below breaks several packages by refusing
400 to inline library always_inline functions. See PR65873.
401 Disable the check for early inlining for now until a better solution
402 is found. */
403 if (always_inline && early)
404 ;
405 /* There are some options that change IL semantics which means
406 we cannot inline in these cases for correctness reasons.
407 Not even for always_inline declared functions. */
408 else if (check_match (flag_wrapv)
409 || check_match (flag_trapv)
410 /* When the caller or callee does FP math, be sure the FP codegen flags
411 are compatible. */
412 || ((caller_info->fp_expressions && callee_info->fp_expressions)
413 && (check_maybe_up (flag_rounding_math)
414 || check_maybe_up (flag_trapping_math)
415 || check_maybe_down (flag_unsafe_math_optimizations)
416 || check_maybe_down (flag_finite_math_only)
417 || check_maybe_up (flag_signaling_nans)
418 || check_maybe_down (flag_cx_limited_range)
419 || check_maybe_up (flag_signed_zeros)
420 || check_maybe_down (flag_associative_math)
421 || check_maybe_down (flag_reciprocal_math)
422 || check_maybe_down (flag_fp_int_builtin_inexact)
423 /* Strictly speaking only when the callee contains function
424 calls that may end up setting errno. */
425 || check_maybe_up (flag_errno_math)))
426 /* We do not want code compiled with exceptions to be
427 brought into a non-EH function unless we know that the callee
428 does not throw.
429 This is tracked by DECL_FUNCTION_PERSONALITY. */
430 || (check_maybe_up (flag_non_call_exceptions)
431 && DECL_FUNCTION_PERSONALITY (callee->decl))
432 || (check_maybe_up (flag_exceptions)
433 && DECL_FUNCTION_PERSONALITY (callee->decl))
434 /* When devirtualization is disabled for the callee, it is not safe
435 to inline it as we possibly mangled the type info.
436 Allow early inlining of always inlines. */
437 || (!early && check_maybe_down (flag_devirtualize)))
438 {
439 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
440 inlinable = false;
441 }
442 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
443 else if (always_inline)
444 ;
445 /* When user added an attribute to the callee honor it. */
446 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
447 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
448 {
449 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
450 inlinable = false;
451 }
452 /* If an explicit optimize attribute is not used, the mismatch is caused
453 by different command line options used to build different units.
454 Do not care about COMDAT functions - those are intended to be
455 optimized with the optimization flags of the module they are used in.
456 Also do not care about mixing up size/speed optimization when
457 DECL_DISREGARD_INLINE_LIMITS is set. */
458 else if ((callee->merged_comdat
459 && !lookup_attribute ("optimize",
460 DECL_ATTRIBUTES (caller->decl)))
461 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
462 ;
463 /* If the mismatch is caused by merging two LTO units with different
464 optimization flags we want to be a bit nicer. However never inline
465 if one of the functions is not optimized at all. */
466 else if (!opt_for_fn (callee->decl, optimize)
467 || !opt_for_fn (caller->decl, optimize))
468 {
469 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
470 inlinable = false;
471 }
472 /* If the callee is optimized for size and the caller is not, allow inlining if
473 the code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
474 is inline (and thus likely a unified comdat). This will allow the caller
475 to run faster. */
476 else if (opt_for_fn (callee->decl, optimize_size)
477 > opt_for_fn (caller->decl, optimize_size))
478 {
479 int growth = estimate_edge_growth (e);
480 if (growth > 0
481 && (!DECL_DECLARED_INLINE_P (callee->decl)
482 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
483 MAX_INLINE_INSNS_AUTO)))
484 {
485 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
486 inlinable = false;
487 }
488 }
489 /* If the callee is more aggressively optimized for performance than the caller,
490 we generally want to inline only cheap (runtime-wise) functions. */
491 else if (opt_for_fn (callee->decl, optimize_size)
492 < opt_for_fn (caller->decl, optimize_size)
493 || (opt_for_fn (callee->decl, optimize)
494 > opt_for_fn (caller->decl, optimize)))
495 {
496 if (estimate_edge_time (e)
497 >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
498 {
499 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
500 inlinable = false;
501 }
502 }
503
504 }
505
506 if (!inlinable && report)
507 report_inline_failed_reason (e);
508 return inlinable;
509 }
510
511
512 /* Return true if the edge E is inlinable during early inlining. */
513
514 static bool
515 can_early_inline_edge_p (struct cgraph_edge *e)
516 {
517 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
518 /* The early inliner might get called at WPA stage when an IPA pass adds a new
519 function. In this case we cannot really do any early inlining
520 because function bodies are missing. */
521 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
522 return false;
523 if (!gimple_has_body_p (callee->decl))
524 {
525 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
526 return false;
527 }
528 /* In the early inliner some of the callees may not be in SSA form yet
529 (i.e. the callgraph is cyclic and we did not process
530 the callee in the early inliner, yet). We don't have a CIF code for this
531 case; later we will re-do the decision in the real inliner. */
532 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
533 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
534 {
535 if (dump_file)
536 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
537 return false;
538 }
539 if (!can_inline_edge_p (e, true, false, true))
540 return false;
541 return true;
542 }
543
544
545 /* Return number of calls in N. Ignore cheap builtins. */
546
547 static int
548 num_calls (struct cgraph_node *n)
549 {
550 struct cgraph_edge *e;
551 int num = 0;
552
553 for (e = n->callees; e; e = e->next_callee)
554 if (!is_inexpensive_builtin (e->callee->decl))
555 num++;
556 return num;
557 }
558
559
560 /* Return true if we are interested in inlining the small function. */
561
562 static bool
563 want_early_inline_function_p (struct cgraph_edge *e)
564 {
565 bool want_inline = true;
566 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
567
568 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
569 ;
570 /* For AutoFDO, we need to make sure that before profile summary, all
571 hot paths' IR look exactly the same as in the profiled binary. As a result,
572 in the early inliner, we will disregard the size limit and inline those callsites
573 that are:
574 * inlined in the profiled binary, and
575 * the cloned callee has enough samples to be considered "hot". */
576 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
577 ;
578 else if (!DECL_DECLARED_INLINE_P (callee->decl)
579 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
580 {
581 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
582 report_inline_failed_reason (e);
583 want_inline = false;
584 }
585 else
586 {
587 int growth = estimate_edge_growth (e);
588 int n;
589
590 if (growth <= 0)
591 ;
592 else if (!e->maybe_hot_p ()
593 && growth > 0)
594 {
595 if (dump_file)
596 fprintf (dump_file, " will not early inline: %s->%s, "
597 "call is cold and code would grow by %i\n",
598 e->caller->dump_name (),
599 callee->dump_name (),
600 growth);
601 want_inline = false;
602 }
603 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
604 {
605 if (dump_file)
606 fprintf (dump_file, " will not early inline: %s->%s, "
607 "growth %i exceeds --param early-inlining-insns\n",
608 e->caller->dump_name (),
609 callee->dump_name (),
610 growth);
611 want_inline = false;
612 }
613 else if ((n = num_calls (callee)) != 0
614 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
615 {
616 if (dump_file)
617 fprintf (dump_file, " will not early inline: %s->%s, "
618 "growth %i exceeds --param early-inlining-insns "
619 "divided by number of calls\n",
620 e->caller->dump_name (),
621 callee->dump_name (),
622 growth);
623 want_inline = false;
624 }
625 }
626 return want_inline;
627 }
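/* Worked example, with the parameter value chosen only for illustration:
   with --param early-inlining-insns=14, a callee whose inlining grows the
   caller by 6 insns and which itself contains 2 non-builtin calls is
   rejected by the last test above, because 6 * (2 + 1) = 18 > 14.  */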
628
629 /* Compute time of the edge->caller + edge->callee execution when inlining
630 does not happen. */
631
632 inline sreal
633 compute_uninlined_call_time (struct cgraph_edge *edge,
634 sreal uninlined_call_time)
635 {
636 cgraph_node *caller = (edge->caller->global.inlined_to
637 ? edge->caller->global.inlined_to
638 : edge->caller);
639
640 if (edge->count > profile_count::zero ()
641 && caller->count > profile_count::zero ())
642 uninlined_call_time *= (sreal)edge->count.to_gcov_type ()
643 / caller->count.to_gcov_type ();
644 if (edge->frequency)
645 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
646 else
647 uninlined_call_time = uninlined_call_time >> 11;
648
649 sreal caller_time = ipa_fn_summaries->get (caller)->time;
650 return uninlined_call_time + caller_time;
651 }
652
653 /* Same as compute_uninlined_call_time but compute the time when inlining
654 does happen. */
655
656 inline sreal
657 compute_inlined_call_time (struct cgraph_edge *edge,
658 sreal time)
659 {
660 cgraph_node *caller = (edge->caller->global.inlined_to
661 ? edge->caller->global.inlined_to
662 : edge->caller);
663 sreal caller_time = ipa_fn_summaries->get (caller)->time;
664
665 if (edge->count > profile_count::zero ()
666 && caller->count > profile_count::zero ())
667 time *= (sreal)edge->count.to_gcov_type () / caller->count.to_gcov_type ();
668 if (edge->frequency)
669 time *= cgraph_freq_base_rec * edge->frequency;
670 else
671 time = time >> 11;
672
673 /* This calculation should match one in ipa-inline-analysis.c
674 (estimate_edge_size_and_time). */
675 time -= (sreal) edge->frequency
676 * ipa_call_summaries->get (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
677 time += caller_time;
678 if (time <= 0)
679 time = ((sreal) 1) >> 8;
680 gcc_checking_assert (time >= 0);
681 return time;
682 }
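/* Rough worked example for the two helpers above, with illustrative
   numbers and CGRAPH_FREQ_BASE taken as 1000: for an edge with frequency
   500 (executed on about half of the caller's invocations), a caller time
   of 100 and an estimated callee time of 40, compute_inlined_call_time
   yields about 100 + 40 * 0.5 minus the saved call statement cost, i.e.
   slightly under 120 per invocation of the caller.  */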
683
684 /* Return true if the speedup for inlining E is bigger than
685 PARAM_INLINE_MIN_SPEEDUP. */
686
687 static bool
688 big_speedup_p (struct cgraph_edge *e)
689 {
690 sreal unspec_time;
691 sreal spec_time = estimate_edge_time (e, &unspec_time);
692 sreal time = compute_uninlined_call_time (e, unspec_time);
693 sreal inlined_time = compute_inlined_call_time (e, spec_time);
694
695 if (time - inlined_time
696 > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP))
697 * percent_rec)
698 return true;
699 return false;
700 }
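/* Worked example, with the parameter value chosen only for illustration:
   with --param inline-min-speedup=10, an edge whose uninlined caller time
   is 200 and whose inlined caller time is 150 counts as a big speedup,
   since the saving of 50 exceeds 10% of 200.  */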
701
702 /* Return true if we are interested in inlining the small function.
703 When REPORT is true, report the reason to the dump file. */
704
705 static bool
706 want_inline_small_function_p (struct cgraph_edge *e, bool report)
707 {
708 bool want_inline = true;
709 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
710
711 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
712 ;
713 else if (!DECL_DECLARED_INLINE_P (callee->decl)
714 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
715 {
716 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
717 want_inline = false;
718 }
719 /* Do a fast and conservative check whether the function can be a good
720 inline candidate. At the moment we allow inline hints to
721 promote non-inline functions to inline and we increase
722 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
723 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
724 && (!e->count.initialized_p () || !e->maybe_hot_p ()))
725 && ipa_fn_summaries->get (callee)->min_size
726 - ipa_call_summaries->get (e)->call_stmt_size
727 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
728 {
729 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
730 want_inline = false;
731 }
732 else if ((DECL_DECLARED_INLINE_P (callee->decl)
733 || e->count > profile_count::zero ())
734 && ipa_fn_summaries->get (callee)->min_size
735 - ipa_call_summaries->get (e)->call_stmt_size
736 > 16 * MAX_INLINE_INSNS_SINGLE)
737 {
738 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
739 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
740 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
741 want_inline = false;
742 }
743 else
744 {
745 int growth = estimate_edge_growth (e);
746 ipa_hints hints = estimate_edge_hints (e);
747 bool big_speedup = big_speedup_p (e);
748
749 if (growth <= 0)
750 ;
751 /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
752 hints suggest that inlining the given function is very profitable. */
753 else if (DECL_DECLARED_INLINE_P (callee->decl)
754 && growth >= MAX_INLINE_INSNS_SINGLE
755 && ((!big_speedup
756 && !(hints & (INLINE_HINT_indirect_call
757 | INLINE_HINT_known_hot
758 | INLINE_HINT_loop_iterations
759 | INLINE_HINT_array_index
760 | INLINE_HINT_loop_stride)))
761 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
762 {
763 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
764 want_inline = false;
765 }
766 else if (!DECL_DECLARED_INLINE_P (callee->decl)
767 && !opt_for_fn (e->caller->decl, flag_inline_functions))
768 {
769 /* growth_likely_positive is expensive, always test it last. */
770 if (growth >= MAX_INLINE_INSNS_SINGLE
771 || growth_likely_positive (callee, growth))
772 {
773 e->inline_failed = CIF_NOT_DECLARED_INLINED;
774 want_inline = false;
775 }
776 }
777 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
778 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
779 inlining the given function is very profitable. */
780 else if (!DECL_DECLARED_INLINE_P (callee->decl)
781 && !big_speedup
782 && !(hints & INLINE_HINT_known_hot)
783 && growth >= ((hints & (INLINE_HINT_indirect_call
784 | INLINE_HINT_loop_iterations
785 | INLINE_HINT_array_index
786 | INLINE_HINT_loop_stride))
787 ? MAX (MAX_INLINE_INSNS_AUTO,
788 MAX_INLINE_INSNS_SINGLE)
789 : MAX_INLINE_INSNS_AUTO))
790 {
791 /* growth_likely_positive is expensive, always test it last. */
792 if (growth >= MAX_INLINE_INSNS_SINGLE
793 || growth_likely_positive (callee, growth))
794 {
795 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
796 want_inline = false;
797 }
798 }
799 /* If call is cold, do not inline when function body would grow. */
800 else if (!e->maybe_hot_p ()
801 && (growth >= MAX_INLINE_INSNS_SINGLE
802 || growth_likely_positive (callee, growth)))
803 {
804 e->inline_failed = CIF_UNLIKELY_CALL;
805 want_inline = false;
806 }
807 }
808 if (!want_inline && report)
809 report_inline_failed_reason (e);
810 return want_inline;
811 }
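/* Worked example, with the parameter value chosen only for illustration:
   with --param max-inline-insns-single=400, a callee declared inline whose
   estimated growth is 450 is rejected above unless a big speedup or one of
   the profitability hints (indirect call, known hot, loop iterations or
   stride, array index) is present; for callees not declared inline the
   corresponding cap is --param max-inline-insns-auto.  */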
812
813 /* EDGE is a self-recursive edge.
814 We handle two cases - when function A is inlined into itself
815 or when function A is being inlined into another inliner copy of function
816 A within function B.
817
818 In the first case OUTER_NODE points to the toplevel copy of A, while
819 in the second case OUTER_NODE points to the outermost copy of A in B.
820
821 In both cases we want to be extra selective since
822 inlining the call will just cause new recursive calls to appear. */
823
824 static bool
825 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
826 struct cgraph_node *outer_node,
827 bool peeling,
828 int depth)
829 {
830 char const *reason = NULL;
831 bool want_inline = true;
832 int caller_freq = CGRAPH_FREQ_BASE;
833 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
834
835 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
836 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
837
838 if (!edge->maybe_hot_p ())
839 {
840 reason = "recursive call is cold";
841 want_inline = false;
842 }
843 else if (outer_node->count == profile_count::zero ())
844 {
845 reason = "not executed in profile";
846 want_inline = false;
847 }
848 else if (depth > max_depth)
849 {
850 reason = "--param max-inline-recursive-depth exceeded.";
851 want_inline = false;
852 }
853
854 if (outer_node->global.inlined_to)
855 caller_freq = outer_node->callers->frequency;
856
857 if (!caller_freq)
858 {
859 reason = "function is inlined and unlikely";
860 want_inline = false;
861 }
862
863 if (!want_inline)
864 ;
865 /* Inlining of a self-recursive function into a copy of itself within another
866 function is a transformation similar to loop peeling.
867
868 Peeling is profitable if we can inline enough copies to make the probability
869 of an actual call to the self-recursive function very small. Be sure that
870 the probability of recursion is small.
871
872 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
873 This way the expected number of recursions is at most max_depth. */
874 else if (peeling)
875 {
876 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
877 / max_depth);
878 int i;
879 for (i = 1; i < depth; i++)
880 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
881 if (max_count > profile_count::zero () && edge->count > profile_count::zero ()
882 && (edge->count.to_gcov_type () * CGRAPH_FREQ_BASE
883 / outer_node->count.to_gcov_type ()
884 >= max_prob))
885 {
886 reason = "profile of recursive call is too large";
887 want_inline = false;
888 }
889 if (max_count == profile_count::zero ()
890 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
891 >= max_prob))
892 {
893 reason = "frequency of recursive call is too large";
894 want_inline = false;
895 }
896 }
897 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
898 recursion depth is large. We reduce function call overhead and increase the
899 chances that things fit in the hardware return predictor.
900
901 Recursive inlining might however increase cost of stack frame setup
902 actually slowing down functions whose recursion tree is wide rather than
903 deep.
904
905 Deciding reliably on when to do recursive inlining without profile feedback
906 is tricky. For now we disable recursive inlining when probability of self
907 recursion is low.
908
909 Recursive inlining of a self-recursive call within a loop also results in large
910 loop depths that generally optimize badly. We may want to throttle down inlining
911 in those cases. In particular this seems to happen in one of libstdc++ rb tree
912 methods. */
913 else
914 {
915 if (max_count > profile_count::zero () && edge->count.initialized_p ()
916 && (edge->count.to_gcov_type () * 100
917 / outer_node->count.to_gcov_type ()
918 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
919 {
920 reason = "profile of recursive call is too small";
921 want_inline = false;
922 }
923 else if ((max_count == profile_count::zero ()
924 || !edge->count.initialized_p ())
925 && (edge->frequency * 100 / caller_freq
926 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
927 {
928 reason = "frequency of recursive call is too small";
929 want_inline = false;
930 }
931 }
932 if (!want_inline && dump_file)
933 fprintf (dump_file, " not inlining recursively: %s\n", reason);
934 return want_inline;
935 }
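/* Worked example for the peeling case above, with illustrative values:
   with a maximum recursion depth of 8 and CGRAPH_FREQ_BASE taken as 1000,
   max_prob = 1000 - (1000 + 7) / 8 = 875 at depth 1, i.e. the recursion
   probability must stay below roughly 1 - 1/8, and the bound is squared
   (and thus tightened) for every additional depth level.  */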
936
937 /* Return true when NODE has an uninlinable caller;
938 set HAS_HOT_CALL if it has a hot call.
939 Worker for cgraph_for_node_and_aliases. */
940
941 static bool
942 check_callers (struct cgraph_node *node, void *has_hot_call)
943 {
944 struct cgraph_edge *e;
945 for (e = node->callers; e; e = e->next_caller)
946 {
947 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
948 return true;
949 if (!can_inline_edge_p (e, true))
950 return true;
951 if (e->recursive_p ())
952 return true;
953 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
954 *(bool *)has_hot_call = true;
955 }
956 return false;
957 }
958
959 /* If NODE has a caller, return true. */
960
961 static bool
962 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
963 {
964 if (node->callers)
965 return true;
966 return false;
967 }
968
969 /* Decide if inlining NODE would reduce unit size by eliminating
970 the offline copy of the function.
971 When COLD is true the cold calls are considered, too. */
972
973 static bool
974 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
975 {
976 bool has_hot_call = false;
977
978 /* Aliases get inlined along with the function they alias. */
979 if (node->alias)
980 return false;
981 /* Already inlined? */
982 if (node->global.inlined_to)
983 return false;
984 /* Does it have callers? */
985 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
986 return false;
987 /* Inlining into all callers would increase size? */
988 if (estimate_growth (node) > 0)
989 return false;
990 /* All inlines must be possible. */
991 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
992 true))
993 return false;
994 if (!cold && !has_hot_call)
995 return false;
996 return true;
997 }
998
999 /* A cost model driving the inlining heuristics in a way so that the edges with
1000 the smallest badness are inlined first. After each inlining is performed
1001 the costs of all caller edges of the affected nodes are recomputed so the
1002 metrics may accurately depend on values such as the number of inlinable callers
1003 of the function or the function body size. */
1004
1005 static sreal
1006 edge_badness (struct cgraph_edge *edge, bool dump)
1007 {
1008 sreal badness;
1009 int growth;
1010 sreal edge_time, unspec_edge_time;
1011 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1012 struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
1013 ipa_hints hints;
1014 cgraph_node *caller = (edge->caller->global.inlined_to
1015 ? edge->caller->global.inlined_to
1016 : edge->caller);
1017
1018 growth = estimate_edge_growth (edge);
1019 edge_time = estimate_edge_time (edge, &unspec_edge_time);
1020 hints = estimate_edge_hints (edge);
1021 gcc_checking_assert (edge_time >= 0);
1022 /* Check that inlined time is better, but tolerate some roundoff issues. */
1023 gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
1024 gcc_checking_assert (growth <= callee_info->size);
1025
1026 if (dump)
1027 {
1028 fprintf (dump_file, " Badness calculation for %s -> %s\n",
1029 edge->caller->dump_name (),
1030 edge->callee->dump_name ());
1031 fprintf (dump_file, " size growth %i, time %f unspec %f ",
1032 growth,
1033 edge_time.to_double (),
1034 unspec_edge_time.to_double ());
1035 ipa_dump_hints (dump_file, hints);
1036 if (big_speedup_p (edge))
1037 fprintf (dump_file, " big_speedup");
1038 fprintf (dump_file, "\n");
1039 }
1040
1041 /* Always prefer inlining saving code size. */
1042 if (growth <= 0)
1043 {
1044 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1045 if (dump)
1046 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1047 growth);
1048 }
1049 /* Inlining into EXTERNAL functions is not going to change anything unless
1050 they are themselves inlined. */
1051 else if (DECL_EXTERNAL (caller->decl))
1052 {
1053 if (dump)
1054 fprintf (dump_file, " max: function is external\n");
1055 return sreal::max ();
1056 }
1057 /* When profile is available, compute badness as:
1058
1059 time_saved * caller_count
1060 goodness = -------------------------------------------------
1061 growth_of_caller * overall_growth * combined_size
1062
1063 badness = - goodness
1064
1065 Again use a negative value to make calls with profile appear hotter
1066 than calls without.
1067 */
1068 else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
1069 || caller->count > profile_count::zero ())
1070 {
1071 sreal numerator, denominator;
1072 int overall_growth;
1073 sreal inlined_time = compute_inlined_call_time (edge, edge_time);
1074
1075 numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
1076 - inlined_time);
1077 if (numerator == 0)
1078 numerator = ((sreal) 1 >> 8);
1079 if (caller->count > profile_count::zero ())
1080 numerator *= caller->count.to_gcov_type ();
1081 else if (caller->count.initialized_p ())
1082 numerator = numerator >> 11;
1083 denominator = growth;
1084
1085 overall_growth = callee_info->growth;
1086
1087 /* Look for inliner wrappers of the form:
1088
1089 inline_caller ()
1090 {
1091 do_fast_job...
1092 if (need_more_work)
1093 noninline_callee ();
1094 }
1095 Without penalizing this case, we usually inline noninline_callee
1096 into the inline_caller because overall_growth is small, preventing
1097 further inlining of inline_caller.
1098
1099 Penalize only callgraph edges to functions with small overall
1100 growth ...
1101 */
1102 if (growth > overall_growth
1103 /* ... and having only one caller which is not inlined ... */
1104 && callee_info->single_caller
1105 && !edge->caller->global.inlined_to
1106 /* ... and edges executed only conditionally ... */
1107 && edge->frequency < CGRAPH_FREQ_BASE
1108 /* ... consider case where callee is not inline but caller is ... */
1109 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1110 && DECL_DECLARED_INLINE_P (caller->decl))
1111 /* ... or when early optimizers decided to split and edge
1112 frequency still indicates splitting is a win ... */
1113 || (callee->split_part && !caller->split_part
1114 && edge->frequency
1115 < CGRAPH_FREQ_BASE
1116 * PARAM_VALUE
1117 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
1118 /* ... and do not overwrite user specified hints. */
1119 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1120 || DECL_DECLARED_INLINE_P (caller->decl)))))
1121 {
1122 struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
1123 int caller_growth = caller_info->growth;
1124
1125 /* Only apply the penalty when the caller looks like an inline candidate,
1126 and it is not called once. */
1127 if (!caller_info->single_caller && overall_growth < caller_growth
1128 && caller_info->inlinable
1129 && caller_info->size
1130 < (DECL_DECLARED_INLINE_P (caller->decl)
1131 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1132 {
1133 if (dump)
1134 fprintf (dump_file,
1135 " Wrapper penalty. Increasing growth %i to %i\n",
1136 overall_growth, caller_growth);
1137 overall_growth = caller_growth;
1138 }
1139 }
1140 if (overall_growth > 0)
1141 {
1142 /* Strongly prefer functions with few callers that can be inlined
1143 fully. The squaring here leads to smaller binaries on average.
1144 Watch however for extreme cases and return to a linear function
1145 when growth is large. */
1146 if (overall_growth < 256)
1147 overall_growth *= overall_growth;
1148 else
1149 overall_growth += 256 * 256 - 256;
1150 denominator *= overall_growth;
1151 }
1152 denominator *= inlined_time;
1153
1154 badness = - numerator / denominator;
1155
1156 if (dump)
1157 {
1158 fprintf (dump_file,
1159 " %f: guessed profile. frequency %f, count %" PRId64
1160 " caller count %" PRId64
1161 " time w/o inlining %f, time with inlining %f"
1162 " overall growth %i (current) %i (original)"
1163 " %i (compensated)\n",
1164 badness.to_double (),
1165 (double)edge->frequency / CGRAPH_FREQ_BASE,
1166 edge->count.initialized_p () ? edge->count.to_gcov_type () : -1,
1167 caller->count.initialized_p () ? caller->count.to_gcov_type () : -1,
1168 compute_uninlined_call_time (edge,
1169 unspec_edge_time).to_double (),
1170 compute_inlined_call_time (edge, edge_time).to_double (),
1171 estimate_growth (callee),
1172 callee_info->growth, overall_growth);
1173 }
1174 }
1175 /* When the function-local profile is not available or it does not give
1176 useful information (i.e. frequency is zero), base the cost on
1177 loop nest and overall size growth, so we optimize for the overall number
1178 of functions fully inlined in the program. */
1179 else
1180 {
1181 int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
1182 badness = growth;
1183
1184 /* Decrease badness if call is nested. */
1185 if (badness > 0)
1186 badness = badness >> nest;
1187 else
1188 badness = badness << nest;
1189 if (dump)
1190 fprintf (dump_file, " %f: no profile. nest %i\n",
1191 badness.to_double (), nest);
1192 }
1193 gcc_checking_assert (badness != 0);
1194
1195 if (edge->recursive_p ())
1196 badness = badness.shift (badness > 0 ? 4 : -4);
1197 if ((hints & (INLINE_HINT_indirect_call
1198 | INLINE_HINT_loop_iterations
1199 | INLINE_HINT_array_index
1200 | INLINE_HINT_loop_stride))
1201 || callee_info->growth <= 0)
1202 badness = badness.shift (badness > 0 ? -2 : 2);
1203 if (hints & (INLINE_HINT_same_scc))
1204 badness = badness.shift (badness > 0 ? 3 : -3);
1205 else if (hints & (INLINE_HINT_in_scc))
1206 badness = badness.shift (badness > 0 ? 2 : -2);
1207 else if (hints & (INLINE_HINT_cross_module))
1208 badness = badness.shift (badness > 0 ? 1 : -1);
1209 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1210 badness = badness.shift (badness > 0 ? -4 : 4);
1211 else if ((hints & INLINE_HINT_declared_inline))
1212 badness = badness.shift (badness > 0 ? -3 : 3);
1213 if (dump)
1214 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1215 return badness;
1216 }
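/* Worked example for the guessed-profile branch above, with illustrative
   numbers: if inlining saves 2 units of time, the edge grows the caller by
   10 insns, the callee's overall growth is 20 (squared to 400 because it
   is below 256) and the inlined call time is 50, then
   badness = -2 / (10 * 400 * 50); the hint and recursion adjustments then
   shift the result by a few binary orders of magnitude.  */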
1217
1218 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1219 static inline void
1220 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1221 {
1222 sreal badness = edge_badness (edge, false);
1223 if (edge->aux)
1224 {
1225 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1226 gcc_checking_assert (n->get_data () == edge);
1227
1228 /* fibonacci_heap::replace_key does busy updating of the
1229 heap that is unnecessarily expensive.
1230 We do lazy increases: after extracting the minimum, if the key
1231 turns out to be out of date, it is re-inserted into the heap
1232 with the correct value. */
1233 if (badness < n->get_key ())
1234 {
1235 if (dump_file && (dump_flags & TDF_DETAILS))
1236 {
1237 fprintf (dump_file,
1238 " decreasing badness %s -> %s, %f to %f\n",
1239 edge->caller->dump_name (),
1240 edge->callee->dump_name (),
1241 n->get_key ().to_double (),
1242 badness.to_double ());
1243 }
1244 heap->decrease_key (n, badness);
1245 }
1246 }
1247 else
1248 {
1249 if (dump_file && (dump_flags & TDF_DETAILS))
1250 {
1251 fprintf (dump_file,
1252 " enqueuing call %s -> %s, badness %f\n",
1253 edge->caller->dump_name (),
1254 edge->callee->dump_name (),
1255 badness.to_double ());
1256 }
1257 edge->aux = heap->insert (badness, edge);
1258 }
1259 }
1260
1261
1262 /* NODE was inlined.
1263 All caller edges need to be reset because
1264 size estimates change. Similarly callees need to be reset
1265 because a better context may be known. */
1266
1267 static void
1268 reset_edge_caches (struct cgraph_node *node)
1269 {
1270 struct cgraph_edge *edge;
1271 struct cgraph_edge *e = node->callees;
1272 struct cgraph_node *where = node;
1273 struct ipa_ref *ref;
1274
1275 if (where->global.inlined_to)
1276 where = where->global.inlined_to;
1277
1278 for (edge = where->callers; edge; edge = edge->next_caller)
1279 if (edge->inline_failed)
1280 reset_edge_growth_cache (edge);
1281
1282 FOR_EACH_ALIAS (where, ref)
1283 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1284
1285 if (!e)
1286 return;
1287
1288 while (true)
1289 if (!e->inline_failed && e->callee->callees)
1290 e = e->callee->callees;
1291 else
1292 {
1293 if (e->inline_failed)
1294 reset_edge_growth_cache (e);
1295 if (e->next_callee)
1296 e = e->next_callee;
1297 else
1298 {
1299 do
1300 {
1301 if (e->caller == node)
1302 return;
1303 e = e->caller->callers;
1304 }
1305 while (!e->next_callee);
1306 e = e->next_callee;
1307 }
1308 }
1309 }
1310
1311 /* Recompute HEAP nodes for each caller of NODE.
1312 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1313 When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
1314 it is inlinable. Otherwise check all edges. */
1315
1316 static void
1317 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1318 bitmap updated_nodes,
1319 struct cgraph_edge *check_inlinablity_for)
1320 {
1321 struct cgraph_edge *edge;
1322 struct ipa_ref *ref;
1323
1324 if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
1325 || node->global.inlined_to)
1326 return;
1327 if (!bitmap_set_bit (updated_nodes, node->uid))
1328 return;
1329
1330 FOR_EACH_ALIAS (node, ref)
1331 {
1332 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1333 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1334 }
1335
1336 for (edge = node->callers; edge; edge = edge->next_caller)
1337 if (edge->inline_failed)
1338 {
1339 if (!check_inlinablity_for
1340 || check_inlinablity_for == edge)
1341 {
1342 if (can_inline_edge_p (edge, false)
1343 && want_inline_small_function_p (edge, false))
1344 update_edge_key (heap, edge);
1345 else if (edge->aux)
1346 {
1347 report_inline_failed_reason (edge);
1348 heap->delete_node ((edge_heap_node_t *) edge->aux);
1349 edge->aux = NULL;
1350 }
1351 }
1352 else if (edge->aux)
1353 update_edge_key (heap, edge);
1354 }
1355 }
1356
1357 /* Recompute HEAP nodes for each uninlined call in NODE.
1358 This is used when we know that edge badnesses are going only to increase
1359 (we introduced new call site) and thus all we need is to insert newly
1360 created edges into heap. */
1361
1362 static void
1363 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1364 bitmap updated_nodes)
1365 {
1366 struct cgraph_edge *e = node->callees;
1367
1368 if (!e)
1369 return;
1370 while (true)
1371 if (!e->inline_failed && e->callee->callees)
1372 e = e->callee->callees;
1373 else
1374 {
1375 enum availability avail;
1376 struct cgraph_node *callee;
1377 /* We do not reset the callee growth cache here. Since we added a new call,
1378 growth could have only increased and consequently the badness metric
1379 doesn't need updating. */
1380 if (e->inline_failed
1381 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1382 && ipa_fn_summaries->get (callee)->inlinable
1383 && avail >= AVAIL_AVAILABLE
1384 && !bitmap_bit_p (updated_nodes, callee->uid))
1385 {
1386 if (can_inline_edge_p (e, false)
1387 && want_inline_small_function_p (e, false))
1388 update_edge_key (heap, e);
1389 else if (e->aux)
1390 {
1391 report_inline_failed_reason (e);
1392 heap->delete_node ((edge_heap_node_t *) e->aux);
1393 e->aux = NULL;
1394 }
1395 }
1396 if (e->next_callee)
1397 e = e->next_callee;
1398 else
1399 {
1400 do
1401 {
1402 if (e->caller == node)
1403 return;
1404 e = e->caller->callers;
1405 }
1406 while (!e->next_callee);
1407 e = e->next_callee;
1408 }
1409 }
1410 }
1411
1412 /* Enqueue all recursive calls from NODE into priority queue depending on
1413 how likely we want to recursively inline the call. */
1414
1415 static void
1416 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1417 edge_heap_t *heap)
1418 {
1419 struct cgraph_edge *e;
1420 enum availability avail;
1421
1422 for (e = where->callees; e; e = e->next_callee)
1423 if (e->callee == node
1424 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1425 && avail > AVAIL_INTERPOSABLE))
1426 {
1427 /* When profile feedback is available, prioritize by expected number
1428 of calls. */
1429 heap->insert (!(max_count > 0) || !e->count.initialized_p () ? -e->frequency
1430 : -(e->count.to_gcov_type ()
1431 / ((max_count.to_gcov_type () + (1<<24) - 1)
1432 / (1<<24))),
1433 e);
1434 }
1435 for (e = where->callees; e; e = e->next_callee)
1436 if (!e->inline_failed)
1437 lookup_recursive_calls (node, e->callee, heap);
1438 }
1439
1440 /* Decide on recursive inlining: in the case the function has recursive calls,
1441 inline until the body size reaches the given argument. If any new indirect edges
1442 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1443 is NULL. */
1444
1445 static bool
1446 recursive_inlining (struct cgraph_edge *edge,
1447 vec<cgraph_edge *> *new_edges)
1448 {
1449 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1450 edge_heap_t heap (sreal::min ());
1451 struct cgraph_node *node;
1452 struct cgraph_edge *e;
1453 struct cgraph_node *master_clone = NULL, *next;
1454 int depth = 0;
1455 int n = 0;
1456
1457 node = edge->caller;
1458 if (node->global.inlined_to)
1459 node = node->global.inlined_to;
1460
1461 if (DECL_DECLARED_INLINE_P (node->decl))
1462 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1463
1464 /* Make sure that function is small enough to be considered for inlining. */
1465 if (estimate_size_after_inlining (node, edge) >= limit)
1466 return false;
1467 lookup_recursive_calls (node, node, &heap);
1468 if (heap.empty ())
1469 return false;
1470
1471 if (dump_file)
1472 fprintf (dump_file,
1473 " Performing recursive inlining on %s\n",
1474 node->name ());
1475
1476 /* Do the inlining and update the list of recursive calls during the process. */
1477 while (!heap.empty ())
1478 {
1479 struct cgraph_edge *curr = heap.extract_min ();
1480 struct cgraph_node *cnode, *dest = curr->callee;
1481
1482 if (!can_inline_edge_p (curr, true))
1483 continue;
1484
1485 /* MASTER_CLONE is produced in the case we already started modifying
1486 the function. Be sure to redirect the edge to the original body before
1487 estimating growth, otherwise we will be seeing growth after inlining
1488 the already modified body. */
1489 if (master_clone)
1490 {
1491 curr->redirect_callee (master_clone);
1492 reset_edge_growth_cache (curr);
1493 }
1494
1495 if (estimate_size_after_inlining (node, curr) > limit)
1496 {
1497 curr->redirect_callee (dest);
1498 reset_edge_growth_cache (curr);
1499 break;
1500 }
1501
1502 depth = 1;
1503 for (cnode = curr->caller;
1504 cnode->global.inlined_to; cnode = cnode->callers->caller)
1505 if (node->decl
1506 == curr->callee->ultimate_alias_target ()->decl)
1507 depth++;
1508
1509 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1510 {
1511 curr->redirect_callee (dest);
1512 reset_edge_growth_cache (curr);
1513 continue;
1514 }
1515
1516 if (dump_file)
1517 {
1518 fprintf (dump_file,
1519 " Inlining call of depth %i", depth);
1520 if (node->count > profile_count::zero ())
1521 {
1522 fprintf (dump_file, " called approx. %.2f times per call",
1523 (double)curr->count.to_gcov_type ()
1524 / node->count.to_gcov_type ());
1525 }
1526 fprintf (dump_file, "\n");
1527 }
1528 if (!master_clone)
1529 {
1530 /* We need the original clone to copy around. */
1531 master_clone = node->create_clone (node->decl, node->count,
1532 CGRAPH_FREQ_BASE, false, vNULL,
1533 true, NULL, NULL);
1534 for (e = master_clone->callees; e; e = e->next_callee)
1535 if (!e->inline_failed)
1536 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1537 curr->redirect_callee (master_clone);
1538 reset_edge_growth_cache (curr);
1539 }
1540
1541 inline_call (curr, false, new_edges, &overall_size, true);
1542 lookup_recursive_calls (node, curr->callee, &heap);
1543 n++;
1544 }
1545
1546 if (!heap.empty () && dump_file)
1547 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1548
1549 if (!master_clone)
1550 return false;
1551
1552 if (dump_file)
1553 fprintf (dump_file,
1554 "\n Inlined %i times, "
1555 "body grown from size %i to %i, time %f to %f\n", n,
1556 ipa_fn_summaries->get (master_clone)->size,
1557 ipa_fn_summaries->get (node)->size,
1558 ipa_fn_summaries->get (master_clone)->time.to_double (),
1559 ipa_fn_summaries->get (node)->time.to_double ());
1560
1561 /* Remove the master clone we used for inlining. We rely on the fact that
1562 clones inlined into the master clone get queued just before the master
1563 clone, so we don't need recursion. */
1564 for (node = symtab->first_function (); node != master_clone;
1565 node = next)
1566 {
1567 next = symtab->next_function (node);
1568 if (node->global.inlined_to == master_clone)
1569 node->remove ();
1570 }
1571 master_clone->remove ();
1572 return true;
1573 }
1574
1575
1576 /* Given whole compilation unit estimate of INSNS, compute how large we can
1577 allow the unit to grow. */
1578
1579 static int
1580 compute_max_insns (int insns)
1581 {
1582 int max_insns = insns;
1583 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1584 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1585
1586 return ((int64_t) max_insns
1587 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1588 }
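/* Worked example, with the parameter value chosen only for illustration:
   with --param inline-unit-growth=20 and an overall unit size of 100000
   insns, compute_max_insns (100000) == 120000, so inline_small_functions
   stops accepting size-increasing inlines once the unit would grow past
   that bound.  */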
1589
1590
1591 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1592
1593 static void
1594 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1595 {
1596 while (new_edges.length () > 0)
1597 {
1598 struct cgraph_edge *edge = new_edges.pop ();
1599
1600 gcc_assert (!edge->aux);
1601 if (edge->inline_failed
1602 && can_inline_edge_p (edge, true)
1603 && want_inline_small_function_p (edge, true))
1604 edge->aux = heap->insert (edge_badness (edge, false), edge);
1605 }
1606 }
1607
1608 /* Remove EDGE from the fibheap. */
1609
1610 static void
1611 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1612 {
1613 if (e->aux)
1614 {
1615 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1616 e->aux = NULL;
1617 }
1618 }
1619
1620 /* Return true if speculation of edge E seems useful.
1621 If ANTICIPATE_INLINING is true, be conservative and hope that E
1622 may get inlined. */
1623
1624 bool
1625 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1626 {
1627 enum availability avail;
1628 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1629 e->caller);
1630 struct cgraph_edge *direct, *indirect;
1631 struct ipa_ref *ref;
1632
1633 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1634
1635 if (!e->maybe_hot_p ())
1636 return false;
1637
1638 /* See if IP optimizations found something potentially useful about the
1639 function. For now we look only for CONST/PURE flags. Almost everything
1640 else we propagate is useless. */
1641 if (avail >= AVAIL_AVAILABLE)
1642 {
1643 int ecf_flags = flags_from_decl_or_type (target->decl);
1644 if (ecf_flags & ECF_CONST)
1645 {
1646 e->speculative_call_info (direct, indirect, ref);
1647 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1648 return true;
1649 }
1650 else if (ecf_flags & ECF_PURE)
1651 {
1652 e->speculative_call_info (direct, indirect, ref);
1653 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1654 return true;
1655 }
1656 }
1657 /* If we did not manage to inline the function nor redirect
1658 to an ipa-cp clone (those are recognized by having the local flag set),
1659 it is probably pointless to inline it unless the hardware is missing
1660 an indirect call predictor. */
1661 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1662 return false;
1663 /* For overwritable targets there is not much to do. */
1664 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1665 return false;
1666 /* OK, speculation seems interesting. */
1667 return true;
1668 }
1669
1670 /* We know that EDGE is not going to be inlined.
1671 See if we can remove speculation. */
1672
1673 static void
1674 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1675 {
1676 if (edge->speculative && !speculation_useful_p (edge, false))
1677 {
1678 struct cgraph_node *node = edge->caller;
1679 struct cgraph_node *where = node->global.inlined_to
1680 ? node->global.inlined_to : node;
1681 auto_bitmap updated_nodes;
1682
1683 spec_rem += edge->count;
1684 edge->resolve_speculation ();
1685 reset_edge_caches (where);
1686 ipa_update_overall_fn_summary (where);
1687 update_caller_keys (edge_heap, where,
1688 updated_nodes, NULL);
1689 update_callee_keys (edge_heap, where,
1690 updated_nodes);
1691 }
1692 }
1693
1694 /* Return true if NODE should be accounted for in the overall size estimate.
1695 Skip all nodes optimized for size so we can measure the growth of the hot
1696 part of the program regardless of the cold padding. */
1697
1698 bool
1699 inline_account_function_p (struct cgraph_node *node)
1700 {
1701 return (!DECL_EXTERNAL (node->decl)
1702 && !opt_for_fn (node->decl, optimize_size)
1703 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1704 }
1705
1706 /* Count the number of callers of NODE and store it into DATA (which
1707 points to an int). Worker for cgraph_for_node_and_aliases. */
1708
1709 static bool
1710 sum_callers (struct cgraph_node *node, void *data)
1711 {
1712 struct cgraph_edge *e;
1713 int *num_calls = (int *)data;
1714
1715 for (e = node->callers; e; e = e->next_caller)
1716 (*num_calls)++;
1717 return false;
1718 }
1719
1720 /* We use a greedy algorithm for inlining of small functions:
1721 all inline candidates are put into a prioritized heap ordered by
1722 increasing badness.
1723 
1724 The inlining of small functions is bounded by the unit growth parameters. */
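/* In outline, the function below does (a simplified sketch; the real loop
   also handles speculation, recursive edges and cache-consistency checking):

     seed the heap with every edge passing can_inline_edge_p
       and want_inline_small_function_p;
     while (!heap.empty ())
       {
         edge = extract the edge with the smallest badness;
         if its cached badness is stale, recompute it and possibly re-insert;
         if the unit growth limit or the per-edge checks fail, skip it;
         inline_call (edge);
         update the keys of the affected callers and callees;
       }  */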
1725
1726 static void
1727 inline_small_functions (void)
1728 {
1729 struct cgraph_node *node;
1730 struct cgraph_edge *edge;
1731 edge_heap_t edge_heap (sreal::min ());
1732 auto_bitmap updated_nodes;
1733 int min_size, max_size;
1734 auto_vec<cgraph_edge *> new_indirect_edges;
1735 int initial_size = 0;
1736 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1737 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1738 new_indirect_edges.create (8);
1739
1740 edge_removal_hook_holder
1741 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1742
1743 /* Compute overall unit size and other global parameters used by badness
1744 metrics. */
1745
1746 max_count = profile_count::uninitialized ();
1747 ipa_reduced_postorder (order, true, true, NULL);
1748 free (order);
1749
1750 FOR_EACH_DEFINED_FUNCTION (node)
1751 if (!node->global.inlined_to)
1752 {
1753 if (!node->alias && node->analyzed
1754 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1755 {
1756 struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
1757 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1758
1759 /* Do not account external functions; they will be optimized out
1760 if not inlined. Also only count the non-cold portion of the program. */
1761 if (inline_account_function_p (node))
1762 initial_size += info->size;
1763 info->growth = estimate_growth (node);
1764
1765 int num_calls = 0;
1766 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1767 true);
1768 if (num_calls == 1)
1769 info->single_caller = true;
1770 if (dfs && dfs->next_cycle)
1771 {
1772 struct cgraph_node *n2;
1773 int id = dfs->scc_no + 1;
1774 for (n2 = node; n2;
1775 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1776 {
1777 struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
1778 if (info2->scc_no)
1779 break;
1780 info2->scc_no = id;
1781 }
1782 }
1783 }
1784
1785 for (edge = node->callers; edge; edge = edge->next_caller)
1786 if (!(max_count >= edge->count))
1787 max_count = edge->count;
1788 }
1789 ipa_free_postorder_info ();
1790 initialize_growth_caches ();
1791
1792 if (dump_file)
1793 fprintf (dump_file,
1794 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1795 initial_size);
1796
1797 overall_size = initial_size;
1798 max_size = compute_max_insns (overall_size);
1799 min_size = overall_size;
1800
1801 /* Populate the heap with all edges we might inline. */
1802
1803 FOR_EACH_DEFINED_FUNCTION (node)
1804 {
1805 bool update = false;
1806 struct cgraph_edge *next = NULL;
1807 bool has_speculative = false;
1808
1809 if (dump_file)
1810 fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
1811
1812 for (edge = node->callees; edge; edge = next)
1813 {
1814 next = edge->next_callee;
1815 if (edge->inline_failed
1816 && !edge->aux
1817 && can_inline_edge_p (edge, true)
1818 && want_inline_small_function_p (edge, true)
1819 && edge->inline_failed)
1820 {
1821 gcc_assert (!edge->aux);
1822 update_edge_key (&edge_heap, edge);
1823 }
1824 if (edge->speculative)
1825 has_speculative = true;
1826 }
1827 if (has_speculative)
1828 for (edge = node->callees; edge; edge = next)
1829 if (edge->speculative && !speculation_useful_p (edge,
1830 edge->aux != NULL))
1831 {
1832 edge->resolve_speculation ();
1833 update = true;
1834 }
1835 if (update)
1836 {
1837 struct cgraph_node *where = node->global.inlined_to
1838 ? node->global.inlined_to : node;
1839 ipa_update_overall_fn_summary (where);
1840 reset_edge_caches (where);
1841 update_caller_keys (&edge_heap, where,
1842 updated_nodes, NULL);
1843 update_callee_keys (&edge_heap, where,
1844 updated_nodes);
1845 bitmap_clear (updated_nodes);
1846 }
1847 }
1848
1849 gcc_assert (in_lto_p
1850 || !(max_count > 0)
1851 || (profile_info && flag_branch_probabilities));
1852
1853 while (!edge_heap.empty ())
1854 {
1855 int old_size = overall_size;
1856 struct cgraph_node *where, *callee;
1857 sreal badness = edge_heap.min_key ();
1858 sreal current_badness;
1859 int growth;
1860
1861 edge = edge_heap.extract_min ();
1862 gcc_assert (edge->aux);
1863 edge->aux = NULL;
1864 if (!edge->inline_failed || !edge->callee->analyzed)
1865 continue;
1866
1867 #if CHECKING_P
1868 /* Be sure that the caches are kept consistent. */
1869 sreal cached_badness = edge_badness (edge, false);
1870
1871 int old_size_est = estimate_edge_size (edge);
1872 sreal old_time_est = estimate_edge_time (edge);
1873 int old_hints_est = estimate_edge_hints (edge);
1874
1875 reset_edge_growth_cache (edge);
1876 gcc_assert (old_size_est == estimate_edge_size (edge));
1877 gcc_assert (old_time_est == estimate_edge_time (edge));
1878 /* FIXME:
1879
1880 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1881
1882 fails with profile feedback because some hints depend on the
1883 maybe_hot_edge_p predicate and because, as the callee gets inlined into
1884 other calls, the edge may become cold.
1885 This ought to be fixed by computing relative probabilities
1886 for the given invocation, but that is better done once the whole
1887 code is converted to sreals. Disable for now and revert to the "wrong"
1888 value so the enable/disable checking paths agree. */
1889 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1890
1891 /* When updating the edge costs, we only decrease badness in the keys.
1892 Increases of badness are handled lazily; when we see a key with an
1893 out-of-date value, we re-insert it now. */
1894 current_badness = edge_badness (edge, false);
1895 gcc_assert (cached_badness == current_badness);
1896 gcc_assert (current_badness >= badness);
1897 #else
1898 current_badness = edge_badness (edge, false);
1899 #endif
1900 if (current_badness != badness)
1901 {
1902 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1903 {
1904 edge->aux = edge_heap.insert (current_badness, edge);
1905 continue;
1906 }
1907 else
1908 badness = current_badness;
1909 }
1910
1911 if (!can_inline_edge_p (edge, true))
1912 {
1913 resolve_noninline_speculation (&edge_heap, edge);
1914 continue;
1915 }
1916
1917 callee = edge->callee->ultimate_alias_target ();
1918 growth = estimate_edge_growth (edge);
1919 if (dump_file)
1920 {
1921 fprintf (dump_file,
1922 "\nConsidering %s with %i size\n",
1923 callee->dump_name (),
1924 ipa_fn_summaries->get (callee)->size);
1925 fprintf (dump_file,
1926 " to be inlined into %s in %s:%i\n"
1927 " Estimated badness is %f, frequency %.2f.\n",
1928 edge->caller->dump_name (),
1929 edge->call_stmt
1930 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1931 edge->call_stmt))
1932 > BUILTINS_LOCATION)
1933 ? gimple_filename ((const gimple *) edge->call_stmt)
1934 : "unknown",
1935 edge->call_stmt
1936 ? gimple_lineno ((const gimple *) edge->call_stmt)
1937 : -1,
1938 badness.to_double (),
1939 edge->frequency / (double)CGRAPH_FREQ_BASE);
1940 if (edge->count.initialized_p ())
1941 {
1942 fprintf (dump_file, " Called ");
1943 edge->count.dump (dump_file);
1944 fprintf (dump_file, "times\n");
1945 }
1946 if (dump_flags & TDF_DETAILS)
1947 edge_badness (edge, true);
1948 }
1949
1950 if (overall_size + growth > max_size
1951 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1952 {
1953 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1954 report_inline_failed_reason (edge);
1955 resolve_noninline_speculation (&edge_heap, edge);
1956 continue;
1957 }
1958
1959 if (!want_inline_small_function_p (edge, true))
1960 {
1961 resolve_noninline_speculation (&edge_heap, edge);
1962 continue;
1963 }
1964
1965 /* The heuristics for inlining small functions work poorly for
1966 recursive calls, where the effect is similar to loop unrolling.
1967 When inlining such an edge seems profitable, leave the decision to
1968 the special-purpose recursive inliner. */
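/* A minimal illustration (hypothetical code, not from this unit):

     int fact (int n) { return n <= 1 ? 1 : n * fact (n - 1); }

   Repeatedly inlining the recursive call of FACT into itself behaves
   like unrolling the recursion, so the greedy size/badness tradeoff
   used here is a poor fit and recursive_inlining applies its own
   limits instead.  */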
1969 if (edge->recursive_p ())
1970 {
1971 where = edge->caller;
1972 if (where->global.inlined_to)
1973 where = where->global.inlined_to;
1974 if (!recursive_inlining (edge,
1975 opt_for_fn (edge->caller->decl,
1976 flag_indirect_inlining)
1977 ? &new_indirect_edges : NULL))
1978 {
1979 edge->inline_failed = CIF_RECURSIVE_INLINING;
1980 resolve_noninline_speculation (&edge_heap, edge);
1981 continue;
1982 }
1983 reset_edge_caches (where);
1984 /* Recursive inliner inlines all recursive calls of the function
1985 at once. Consequently we need to update all callee keys. */
1986 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1987 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1988 update_callee_keys (&edge_heap, where, updated_nodes);
1989 bitmap_clear (updated_nodes);
1990 }
1991 else
1992 {
1993 struct cgraph_node *outer_node = NULL;
1994 int depth = 0;
1995
1996 /* Consider the case where the self-recursive function A is inlined
1997 into B. This is a desired optimization in some cases, since it
1998 leads to an effect similar to loop peeling and we might completely
1999 optimize out the recursive call. However, we must be extra
2000 selective. */
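/* A sketch of the situation (hypothetical functions A and B):

     void A (void) { ... A (); ... }
     void B (void) { A (); }

   Inlining A into B peels one level of the recursion into B.  The loop
   below walks the inline chain of the caller and counts in DEPTH how
   many copies of the callee already enclose this call, so the peeling
   stays bounded.  */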
2001
2002 where = edge->caller;
2003 while (where->global.inlined_to)
2004 {
2005 if (where->decl == callee->decl)
2006 outer_node = where, depth++;
2007 where = where->callers->caller;
2008 }
2009 if (outer_node
2010 && !want_inline_self_recursive_call_p (edge, outer_node,
2011 true, depth))
2012 {
2013 edge->inline_failed
2014 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2015 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2016 resolve_noninline_speculation (&edge_heap, edge);
2017 continue;
2018 }
2019 else if (depth && dump_file)
2020 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2021
2022 gcc_checking_assert (!callee->global.inlined_to);
2023 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2024 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2025
2026 reset_edge_caches (edge->callee);
2027
2028 update_callee_keys (&edge_heap, where, updated_nodes);
2029 }
2030 where = edge->caller;
2031 if (where->global.inlined_to)
2032 where = where->global.inlined_to;
2033
2034 /* Our profitability metric can depend on local properties
2035 such as the number of inlinable calls and the size of the function body.
2036 After inlining, these properties might change for the function we
2037 inlined into (since its body size changed) and for the functions
2038 called by the function we inlined (since the number of their inlinable
2039 callers might change). */
2040 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2041 /* The offline copy count has possibly changed; recompute it if a profile
2042 is available. */
2043 if (max_count > profile_count::zero ())
2044 {
2045 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2046 if (n != edge->callee && n->analyzed)
2047 update_callee_keys (&edge_heap, n, updated_nodes);
2048 }
2049 bitmap_clear (updated_nodes);
2050
2051 if (dump_file)
2052 {
2053 fprintf (dump_file,
2054 " Inlined %s into %s which now has time %f and size %i, "
2055 "net change of %+i.\n",
2056 edge->callee->name (),
2057 edge->caller->name (),
2058 ipa_fn_summaries->get (edge->caller)->time.to_double (),
2059 ipa_fn_summaries->get (edge->caller)->size,
2060 overall_size - old_size);
2061 }
2062 if (min_size > overall_size)
2063 {
2064 min_size = overall_size;
2065 max_size = compute_max_insns (min_size);
2066
2067 if (dump_file)
2068 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2069 }
2070 }
2071
2072 free_growth_caches ();
2073 if (dump_file)
2074 fprintf (dump_file,
2075 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2076 initial_size, overall_size,
2077 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2078 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2079 }
2080
2081 /* Flatten NODE. Performed both during early inlining and
2082 at IPA inlining time. */
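/* For example (hypothetical code, not from this unit):

     static int leaf (int x) { return x + 1; }
     static int helper (int x) { return leaf (x) * 2; }
     __attribute__ ((flatten)) int outer (int x) { return helper (x); }

   Flattening OUTER inlines HELPER and, recursively, LEAF, stopping only
   at cycles, recursive calls and edges that cannot be inlined.  */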
2083
2084 static void
2085 flatten_function (struct cgraph_node *node, bool early)
2086 {
2087 struct cgraph_edge *e;
2088
2089 /* We shouldn't be called recursively when we are being processed. */
2090 gcc_assert (node->aux == NULL);
2091
2092 node->aux = (void *) node;
2093
2094 for (e = node->callees; e; e = e->next_callee)
2095 {
2096 struct cgraph_node *orig_callee;
2097 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2098
2099 /* We've hit a cycle? It is time to give up. */
2100 if (callee->aux)
2101 {
2102 if (dump_file)
2103 fprintf (dump_file,
2104 "Not inlining %s into %s to avoid cycle.\n",
2105 xstrdup_for_dump (callee->name ()),
2106 xstrdup_for_dump (e->caller->name ()));
2107 e->inline_failed = CIF_RECURSIVE_INLINING;
2108 continue;
2109 }
2110
2111 /* When the edge is already inlined, we just need to recurse into
2112 it in order to fully flatten the leaves. */
2113 if (!e->inline_failed)
2114 {
2115 flatten_function (callee, early);
2116 continue;
2117 }
2118
2119 /* The flatten attribute needs to be processed during late inlining. For
2120 extra code quality, however, we also do flattening during early
2121 optimization. */
2122 if (!early
2123 ? !can_inline_edge_p (e, true)
2124 : !can_early_inline_edge_p (e))
2125 continue;
2126
2127 if (e->recursive_p ())
2128 {
2129 if (dump_file)
2130 fprintf (dump_file, "Not inlining: recursive call.\n");
2131 continue;
2132 }
2133
2134 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2135 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2136 {
2137 if (dump_file)
2138 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2139 continue;
2140 }
2141
2142 /* Inline the edge and flatten the inline clone. Avoid
2143 recursing through the original node if the node was cloned. */
2144 if (dump_file)
2145 fprintf (dump_file, " Inlining %s into %s.\n",
2146 xstrdup_for_dump (callee->name ()),
2147 xstrdup_for_dump (e->caller->name ()));
2148 orig_callee = callee;
2149 inline_call (e, true, NULL, NULL, false);
2150 if (e->callee != orig_callee)
2151 orig_callee->aux = (void *) node;
2152 flatten_function (e->callee, early);
2153 if (e->callee != orig_callee)
2154 orig_callee->aux = NULL;
2155 }
2156
2157 node->aux = NULL;
2158 if (!node->global.inlined_to)
2159 ipa_update_overall_fn_summary (node);
2160 }
2161
2162 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2163 DATA points to the number of calls originally found so we can avoid
2164 infinite recursion. */
2165
2166 static bool
2167 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2168 hash_set<cgraph_node *> *callers)
2169 {
2170 int *num_calls = (int *)data;
2171 bool callee_removed = false;
2172
2173 while (node->callers && !node->global.inlined_to)
2174 {
2175 struct cgraph_node *caller = node->callers->caller;
2176
2177 if (!can_inline_edge_p (node->callers, true)
2178 || node->callers->recursive_p ())
2179 {
2180 if (dump_file)
2181 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2182 *num_calls = 0;
2183 return false;
2184 }
2185
2186 if (dump_file)
2187 {
2188 fprintf (dump_file,
2189 "\nInlining %s size %i.\n",
2190 node->name (),
2191 ipa_fn_summaries->get (node)->size);
2192 fprintf (dump_file,
2193 " Called once from %s %i insns.\n",
2194 node->callers->caller->name (),
2195 ipa_fn_summaries->get (node->callers->caller)->size);
2196 }
2197
2198 /* Remember which callers we inlined into, delaying the update of the
2199 overall summary. */
2200 callers->add (node->callers->caller);
2201 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2202 if (dump_file)
2203 fprintf (dump_file,
2204 " Inlined into %s which now has %i size\n",
2205 caller->name (),
2206 ipa_fn_summaries->get (caller)->size);
2207 if (!(*num_calls)--)
2208 {
2209 if (dump_file)
2210 fprintf (dump_file, "New calls found; giving up.\n");
2211 return callee_removed;
2212 }
2213 if (callee_removed)
2214 return true;
2215 }
2216 return false;
2217 }
2218
2219 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2220 update. */
2221
2222 static bool
2223 inline_to_all_callers (struct cgraph_node *node, void *data)
2224 {
2225 hash_set<cgraph_node *> callers;
2226 bool res = inline_to_all_callers_1 (node, data, &callers);
2227 /* Perform the delayed update of the overall summary of all callers
2228 processed. This avoids quadratic behavior in the cases where
2229 we have a lot of calls to the same function. */
2230 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2231 i != callers.end (); ++i)
2232 ipa_update_overall_fn_summary (*i);
2233 return res;
2234 }
2235
2236 /* Output overall time estimate. */
2237 static void
2238 dump_overall_stats (void)
2239 {
2240 sreal sum_weighted = 0, sum = 0;
2241 struct cgraph_node *node;
2242
2243 FOR_EACH_DEFINED_FUNCTION (node)
2244 if (!node->global.inlined_to
2245 && !node->alias)
2246 {
2247 sreal time = ipa_fn_summaries->get (node)->time;
2248 sum += time;
2249 if (node->count.initialized_p ())
2250 sum_weighted += time * node->count.to_gcov_type ();
2251 }
2252 fprintf (dump_file, "Overall time estimate: "
2253 "%f weighted by profile: "
2254 "%f\n", sum.to_double (), sum_weighted.to_double ());
2255 }
2256
2257 /* Output some useful stats about inlining. */
2258
2259 static void
2260 dump_inline_stats (void)
2261 {
2262 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2263 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2264 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2265 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2266 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2267 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2268 int64_t reason[CIF_N_REASONS][3];
2269 int i;
2270 struct cgraph_node *node;
2271
2272 memset (reason, 0, sizeof (reason));
2273 FOR_EACH_DEFINED_FUNCTION (node)
2274 {
2275 struct cgraph_edge *e;
2276 for (e = node->callees; e; e = e->next_callee)
2277 {
2278 if (e->inline_failed)
2279 {
2280 if (e->count.initialized_p ())
2281 reason[(int) e->inline_failed][0] += e->count.to_gcov_type ();
2282 reason[(int) e->inline_failed][1] += e->frequency;
2283 reason[(int) e->inline_failed][2] ++;
2284 if (DECL_VIRTUAL_P (e->callee->decl)
2285 && e->count.initialized_p ())
2286 {
2287 if (e->indirect_inlining_edge)
2288 noninlined_virt_indir_cnt += e->count.to_gcov_type ();
2289 else
2290 noninlined_virt_cnt += e->count.to_gcov_type ();
2291 }
2292 else if (e->count.initialized_p ())
2293 {
2294 if (e->indirect_inlining_edge)
2295 noninlined_indir_cnt += e->count.to_gcov_type ();
2296 else
2297 noninlined_cnt += e->count.to_gcov_type ();
2298 }
2299 }
2300 else if (e->count.initialized_p ())
2301 {
2302 if (e->speculative)
2303 {
2304 if (DECL_VIRTUAL_P (e->callee->decl))
2305 inlined_speculative_ply += e->count.to_gcov_type ();
2306 else
2307 inlined_speculative += e->count.to_gcov_type ();
2308 }
2309 else if (DECL_VIRTUAL_P (e->callee->decl))
2310 {
2311 if (e->indirect_inlining_edge)
2312 inlined_virt_indir_cnt += e->count.to_gcov_type ();
2313 else
2314 inlined_virt_cnt += e->count.to_gcov_type ();
2315 }
2316 else
2317 {
2318 if (e->indirect_inlining_edge)
2319 inlined_indir_cnt += e->count.to_gcov_type ();
2320 else
2321 inlined_cnt += e->count.to_gcov_type ();
2322 }
2323 }
2324 }
2325 for (e = node->indirect_calls; e; e = e->next_callee)
2326 if (e->indirect_info->polymorphic
2327 & e->count.initialized_p ())
2328 indirect_poly_cnt += e->count.to_gcov_type ();
2329 else if (e->count.initialized_p ())
2330 indirect_cnt += e->count.to_gcov_type ();
2331 }
2332 if (max_count.initialized_p ())
2333 {
2334 fprintf (dump_file,
2335 "Inlined %" PRId64 " + speculative "
2336 "%" PRId64 " + speculative polymorphic "
2337 "%" PRId64 " + previously indirect "
2338 "%" PRId64 " + virtual "
2339 "%" PRId64 " + virtual and previously indirect "
2340 "%" PRId64 "\n" "Not inlined "
2341 "%" PRId64 " + previously indirect "
2342 "%" PRId64 " + virtual "
2343 "%" PRId64 " + virtual and previously indirect "
2344 "%" PRId64 " + stil indirect "
2345 "%" PRId64 " + still indirect polymorphic "
2346 "%" PRId64 "\n", inlined_cnt,
2347 inlined_speculative, inlined_speculative_ply,
2348 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2349 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2350 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2351 fprintf (dump_file, "Removed speculations ");
2352 spec_rem.dump (dump_file);
2353 fprintf (dump_file, "\n");
2354 }
2355 dump_overall_stats ();
2356 fprintf (dump_file, "\nWhy inlining failed?\n");
2357 for (i = 0; i < CIF_N_REASONS; i++)
2358 if (reason[i][2])
2359 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2360 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2361 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2362 }
2363
2364 /* Decide on the inlining. We do so in topological order to avoid
2365 expensive updates of the data structures. */
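/* In outline, the pass body below does (a simplified sketch):

     1) flatten functions carrying the "flatten" attribute;
     2) run inline_small_functions (), the greedy heap-driven pass;
     3) remove functions that became unreachable;
     4) for hot candidates first and then cold ones, inline functions
        into all callers where that shrinks the code, and drop useless
        speculations along the way.  */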
2366
2367 static unsigned int
2368 ipa_inline (void)
2369 {
2370 struct cgraph_node *node;
2371 int nnodes;
2372 struct cgraph_node **order;
2373 int i;
2374 int cold;
2375 bool remove_functions = false;
2376
2377 if (!optimize)
2378 return 0;
2379
2380 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2381 percent_rec = (sreal) 1 / (sreal) 100;
2382
2383 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2384
2385 if (dump_file)
2386 ipa_dump_fn_summaries (dump_file);
2387
2388 nnodes = ipa_reverse_postorder (order);
2389
2390 FOR_EACH_FUNCTION (node)
2391 {
2392 node->aux = 0;
2393
2394 /* Recompute the default reasons for inlining because they may have
2395 changed during merging. */
2396 if (in_lto_p)
2397 {
2398 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2399 {
2400 gcc_assert (e->inline_failed);
2401 initialize_inline_failed (e);
2402 }
2403 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2404 initialize_inline_failed (e);
2405 }
2406 }
2407
2408 if (dump_file)
2409 fprintf (dump_file, "\nFlattening functions:\n");
2410
2411 /* In the first pass handle functions to be flattened. Do this with
2412 priority so that none of our later choices will make it impossible. */
2413 for (i = nnodes - 1; i >= 0; i--)
2414 {
2415 node = order[i];
2416
2417 /* Handle nodes to be flattened.
2418 Ideally, when processing callees, we would stop inlining at the
2419 entry of cycles, possibly cloning that entry point and
2420 trying to flatten it, turning it into a self-recursive
2421 function. */
2422 if (lookup_attribute ("flatten",
2423 DECL_ATTRIBUTES (node->decl)) != NULL)
2424 {
2425 if (dump_file)
2426 fprintf (dump_file,
2427 "Flattening %s\n", node->name ());
2428 flatten_function (node, false);
2429 }
2430 }
2431 if (dump_file)
2432 dump_overall_stats ();
2433
2434 inline_small_functions ();
2435
2436 gcc_assert (symtab->state == IPA_SSA);
2437 symtab->state = IPA_SSA_AFTER_INLINING;
2438 /* Do first after-inlining removal. We want to remove all "stale" extern
2439 inline functions and virtual functions so we really know what is called
2440 once. */
2441 symtab->remove_unreachable_nodes (dump_file);
2442 free (order);
2443
2444 /* Inline functions with the property that after inlining into all callers
2445 the code size will shrink because the out-of-line copy is eliminated.
2446 We do this regardless of the callee size as long as the function growth
2447 limits are met. */
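/* For instance (hypothetical code, not from this unit), with

     static int helper (int x) { ... large body ... }
     int user (int x) { return helper (x) + 1; }

   and no other callers of HELPER, inlining it into USER allows the
   offline copy of HELPER to be removed, so the unit shrinks (roughly by
   the call overhead) no matter how large HELPER is.  */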
2448 if (dump_file)
2449 fprintf (dump_file,
2450 "\nDeciding on functions to be inlined into all callers and "
2451 "removing useless speculations:\n");
2452
2453 /* Inlining one function called once has a good chance of preventing
2454 inlining of another function into the same callee. Ideally we should
2455 work in priority order, but inlining hot functions first is probably a
2456 good cut without the extra pain of maintaining the queue.
2457 
2458 ??? This is not really fitting the bill perfectly: inlining a function
2459 into its callee often leads to better optimization of the callee due to
2460 increased context for optimization.
2461 For example, if main() calls a function that outputs help
2462 and then a function that does the main optimization, we should inline
2463 the second with priority even if both calls are cold by themselves.
2464 
2465 We probably want to implement a new predicate replacing our use of
2466 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2467 to be hot. */
2468 for (cold = 0; cold <= 1; cold ++)
2469 {
2470 FOR_EACH_DEFINED_FUNCTION (node)
2471 {
2472 struct cgraph_edge *edge, *next;
2473 bool update = false;
2474
2475 for (edge = node->callees; edge; edge = next)
2476 {
2477 next = edge->next_callee;
2478 if (edge->speculative && !speculation_useful_p (edge, false))
2479 {
2480 edge->resolve_speculation ();
2481 spec_rem += edge->count;
2482 update = true;
2483 remove_functions = true;
2484 }
2485 }
2486 if (update)
2487 {
2488 struct cgraph_node *where = node->global.inlined_to
2489 ? node->global.inlined_to : node;
2490 reset_edge_caches (where);
2491 ipa_update_overall_fn_summary (where);
2492 }
2493 if (want_inline_function_to_all_callers_p (node, cold))
2494 {
2495 int num_calls = 0;
2496 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2497 true);
2498 while (node->call_for_symbol_and_aliases
2499 (inline_to_all_callers, &num_calls, true))
2500 ;
2501 remove_functions = true;
2502 }
2503 }
2504 }
2505
2506 /* Free ipa-prop structures if they are no longer needed. */
2507 if (optimize)
2508 ipa_free_all_structures_after_iinln ();
2509
2510 if (dump_file)
2511 {
2512 fprintf (dump_file,
2513 "\nInlined %i calls, eliminated %i functions\n\n",
2514 ncalls_inlined, nfunctions_inlined);
2515 dump_inline_stats ();
2516 }
2517
2518 if (dump_file)
2519 ipa_dump_fn_summaries (dump_file);
2520 /* In WPA we use the inline summaries for the partitioning process. */
2521 if (!flag_wpa)
2522 ipa_free_fn_summary ();
2523 return remove_functions ? TODO_remove_functions : 0;
2524 }
2525
2526 /* Inline always-inline function calls in NODE. */
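/* For example, a callee declared as (hypothetical code)

     static inline int square (int x) __attribute__ ((always_inline));

   is inlined here irrespective of optimization level and size limits;
   when that turns out to be impossible, the failure is diagnosed later
   in expand_call_inline.  */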
2527
2528 static bool
2529 inline_always_inline_functions (struct cgraph_node *node)
2530 {
2531 struct cgraph_edge *e;
2532 bool inlined = false;
2533
2534 for (e = node->callees; e; e = e->next_callee)
2535 {
2536 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2537 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2538 continue;
2539
2540 if (e->recursive_p ())
2541 {
2542 if (dump_file)
2543 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2544 e->callee->name ());
2545 e->inline_failed = CIF_RECURSIVE_INLINING;
2546 continue;
2547 }
2548
2549 if (!can_early_inline_edge_p (e))
2550 {
2551 /* Set inlined to true if the callee is marked "always_inline" but
2552 is not inlinable. This will allow flagging an error later in
2553 expand_call_inline in tree-inline.c. */
2554 if (lookup_attribute ("always_inline",
2555 DECL_ATTRIBUTES (callee->decl)) != NULL)
2556 inlined = true;
2557 continue;
2558 }
2559
2560 if (dump_file)
2561 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2562 xstrdup_for_dump (e->callee->name ()),
2563 xstrdup_for_dump (e->caller->name ()));
2564 inline_call (e, true, NULL, NULL, false);
2565 inlined = true;
2566 }
2567 if (inlined)
2568 ipa_update_overall_fn_summary (node);
2569
2570 return inlined;
2571 }
2572
2573 /* Decide on early inlining of the small functions called directly
2574 by NODE. */
2575
2576 static bool
2577 early_inline_small_functions (struct cgraph_node *node)
2578 {
2579 struct cgraph_edge *e;
2580 bool inlined = false;
2581
2582 for (e = node->callees; e; e = e->next_callee)
2583 {
2584 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2585 if (!ipa_fn_summaries->get (callee)->inlinable
2586 || !e->inline_failed)
2587 continue;
2588
2589 /* Do not consider functions not declared inline. */
2590 if (!DECL_DECLARED_INLINE_P (callee->decl)
2591 && !opt_for_fn (node->decl, flag_inline_small_functions)
2592 && !opt_for_fn (node->decl, flag_inline_functions))
2593 continue;
2594
2595 if (dump_file)
2596 fprintf (dump_file, "Considering inline candidate %s.\n",
2597 callee->name ());
2598
2599 if (!can_early_inline_edge_p (e))
2600 continue;
2601
2602 if (e->recursive_p ())
2603 {
2604 if (dump_file)
2605 fprintf (dump_file, " Not inlining: recursive call.\n");
2606 continue;
2607 }
2608
2609 if (!want_early_inline_function_p (e))
2610 continue;
2611
2612 if (dump_file)
2613 fprintf (dump_file, " Inlining %s into %s.\n",
2614 xstrdup_for_dump (callee->name ()),
2615 xstrdup_for_dump (e->caller->name ()));
2616 inline_call (e, true, NULL, NULL, false);
2617 inlined = true;
2618 }
2619
2620 if (inlined)
2621 ipa_update_overall_fn_summary (node);
2622
2623 return inlined;
2624 }
2625
2626 unsigned int
2627 early_inliner (function *fun)
2628 {
2629 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2630 struct cgraph_edge *edge;
2631 unsigned int todo = 0;
2632 int iterations = 0;
2633 bool inlined = false;
2634
2635 if (seen_error ())
2636 return 0;
2637
2638 /* Do nothing if the data structures for the IPA inliner are already computed.
2639 This happens when some pass decides to construct a new function and
2640 cgraph_add_new_function calls the lowering passes and early optimization on
2641 it. This may confuse us when the early inliner decides to inline a call to
2642 a function clone, because function clones don't have a parameter list in
2643 ipa-prop matching their signature. */
2644 if (ipa_node_params_sum)
2645 return 0;
2646
2647 if (flag_checking)
2648 node->verify ();
2649 node->remove_all_references ();
2650
2651 /* Rebuild this reference because it doesn't depend on the
2652 function's body and it is required to pass cgraph_node
2653 verification. */
2654 if (node->instrumented_version
2655 && !node->instrumentation_clone)
2656 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2657
2658 /* Even when not optimizing or not inlining, inline always-inline
2659 functions. */
2660 inlined = inline_always_inline_functions (node);
2661
2662 if (!optimize
2663 || flag_no_inline
2664 || !flag_early_inlining
2665 /* Never inline regular functions into always-inline functions
2666 during incremental inlining. This sucks as functions calling
2667 always-inline functions will get less optimized, but at the
2668 same time inlining a function that calls an always-inline
2669 function into an always-inline function might introduce
2670 cycles of edges to be always inlined in the callgraph.
2671 
2672 We might want to be smarter and just avoid this type of inlining. */
2673 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2674 && lookup_attribute ("always_inline",
2675 DECL_ATTRIBUTES (node->decl))))
2676 ;
2677 else if (lookup_attribute ("flatten",
2678 DECL_ATTRIBUTES (node->decl)) != NULL)
2679 {
2680 /* When the function is marked to be flattened, recursively inline
2681 all calls in it. */
2682 if (dump_file)
2683 fprintf (dump_file,
2684 "Flattening %s\n", node->name ());
2685 flatten_function (node, true);
2686 inlined = true;
2687 }
2688 else
2689 {
2690 /* If some always_inline functions were inlined, apply the changes.
2691 This way we will not account always-inline functions against the growth
2692 limits and moreover we will inline calls from always-inline functions
2693 that we previously skipped because of the conditional above. */
2694 if (inlined)
2695 {
2696 timevar_push (TV_INTEGRATION);
2697 todo |= optimize_inline_calls (current_function_decl);
2698 /* optimize_inline_calls call above might have introduced new
2699 statements that don't have inline parameters computed. */
2700 for (edge = node->callees; edge; edge = edge->next_callee)
2701 {
2702 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2703 es->call_stmt_size
2704 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2705 es->call_stmt_time
2706 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2707 }
2708 ipa_update_overall_fn_summary (node);
2709 inlined = false;
2710 timevar_pop (TV_INTEGRATION);
2711 }
2712 /* We iterate incremental inlining to get trivial cases of indirect
2713 inlining. */
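/* For example (hypothetical code, not from this unit):

     static void real (void) { ... }
     static void dispatch (void (*fn) (void)) { fn (); }
     void entry (void) { dispatch (real); }

   The first iteration inlines DISPATCH into ENTRY; only then does the
   indirect call become the direct call real (), which a subsequent
   iteration can inline.  */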
2714 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2715 && early_inline_small_functions (node))
2716 {
2717 timevar_push (TV_INTEGRATION);
2718 todo |= optimize_inline_calls (current_function_decl);
2719
2720 /* Technically we ought to recompute the inline parameters so the new
2721 iteration of the early inliner works as expected. However, the values
2722 are approximately right and thus we only need to update the edge
2723 info that might be cleared out for newly discovered edges. */
2724 for (edge = node->callees; edge; edge = edge->next_callee)
2725 {
2726 /* We have no summary for new bound store calls yet. */
2727 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2728 es->call_stmt_size
2729 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2730 es->call_stmt_time
2731 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2732
2733 if (edge->callee->decl
2734 && !gimple_check_call_matching_types (
2735 edge->call_stmt, edge->callee->decl, false))
2736 {
2737 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2738 edge->call_stmt_cannot_inline_p = true;
2739 }
2740 }
2741 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2742 ipa_update_overall_fn_summary (node);
2743 timevar_pop (TV_INTEGRATION);
2744 iterations++;
2745 inlined = false;
2746 }
2747 if (dump_file)
2748 fprintf (dump_file, "Iterations: %i\n", iterations);
2749 }
2750
2751 if (inlined)
2752 {
2753 timevar_push (TV_INTEGRATION);
2754 todo |= optimize_inline_calls (current_function_decl);
2755 timevar_pop (TV_INTEGRATION);
2756 }
2757
2758 fun->always_inline_functions_inlined = true;
2759
2760 return todo;
2761 }
2762
2763 /* Do inlining of small functions. Doing so early helps profiling and other
2764 passes to be somewhat more effective and avoids some code duplication in
2765 the later real inlining pass for testcases with very many function calls. */
2766
2767 namespace {
2768
2769 const pass_data pass_data_early_inline =
2770 {
2771 GIMPLE_PASS, /* type */
2772 "einline", /* name */
2773 OPTGROUP_INLINE, /* optinfo_flags */
2774 TV_EARLY_INLINING, /* tv_id */
2775 PROP_ssa, /* properties_required */
2776 0, /* properties_provided */
2777 0, /* properties_destroyed */
2778 0, /* todo_flags_start */
2779 0, /* todo_flags_finish */
2780 };
2781
2782 class pass_early_inline : public gimple_opt_pass
2783 {
2784 public:
2785 pass_early_inline (gcc::context *ctxt)
2786 : gimple_opt_pass (pass_data_early_inline, ctxt)
2787 {}
2788
2789 /* opt_pass methods: */
2790 virtual unsigned int execute (function *);
2791
2792 }; // class pass_early_inline
2793
2794 unsigned int
2795 pass_early_inline::execute (function *fun)
2796 {
2797 return early_inliner (fun);
2798 }
2799
2800 } // anon namespace
2801
2802 gimple_opt_pass *
2803 make_pass_early_inline (gcc::context *ctxt)
2804 {
2805 return new pass_early_inline (ctxt);
2806 }
2807
2808 namespace {
2809
2810 const pass_data pass_data_ipa_inline =
2811 {
2812 IPA_PASS, /* type */
2813 "inline", /* name */
2814 OPTGROUP_INLINE, /* optinfo_flags */
2815 TV_IPA_INLINING, /* tv_id */
2816 0, /* properties_required */
2817 0, /* properties_provided */
2818 0, /* properties_destroyed */
2819 0, /* todo_flags_start */
2820 ( TODO_dump_symtab ), /* todo_flags_finish */
2821 };
2822
2823 class pass_ipa_inline : public ipa_opt_pass_d
2824 {
2825 public:
2826 pass_ipa_inline (gcc::context *ctxt)
2827 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2828 NULL, /* generate_summary */
2829 NULL, /* write_summary */
2830 NULL, /* read_summary */
2831 NULL, /* write_optimization_summary */
2832 NULL, /* read_optimization_summary */
2833 NULL, /* stmt_fixup */
2834 0, /* function_transform_todo_flags_start */
2835 inline_transform, /* function_transform */
2836 NULL) /* variable_transform */
2837 {}
2838
2839 /* opt_pass methods: */
2840 virtual unsigned int execute (function *) { return ipa_inline (); }
2841
2842 }; // class pass_ipa_inline
2843
2844 } // anon namespace
2845
2846 ipa_opt_pass_d *
2847 make_pass_ipa_inline (gcc::context *ctxt)
2848 {
2849 return new pass_ipa_inline (ctxt);
2850 }