/* Inlining decision heuristics.
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth and
     so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can make only
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result,
       the callees of the function seen by the early inliner were already
       optimized, and the results of early inlining add a lot of optimization
       opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate the
       abstraction penalty in C++ code (via a combination of inlining and
       early optimization) and thus improve the quality of analysis done by
       the real IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very simple
       speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so.

       This heuristic is not very good at inlining recursive calls.  Recursive
       calls can be inlined with results similar to loop unrolling.  To do so,
       a special purpose recursive inliner is executed on the function when a
       recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modifications of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since after
       inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "params.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"

typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static gcov_type spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);

  /* Look for the function e->caller is inlined into.  While doing
     so, work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, nor on the self size of the inline code
     we immediately inline into.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
  while (true)
    {
      info = inline_summaries->get (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  what_info = inline_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large i/o data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     the stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

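/* For orientation, a small worked example of the limit computation above
   (illustrative numbers only; parameter defaults vary between releases).
   Suppose --param large-function-growth is 100, the largest body on the
   path to the inline root has self_size 2000 and the callee self_size is
   300.  Then

     limit = MAX (2000, 300) = 2000
     limit += 2000 * 100 / 100    -> limit = 4000

   and the edge is rejected only if the estimated size after inlining is
   no smaller than before, exceeds --param large-function-insns and
   exceeds 4000.  */
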
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
               xstrdup_for_dump (e->caller->name ()), e->caller->order,
               xstrdup_for_dump (e->callee->name ()), e->callee->order,
               cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
           || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
          && e->caller->lto_file_data
          && e->callee->ultimate_alias_target ()->lto_file_data)
        {
          fprintf (dump_file, "  LTO objects: %s, %s\n",
                   e->caller->lto_file_data->file_name,
                   e->callee->ultimate_alias_target ()->lto_file_data->file_name);
        }
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
        cl_target_option_print_diff
          (dump_file, 2, target_opts_for_fn (e->caller->decl),
           target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
        cl_optimization_print_diff
          (dump_file, 2, opts_for_fn (e->caller->decl),
           opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}

/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  /* Don't care if the sanitizer is disabled.  */
  if (!(flag_sanitize & SANITIZE_ADDRESS))
    return true;

  if (!caller || !callee)
    return true;

  return !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (caller)) ==
         !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (callee));
}

/* Used for flags where it is safe to inline when the caller's value is
   greater than the callee's.  */
#define check_maybe_up(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag \
    && (!always_inline \
        || opts_for_fn (caller->decl)->x_##flag \
           < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when the caller's value is
   smaller than the callee's.  */
#define check_maybe_down(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag \
    && (!always_inline \
        || opts_for_fn (caller->decl)->x_##flag \
           > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where an exact match is needed for correctness.  */
#define check_match(flag) \
   (opts_for_fn (caller->decl)->x_##flag \
    != opts_for_fn (callee->decl)->x_##flag)

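/* As an illustration (not used by the pass), check_maybe_up
   (flag_trapping_math) expands to roughly

     (opts_for_fn (caller->decl)->x_flag_trapping_math
      != opts_for_fn (callee->decl)->x_flag_trapping_math
      && (!always_inline
          || opts_for_fn (caller->decl)->x_flag_trapping_math
             < opts_for_fn (callee->decl)->x_flag_trapping_math))

   i.e. a value mismatch is tolerated for always_inline callees only when
   the caller's value is not smaller than the callee's.  */
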
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
                   bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
        report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = e->caller->global.inlined_to
                        ? e->caller->global.inlined_to : e->caller;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of the FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
           && DECL_FUNCTION_PERSONALITY (callee->decl)
           && (DECL_FUNCTION_PERSONALITY (caller->decl)
               != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
                                                callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (!inline_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
           && !disregard_limits
           && !lookup_attribute ("flatten",
                                 DECL_ATTRIBUTES (caller->decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
             (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
              && lookup_attribute ("always_inline",
                                   DECL_ATTRIBUTES (callee->decl)));
      inline_summary *caller_info = inline_summaries->get (caller);
      inline_summary *callee_info = inline_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
         below and inlined across the optimization boundary.
         Enabling the checks below breaks several packages by refusing
         to inline library always_inline functions.  See PR65873.
         Disable the check for early inlining for now until a better solution
         is found.  */
      if (always_inline && early)
        ;
      /* There are some options that change IL semantics which means
         we cannot inline in these cases for correctness reasons.
         Not even for always_inline declared functions.  */
      /* Strictly speaking only when the callee contains signed integer
         math where overflow is undefined.  */
      else if ((check_maybe_up (flag_strict_overflow)
                /* This flag is set by optimize.  Allow inlining across
                   the optimize boundary.  */
                && (!opt_for_fn (caller->decl, optimize)
                    == !opt_for_fn (callee->decl, optimize) || !always_inline))
               || check_match (flag_wrapv)
               || check_match (flag_trapv)
               /* When the caller or callee does FP math, be sure the FP
                  codegen flags are compatible.  */
               || ((caller_info->fp_expressions && callee_info->fp_expressions)
                   && (check_maybe_up (flag_rounding_math)
                       || check_maybe_up (flag_trapping_math)
                       || check_maybe_down (flag_unsafe_math_optimizations)
                       || check_maybe_down (flag_finite_math_only)
                       || check_maybe_up (flag_signaling_nans)
                       || check_maybe_down (flag_cx_limited_range)
                       || check_maybe_up (flag_signed_zeros)
                       || check_maybe_down (flag_associative_math)
                       || check_maybe_down (flag_reciprocal_math)
                       || check_maybe_down (flag_fp_int_builtin_inexact)
                       /* Strictly speaking only when the callee contains
                          function calls that may end up setting errno.  */
                       || check_maybe_up (flag_errno_math)))
               /* We do not want code compiled with exceptions to be
                  brought into a non-EH function unless we know that the
                  callee does not throw.
                  This is tracked by DECL_FUNCTION_PERSONALITY.  */
               || (check_maybe_up (flag_non_call_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               || (check_maybe_up (flag_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               /* When devirtualization is disabled for the callee, it is not
                  safe to inline it as we possibly mangled the type info.
                  Allow early inlining of always inlines.  */
               || (!early && check_maybe_down (flag_devirtualize)))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
        ;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
               && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If explicit optimize attributes are not used, the mismatch is caused
         by different command line options used to build different units.
         Do not care about COMDAT functions - those are intended to be
         optimized with the optimization flags of the module they are used in.
         Also do not care about mixing up size/speed optimization when
         DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
                && !lookup_attribute ("optimize",
                                      DECL_ATTRIBUTES (caller->decl)))
               || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
        ;
      /* If the mismatch is caused by merging two LTO units with different
         optimization flags we want to be a bit nicer.  However never inline
         if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
               || !opt_for_fn (caller->decl, optimize))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If the callee is optimized for size and the caller is not, allow
         inlining if code shrinks or we are within the
         MAX_INLINE_INSNS_SINGLE limit and the callee is inline (and thus
         likely a unified comdat).  This will allow the caller to run
         faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
          if (growth > 0
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
                                    MAX_INLINE_INSNS_AUTO)))
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
      /* If the callee is more aggressively optimized for performance than
         the caller, we generally want to inline only cheap (runtime wise)
         functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               < opt_for_fn (caller->decl, optimize_size)
               || (opt_for_fn (callee->decl, optimize)
                   > opt_for_fn (caller->decl, optimize)))
        {
          if (estimate_edge_time (e)
              >= 20 + inline_edge_summary (e)->call_stmt_time)
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }

    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have a CIF code for
     this case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
        fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true, false, true))
    return false;
  return true;
}

/* Return the number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}

/* Return true if we are interested in early inlining the small function
   called by E.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before the profile summary, all
     hot paths' IR look exactly the same as in the profiled binary.  As a
     result, in the early inliner, we will disregard size limits and inline
     those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
        ;
      else if (!e->maybe_hot_p ()
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "call is cold and code would grow by %i\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns "
                     "divided by number of calls\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
    }
  return want_inline;
}

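/* A worked instance of the last test above, with assumed numbers: if
   --param early-inlining-insns is 14 and inlining a callee containing
   n = 3 other (non-builtin) calls grows the caller by 4 insns, then
   growth * (n + 1) = 16 > 14 and the edge is rejected; with n = 2 the
   product is 12 and the size test passes.  */
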
/* Compute the time of the edge->caller + edge->callee execution when
   inlining does not happen.  */

inline sreal
compute_uninlined_call_time (struct inline_summary *callee_info,
                             struct cgraph_edge *edge)
{
  sreal uninlined_call_time = (sreal)callee_info->time;
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  if (edge->count && caller->count)
    uninlined_call_time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  int caller_time = inline_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute the time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
                           int edge_time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);
  int caller_time = inline_summaries->get (caller)->time;
  sreal time = edge_time;

  if (edge->count && caller->count)
    time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    time *= cgraph_freq_base_rec * edge->frequency;
  else
    time = time >> 11;

  /* This calculation should match the one in ipa-inline-analysis.
     FIXME: Once ipa-inline-analysis is converted to sreal this can be
     simplified.  */
  time -= (sreal) ((gcov_type) edge->frequency
                   * inline_edge_summary (edge)->call_stmt_time
                   * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}

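/* A sketch of the scaling done by the two functions above, using made-up
   numbers: for an edge with frequency CGRAPH_FREQ_BASE / 2 (taken on
   roughly every other invocation of the caller), callee time 200 and
   caller time 100, the uninlined estimate is about 200 * 0.5 + 100 = 200.
   If the context-sensitive estimate of the inlined body is
   edge_time = 120, the inlined estimate is about 120 * 0.5 + 100 = 160,
   less the saved call statement overhead.  */
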
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
                                            e);
  sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));

  if (time - inlined_time
      > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
        * percent_rec)
    return true;
  return false;
}

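/* Example with assumed values: if --param inline-min-speedup is 10, an
   uninlined time of 200 and an inlined time of 170 give a saving of 30,
   which exceeds 200 * 10 / 100 = 20, so the edge counts as a big
   speedup.  */
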
/* Return true if we are interested in inlining the small function called
   by E.  When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do a fast and conservative check whether the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
           && (!e->count || !e->maybe_hot_p ()))
           && inline_summaries->get (callee)->min_size
                - inline_edge_summary (e)->call_stmt_size
              > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
           && inline_summaries->get (callee)->min_size
                - inline_edge_summary (e)->call_stmt_size
              > 16 * MAX_INLINE_INSNS_SINGLE)
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
                          ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
                          : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
        ;
      /* Apply the MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggest that inlining the given function is very
         profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= MAX_INLINE_INSNS_SINGLE
               && ((!big_speedup
                    && !(hints & (INLINE_HINT_indirect_call
                                  | INLINE_HINT_known_hot
                                  | INLINE_HINT_loop_iterations
                                  | INLINE_HINT_array_index
                                  | INLINE_HINT_loop_stride)))
                   || growth >= MAX_INLINE_INSNS_SINGLE * 16))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_NOT_DECLARED_INLINED;
              want_inline = false;
            }
        }
      /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared
         inline.  Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest
         that inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !big_speedup
               && !(hints & INLINE_HINT_known_hot)
               && growth >= ((hints & (INLINE_HINT_indirect_call
                                       | INLINE_HINT_loop_iterations
                                       | INLINE_HINT_array_index
                                       | INLINE_HINT_loop_stride))
                             ? MAX (MAX_INLINE_INSNS_AUTO,
                                    MAX_INLINE_INSNS_SINGLE)
                             : MAX_INLINE_INSNS_AUTO))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
              want_inline = false;
            }
        }
      /* If the call is cold, do not inline when the function body would
         grow.  */
      else if (!e->maybe_hot_p ()
               && (growth >= MAX_INLINE_INSNS_SINGLE
                   || growth_likely_positive (callee, growth)))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self recursive edge.
   We handle two cases - when function A is being inlined into itself
   or when function A is being inlined into another inlined copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
                                   bool peeling,
                                   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!caller_freq)
    {
      reason = "function is inlined and unlikely";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very small.
     Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
                                         / max_depth);
      int i;
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
          && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
              >= max_prob))
        {
          reason = "profile of recursive call is too large";
          want_inline = false;
        }
      if (!max_count
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
              >= max_prob))
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
    }
  /* Recursive inlining, i.e. the equivalent of loop unrolling, is profitable
     if the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
          && (edge->count * 100 / outer_node->count
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "profile of recursive call is too small";
          want_inline = false;
        }
      else if (!max_count
               && (edge->frequency * 100 / caller_freq
                   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}

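/* A worked instance of the peeling cap above: with CGRAPH_FREQ_BASE of
   1000 and max_depth = 8, max_prob = 1000 - (1000 + 7) / 8 = 875, i.e.
   roughly 7/8.  At depth 2 the cap is squared once to
   875 * 875 / 1000 = 765, so each deeper copy is accepted only if the
   observed recursion probability keeps shrinking accordingly.  */
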
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
        return true;
      if (!can_inline_edge_p (e, true))
        return true;
      if (e->recursive_p ())
        return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
        *(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}

/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
                                         true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}

/* A cost model driving the inlining heuristics in such a way that edges with
   the smallest badness are inlined first.  After each inlining the costs of
   all caller edges of the nodes affected are recomputed, so the metrics may
   accurately depend on values such as the number of inlinable callers of the
   function or the function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth, edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  struct inline_summary *callee_info = inline_summaries->get (callee);
  inline_hints hints;
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
               xstrdup_for_dump (edge->caller->name ()),
               edge->caller->order,
               xstrdup_for_dump (callee->name ()),
               edge->callee->order);
      fprintf (dump_file, "      size growth %i, time %i ",
               growth,
               edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
        fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
                 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
        fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available, compute badness as:

                 time_saved * caller_count
     goodness = -------------------------------------------------
                 growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
    {
      sreal numerator, denominator;
      int overall_growth;

      numerator = (compute_uninlined_call_time (callee_info, edge)
                   - compute_inlined_call_time (edge, edge_time));
      if (numerator == 0)
        numerator = ((sreal) 1 >> 8);
      if (caller->count)
        numerator *= caller->count;
      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
        numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

         inline_caller ()
           {
             do_fast_job...
             if (need_more_work)
               noninline_callee ();
           }
         Without penalizing this case, we usually inline noninline_callee
         into the inline_caller because overall_growth is small, preventing
         further inlining of inline_caller.

         Penalize only callgraph edges to functions with small overall
         growth ...
         */
      if (growth > overall_growth
          /* ... and having only one caller which is not inlined ... */
          && callee_info->single_caller
          && !edge->caller->global.inlined_to
          /* ... and edges executed only conditionally ... */
          && edge->frequency < CGRAPH_FREQ_BASE
          /* ... consider case where callee is not inline but caller is ... */
          && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
               && DECL_DECLARED_INLINE_P (caller->decl))
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
                  && edge->frequency
                     < CGRAPH_FREQ_BASE
                       * PARAM_VALUE
                          (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
                  /* ... and do not overwrite user specified hints.  */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
        {
          struct inline_summary *caller_info = inline_summaries->get (caller);
          int caller_growth = caller_info->growth;

          /* Only apply the penalty when the caller looks like an inline
             candidate and is not called once.  */
          if (!caller_info->single_caller && overall_growth < caller_growth
              && caller_info->inlinable
              && caller_info->size
                 < (DECL_DECLARED_INLINE_P (caller->decl)
                    ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
            {
              if (dump)
                fprintf (dump_file,
                         "     Wrapper penalty. Increasing growth %i to %i\n",
                         overall_growth, caller_growth);
              overall_growth = caller_growth;
            }
        }
      if (overall_growth > 0)
        {
          /* Strongly prefer functions with few callers that can be inlined
             fully.  Squaring the growth here leads to smaller binaries on
             average.  Watch however for extreme cases and fall back to a
             linear function when growth is large.  */
          if (overall_growth < 256)
            overall_growth *= overall_growth;
          else
            overall_growth += 256 * 256 - 256;
          denominator *= overall_growth;
        }
      denominator *= inline_summaries->get (caller)->self_size + growth;

      badness = - numerator / denominator;

      if (dump)
        {
          fprintf (dump_file,
                   "      %f: guessed profile. frequency %f, count %" PRId64
                   " caller count %" PRId64
                   " time w/o inlining %f, time w/ inlining %f"
                   " overall growth %i (current) %i (original)"
                   " %i (compensated)\n",
                   badness.to_double (),
                   (double)edge->frequency / CGRAPH_FREQ_BASE,
                   edge->count, caller->count,
                   compute_uninlined_call_time (callee_info, edge).to_double (),
                   compute_inlined_call_time (edge, edge_time).to_double (),
                   estimate_growth (callee),
                   callee_info->growth, overall_growth);
        }
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness = badness >> nest;
      else
        badness = badness << nest;
      if (dump)
        fprintf (dump_file, "      %f: no profile. nest %i\n",
                 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
                | INLINE_HINT_loop_iterations
                | INLINE_HINT_array_index
                | INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}

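/* A rough numeric instance of the guessed-profile branch above, with
   made-up inputs: a time saving of 2.0, no caller profile count, edge
   growth 10, callee overall growth 20 (squared to 400 since it is below
   256) and caller self_size 90 give

     badness = - 2.0 / (10 * 400 * (90 + 10)) = -5e-6.

   A candidate saving the same time but with larger growth gets a badness
   closer to zero and is therefore extracted from the heap later.  */
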
/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static inline void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
         heap that is unnecessarily expensive.
         We do lazy increases: after extracting the minimum, if the key
         turns out to be out of date, it is re-inserted into the heap
         with the correct value.  */
      if (badness < n->get_key ())
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
                       "  decreasing badness %s/%i -> %s/%i, %f"
                       " to %f\n",
                       xstrdup_for_dump (edge->caller->name ()),
                       edge->caller->order,
                       xstrdup_for_dump (edge->callee->name ()),
                       edge->callee->order,
                       n->get_key ().to_double (),
                       badness.to_double ());
            }
          heap->decrease_key (n, badness);
        }
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "  enqueuing call %s/%i -> %s/%i, badness %f\n",
                   xstrdup_for_dump (edge->caller->name ()),
                   edge->caller->order,
                   xstrdup_for_dump (edge->callee->name ()),
                   edge->callee->order,
                   badness.to_double ());
        }
      edge->aux = heap->insert (badness, edge);
    }
}

/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          reset_edge_growth_cache (e);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check only that edge for
   inlinability.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
                    bitmap updated_nodes,
                    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summaries->get (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        if (!check_inlinablity_for
            || check_inlinablity_for == edge)
          {
            if (can_inline_edge_p (edge, false)
                && want_inline_small_function_p (edge, false))
              update_edge_key (heap, edge);
            else if (edge->aux)
              {
                report_inline_failed_reason (edge);
                heap->delete_node ((edge_heap_node_t *) edge->aux);
                edge->aux = NULL;
              }
          }
        else if (edge->aux)
          update_edge_key (heap, edge);
      }
}

/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        enum availability avail;
        struct cgraph_node *callee;
        /* We do not reset the callee growth cache here.  Since we added a
           new call, growth could have just increased and consequently the
           badness metric doesn't need updating.  */
        if (e->inline_failed
            && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
            && inline_summaries->get (callee)->inlinable
            && avail >= AVAIL_AVAILABLE
            && !bitmap_bit_p (updated_nodes, callee->uid))
          {
            if (can_inline_edge_p (e, false)
                && want_inline_small_function_p (e, false))
              update_edge_key (heap, e);
            else if (e->aux)
              {
                report_inline_failed_reason (e);
                heap->delete_node ((edge_heap_node_t *) e->aux);
                e->aux = NULL;
              }
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Enqueue all recursive calls from NODE into the priority queue, keyed by
   how much we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
        || (e->callee->ultimate_alias_target (&avail, e->caller) == node
            && avail > AVAIL_INTERPOSABLE))
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  */
        heap->insert (!max_count ? -e->frequency
                      : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                      e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
                    vec<cgraph_edge *> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);
  if (heap.empty ())
    return false;

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             node->name ());

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
        continue;

      /* MASTER_CLONE is produced when we have already started modifying
         the function.  Be sure to redirect the edge to the original body
         before estimating growths; otherwise we will see growths after
         inlining the already modified body.  */
      if (master_clone)
        {
          curr->redirect_callee (master_clone);
          reset_edge_growth_cache (curr);
        }

      if (estimate_size_after_inlining (node, curr) > limit)
        {
          curr->redirect_callee (dest);
          reset_edge_growth_cache (curr);
          break;
        }

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl
            == curr->callee->ultimate_alias_target ()->decl)
          depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
        {
          curr->redirect_callee (dest);
          reset_edge_growth_cache (curr);
          continue;
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      if (!master_clone)
        {
          /* We need the original clone to copy around.  */
          master_clone = node->create_clone (node->decl, node->count,
                                             CGRAPH_FREQ_BASE, false, vNULL,
                                             true, NULL, NULL);
          for (e = master_clone->callees; e; e = e->next_callee)
            if (!e->inline_failed)
              clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
          curr->redirect_callee (master_clone);
          reset_edge_growth_cache (curr);
        }

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, &heap);
      n++;
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, "
             "body grown from size %i to %i, time %i to %i\n", n,
             inline_summaries->get (master_clone)->size,
             inline_summaries->get (node)->size,
             inline_summaries->get (master_clone)->time,
             inline_summaries->get (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->global.inlined_to == master_clone)
        node->remove ();
    }
  master_clone->remove ();
  return true;
}

/* Given the whole compilation unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((int64_t) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

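/* Example with assumed numbers: for a unit of 10000 insns and
   --param inline-unit-growth set to 20, the overall size may grow up to
   10000 * (100 + 20) / 100 = 12000 insns; units smaller than
   --param large-unit-insns are first rounded up to that parameter so
   tiny units are not starved of inlining.  */
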
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
          && can_inline_edge_p (edge, true)
          && want_inline_small_function_p (edge, true))
        edge->aux = heap->insert (edge_badness (edge, false), edge);
    }
}

/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
}

1620 /* Return true if speculation of edge E seems useful.
1621 If ANTICIPATE_INLINING is true, be conservative and hope that E
1622 may get inlined. */
1623
1624 bool
1625 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1626 {
1627 enum availability avail;
1628 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1629 e->caller);
1630 struct cgraph_edge *direct, *indirect;
1631 struct ipa_ref *ref;
1632
1633 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1634
1635 if (!e->maybe_hot_p ())
1636 return false;
1637
1638 /* See if IP optimizations found something potentially useful about the
1639 function. For now we look only for CONST/PURE flags. Almost everything
1640 else we propagate is useless. */
1641 if (avail >= AVAIL_AVAILABLE)
1642 {
1643 int ecf_flags = flags_from_decl_or_type (target->decl);
1644 if (ecf_flags & ECF_CONST)
1645 {
1646 e->speculative_call_info (direct, indirect, ref);
1647 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1648 return true;
1649 }
1650 else if (ecf_flags & ECF_PURE)
1651 {
1652 e->speculative_call_info (direct, indirect, ref);
1653 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1654 return true;
1655 }
1656 }
1657 /* If we did not manage to inline the function nor redirect
1658 to an ipa-cp clone (which is detected by the local flag being set),
1659 it is probably pointless to inline it unless the hardware is missing
1660 an indirect call predictor. */
1661 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1662 return false;
1663 /* For overwritable targets there is not much to do. */
1664 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1665 return false;
1666 /* OK, speculation seems interesting. */
1667 return true;
1668 }
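
/* Illustration (editor's sketch): a speculative edge is an indirect
   call that earlier analysis guessed a likely target for, e.g.

     fn (p);                    // indirect call, probably A::foo

   represented roughly as

     if (fn == A::foo)          // direct edge, may be inlined
       A::foo (p);
     else
       fn (p);                  // indirect edge kept as fallback

   speculation_useful_p above decides whether keeping this guess is
   worth the extra compare and branch.  */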
1669
1670 /* We know that EDGE is not going to be inlined.
1671 See if we can remove speculation. */
1672
1673 static void
1674 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1675 {
1676 if (edge->speculative && !speculation_useful_p (edge, false))
1677 {
1678 struct cgraph_node *node = edge->caller;
1679 struct cgraph_node *where = node->global.inlined_to
1680 ? node->global.inlined_to : node;
1681 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1682
1683 spec_rem += edge->count;
1684 edge->resolve_speculation ();
1685 reset_edge_caches (where);
1686 inline_update_overall_summary (where);
1687 update_caller_keys (edge_heap, where,
1688 updated_nodes, NULL);
1689 update_callee_keys (edge_heap, where,
1690 updated_nodes);
1691 BITMAP_FREE (updated_nodes);
1692 }
1693 }
1694
1695 /* Return true if NODE should be accounted for in the overall size estimate.
1696 Skip all nodes optimized for size so we can measure the growth of the hot
1697 part of the program regardless of the padding. */
1698
1699 bool
1700 inline_account_function_p (struct cgraph_node *node)
1701 {
1702 return (!DECL_EXTERNAL (node->decl)
1703 && !opt_for_fn (node->decl, optimize_size)
1704 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1705 }
1706
1707 /* Count the number of callers of NODE and store it into DATA (which
1708 points to an int). Worker for cgraph_for_node_and_aliases. */
1709
1710 static bool
1711 sum_callers (struct cgraph_node *node, void *data)
1712 {
1713 struct cgraph_edge *e;
1714 int *num_calls = (int *)data;
1715
1716 for (e = node->callers; e; e = e->next_caller)
1717 (*num_calls)++;
1718 return false;
1719 }
1720
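/* Typical use (mirroring inline_small_functions below):

     int num_calls = 0;
     node->call_for_symbol_and_aliases (sum_callers, &num_calls, true);

   after which num_calls holds the number of call edges reaching NODE
   and its aliases.  */
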
1721 /* We use a greedy algorithm for inlining of small functions:
1722 all inline candidates are put into a prioritized heap ordered by
1723 increasing badness.
1724 
1725 The inlining of small functions is bounded by the unit growth parameters. */
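
/* A simplified sketch of the main loop below (editor's illustration;
   the names match the real code, cache checking and error paths are
   omitted):

     heap = { E : can_inline_edge_p (E) && want_inline_small_function_p (E) }
     while (!heap.empty ())
       {
	 edge = heap.extract_min ();        // smallest badness first
	 if (badness key is stale)
	   re-insert edge with the fresh key and continue;
	 if (overall_size + growth > max_size)
	   fail with CIF_INLINE_UNIT_GROWTH_LIMIT;
	 inline_call (edge, ...);
	 update keys of affected callers and callees;
       }
*/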
1726
1727 static void
1728 inline_small_functions (void)
1729 {
1730 struct cgraph_node *node;
1731 struct cgraph_edge *edge;
1732 edge_heap_t edge_heap (sreal::min ());
1733 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1734 int min_size, max_size;
1735 auto_vec<cgraph_edge *> new_indirect_edges;
1736 int initial_size = 0;
1737 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1738 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1739 new_indirect_edges.create (8);
1740
1741 edge_removal_hook_holder
1742 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1743
1744 /* Compute overall unit size and other global parameters used by badness
1745 metrics. */
1746
1747 max_count = 0;
1748 ipa_reduced_postorder (order, true, true, NULL);
1749 free (order);
1750
1751 FOR_EACH_DEFINED_FUNCTION (node)
1752 if (!node->global.inlined_to)
1753 {
1754 if (!node->alias && node->analyzed
1755 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1756 {
1757 struct inline_summary *info = inline_summaries->get (node);
1758 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1759
1760 /* Do not account external functions: they will be optimized out
1761 if not inlined. Also only count the non-cold portion of the program. */
1762 if (inline_account_function_p (node))
1763 initial_size += info->size;
1764 info->growth = estimate_growth (node);
1765
1766 int num_calls = 0;
1767 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1768 true);
1769 if (num_calls == 1)
1770 info->single_caller = true;
1771 if (dfs && dfs->next_cycle)
1772 {
1773 struct cgraph_node *n2;
1774 int id = dfs->scc_no + 1;
1775 for (n2 = node; n2;
1776 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1777 {
1778 struct inline_summary *info2 = inline_summaries->get (n2);
1779 if (info2->scc_no)
1780 break;
1781 info2->scc_no = id;
1782 }
1783 }
1784 }
1785
1786 for (edge = node->callers; edge; edge = edge->next_caller)
1787 if (max_count < edge->count)
1788 max_count = edge->count;
1789 }
1790 ipa_free_postorder_info ();
1791 initialize_growth_caches ();
1792
1793 if (dump_file)
1794 fprintf (dump_file,
1795 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1796 initial_size);
1797
1798 overall_size = initial_size;
1799 max_size = compute_max_insns (overall_size);
1800 min_size = overall_size;
1801
1802 /* Populate the heap with all edges we might inline. */
1803
1804 FOR_EACH_DEFINED_FUNCTION (node)
1805 {
1806 bool update = false;
1807 struct cgraph_edge *next = NULL;
1808 bool has_speculative = false;
1809
1810 if (dump_file)
1811 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1812 node->name (), node->order);
1813
1814 for (edge = node->callees; edge; edge = next)
1815 {
1816 next = edge->next_callee;
1817 if (edge->inline_failed
1818 && !edge->aux
1819 && can_inline_edge_p (edge, true)
1820 && want_inline_small_function_p (edge, true))
1821 {
1822 /* The edge is not in the heap yet; update_edge_key inserts it. */
1823 gcc_assert (!edge->aux);
1824 update_edge_key (&edge_heap, edge);
1825 }
1826 if (edge->speculative)
1827 has_speculative = true;
1828 }
1829 if (has_speculative)
1830 for (edge = node->callees; edge; edge = next)
1831 if (edge->speculative && !speculation_useful_p (edge,
1832 edge->aux != NULL))
1833 {
1834 edge->resolve_speculation ();
1835 update = true;
1836 }
1837 if (update)
1838 {
1839 struct cgraph_node *where = node->global.inlined_to
1840 ? node->global.inlined_to : node;
1841 inline_update_overall_summary (where);
1842 reset_edge_caches (where);
1843 update_caller_keys (&edge_heap, where,
1844 updated_nodes, NULL);
1845 update_callee_keys (&edge_heap, where,
1846 updated_nodes);
1847 bitmap_clear (updated_nodes);
1848 }
1849 }
1850
1851 gcc_assert (in_lto_p
1852 || !max_count
1853 || (profile_info && flag_branch_probabilities));
1854
1855 while (!edge_heap.empty ())
1856 {
1857 int old_size = overall_size;
1858 struct cgraph_node *where, *callee;
1859 sreal badness = edge_heap.min_key ();
1860 sreal current_badness;
1861 int growth;
1862
1863 edge = edge_heap.extract_min ();
1864 gcc_assert (edge->aux);
1865 edge->aux = NULL;
1866 if (!edge->inline_failed || !edge->callee->analyzed)
1867 continue;
1868
1869 #if CHECKING_P
1870 /* Be sure that the caches are kept consistent. */
1871 sreal cached_badness = edge_badness (edge, false);
1872
1873 int old_size_est = estimate_edge_size (edge);
1874 int old_time_est = estimate_edge_time (edge);
1875 int old_hints_est = estimate_edge_hints (edge);
1876
1877 reset_edge_growth_cache (edge);
1878 gcc_assert (old_size_est == estimate_edge_size (edge));
1879 gcc_assert (old_time_est == estimate_edge_time (edge));
1880 /* FIXME:
1881 
1882 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1883 
1884 fails with profile feedback because some hints depend on the
1885 maybe_hot_edge_p predicate and, because the callee gets inlined into
1886 other calls, the edge may become cold.
1887 This ought to be fixed by computing relative probabilities
1888 for the given invocation, but that will be better done once the whole
1889 code is converted to sreals. Disable for now and revert to the "wrong"
1890 value so the enabled and disabled checking paths agree. */
1891 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1892
1893 /* When updating the edge costs, we only decrease badness in the keys.
1894 Increases of badness are handled lazily; when we see a key with an
1895 out-of-date value, we re-insert it now. */
1896 current_badness = edge_badness (edge, false);
1897 /* Disable checking when a profile is present, because round-off errors
1898 may cause slight deviations in the order. */
1899 gcc_assert (max_count || cached_badness == current_badness);
1900 gcc_assert (current_badness >= badness);
1901 #else
1902 current_badness = edge_badness (edge, false);
1903 #endif
1904 if (current_badness != badness)
1905 {
1906 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1907 {
1908 edge->aux = edge_heap.insert (current_badness, edge);
1909 continue;
1910 }
1911 else
1912 badness = current_badness;
1913 }
1914
1915 if (!can_inline_edge_p (edge, true))
1916 {
1917 resolve_noninline_speculation (&edge_heap, edge);
1918 continue;
1919 }
1920
1921 callee = edge->callee->ultimate_alias_target ();
1922 growth = estimate_edge_growth (edge);
1923 if (dump_file)
1924 {
1925 fprintf (dump_file,
1926 "\nConsidering %s/%i with %i size\n",
1927 callee->name (), callee->order,
1928 inline_summaries->get (callee)->size);
1929 fprintf (dump_file,
1930 " to be inlined into %s/%i in %s:%i\n"
1931 " Estimated badness is %f, frequency %.2f.\n",
1932 edge->caller->name (), edge->caller->order,
1933 edge->call_stmt
1934 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1935 edge->call_stmt))
1936 > BUILTINS_LOCATION)
1937 ? gimple_filename ((const gimple *) edge->call_stmt)
1938 : "unknown",
1939 edge->call_stmt
1940 ? gimple_lineno ((const gimple *) edge->call_stmt)
1941 : -1,
1942 badness.to_double (),
1943 edge->frequency / (double)CGRAPH_FREQ_BASE);
1944 if (edge->count)
1945 fprintf (dump_file," Called %" PRId64"x\n",
1946 edge->count);
1947 if (dump_flags & TDF_DETAILS)
1948 edge_badness (edge, true);
1949 }
1950
1951 if (overall_size + growth > max_size
1952 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1953 {
1954 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1955 report_inline_failed_reason (edge);
1956 resolve_noninline_speculation (&edge_heap, edge);
1957 continue;
1958 }
1959
1960 if (!want_inline_small_function_p (edge, true))
1961 {
1962 resolve_noninline_speculation (&edge_heap, edge);
1963 continue;
1964 }
1965
1966 /* Heuristics for inlining small functions work poorly for
1967 recursive calls, where the effect is similar to loop unrolling.
1968 When inlining such an edge seems profitable, leave the decision to
1969 the special-purpose recursive inliner. */
1970 if (edge->recursive_p ())
1971 {
1972 where = edge->caller;
1973 if (where->global.inlined_to)
1974 where = where->global.inlined_to;
1975 if (!recursive_inlining (edge,
1976 opt_for_fn (edge->caller->decl,
1977 flag_indirect_inlining)
1978 ? &new_indirect_edges : NULL))
1979 {
1980 edge->inline_failed = CIF_RECURSIVE_INLINING;
1981 resolve_noninline_speculation (&edge_heap, edge);
1982 continue;
1983 }
1984 reset_edge_caches (where);
1985 /* The recursive inliner inlines all recursive calls of the function
1986 at once. Consequently we need to update all callee keys. */
1987 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1988 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1989 update_callee_keys (&edge_heap, where, updated_nodes);
1990 bitmap_clear (updated_nodes);
1991 }
1992 else
1993 {
1994 struct cgraph_node *outer_node = NULL;
1995 int depth = 0;
1996
1997 /* Consider the case where the self-recursive function A is inlined
1998 into B. This is a desired optimization in some cases, since it
1999 leads to an effect similar to loop peeling and we might completely
2000 optimize out the recursive call. However we must be extra
2001 selective. */
2002
2003 where = edge->caller;
2004 while (where->global.inlined_to)
2005 {
2006 if (where->decl == callee->decl)
2007 outer_node = where, depth++;
2008 where = where->callers->caller;
2009 }
2010 if (outer_node
2011 && !want_inline_self_recursive_call_p (edge, outer_node,
2012 true, depth))
2013 {
2014 edge->inline_failed
2015 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2016 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2017 resolve_noninline_speculation (&edge_heap, edge);
2018 continue;
2019 }
2020 else if (depth && dump_file)
2021 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2022
2023 gcc_checking_assert (!callee->global.inlined_to);
2024 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2025 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2026
2027 reset_edge_caches (edge->callee);
2028
2029 update_callee_keys (&edge_heap, where, updated_nodes);
2030 }
2031 where = edge->caller;
2032 if (where->global.inlined_to)
2033 where = where->global.inlined_to;
2034
2035 /* Our profitability metric can depend on local properties
2036 such as the number of inlinable calls and the size of the function body.
2037 After inlining these properties might change for the function we
2038 inlined into (since its body size changed) and for the functions
2039 called by the function we inlined (since the number of their inlinable
2040 callers might change). */
2041 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2042 /* The offline copy count has possibly changed; recompute if a profile
2043 is available. */
2044 if (max_count)
2045 {
2046 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2047 if (n != edge->callee && n->analyzed)
2048 update_callee_keys (&edge_heap, n, updated_nodes);
2049 }
2050 bitmap_clear (updated_nodes);
2051
2052 if (dump_file)
2053 {
2054 fprintf (dump_file,
2055 " Inlined into %s which now has time %i and size %i,"
2056 "net change of %+i.\n",
2057 edge->caller->name (),
2058 inline_summaries->get (edge->caller)->time,
2059 inline_summaries->get (edge->caller)->size,
2060 overall_size - old_size);
2061 }
2062 if (min_size > overall_size)
2063 {
2064 min_size = overall_size;
2065 max_size = compute_max_insns (min_size);
2066
2067 if (dump_file)
2068 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2069 }
2070 }
2071
2072 free_growth_caches ();
2073 if (dump_file)
2074 fprintf (dump_file,
2075 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2076 initial_size, overall_size,
2077 initial_size ? overall_size * 100 / initial_size - 100 : 0);
2078 BITMAP_FREE (updated_nodes);
2079 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2080 }
2081
2082 /* Flatten NODE. Performed both during early inlining and
2083 at IPA inlining time. */
2084
2085 static void
2086 flatten_function (struct cgraph_node *node, bool early)
2087 {
2088 struct cgraph_edge *e;
2089
2090 /* We shouldn't be called recursively when we are being processed. */
2091 gcc_assert (node->aux == NULL);
2092
2093 node->aux = (void *) node;
2094
2095 for (e = node->callees; e; e = e->next_callee)
2096 {
2097 struct cgraph_node *orig_callee;
2098 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2099
2100 /* Have we hit a cycle? Time to give up. */
2101 if (callee->aux)
2102 {
2103 if (dump_file)
2104 fprintf (dump_file,
2105 "Not inlining %s into %s to avoid cycle.\n",
2106 xstrdup_for_dump (callee->name ()),
2107 xstrdup_for_dump (e->caller->name ()));
2108 e->inline_failed = CIF_RECURSIVE_INLINING;
2109 continue;
2110 }
2111
2112 /* When the edge is already inlined, we just need to recurse into
2113 it in order to fully flatten the leaves. */
2114 if (!e->inline_failed)
2115 {
2116 flatten_function (callee, early);
2117 continue;
2118 }
2119
2120 /* The flatten attribute needs to be processed during late inlining. For
2121 extra code quality we, however, do the flattening during early
2122 optimization too. */
2123 if (!early
2124 ? !can_inline_edge_p (e, true)
2125 : !can_early_inline_edge_p (e))
2126 continue;
2127
2128 if (e->recursive_p ())
2129 {
2130 if (dump_file)
2131 fprintf (dump_file, "Not inlining: recursive call.\n");
2132 continue;
2133 }
2134
2135 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2136 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2137 {
2138 if (dump_file)
2139 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2140 continue;
2141 }
2142
2143 /* Inline the edge and flatten the inline clone. Avoid
2144 recursing through the original node if the node was cloned. */
2145 if (dump_file)
2146 fprintf (dump_file, " Inlining %s into %s.\n",
2147 xstrdup_for_dump (callee->name ()),
2148 xstrdup_for_dump (e->caller->name ()));
2149 orig_callee = callee;
2150 inline_call (e, true, NULL, NULL, false);
2151 if (e->callee != orig_callee)
2152 orig_callee->aux = (void *) node;
2153 flatten_function (e->callee, early);
2154 if (e->callee != orig_callee)
2155 orig_callee->aux = NULL;
2156 }
2157
2158 node->aux = NULL;
2159 if (!node->global.inlined_to)
2160 inline_update_overall_summary (node);
2161 }
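
/* Example (user-level illustration): flattening is requested with the
   flatten attribute; every call reachable from the body is inlined
   into it, e.g.

     static int add (int a, int b) { return a + b; }

     __attribute__ ((flatten))
     int sum3 (int a, int b, int c)
     {
       return add (add (a, b), c);  // both calls inlined into sum3
     }
*/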
2162
2163 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2164 DATA points to the number of calls originally found so that we avoid
2165 infinite recursion. */
2166
2167 static bool
2168 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2169 hash_set<cgraph_node *> *callers)
2170 {
2171 int *num_calls = (int *)data;
2172 bool callee_removed = false;
2173
2174 while (node->callers && !node->global.inlined_to)
2175 {
2176 struct cgraph_node *caller = node->callers->caller;
2177
2178 if (!can_inline_edge_p (node->callers, true)
2179 || node->callers->recursive_p ())
2180 {
2181 if (dump_file)
2182 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2183 *num_calls = 0;
2184 return false;
2185 }
2186
2187 if (dump_file)
2188 {
2189 fprintf (dump_file,
2190 "\nInlining %s size %i.\n",
2191 node->name (),
2192 inline_summaries->get (node)->size);
2193 fprintf (dump_file,
2194 " Called once from %s %i insns.\n",
2195 node->callers->caller->name (),
2196 inline_summaries->get (node->callers->caller)->size);
2197 }
2198
2199 /* Remember which callers we inlined into, delaying the update of the
2200 overall summary. */
2201 callers->add (node->callers->caller);
2202 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2203 if (dump_file)
2204 fprintf (dump_file,
2205 " Inlined into %s which now has %i size\n",
2206 caller->name (),
2207 inline_summaries->get (caller)->size);
2208 if (!(*num_calls)--)
2209 {
2210 if (dump_file)
2211 fprintf (dump_file, "New calls found; giving up.\n");
2212 return callee_removed;
2213 }
2214 if (callee_removed)
2215 return true;
2216 }
2217 return false;
2218 }
2219
2220 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2221 update. */
2222
2223 static bool
2224 inline_to_all_callers (struct cgraph_node *node, void *data)
2225 {
2226 hash_set<cgraph_node *> callers;
2227 bool res = inline_to_all_callers_1 (node, data, &callers);
2228 /* Perform the delayed update of the overall summary of all callers
2229 processed. This avoids quadratic behavior in the cases where
2230 we have a lot of calls to the same function. */
2231 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2232 i != callers.end (); ++i)
2233 inline_update_overall_summary (*i);
2234 return res;
2235 }
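
/* Cost illustration for the delayed update above (editor's note): with
   N calls to the same small function, updating the caller's overall
   summary after each individual inline would repeat the work for the
   same caller up to N times, giving roughly O(N^2) behavior; collecting
   the distinct callers in a hash_set and updating each once keeps the
   work linear in the number of inlined calls.  */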
2236
2237 /* Output overall time estimate. */
2238 static void
2239 dump_overall_stats (void)
2240 {
2241 int64_t sum_weighted = 0, sum = 0;
2242 struct cgraph_node *node;
2243
2244 FOR_EACH_DEFINED_FUNCTION (node)
2245 if (!node->global.inlined_to
2246 && !node->alias)
2247 {
2248 int time = inline_summaries->get (node)->time;
2249 sum += time;
2250 sum_weighted += time * node->count;
2251 }
2252 fprintf (dump_file, "Overall time estimate: "
2253 "%" PRId64" weighted by profile: "
2254 "%" PRId64"\n", sum, sum_weighted);
2255 }
2256
2257 /* Output some useful stats about inlining. */
2258
2259 static void
2260 dump_inline_stats (void)
2261 {
2262 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2263 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2264 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2265 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2266 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2267 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2268 int64_t reason[CIF_N_REASONS][3];
2269 int i;
2270 struct cgraph_node *node;
2271
2272 memset (reason, 0, sizeof (reason));
2273 FOR_EACH_DEFINED_FUNCTION (node)
2274 {
2275 struct cgraph_edge *e;
2276 for (e = node->callees; e; e = e->next_callee)
2277 {
2278 if (e->inline_failed)
2279 {
2280 reason[(int) e->inline_failed][0] += e->count;
2281 reason[(int) e->inline_failed][1] += e->frequency;
2282 reason[(int) e->inline_failed][2] ++;
2283 if (DECL_VIRTUAL_P (e->callee->decl))
2284 {
2285 if (e->indirect_inlining_edge)
2286 noninlined_virt_indir_cnt += e->count;
2287 else
2288 noninlined_virt_cnt += e->count;
2289 }
2290 else
2291 {
2292 if (e->indirect_inlining_edge)
2293 noninlined_indir_cnt += e->count;
2294 else
2295 noninlined_cnt += e->count;
2296 }
2297 }
2298 else
2299 {
2300 if (e->speculative)
2301 {
2302 if (DECL_VIRTUAL_P (e->callee->decl))
2303 inlined_speculative_ply += e->count;
2304 else
2305 inlined_speculative += e->count;
2306 }
2307 else if (DECL_VIRTUAL_P (e->callee->decl))
2308 {
2309 if (e->indirect_inlining_edge)
2310 inlined_virt_indir_cnt += e->count;
2311 else
2312 inlined_virt_cnt += e->count;
2313 }
2314 else
2315 {
2316 if (e->indirect_inlining_edge)
2317 inlined_indir_cnt += e->count;
2318 else
2319 inlined_cnt += e->count;
2320 }
2321 }
2322 }
2323 for (e = node->indirect_calls; e; e = e->next_callee)
2324 if (e->indirect_info->polymorphic)
2325 indirect_poly_cnt += e->count;
2326 else
2327 indirect_cnt += e->count;
2328 }
2329 if (max_count)
2330 {
2331 fprintf (dump_file,
2332 "Inlined %" PRId64 " + speculative "
2333 "%" PRId64 " + speculative polymorphic "
2334 "%" PRId64 " + previously indirect "
2335 "%" PRId64 " + virtual "
2336 "%" PRId64 " + virtual and previously indirect "
2337 "%" PRId64 "\n" "Not inlined "
2338 "%" PRId64 " + previously indirect "
2339 "%" PRId64 " + virtual "
2340 "%" PRId64 " + virtual and previously indirect "
2341 "%" PRId64 " + stil indirect "
2342 "%" PRId64 " + still indirect polymorphic "
2343 "%" PRId64 "\n", inlined_cnt,
2344 inlined_speculative, inlined_speculative_ply,
2345 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2346 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2347 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2348 fprintf (dump_file,
2349 "Removed speculations %" PRId64 "\n",
2350 spec_rem);
2351 }
2352 dump_overall_stats ();
2353 fprintf (dump_file, "\nWhy inlining failed:\n");
2354 for (i = 0; i < CIF_N_REASONS; i++)
2355 if (reason[i][2])
2356 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2357 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2358 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2359 }
2360
2361 /* Decide on the inlining. We do so in topological order to avoid the
2362 expense of updating data structures. */
2363
2364 static unsigned int
2365 ipa_inline (void)
2366 {
2367 struct cgraph_node *node;
2368 int nnodes;
2369 struct cgraph_node **order;
2370 int i;
2371 int cold;
2372 bool remove_functions = false;
2373
2374 if (!optimize)
2375 return 0;
2376
2377 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2378 percent_rec = (sreal) 1 / (sreal) 100;
2379
2380 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2381
2382 if (in_lto_p && optimize)
2383 ipa_update_after_lto_read ();
2384
2385 if (dump_file)
2386 dump_inline_summaries (dump_file);
2387
2388 nnodes = ipa_reverse_postorder (order);
2389
2390 FOR_EACH_FUNCTION (node)
2391 {
2392 node->aux = 0;
2393
2394 /* Recompute the default reasons for inlining because they may have
2395 changed during merging. */
2396 if (in_lto_p)
2397 {
2398 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2399 {
2400 gcc_assert (e->inline_failed);
2401 initialize_inline_failed (e);
2402 }
2403 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2404 initialize_inline_failed (e);
2405 }
2406 }
2407
2408 if (dump_file)
2409 fprintf (dump_file, "\nFlattening functions:\n");
2410
2411 /* In the first pass handle functions to be flattened. Do this with
2412 a priority so none of our later choices will make this impossible. */
2413 for (i = nnodes - 1; i >= 0; i--)
2414 {
2415 node = order[i];
2416
2417 /* Handle nodes to be flattened.
2418 Ideally when processing callees we would stop inlining at the
2419 entry of cycles, possibly cloning that entry point and trying
2420 to flatten the cycle itself, turning it into a self-recursive
2421 function. */
2422 if (lookup_attribute ("flatten",
2423 DECL_ATTRIBUTES (node->decl)) != NULL)
2424 {
2425 if (dump_file)
2426 fprintf (dump_file,
2427 "Flattening %s\n", node->name ());
2428 flatten_function (node, false);
2429 }
2430 }
2431 if (dump_file)
2432 dump_overall_stats ();
2433
2434 inline_small_functions ();
2435
2436 gcc_assert (symtab->state == IPA_SSA);
2437 symtab->state = IPA_SSA_AFTER_INLINING;
2438 /* Do first after-inlining removal. We want to remove all "stale" extern
2439 inline functions and virtual functions so we really know what is called
2440 once. */
2441 symtab->remove_unreachable_nodes (dump_file);
2442 free (order);
2443
2444 /* Inline functions with the property that after inlining into all callers
2445 the code size will shrink because the out-of-line copy is eliminated.
2446 We do this regardless of the callee size as long as the function growth
2447 limits are met. */
2448 if (dump_file)
2449 fprintf (dump_file,
2450 "\nDeciding on functions to be inlined into all callers and "
2451 "removing useless speculations:\n");
2452
2453 /* Inlining one function called once has a good chance of preventing
2454 inlining of another function into the same callee. Ideally we should
2455 work in priority order, but inlining hot functions first is probably
2456 a good cut without the extra pain of maintaining the queue.
2457 
2458 ??? This is not really fitting the bill perfectly: inlining a function
2459 into a callee often leads to better optimization of the callee due to
2460 increased context for the optimization.
2461 For example if the main() function calls a function that outputs help
2462 and then a function that does the main optimization, we should inline
2463 the second with priority even if both calls are cold by themselves.
2464 
2465 We probably want to implement a new predicate replacing our use of
2466 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2467 to be hot. */
2468 for (cold = 0; cold <= 1; cold++)
2469 {
2470 FOR_EACH_DEFINED_FUNCTION (node)
2471 {
2472 struct cgraph_edge *edge, *next;
2473 bool update = false;
2474
2475 for (edge = node->callees; edge; edge = next)
2476 {
2477 next = edge->next_callee;
2478 if (edge->speculative && !speculation_useful_p (edge, false))
2479 {
2480 edge->resolve_speculation ();
2481 spec_rem += edge->count;
2482 update = true;
2483 remove_functions = true;
2484 }
2485 }
2486 if (update)
2487 {
2488 struct cgraph_node *where = node->global.inlined_to
2489 ? node->global.inlined_to : node;
2490 reset_edge_caches (where);
2491 inline_update_overall_summary (where);
2492 }
2493 if (want_inline_function_to_all_callers_p (node, cold))
2494 {
2495 int num_calls = 0;
2496 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2497 true);
2498 while (node->call_for_symbol_and_aliases
2499 (inline_to_all_callers, &num_calls, true))
2500 ;
2501 remove_functions = true;
2502 }
2503 }
2504 }
2505
2506 /* Free ipa-prop structures if they are no longer needed. */
2507 if (optimize)
2508 ipa_free_all_structures_after_iinln ();
2509
2510 if (dump_file)
2511 {
2512 fprintf (dump_file,
2513 "\nInlined %i calls, eliminated %i functions\n\n",
2514 ncalls_inlined, nfunctions_inlined);
2515 dump_inline_stats ();
2516 }
2517
2518 if (dump_file)
2519 dump_inline_summaries (dump_file);
2520 /* In WPA we use the inline summaries for the partitioning process. */
2521 if (!flag_wpa)
2522 inline_free_summary ();
2523 return remove_functions ? TODO_remove_functions : 0;
2524 }
2525
2526 /* Inline always-inline function calls in NODE. */
2527
2528 static bool
2529 inline_always_inline_functions (struct cgraph_node *node)
2530 {
2531 struct cgraph_edge *e;
2532 bool inlined = false;
2533
2534 for (e = node->callees; e; e = e->next_callee)
2535 {
2536 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2537 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2538 continue;
2539
2540 if (e->recursive_p ())
2541 {
2542 if (dump_file)
2543 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2544 e->callee->name ());
2545 e->inline_failed = CIF_RECURSIVE_INLINING;
2546 continue;
2547 }
2548
2549 if (!can_early_inline_edge_p (e))
2550 {
2551 /* Set inlined to true if the callee is marked "always_inline" but
2552 is not inlinable. This will allow flagging an error later in
2553 expand_call_inline in tree-inline.c. */
2554 if (lookup_attribute ("always_inline",
2555 DECL_ATTRIBUTES (callee->decl)) != NULL)
2556 inlined = true;
2557 continue;
2558 }
2559
2560 if (dump_file)
2561 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2562 xstrdup_for_dump (e->callee->name ()),
2563 xstrdup_for_dump (e->caller->name ()));
2564 inline_call (e, true, NULL, NULL, false);
2565 inlined = true;
2566 }
2567 if (inlined)
2568 inline_update_overall_summary (node);
2569
2570 return inlined;
2571 }
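
/* Example (user-level illustration): DECL_DISREGARD_INLINE_LIMITS is
   set for functions declared with the always_inline attribute, e.g.

     static inline int twice (int x) __attribute__ ((always_inline));
     static inline int twice (int x) { return 2 * x; }

   Calls to such functions are inlined here regardless of size limits;
   when that is impossible, expand_call_inline in tree-inline.c reports
   an error later, as noted in the comment above.  */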
2572
2573 /* Inline small functions called from NODE during early inlining.
2574 Return true if anything was inlined. */
2575
2576 static bool
2577 early_inline_small_functions (struct cgraph_node *node)
2578 {
2579 struct cgraph_edge *e;
2580 bool inlined = false;
2581
2582 for (e = node->callees; e; e = e->next_callee)
2583 {
2584 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2585 if (!inline_summaries->get (callee)->inlinable
2586 || !e->inline_failed)
2587 continue;
2588
2589 /* Do not consider functions not declared inline unless -finline-small-functions or -finline-functions is in effect. */
2590 if (!DECL_DECLARED_INLINE_P (callee->decl)
2591 && !opt_for_fn (node->decl, flag_inline_small_functions)
2592 && !opt_for_fn (node->decl, flag_inline_functions))
2593 continue;
2594
2595 if (dump_file)
2596 fprintf (dump_file, "Considering inline candidate %s.\n",
2597 callee->name ());
2598
2599 if (!can_early_inline_edge_p (e))
2600 continue;
2601
2602 if (e->recursive_p ())
2603 {
2604 if (dump_file)
2605 fprintf (dump_file, " Not inlining: recursive call.\n");
2606 continue;
2607 }
2608
2609 if (!want_early_inline_function_p (e))
2610 continue;
2611
2612 if (dump_file)
2613 fprintf (dump_file, " Inlining %s into %s.\n",
2614 xstrdup_for_dump (callee->name ()),
2615 xstrdup_for_dump (e->caller->name ()));
2616 inline_call (e, true, NULL, NULL, false);
2617 inlined = true;
2618 }
2619
2620 if (inlined)
2621 inline_update_overall_summary (node);
2622
2623 return inlined;
2624 }
2625
2626 unsigned int
2627 early_inliner (function *fun)
2628 {
2629 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2630 struct cgraph_edge *edge;
2631 unsigned int todo = 0;
2632 int iterations = 0;
2633 bool inlined = false;
2634
2635 if (seen_error ())
2636 return 0;
2637
2638 /* Do nothing if data structures for the ipa inliner are already computed.
2639 This happens when some pass decides to construct a new function and
2640 cgraph_add_new_function calls lowering passes and early optimization on
2641 it. This may confuse us when the early inliner decides to inline a call
2642 to a function clone, because function clones don't have parameter lists
2643 in ipa-prop matching their signatures. */
2644 if (ipa_node_params_sum)
2645 return 0;
2646
2647 if (flag_checking)
2648 node->verify ();
2649 node->remove_all_references ();
2650
2651 /* Rebuild this reference because it doesn't depend on the
2652 function's body and is required to pass cgraph_node
2653 verification. */
2654 if (node->instrumented_version
2655 && !node->instrumentation_clone)
2656 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2657
2658 /* Even when not optimizing or not inlining, inline always-inline
2659 functions. */
2660 inlined = inline_always_inline_functions (node);
2661
2662 if (!optimize
2663 || flag_no_inline
2664 || !flag_early_inlining
2665 /* Never inline regular functions into always-inline functions
2666 during incremental inlining. This sucks, as functions calling
2667 always-inline functions will get less optimized, but at the
2668 same time inlining functions that call always-inline
2669 functions into an always-inline function might introduce
2670 cycles of edges to be always inlined in the callgraph.
2671 
2672 We might want to be smarter and just avoid this type of inlining. */
2673 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2674 && lookup_attribute ("always_inline",
2675 DECL_ATTRIBUTES (node->decl))))
2676 ;
2677 else if (lookup_attribute ("flatten",
2678 DECL_ATTRIBUTES (node->decl)) != NULL)
2679 {
2680 /* When the function is marked to be flattened, recursively inline
2681 all calls in it. */
2682 if (dump_file)
2683 fprintf (dump_file,
2684 "Flattening %s\n", node->name ());
2685 flatten_function (node, true);
2686 inlined = true;
2687 }
2688 else
2689 {
2690 /* If some always_inline functions were inlined, apply the changes.
2691 This way we will not account always-inline functions in the growth
2692 limits, and moreover we will inline calls from always-inlines that
2693 we skipped previously because of the conditional above. */
2694 if (inlined)
2695 {
2696 timevar_push (TV_INTEGRATION);
2697 todo |= optimize_inline_calls (current_function_decl);
2698 /* optimize_inline_calls call above might have introduced new
2699 statements that don't have inline parameters computed. */
2700 for (edge = node->callees; edge; edge = edge->next_callee)
2701 {
2702 if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
2703 {
2704 struct inline_edge_summary *es = inline_edge_summary (edge);
2705 es->call_stmt_size
2706 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2707 es->call_stmt_time
2708 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2709 }
2710 }
2711 inline_update_overall_summary (node);
2712 inlined = false;
2713 timevar_pop (TV_INTEGRATION);
2714 }
2715 /* We iterate incremental inlining to get trivial cases of indirect
2716 inlining. */
2717 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2718 && early_inline_small_functions (node))
2719 {
2720 timevar_push (TV_INTEGRATION);
2721 todo |= optimize_inline_calls (current_function_decl);
2722
2723 /* Technically we ought to recompute the inline parameters so the new
2724 iteration of the early inliner works as expected. However, the values
2725 are approximately right, and thus we only need to update the edge
2726 info that might be cleared out for newly discovered edges. */
2727 for (edge = node->callees; edge; edge = edge->next_callee)
2728 {
2729 /* We have no summary for new bound store calls yet. */
2730 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2731 {
2732 struct inline_edge_summary *es = inline_edge_summary (edge);
2733 es->call_stmt_size
2734 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2735 es->call_stmt_time
2736 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2737 }
2738 if (edge->callee->decl
2739 && !gimple_check_call_matching_types (
2740 edge->call_stmt, edge->callee->decl, false))
2741 {
2742 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2743 edge->call_stmt_cannot_inline_p = true;
2744 }
2745 }
2746 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2747 inline_update_overall_summary (node);
2748 timevar_pop (TV_INTEGRATION);
2749 iterations++;
2750 inlined = false;
2751 }
2752 if (dump_file)
2753 fprintf (dump_file, "Iterations: %i\n", iterations);
2754 }
2755
2756 if (inlined)
2757 {
2758 timevar_push (TV_INTEGRATION);
2759 todo |= optimize_inline_calls (current_function_decl);
2760 timevar_pop (TV_INTEGRATION);
2761 }
2762
2763 fun->always_inline_functions_inlined = true;
2764
2765 return todo;
2766 }
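
/* Note (editor's addition): the iteration count above is capped by
   PARAM_EARLY_INLINER_MAX_ITERATIONS, which can be tuned from the
   command line, e.g.

     gcc -O2 --param early-inliner-max-iterations=2 file.c  */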
2767
2768 /* Do inlining of small functions. Doing so early helps profiling and other
2769 passes to be somewhat more effective and avoids some code duplication in
2770 the later real inlining pass for testcases with very many function calls. */
2771
2772 namespace {
2773
2774 const pass_data pass_data_early_inline =
2775 {
2776 GIMPLE_PASS, /* type */
2777 "einline", /* name */
2778 OPTGROUP_INLINE, /* optinfo_flags */
2779 TV_EARLY_INLINING, /* tv_id */
2780 PROP_ssa, /* properties_required */
2781 0, /* properties_provided */
2782 0, /* properties_destroyed */
2783 0, /* todo_flags_start */
2784 0, /* todo_flags_finish */
2785 };
2786
2787 class pass_early_inline : public gimple_opt_pass
2788 {
2789 public:
2790 pass_early_inline (gcc::context *ctxt)
2791 : gimple_opt_pass (pass_data_early_inline, ctxt)
2792 {}
2793
2794 /* opt_pass methods: */
2795 virtual unsigned int execute (function *);
2796
2797 }; // class pass_early_inline
2798
2799 unsigned int
2800 pass_early_inline::execute (function *fun)
2801 {
2802 return early_inliner (fun);
2803 }
2804
2805 } // anon namespace
2806
2807 gimple_opt_pass *
2808 make_pass_early_inline (gcc::context *ctxt)
2809 {
2810 return new pass_early_inline (ctxt);
2811 }
2812
2813 namespace {
2814
2815 const pass_data pass_data_ipa_inline =
2816 {
2817 IPA_PASS, /* type */
2818 "inline", /* name */
2819 OPTGROUP_INLINE, /* optinfo_flags */
2820 TV_IPA_INLINING, /* tv_id */
2821 0, /* properties_required */
2822 0, /* properties_provided */
2823 0, /* properties_destroyed */
2824 0, /* todo_flags_start */
2825 ( TODO_dump_symtab ), /* todo_flags_finish */
2826 };
2827
2828 class pass_ipa_inline : public ipa_opt_pass_d
2829 {
2830 public:
2831 pass_ipa_inline (gcc::context *ctxt)
2832 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2833 inline_generate_summary, /* generate_summary */
2834 inline_write_summary, /* write_summary */
2835 inline_read_summary, /* read_summary */
2836 NULL, /* write_optimization_summary */
2837 NULL, /* read_optimization_summary */
2838 NULL, /* stmt_fixup */
2839 0, /* function_transform_todo_flags_start */
2840 inline_transform, /* function_transform */
2841 NULL) /* variable_transform */
2842 {}
2843
2844 /* opt_pass methods: */
2845 virtual unsigned int execute (function *) { return ipa_inline (); }
2846
2847 }; // class pass_ipa_inline
2848
2849 } // anon namespace
2850
2851 ipa_opt_pass_d *
2852 make_pass_ipa_inline (gcc::context *ctxt)
2853 {
2854 return new pass_ipa_inline (ctxt);
2855 }