1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Inlining decision heuristics
23
24 The implementation of the inliner is organized as follows:
25
26 inlining heuristics limits
27
28 can_inline_edge_p allows checking that a particular inlining is allowed
29 by the limits specified by the user (allowed function growth, stack
30 growth and so on).
31
32 Functions are inlined when it is obvious the result is profitable (such
33 as functions called once or when inlining reduces code size).
34 In addition to that we perform inlining of small functions and recursive
35 inlining.
36
37 inlining heuristics
38
39 The inliner itself is split into two passes:
40
41 pass_early_inlining
42
43 A simple local inlining pass that inlines callees into the current
44 function. This pass makes no use of whole-unit analysis and thus can
45 make only very simple decisions based on local properties.
46
47 The strength of the pass is that it is run in topological order
48 (reverse postorder) on the callgraph. Functions are converted into SSA
49 form just before this pass and optimized subsequently. As a result, the
50 callees of the function seen by the early inliner have already been
51 optimized, and the results of early inlining add a lot of optimization
52 opportunities for the local optimizations.
53
54 The pass handles the obvious inlining decisions within the compilation
55 unit - inlining auto-inline functions, inlining for size and
56 flattening.
57
58 The main strength of the pass is its ability to eliminate the
59 abstraction penalty in C++ code (via a combination of inlining and
60 early optimization) and thus improve the quality of the analysis done
61 by the real IPA optimizers.
62
63 Because of the lack of whole-unit knowledge, the pass cannot really
64 make good code size/performance tradeoffs. It does, however, perform
65 very simple speculative inlining, allowing code size to grow by
66 EARLY_INLINING_INSNS when the callee is a leaf function. In this case
67 the optimizations performed later are very likely to eliminate the cost.
68
69 pass_ipa_inline
70
71 This is the real inliner, able to handle inlining with whole-program
72 knowledge. It performs the following steps:
73
74 1) inlining of small functions. This is implemented by a greedy
75 algorithm ordering all inlinable cgraph edges by their badness and
76 inlining them in this order as long as the inline limits allow doing so.
77
78 This heuristic is not very good at inlining recursive calls. Recursive
79 calls can be inlined with results similar to loop unrolling. To do so,
80 a special-purpose recursive inliner is executed on a function when a
81 recursive edge is met as a viable candidate.
82
83 2) Unreachable functions are removed from the callgraph. Inlining
84 leads to devirtualization and other modifications of the callgraph, so
85 functions may become unreachable during the process. Also functions
86 declared extern inline and virtual functions are removed, since after
87 inlining we no longer need the offline bodies.
88
89 3) Functions called once and not exported from the unit are inlined.
90 This should almost always lead to a reduction of code size by
91 eliminating the need for an offline copy of the function. */
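/* A rough sketch of the greedy step 1 above (illustrative pseudo-code
   only; the real loop in inline_small_functions below also handles
   recursion, inline hints and cache maintenance):

     put every inlinable edge into a heap keyed by edge_badness
     while the heap is not empty:
       e = edge with the smallest badness
       if inlining e keeps unit and function growth within the limits:
         inline e and re-key the affected caller/callee edges  */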
92
93 #include "config.h"
94 #include "system.h"
95 #include "coretypes.h"
96 #include "tm.h"
97 #include "tree.h"
98 #include "tree-inline.h"
99 #include "langhooks.h"
100 #include "flags.h"
101 #include "cgraph.h"
102 #include "diagnostic.h"
103 #include "gimple-pretty-print.h"
104 #include "params.h"
105 #include "fibheap.h"
106 #include "intl.h"
107 #include "tree-pass.h"
108 #include "coverage.h"
109 #include "ggc.h"
110 #include "rtl.h"
111 #include "tree-flow.h"
112 #include "ipa-prop.h"
113 #include "except.h"
114 #include "target.h"
115 #include "ipa-inline.h"
116 #include "ipa-utils.h"
117
118 /* Statistics we collect about the inlining algorithm. */
119 static int overall_size;
120 static gcov_type max_count;
121
122 /* Return false when inlining edge E would lead to violating
123 limits on function unit growth or stack usage growth.
124
125 The relative function body growth limit is present generally
126 to avoid problems with the non-linear behavior of the compiler.
127 To allow inlining huge functions into a tiny wrapper, the limit
128 is always based on the bigger of the two functions considered.
129
130 For stack growth limits we always base the growth on the stack
131 usage of the caller. We want to prevent applications from
132 segfaulting on stack overflow when functions with huge stack
133 frames get inlined. */
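/* A worked illustration of the size limit below, using hypothetical
   parameter values (the real defaults come from the --param machinery
   and may differ): suppose the largest body on the inline chain has
   self_size 1000 and --param large-function-growth is 100.  Then
     limit = 1000 + 1000 * 100 / 100 = 2000,
   and the inlining is rejected only if the estimated size after
   inlining does not shrink the function, exceeds
   --param large-function-insns and exceeds this 2000-insn limit.
   The stack size limit is grown analogously by
   --param stack-frame-growth percent.  */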
134
135 static bool
136 caller_growth_limits (struct cgraph_edge *e)
137 {
138 struct cgraph_node *to = e->caller;
139 struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
140 int newsize;
141 int limit = 0;
142 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
143 struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
144
145 /* Look for the function e->caller is inlined into. While doing
146 so, work out the largest function body on the way. As
147 described above, we want to base our function growth
148 limits on that; not on the self size of the
149 outer function, nor on the self size of the inline code
150 we immediately inline into. This is the most relaxed
151 interpretation of the rule "do not grow large functions
152 too much in order to prevent the compiler from exploding". */
153 while (true)
154 {
155 info = inline_summary (to);
156 if (limit < info->self_size)
157 limit = info->self_size;
158 if (stack_size_limit < info->estimated_self_stack_size)
159 stack_size_limit = info->estimated_self_stack_size;
160 if (to->global.inlined_to)
161 to = to->callers->caller;
162 else
163 break;
164 }
165
166 what_info = inline_summary (what);
167
168 if (limit < what_info->self_size)
169 limit = what_info->self_size;
170
171 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
172
173 /* Check the size after inlining against the function limits. But allow
174 the function to shrink if it went over the limits by forced inlining. */
175 newsize = estimate_size_after_inlining (to, e);
176 if (newsize >= info->size
177 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
178 && newsize > limit)
179 {
180 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
181 return false;
182 }
183
184 if (!what_info->estimated_stack_size)
185 return true;
186
187 /* FIXME: The stack size limit often prevents inlining in Fortran programs
188 due to the large I/O data structures used by the Fortran front end.
189 We ought to ignore this limit when we know that the edge is executed
190 on every invocation of the caller (i.e. its call statement dominates
191 the exit block). We do not track this information, yet. */
192 stack_size_limit += ((gcov_type)stack_size_limit
193 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
194
195 inlined_stack = (outer_info->stack_frame_offset
196 + outer_info->estimated_self_stack_size
197 + what_info->estimated_stack_size);
198 /* Check the new stack consumption against the stack consumption
199 at the place the stack is used. */
200 if (inlined_stack > stack_size_limit
201 /* If the function already has large stack usage from a sibling
202 inline call, we can inline, too.
203 This bit overoptimistically assumes that we are good at stack
204 packing. */
205 && inlined_stack > info->estimated_stack_size
206 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
207 {
208 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
209 return false;
210 }
211 return true;
212 }
213
214 /* Dump info about why inlining has failed. */
215
216 static void
217 report_inline_failed_reason (struct cgraph_edge *e)
218 {
219 if (dump_file)
220 {
221 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
222 xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
223 xstrdup (cgraph_node_name (e->callee)), e->callee->uid,
224 cgraph_inline_failed_string (e->inline_failed));
225 }
226 }
227
228 /* Decide if we can inline the edge and possibly update the
229 inline_failed reason.
230 We check whether inlining is possible at all and whether
231 caller growth limits allow doing so.
232
233 If REPORT is true, output the reason to the dump file. */
234
235 static bool
236 can_inline_edge_p (struct cgraph_edge *e, bool report)
237 {
238 bool inlinable = true;
239 enum availability avail;
240 struct cgraph_node *callee
241 = cgraph_function_or_thunk_node (e->callee, &avail);
242 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->symbol.decl);
243 tree callee_tree
244 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->symbol.decl) : NULL;
245 struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
246 struct function *callee_cfun
247 = callee ? DECL_STRUCT_FUNCTION (callee->symbol.decl) : NULL;
248
249 if (!caller_cfun && e->caller->clone_of)
250 caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->symbol.decl);
251
252 if (!callee_cfun && callee && callee->clone_of)
253 callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->symbol.decl);
254
255 gcc_assert (e->inline_failed);
256
257 if (!callee || !callee->analyzed)
258 {
259 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
260 inlinable = false;
261 }
262 else if (!inline_summary (callee)->inlinable)
263 {
264 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
265 inlinable = false;
266 }
267 else if (avail <= AVAIL_OVERWRITABLE)
268 {
269 e->inline_failed = CIF_OVERWRITABLE;
270 return false;
271 }
272 else if (e->call_stmt_cannot_inline_p)
273 {
274 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
275 inlinable = false;
276 }
277 /* Don't inline if the functions have different EH personalities. */
278 else if (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
279 && DECL_FUNCTION_PERSONALITY (callee->symbol.decl)
280 && (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
281 != DECL_FUNCTION_PERSONALITY (callee->symbol.decl)))
282 {
283 e->inline_failed = CIF_EH_PERSONALITY;
284 inlinable = false;
285 }
286 /* TM pure functions should not be inlined into non-TM_pure
287 functions. */
288 else if (is_tm_pure (callee->symbol.decl)
289 && !is_tm_pure (e->caller->symbol.decl))
290 {
291 e->inline_failed = CIF_UNSPECIFIED;
292 inlinable = false;
293 }
294 /* Don't inline if the callee can throw non-call exceptions but the
295 caller cannot.
296 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
297 Move the flag into cgraph node or mirror it in the inline summary. */
298 else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
299 && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
300 {
301 e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
302 inlinable = false;
303 }
304 /* Check compatibility of target optimization options. */
305 else if (!targetm.target_option.can_inline_p (e->caller->symbol.decl,
306 callee->symbol.decl))
307 {
308 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
309 inlinable = false;
310 }
311 /* Check if caller growth allows the inlining. */
312 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl)
313 && !lookup_attribute ("flatten",
314 DECL_ATTRIBUTES
315 (e->caller->global.inlined_to
316 ? e->caller->global.inlined_to->symbol.decl
317 : e->caller->symbol.decl))
318 && !caller_growth_limits (e))
319 inlinable = false;
320 /* Don't inline a function with a higher optimization level than the
321 caller. FIXME: this is really just the tip of the iceberg of
322 handling the optimization attribute. */
323 else if (caller_tree != callee_tree)
324 {
325 struct cl_optimization *caller_opt
326 = TREE_OPTIMIZATION ((caller_tree)
327 ? caller_tree
328 : optimization_default_node);
329
330 struct cl_optimization *callee_opt
331 = TREE_OPTIMIZATION ((callee_tree)
332 ? callee_tree
333 : optimization_default_node);
334
335 if (((caller_opt->x_optimize > callee_opt->x_optimize)
336 || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
337 /* gcc.dg/pr43564.c. Look at forced inline even in -O0. */
338 && !DECL_DISREGARD_INLINE_LIMITS (e->callee->symbol.decl))
339 {
340 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
341 inlinable = false;
342 }
343 }
344
345 if (!inlinable && report)
346 report_inline_failed_reason (e);
347 return inlinable;
348 }
349
350
351 /* Return true if the edge E is inlinable during early inlining. */
352
353 static bool
354 can_early_inline_edge_p (struct cgraph_edge *e)
355 {
356 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
357 NULL);
358 /* The early inliner might get called at WPA stage when an IPA pass adds
359 a new function. In this case we cannot really do any early inlining
360 because function bodies are missing. */
361 if (!gimple_has_body_p (callee->symbol.decl))
362 {
363 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
364 return false;
365 }
366 /* In the early inliner some of the callees may not be in SSA form yet
367 (i.e. the callgraph is cyclic and the callee has not been processed
368 by the early inliner yet). We don't have a CIF code for this
369 case; later we will re-do the decision in the real inliner. */
370 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->symbol.decl))
371 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
372 {
373 if (dump_file)
374 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
375 return false;
376 }
377 if (!can_inline_edge_p (e, true))
378 return false;
379 return true;
380 }
381
382
383 /* Return true when N is a leaf function. Accept cheap builtins
384 in leaf functions. */
385
386 static bool
387 leaf_node_p (struct cgraph_node *n)
388 {
389 struct cgraph_edge *e;
390 for (e = n->callees; e; e = e->next_callee)
391 if (!is_inexpensive_builtin (e->callee->symbol.decl))
392 return false;
393 return true;
394 }
395
396
397 /* Return true if we are interested in inlining the small function via edge E during early inlining. */
398
399 static bool
400 want_early_inline_function_p (struct cgraph_edge *e)
401 {
402 bool want_inline = true;
403 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
404
405 if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
406 ;
407 else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
408 && !flag_inline_small_functions)
409 {
410 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
411 report_inline_failed_reason (e);
412 want_inline = false;
413 }
414 else
415 {
416 int growth = estimate_edge_growth (e);
417 if (growth <= 0)
418 ;
419 else if (!cgraph_maybe_hot_edge_p (e)
420 && growth > 0)
421 {
422 if (dump_file)
423 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
424 "call is cold and code would grow by %i\n",
425 xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
426 xstrdup (cgraph_node_name (callee)), callee->uid,
427 growth);
428 want_inline = false;
429 }
430 else if (!leaf_node_p (callee)
431 && growth > 0)
432 {
433 if (dump_file)
434 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
435 "callee is not leaf and code would grow by %i\n",
436 xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
437 xstrdup (cgraph_node_name (callee)), callee->uid,
438 growth);
439 want_inline = false;
440 }
441 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
442 {
443 if (dump_file)
444 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
445 "growth %i exceeds --param early-inlining-insns\n",
446 xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
447 xstrdup (cgraph_node_name (callee)), callee->uid,
448 growth);
449 want_inline = false;
450 }
451 }
452 return want_inline;
453 }
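/* For instance (with a hypothetical --param early-inlining-insns of 10):
   a hot call to a leaf callee whose inlining grows the caller by 8
   insns is accepted above, while a cold call or a call to a non-leaf
   callee is rejected as soon as inlining grows the code at all, and
   even a hot leaf call is rejected once the growth exceeds 10 insns.  */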
454
455 /* Return true if we are interested in inlining the small function via edge E.
456 When REPORT is true, report the reason to the dump file. */
457
458 static bool
459 want_inline_small_function_p (struct cgraph_edge *e, bool report)
460 {
461 bool want_inline = true;
462 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
463
464 if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
465 ;
466 else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
467 && !flag_inline_small_functions)
468 {
469 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
470 want_inline = false;
471 }
472 else
473 {
474 int growth = estimate_edge_growth (e);
475 inline_hints hints = estimate_edge_hints (e);
476
477 if (growth <= 0)
478 ;
479 /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when the
480 hints suggest that inlining the given function is very profitable. */
481 else if (DECL_DECLARED_INLINE_P (callee->symbol.decl)
482 && growth >= MAX_INLINE_INSNS_SINGLE
483 && !(hints & (INLINE_HINT_indirect_call
484 | INLINE_HINT_loop_iterations
485 | INLINE_HINT_loop_stride)))
486 {
487 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
488 want_inline = false;
489 }
490 /* Before giving up based on the fact that caller size will grow, allow
491 functions that are called a few times and where eliminating the
492 offline copy will lead to an overall code size reduction.
493 Not all of these will be handled by subsequent inlining of functions
494 called once: in particular weak functions are not handled, nor are
495 functions that inline into multiple calls but whose bodies are largely
496 optimized out. Finally we want to inline earlier to allow inlining of callbacks.
497
498 This is slightly wrong on the aggressive side: it is entirely possible
499 that a function is called many times with a context where inlining
500 reduces code size and a few times with a context where inlining
501 increases code size. The resulting growth estimate will be negative even
502 if it would make more sense to keep the offline copy and not inline
503 into the call sites that make the code size grow.
504
505 When badness orders the calls in a way that code-reducing calls come
506 first, this situation is not a problem at all: after inlining all
507 "good" calls, we will realize that keeping the function around is
508 better. */
509 else if (growth <= MAX_INLINE_INSNS_SINGLE
510 /* Unlike for functions called once, we play it unsafe with
511 COMDATs. We can allow that since we know the functions
512 in consideration are small (and thus the risk is small) and
513 moreover the growth estimates already account for COMDAT
514 functions possibly disappearing when eliminated from the
515 current unit. With good probability, making the aggressive
516 choice in all units is going to make the overall program
517 smaller.
518
519 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
520 instead of
521 cgraph_will_be_removed_from_program_if_no_direct_calls. */
522 && !DECL_EXTERNAL (callee->symbol.decl)
523 && cgraph_can_remove_if_no_direct_calls_p (callee)
524 && estimate_growth (callee) <= 0)
525 ;
526 else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
527 && !flag_inline_functions)
528 {
529 e->inline_failed = CIF_NOT_DECLARED_INLINED;
530 want_inline = false;
531 }
532 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
533 Upgrade it to MAX_INLINE_INSNS_SINGLE when the hints suggest that
534 inlining the given function is very profitable. */
535 else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
536 && growth >= ((hints & (INLINE_HINT_indirect_call
537 | INLINE_HINT_loop_iterations
538 | INLINE_HINT_loop_stride))
539 ? MAX (MAX_INLINE_INSNS_AUTO,
540 MAX_INLINE_INSNS_SINGLE)
541 : MAX_INLINE_INSNS_AUTO))
542 {
543 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
544 want_inline = false;
545 }
546 /* If call is cold, do not inline when function body would grow. */
547 else if (!cgraph_maybe_hot_edge_p (e))
548 {
549 e->inline_failed = CIF_UNLIKELY_CALL;
550 want_inline = false;
551 }
552 }
553 if (!want_inline && report)
554 report_inline_failed_reason (e);
555 return want_inline;
556 }
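/* To illustrate the limits above with hypothetical values of
   MAX_INLINE_INSNS_SINGLE = 400 and MAX_INLINE_INSNS_AUTO = 40: a
   function declared inline may grow its caller by up to 400 insns, an
   auto-inlined (not declared inline) function only by 40, and either
   limit is relaxed when the inline hints (indirect call, known loop
   iterations or stride) indicate that the inlining is likely very
   profitable.  The actual values are set by the corresponding
   --param max-inline-insns-* options and may differ.  */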
557
558 /* EDGE is a self-recursive edge.
559 We handle two cases - when function A is inlined into itself,
560 or when function A is being inlined into another inlined copy of function
561 A within function B.
562
563 In the first case OUTER_NODE points to the toplevel copy of A, while
564 in the second case OUTER_NODE points to the outermost copy of A in B.
565
566 In both cases we want to be extra selective since
567 inlining the call will just cause new recursive calls to appear. */
568
569 static bool
570 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
571 struct cgraph_node *outer_node,
572 bool peeling,
573 int depth)
574 {
575 char const *reason = NULL;
576 bool want_inline = true;
577 int caller_freq = CGRAPH_FREQ_BASE;
578 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
579
580 if (DECL_DECLARED_INLINE_P (edge->caller->symbol.decl))
581 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
582
583 if (!cgraph_maybe_hot_edge_p (edge))
584 {
585 reason = "recursive call is cold";
586 want_inline = false;
587 }
588 else if (max_count && !outer_node->count)
589 {
590 reason = "not executed in profile";
591 want_inline = false;
592 }
593 else if (depth > max_depth)
594 {
595 reason = "--param max-inline-recursive-depth exceeded.";
596 want_inline = false;
597 }
598
599 if (outer_node->global.inlined_to)
600 caller_freq = outer_node->callers->frequency;
601
602 if (!want_inline)
603 ;
604 /* Inlining of a self-recursive function into a copy of itself within another
605 function is a transformation similar to loop peeling.
606
607 Peeling is profitable if we can inline enough copies to make the probability
608 of an actual call to the self-recursive function very small. Be sure that
609 the probability of recursion is small.
610
611 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
612 This way the expected number of recursions is at most max_depth. */
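/* As a worked illustration of the threshold below (assuming
   CGRAPH_FREQ_BASE is 1000 and max_depth is 8; the actual values come
   from cgraph.h and the --param max-inline-recursive-depth knobs):
     max_prob = 1000 - (1000 + 7) / 8 = 875,
   so at depth 1 the recursive call must have a relative frequency below
   87.5% to be peeled; at depth 2 the threshold tightens to
   875 * 875 / 1000 = 765, and so on.  */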
613 else if (peeling)
614 {
615 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
616 / max_depth);
617 int i;
618 for (i = 1; i < depth; i++)
619 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
620 if (max_count
621 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
622 >= max_prob))
623 {
624 reason = "profile of recursive call is too large";
625 want_inline = false;
626 }
627 if (!max_count
628 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
629 >= max_prob))
630 {
631 reason = "frequency of recursive call is too large";
632 want_inline = false;
633 }
634 }
635 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
636 recursion depth is large. We reduce function call overhead and increase the
637 chances that things fit in the hardware return predictor.
638
639 Recursive inlining might however increase the cost of stack frame setup,
640 actually slowing down functions whose recursion tree is wide rather than
641 deep.
642
643 Deciding reliably on when to do recursive inlining without profile feedback
644 is tricky. For now we disable recursive inlining when the probability of self
645 recursion is low.
646
647 Recursive inlining of a self-recursive call within a loop also results in large
648 loop depths that generally optimize badly. We may want to throttle down inlining
649 in those cases. In particular this seems to happen in one of the libstdc++
650 rb tree methods. */
651 else
652 {
653 if (max_count
654 && (edge->count * 100 / outer_node->count
655 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
656 {
657 reason = "profile of recursive call is too small";
658 want_inline = false;
659 }
660 else if (!max_count
661 && (edge->frequency * 100 / caller_freq
662 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
663 {
664 reason = "frequency of recursive call is too small";
665 want_inline = false;
666 }
667 }
668 if (!want_inline && dump_file)
669 fprintf (dump_file, " not inlining recursively: %s\n", reason);
670 return want_inline;
671 }
672
673 /* Return true when NODE has a caller other than EDGE.
674 Worker for cgraph_for_node_and_aliases. */
675
676 static bool
677 check_caller_edge (struct cgraph_node *node, void *edge)
678 {
679 return (node->callers
680 && node->callers != edge);
681 }
682
683
684 /* Decide if inlining NODE would reduce unit size by eliminating
685 the offline copy of the function.
686 When COLD is true, cold calls are considered, too. */
687
688 static bool
689 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
690 {
691 struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
692 struct cgraph_edge *e;
693 bool has_hot_call = false;
694
695 /* Does it have callers? */
696 if (!node->callers)
697 return false;
698 /* Already inlined? */
699 if (function->global.inlined_to)
700 return false;
701 if (cgraph_function_or_thunk_node (node, NULL) != node)
702 return false;
703 /* Inlining into all callers would increase size? */
704 if (estimate_growth (node) > 0)
705 return false;
706 /* Maybe other aliases have more direct calls. */
707 if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
708 return false;
709 /* All inlines must be possible. */
710 for (e = node->callers; e; e = e->next_caller)
711 {
712 if (!can_inline_edge_p (e, true))
713 return false;
714 if (!has_hot_call && cgraph_maybe_hot_edge_p (e))
715 has_hot_call = 1;
716 }
717
718 if (!cold && !has_hot_call)
719 return false;
720 return true;
721 }
722
723
724 /* Return the relative time improvement for inlining EDGE, in the range
725 1...2^9. */
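/* A worked example of the computation below, with assumed numbers
   (taking CGRAPH_FREQ_BASE as 1000): if the callee time is 90, the call
   statement time is 10 and the edge frequency is 1000 (one call per
   caller invocation), then uninlined_call_time is 100.  If inlining is
   estimated to add a time_growth of 25 to the caller, the relative
   benefit is (100 - 25) * 256 / 100 = 192, i.e. roughly a 75% saving
   mapped onto the 1...512 fixed-point scale.  */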
726
727 static inline int
728 relative_time_benefit (struct inline_summary *callee_info,
729 struct cgraph_edge *edge,
730 int time_growth)
731 {
732 int relbenefit;
733 gcov_type uninlined_call_time;
734
735 uninlined_call_time =
736 ((gcov_type)
737 (callee_info->time
738 + inline_edge_summary (edge)->call_stmt_time) * edge->frequency
739 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
740 /* Compute the relative time benefit, i.e. how much faster the call becomes.
741 ??? Perhaps computing how much faster the caller+callee together become
742 would lead to more realistic results. */
743 if (!uninlined_call_time)
744 uninlined_call_time = 1;
745 relbenefit =
746 (uninlined_call_time - time_growth) * 256 / (uninlined_call_time);
747 relbenefit = MIN (relbenefit, 512);
748 relbenefit = MAX (relbenefit, 1);
749 return relbenefit;
750 }
751
752
753 /* A cost model driving the inlining heuristics in a way so that the edges
754 with the smallest badness are inlined first. After each inlining is
755 performed, the costs of all caller edges of the nodes affected are
756 recomputed so the metrics may accurately depend on values such as the
757 number of inlinable callers of the function or the function body size. */
758
759 static int
760 edge_badness (struct cgraph_edge *edge, bool dump)
761 {
762 gcov_type badness;
763 int growth, time_growth;
764 struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
765 NULL);
766 struct inline_summary *callee_info = inline_summary (callee);
767 inline_hints hints;
768
769 if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
770 return INT_MIN;
771
772 growth = estimate_edge_growth (edge);
773 time_growth = estimate_edge_time (edge);
774 hints = estimate_edge_hints (edge);
775
776 if (dump)
777 {
778 fprintf (dump_file, " Badness calculation for %s -> %s\n",
779 xstrdup (cgraph_node_name (edge->caller)),
780 xstrdup (cgraph_node_name (callee)));
781 fprintf (dump_file, " size growth %i, time growth %i ",
782 growth,
783 time_growth);
784 dump_inline_hints (dump_file, hints);
785 fprintf (dump_file, "\n");
786 }
787
788 /* Always prefer inlining saving code size. */
789 if (growth <= 0)
790 {
791 badness = INT_MIN / 2 + growth;
792 if (dump)
793 fprintf (dump_file, " %i: Growth %i <= 0\n", (int) badness,
794 growth);
795 }
796
797 /* When profiling is available, compute badness as:
798
799 relative_edge_count * relative_time_benefit
800 goodness = -------------------------------------------
801 edge_growth
802 badness = -goodness
803
804 The fraction is upside down, because on edge counts and time benefits
805 the bounds are known. Edge growth is essentially unlimited. */
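/* Illustration of the profile-based formula below with assumed numbers:
   for the hottest edge (edge->count == max_count) the scaled count
   factor is INT_MIN / 2 / 512 = -2097152.  With a relative time
   benefit of 256 (the whole uninlined call time saved) and a size
   growth of 16 insns, badness = -2097152 * 256 / 16 = -33554432; a
   colder edge or a larger growth yields a value closer to zero, i.e. a
   worse candidate.  */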
806
807 else if (max_count)
808 {
809 int relbenefit = relative_time_benefit (callee_info, edge, time_growth);
810 badness =
811 ((int)
812 ((double) edge->count * INT_MIN / 2 / max_count / 512) *
813 relative_time_benefit (callee_info, edge, time_growth)) / growth;
814
815 /* Be sure that insanity of the profile won't lead to increasing counts
816 in the scaling and thus to overflow in the computation above. */
817 gcc_assert (max_count >= edge->count);
818 if (dump)
819 {
820 fprintf (dump_file,
821 " %i (relative %f): profile info. Relative count %f"
822 " * Relative benefit %f\n",
823 (int) badness, (double) badness / INT_MIN,
824 (double) edge->count / max_count,
825 relbenefit * 100 / 256.0);
826 }
827 }
828
829 /* When the function local profile is available, compute badness as:
830
831
832 growth_of_callee
833 badness = -------------------------------------- + growth_for_all
834 relative_time_benefit * edge_frequency
835
836 */
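/* A worked example of the fixed-point computation below, with assumed
   numbers: for an edge executed on every invocation of the caller
   (frequency == CGRAPH_FREQ_MAX) the frequency scales to 1024; with a
   relative time benefit of 128 the divisor is 1024 * 128 = 131072.
   A size growth of 40 insns then gives
     badness = 40 * (1 << 25) / 131072 = 10240,
   before the hint-based adjustments below divide or multiply it.  */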
837 else if (flag_guess_branch_prob)
838 {
839 int div = edge->frequency * (1<<10) / CGRAPH_FREQ_MAX;
840
841 div = MAX (div, 1);
842 gcc_checking_assert (edge->frequency <= CGRAPH_FREQ_MAX);
843 div *= relative_time_benefit (callee_info, edge, time_growth);
844
845 /* frequency is normalized in range 1...2^10,
846 relbenefit in range 1...2^9,
847 so DIV should be in range 1...2^19. */
848 gcc_checking_assert (div >= 1 && div <= (1<<19));
849
850 /* The result must be an integer in range 0...INT_MAX.
851 Set the base of the fixed point calculation so we don't lose much
852 precision for small badnesses (those are interesting) yet we don't
853 overflow for growths that are still in the interesting range.
854
855 Fixed point arithmetic with the point at the 6th bit. */
856 badness = ((gcov_type)growth) * (1<<(19+6));
857 badness = (badness + div / 2) / div;
858
859 /* The overall growth of inlining all calls of a function matters: we want
860 to inline so that the offline copy of the function is no longer needed.
861
862 Additionally, functions that can be fully inlined without much
863 effort are better inline candidates than functions that can be fully
864 inlined only after a noticeable overall unit growth. The latter
865 are better in the sense of compressing code size by factoring out common
866 code into a separate function shared by multiple code paths.
867
868 We might mix the value into the fraction by taking into account the
869 relative growth of the unit, but for now just add the number
870 into the resulting fraction. */
871 if (badness > INT_MAX / 8)
872 {
873 badness = INT_MAX / 8;
874 if (dump)
875 fprintf (dump_file, "Badness overflow\n");
876 }
877 if (hints & (INLINE_HINT_indirect_call
878 | INLINE_HINT_loop_iterations
879 | INLINE_HINT_loop_stride))
880 badness /= 8;
881 if (hints & (INLINE_HINT_same_scc))
882 badness *= 4;
883 if (hints & (INLINE_HINT_in_scc))
884 badness *= 2;
885 if (dump)
886 {
887 fprintf (dump_file,
888 " %i: guessed profile. frequency %f,"
889 " benefit %f%%, divisor %i\n",
890 (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
891 relative_time_benefit (callee_info, edge, time_growth) * 100 / 256.0, div);
892 }
893 }
894 /* When the function local profile is not available or it does not give
895 useful information (i.e. frequency is zero), base the cost on
896 loop nest and overall size growth, so we optimize for the overall number
897 of functions fully inlined in the program. */
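/* For illustration, with assumed numbers: a call with size growth of 20
   insns at loop depth 3 gets badness (20 * 256) >> 3 = 640, while the
   same call at depth 0 gets 5120, so more deeply nested calls are
   preferred.  A size-shrinking call (say growth -4) gets
   (-4 * 256) << 3 = -8192, which is better still.  */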
898 else
899 {
900 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
901 badness = growth * 256;
902
903 /* Decrease badness if call is nested. */
904 if (badness > 0)
905 badness >>= nest;
906 else
907 {
908 badness <<= nest;
909 }
910 if (dump)
911 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
912 nest);
913 }
914
915 /* Ensure that we did not overflow in all the fixed point math above. */
916 gcc_assert (badness >= INT_MIN);
917 gcc_assert (badness <= INT_MAX - 1);
918 /* Make recursive inlining happen always after other inlining is done. */
919 if (cgraph_edge_recursive_p (edge))
920 return badness + 1;
921 else
922 return badness;
923 }
924
925 /* Recompute badness of EDGE and update its key in HEAP if needed. */
926 static inline void
927 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
928 {
929 int badness = edge_badness (edge, false);
930 if (edge->aux)
931 {
932 fibnode_t n = (fibnode_t) edge->aux;
933 gcc_checking_assert (n->data == edge);
934
935 /* fibheap_replace_key only decreases the keys.
936 When we increase the key we do not update the heap
937 and instead re-insert the element once it becomes
938 a minimum of the heap. */
939 if (badness < n->key)
940 {
941 if (dump_file && (dump_flags & TDF_DETAILS))
942 {
943 fprintf (dump_file,
944 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
945 xstrdup (cgraph_node_name (edge->caller)),
946 edge->caller->uid,
947 xstrdup (cgraph_node_name (edge->callee)),
948 edge->callee->uid,
949 (int)n->key,
950 badness);
951 }
952 fibheap_replace_key (heap, n, badness);
953 gcc_checking_assert (n->key == badness);
954 }
955 }
956 else
957 {
958 if (dump_file && (dump_flags & TDF_DETAILS))
959 {
960 fprintf (dump_file,
961 " enqueuing call %s/%i -> %s/%i, badness %i\n",
962 xstrdup (cgraph_node_name (edge->caller)),
963 edge->caller->uid,
964 xstrdup (cgraph_node_name (edge->callee)),
965 edge->callee->uid,
966 badness);
967 }
968 edge->aux = fibheap_insert (heap, badness, edge);
969 }
970 }
971
972
973 /* NODE was inlined.
974 All caller edges need to be reset because
975 size estimates change. Similarly callees need to be reset
976 because a better context may be known. */
977
978 static void
979 reset_edge_caches (struct cgraph_node *node)
980 {
981 struct cgraph_edge *edge;
982 struct cgraph_edge *e = node->callees;
983 struct cgraph_node *where = node;
984 int i;
985 struct ipa_ref *ref;
986
987 if (where->global.inlined_to)
988 where = where->global.inlined_to;
989
990 /* WHERE body size has changed, the cached growth is invalid. */
991 reset_node_growth_cache (where);
992
993 for (edge = where->callers; edge; edge = edge->next_caller)
994 if (edge->inline_failed)
995 reset_edge_growth_cache (edge);
996 for (i = 0; ipa_ref_list_referring_iterate (&where->symbol.ref_list,
997 i, ref); i++)
998 if (ref->use == IPA_REF_ALIAS)
999 reset_edge_caches (ipa_ref_referring_node (ref));
1000
1001 if (!e)
1002 return;
1003
1004 while (true)
1005 if (!e->inline_failed && e->callee->callees)
1006 e = e->callee->callees;
1007 else
1008 {
1009 if (e->inline_failed)
1010 reset_edge_growth_cache (e);
1011 if (e->next_callee)
1012 e = e->next_callee;
1013 else
1014 {
1015 do
1016 {
1017 if (e->caller == node)
1018 return;
1019 e = e->caller->callers;
1020 }
1021 while (!e->next_callee);
1022 e = e->next_callee;
1023 }
1024 }
1025 }
1026
1027 /* Recompute HEAP nodes for each caller of NODE.
1028 UPDATED_NODES tracks nodes we have already visited, to avoid redundant work.
1029 When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
1030 it is inlinable. Otherwise check all edges. */
1031
1032 static void
1033 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
1034 bitmap updated_nodes,
1035 struct cgraph_edge *check_inlinablity_for)
1036 {
1037 struct cgraph_edge *edge;
1038 int i;
1039 struct ipa_ref *ref;
1040
1041 if ((!node->alias && !inline_summary (node)->inlinable)
1042 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
1043 || node->global.inlined_to)
1044 return;
1045 if (!bitmap_set_bit (updated_nodes, node->uid))
1046 return;
1047
1048 for (i = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list,
1049 i, ref); i++)
1050 if (ref->use == IPA_REF_ALIAS)
1051 {
1052 struct cgraph_node *alias = ipa_ref_referring_node (ref);
1053 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1054 }
1055
1056 for (edge = node->callers; edge; edge = edge->next_caller)
1057 if (edge->inline_failed)
1058 {
1059 if (!check_inlinablity_for
1060 || check_inlinablity_for == edge)
1061 {
1062 if (can_inline_edge_p (edge, false)
1063 && want_inline_small_function_p (edge, false))
1064 update_edge_key (heap, edge);
1065 else if (edge->aux)
1066 {
1067 report_inline_failed_reason (edge);
1068 fibheap_delete_node (heap, (fibnode_t) edge->aux);
1069 edge->aux = NULL;
1070 }
1071 }
1072 else if (edge->aux)
1073 update_edge_key (heap, edge);
1074 }
1075 }
1076
1077 /* Recompute HEAP nodes for each uninlined call in NODE.
1078 This is used when we know that edge badnesses are only going to increase
1079 (we introduced a new call site) and thus all we need is to insert newly
1080 created edges into the heap. */
1081
1082 static void
1083 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
1084 bitmap updated_nodes)
1085 {
1086 struct cgraph_edge *e = node->callees;
1087
1088 if (!e)
1089 return;
1090 while (true)
1091 if (!e->inline_failed && e->callee->callees)
1092 e = e->callee->callees;
1093 else
1094 {
1095 enum availability avail;
1096 struct cgraph_node *callee;
1097 /* We do not reset the callee growth cache here. Since we added a new call,
1098 growth could only have increased and consequently the badness metric
1099 doesn't need updating. */
1100 if (e->inline_failed
1101 && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
1102 && inline_summary (callee)->inlinable
1103 && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
1104 && !bitmap_bit_p (updated_nodes, callee->uid))
1105 {
1106 if (can_inline_edge_p (e, false)
1107 && want_inline_small_function_p (e, false))
1108 update_edge_key (heap, e);
1109 else if (e->aux)
1110 {
1111 report_inline_failed_reason (e);
1112 fibheap_delete_node (heap, (fibnode_t) e->aux);
1113 e->aux = NULL;
1114 }
1115 }
1116 if (e->next_callee)
1117 e = e->next_callee;
1118 else
1119 {
1120 do
1121 {
1122 if (e->caller == node)
1123 return;
1124 e = e->caller->callers;
1125 }
1126 while (!e->next_callee);
1127 e = e->next_callee;
1128 }
1129 }
1130 }
1131
1132 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
1133 how much we want to recursively inline the call. */
1134
1135 static void
1136 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1137 fibheap_t heap)
1138 {
1139 struct cgraph_edge *e;
1140 enum availability avail;
1141
1142 for (e = where->callees; e; e = e->next_callee)
1143 if (e->callee == node
1144 || (cgraph_function_or_thunk_node (e->callee, &avail) == node
1145 && avail > AVAIL_OVERWRITABLE))
1146 {
1147 /* When profile feedback is available, prioritize by expected number
1148 of calls. */
1149 fibheap_insert (heap,
1150 !max_count ? -e->frequency
1151 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1152 e);
1153 }
1154 for (e = where->callees; e; e = e->next_callee)
1155 if (!e->inline_failed)
1156 lookup_recursive_calls (node, e->callee, heap);
1157 }
1158
1159 /* Decide on recursive inlining: in the case the function has recursive calls,
1160 inline until the body size reaches the given limit. If any new indirect edges
1161 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1162 is NULL. */
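/* For instance (with a hypothetical --param max-inline-insns-recursive
   of 450): recursive calls keep being inlined, hottest first, until
   estimate_size_after_inlining for the function would exceed 450
   insns, at which point the remaining recursive calls are redirected
   back to the offline body.  */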
1163
1164 static bool
1165 recursive_inlining (struct cgraph_edge *edge,
1166 VEC (cgraph_edge_p, heap) **new_edges)
1167 {
1168 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1169 fibheap_t heap;
1170 struct cgraph_node *node;
1171 struct cgraph_edge *e;
1172 struct cgraph_node *master_clone = NULL, *next;
1173 int depth = 0;
1174 int n = 0;
1175
1176 node = edge->caller;
1177 if (node->global.inlined_to)
1178 node = node->global.inlined_to;
1179
1180 if (DECL_DECLARED_INLINE_P (node->symbol.decl))
1181 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1182
1183 /* Make sure that function is small enough to be considered for inlining. */
1184 if (estimate_size_after_inlining (node, edge) >= limit)
1185 return false;
1186 heap = fibheap_new ();
1187 lookup_recursive_calls (node, node, heap);
1188 if (fibheap_empty (heap))
1189 {
1190 fibheap_delete (heap);
1191 return false;
1192 }
1193
1194 if (dump_file)
1195 fprintf (dump_file,
1196 " Performing recursive inlining on %s\n",
1197 cgraph_node_name (node));
1198
1199 /* Do the inlining and update the list of recursive calls during the process. */
1200 while (!fibheap_empty (heap))
1201 {
1202 struct cgraph_edge *curr
1203 = (struct cgraph_edge *) fibheap_extract_min (heap);
1204 struct cgraph_node *cnode, *dest = curr->callee;
1205
1206 if (!can_inline_edge_p (curr, true))
1207 continue;
1208
1209 /* MASTER_CLONE is produced in the case we have already started modifying
1210 the function. Be sure to redirect the edge to the original body before
1211 estimating growth, otherwise we will be seeing growth after inlining
1212 into the already modified body. */
1213 if (master_clone)
1214 {
1215 cgraph_redirect_edge_callee (curr, master_clone);
1216 reset_edge_growth_cache (curr);
1217 }
1218
1219 if (estimate_size_after_inlining (node, curr) > limit)
1220 {
1221 cgraph_redirect_edge_callee (curr, dest);
1222 reset_edge_growth_cache (curr);
1223 break;
1224 }
1225
1226 depth = 1;
1227 for (cnode = curr->caller;
1228 cnode->global.inlined_to; cnode = cnode->callers->caller)
1229 if (node->symbol.decl
1230 == cgraph_function_or_thunk_node (curr->callee, NULL)->symbol.decl)
1231 depth++;
1232
1233 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1234 {
1235 cgraph_redirect_edge_callee (curr, dest);
1236 reset_edge_growth_cache (curr);
1237 continue;
1238 }
1239
1240 if (dump_file)
1241 {
1242 fprintf (dump_file,
1243 " Inlining call of depth %i", depth);
1244 if (node->count)
1245 {
1246 fprintf (dump_file, " called approx. %.2f times per call",
1247 (double)curr->count / node->count);
1248 }
1249 fprintf (dump_file, "\n");
1250 }
1251 if (!master_clone)
1252 {
1253 /* We need original clone to copy around. */
1254 master_clone = cgraph_clone_node (node, node->symbol.decl,
1255 node->count, CGRAPH_FREQ_BASE,
1256 false, NULL, true);
1257 for (e = master_clone->callees; e; e = e->next_callee)
1258 if (!e->inline_failed)
1259 clone_inlined_nodes (e, true, false, NULL);
1260 cgraph_redirect_edge_callee (curr, master_clone);
1261 reset_edge_growth_cache (curr);
1262 }
1263
1264 inline_call (curr, false, new_edges, &overall_size, true);
1265 lookup_recursive_calls (node, curr->callee, heap);
1266 n++;
1267 }
1268
1269 if (!fibheap_empty (heap) && dump_file)
1270 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1271 fibheap_delete (heap);
1272
1273 if (!master_clone)
1274 return false;
1275
1276 if (dump_file)
1277 fprintf (dump_file,
1278 "\n Inlined %i times, "
1279 "body grown from size %i to %i, time %i to %i\n", n,
1280 inline_summary (master_clone)->size, inline_summary (node)->size,
1281 inline_summary (master_clone)->time, inline_summary (node)->time);
1282
1283 /* Remove the master clone we used for inlining. We rely on the fact that
1284 clones inlined into the master clone get queued just before the master
1285 clone, so we don't need recursion. */
1286 for (node = cgraph_first_function (); node != master_clone;
1287 node = next)
1288 {
1289 next = cgraph_next_function (node);
1290 if (node->global.inlined_to == master_clone)
1291 cgraph_remove_node (node);
1292 }
1293 cgraph_remove_node (master_clone);
1294 return true;
1295 }
1296
1297
1298 /* Given the whole-compilation-unit estimate of INSNS, compute how large we
1299 can allow the unit to grow. */
1300
1301 static int
1302 compute_max_insns (int insns)
1303 {
1304 int max_insns = insns;
1305 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1306 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1307
1308 return ((HOST_WIDEST_INT) max_insns
1309 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1310 }
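/* A worked example with assumed parameter values (the real defaults live
   in params.def and may differ): with --param large-unit-insns of 10000
   and --param inline-unit-growth of 30, a unit of 4000 insns is first
   raised to the 10000 floor and may grow to 10000 * 130 / 100 = 13000
   insns, while a 50000-insn unit may grow to 65000 insns.  */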
1311
1312
1313 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1314
1315 static void
1316 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1317 {
1318 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1319 {
1320 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1321
1322 gcc_assert (!edge->aux);
1323 if (edge->inline_failed
1324 && can_inline_edge_p (edge, true)
1325 && want_inline_small_function_p (edge, true))
1326 edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
1327 }
1328 }
1329
1330
1331 /* We use a greedy algorithm for inlining of small functions:
1332 all inline candidates are put into a prioritized heap ordered by
1333 increasing badness.
1334
1335 The inlining of small functions is bounded by the unit growth parameters. */
1336
1337 static void
1338 inline_small_functions (void)
1339 {
1340 struct cgraph_node *node;
1341 struct cgraph_edge *edge;
1342 fibheap_t edge_heap = fibheap_new ();
1343 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1344 int min_size, max_size;
1345 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1346 int initial_size = 0;
1347 struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1348
1349 if (flag_indirect_inlining)
1350 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1351
1352 /* Compute overall unit size and other global parameters used by badness
1353 metrics. */
1354
1355 max_count = 0;
1356 ipa_reduced_postorder (order, true, true, NULL);
1357 free (order);
1358
1359 FOR_EACH_DEFINED_FUNCTION (node)
1360 if (!node->global.inlined_to)
1361 {
1362 if (cgraph_function_with_gimple_body_p (node)
1363 || node->thunk.thunk_p)
1364 {
1365 struct inline_summary *info = inline_summary (node);
1366 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->symbol.aux;
1367
1368 if (!DECL_EXTERNAL (node->symbol.decl))
1369 initial_size += info->size;
1370 if (dfs && dfs->next_cycle)
1371 {
1372 struct cgraph_node *n2;
1373 int id = dfs->scc_no + 1;
1374 for (n2 = node; n2;
1375 n2 = ((struct ipa_dfs_info *) node->symbol.aux)->next_cycle)
1376 {
1377 struct inline_summary *info2 = inline_summary (n2);
1378 if (info2->scc_no)
1379 break;
1380 info2->scc_no = id;
1381 }
1382 }
1383 }
1384
1385 for (edge = node->callers; edge; edge = edge->next_caller)
1386 if (max_count < edge->count)
1387 max_count = edge->count;
1388 }
1389 ipa_free_postorder_info ();
1390 initialize_growth_caches ();
1391
1392 if (dump_file)
1393 fprintf (dump_file,
1394 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1395 initial_size);
1396
1397 overall_size = initial_size;
1398 max_size = compute_max_insns (overall_size);
1399 min_size = overall_size;
1400
1401 /* Populate the heap with all edges we might inline. */
1402
1403 FOR_EACH_DEFINED_FUNCTION (node)
1404 if (!node->global.inlined_to)
1405 {
1406 if (dump_file)
1407 fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
1408 cgraph_node_name (node), node->uid);
1409
1410 for (edge = node->callers; edge; edge = edge->next_caller)
1411 if (edge->inline_failed
1412 && can_inline_edge_p (edge, true)
1413 && want_inline_small_function_p (edge, true))
1415 {
1416 gcc_assert (!edge->aux);
1417 update_edge_key (edge_heap, edge);
1418 }
1419 }
1420
1421 gcc_assert (in_lto_p
1422 || !max_count
1423 || (profile_info && flag_branch_probabilities));
1424
1425 while (!fibheap_empty (edge_heap))
1426 {
1427 int old_size = overall_size;
1428 struct cgraph_node *where, *callee;
1429 int badness = fibheap_min_key (edge_heap);
1430 int current_badness;
1431 int cached_badness;
1432 int growth;
1433
1434 edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
1435 gcc_assert (edge->aux);
1436 edge->aux = NULL;
1437 if (!edge->inline_failed)
1438 continue;
1439
1440 /* Be sure that the caches are maintained consistently.
1441 We cannot make this ENABLE_CHECKING-only because it causes different
1442 updates of the fibheap queue. */
1443 cached_badness = edge_badness (edge, false);
1444 reset_edge_growth_cache (edge);
1445 reset_node_growth_cache (edge->callee);
1446
1447 /* When updating the edge costs, we only decrease badness in the keys.
1448 Increases of badness are handled lazily; when we see a key with an
1449 out-of-date value on it, we re-insert it now. */
1450 current_badness = edge_badness (edge, false);
1451 gcc_assert (cached_badness == current_badness);
1452 gcc_assert (current_badness >= badness);
1453 if (current_badness != badness)
1454 {
1455 edge->aux = fibheap_insert (edge_heap, current_badness, edge);
1456 continue;
1457 }
1458
1459 if (!can_inline_edge_p (edge, true))
1460 continue;
1461
1462 callee = cgraph_function_or_thunk_node (edge->callee, NULL);
1463 growth = estimate_edge_growth (edge);
1464 if (dump_file)
1465 {
1466 fprintf (dump_file,
1467 "\nConsidering %s with %i size\n",
1468 cgraph_node_name (callee),
1469 inline_summary (callee)->size);
1470 fprintf (dump_file,
1471 " to be inlined into %s in %s:%i\n"
1472 " Estimated growth after inlined into all is %+i insns.\n"
1473 " Estimated badness is %i, frequency %.2f.\n",
1474 cgraph_node_name (edge->caller),
1475 flag_wpa ? "unknown"
1476 : gimple_filename ((const_gimple) edge->call_stmt),
1477 flag_wpa ? -1
1478 : gimple_lineno ((const_gimple) edge->call_stmt),
1479 estimate_growth (callee),
1480 badness,
1481 edge->frequency / (double)CGRAPH_FREQ_BASE);
1482 if (edge->count)
1483 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
1484 edge->count);
1485 if (dump_flags & TDF_DETAILS)
1486 edge_badness (edge, true);
1487 }
1488
1489 if (overall_size + growth > max_size
1490 && !DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
1491 {
1492 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1493 report_inline_failed_reason (edge);
1494 continue;
1495 }
1496
1497 if (!want_inline_small_function_p (edge, true))
1498 continue;
1499
1500 /* The heuristics for inlining small functions work poorly for
1501 recursive calls where we achieve an effect similar to loop unrolling.
1502 When inlining such an edge seems profitable, leave the decision to the
1503 special recursive inliner. */
1504 if (cgraph_edge_recursive_p (edge))
1505 {
1506 where = edge->caller;
1507 if (where->global.inlined_to)
1508 where = where->global.inlined_to;
1509 if (!recursive_inlining (edge,
1510 flag_indirect_inlining
1511 ? &new_indirect_edges : NULL))
1512 {
1513 edge->inline_failed = CIF_RECURSIVE_INLINING;
1514 continue;
1515 }
1516 reset_edge_caches (where);
1517 /* Recursive inliner inlines all recursive calls of the function
1518 at once. Consequently we need to update all callee keys. */
1519 if (flag_indirect_inlining)
1520 add_new_edges_to_heap (edge_heap, new_indirect_edges);
1521 update_callee_keys (edge_heap, where, updated_nodes);
1522 }
1523 else
1524 {
1525 struct cgraph_node *outer_node = NULL;
1526 int depth = 0;
1527
1528 /* Consider the case where self-recursive function A is inlined into B.
1529 This is a desired optimization in some cases, since it leads to an effect
1530 similar to loop peeling and we might completely optimize out the
1531 recursive call. However we must be extra selective. */
1532
1533 where = edge->caller;
1534 while (where->global.inlined_to)
1535 {
1536 if (where->symbol.decl == callee->symbol.decl)
1537 outer_node = where, depth++;
1538 where = where->callers->caller;
1539 }
1540 if (outer_node
1541 && !want_inline_self_recursive_call_p (edge, outer_node,
1542 true, depth))
1543 {
1544 edge->inline_failed
1545 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->symbol.decl)
1546 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1547 continue;
1548 }
1549 else if (depth && dump_file)
1550 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1551
1552 gcc_checking_assert (!callee->global.inlined_to);
1553 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1554 if (flag_indirect_inlining)
1555 add_new_edges_to_heap (edge_heap, new_indirect_edges);
1556
1557 reset_edge_caches (edge->callee);
1558 reset_node_growth_cache (callee);
1559
1560 update_callee_keys (edge_heap, where, updated_nodes);
1561 }
1562 where = edge->caller;
1563 if (where->global.inlined_to)
1564 where = where->global.inlined_to;
1565
1566 /* Our profitability metric can depend on local properties
1567 such as the number of inlinable calls and the size of the function body.
1568 After inlining these properties might change for the function we
1569 inlined into (since its body size changed) and for the functions
1570 called by the function we inlined into (since the number of its inlinable
1571 callers might change). */
1572 update_caller_keys (edge_heap, where, updated_nodes, NULL);
1573 bitmap_clear (updated_nodes);
1574
1575 if (dump_file)
1576 {
1577 fprintf (dump_file,
1578 " Inlined into %s which now has time %i and size %i,"
1579 "net change of %+i.\n",
1580 cgraph_node_name (edge->caller),
1581 inline_summary (edge->caller)->time,
1582 inline_summary (edge->caller)->size,
1583 overall_size - old_size);
1584 }
1585 if (min_size > overall_size)
1586 {
1587 min_size = overall_size;
1588 max_size = compute_max_insns (min_size);
1589
1590 if (dump_file)
1591 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1592 }
1593 }
1594
1595 free_growth_caches ();
1596 if (new_indirect_edges)
1597 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1598 fibheap_delete (edge_heap);
1599 if (dump_file)
1600 fprintf (dump_file,
1601 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1602 initial_size, overall_size,
1603 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1604 BITMAP_FREE (updated_nodes);
1605 }
1606
1607 /* Flatten NODE. Performed both during early inlining and
1608 at IPA inlining time. */
1609
1610 static void
1611 flatten_function (struct cgraph_node *node, bool early)
1612 {
1613 struct cgraph_edge *e;
1614
1615 /* We shouldn't be called recursively when we are being processed. */
1616 gcc_assert (node->symbol.aux == NULL);
1617
1618 node->symbol.aux = (void *) node;
1619
1620 for (e = node->callees; e; e = e->next_callee)
1621 {
1622 struct cgraph_node *orig_callee;
1623 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1624
1625 /* We've hit a cycle? It is time to give up. */
1626 if (callee->symbol.aux)
1627 {
1628 if (dump_file)
1629 fprintf (dump_file,
1630 "Not inlining %s into %s to avoid cycle.\n",
1631 xstrdup (cgraph_node_name (callee)),
1632 xstrdup (cgraph_node_name (e->caller)));
1633 e->inline_failed = CIF_RECURSIVE_INLINING;
1634 continue;
1635 }
1636
1637 /* When the edge is already inlined, we just need to recurse into
1638 it in order to fully flatten the leaves. */
1639 if (!e->inline_failed)
1640 {
1641 flatten_function (callee, early);
1642 continue;
1643 }
1644
1645 /* The flatten attribute needs to be processed during late inlining. For
1646 extra code quality, however, we do flattening during early optimization,
1647 too. */
1648 if (!early
1649 ? !can_inline_edge_p (e, true)
1650 : !can_early_inline_edge_p (e))
1651 continue;
1652
1653 if (cgraph_edge_recursive_p (e))
1654 {
1655 if (dump_file)
1656 fprintf (dump_file, "Not inlining: recursive call.\n");
1657 continue;
1658 }
1659
1660 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->symbol.decl))
1661 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
1662 {
1663 if (dump_file)
1664 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1665 continue;
1666 }
1667
1668 /* Inline the edge and flatten the inline clone. Avoid
1669 recursing through the original node if the node was cloned. */
1670 if (dump_file)
1671 fprintf (dump_file, " Inlining %s into %s.\n",
1672 xstrdup (cgraph_node_name (callee)),
1673 xstrdup (cgraph_node_name (e->caller)));
1674 orig_callee = callee;
1675 inline_call (e, true, NULL, NULL, false);
1676 if (e->callee != orig_callee)
1677 orig_callee->symbol.aux = (void *) node;
1678 flatten_function (e->callee, early);
1679 if (e->callee != orig_callee)
1680 orig_callee->symbol.aux = NULL;
1681 }
1682
1683 node->symbol.aux = NULL;
1684 if (!node->global.inlined_to)
1685 inline_update_overall_summary (node);
1686 }
1687
1688 /* Decide on the inlining. We do so in topological order to avoid the
1689 expense of updating data structures. */
1690
1691 static unsigned int
1692 ipa_inline (void)
1693 {
1694 struct cgraph_node *node;
1695 int nnodes;
1696 struct cgraph_node **order =
1697 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1698 int i;
1699
1700 if (in_lto_p && optimize)
1701 ipa_update_after_lto_read ();
1702
1703 if (dump_file)
1704 dump_inline_summaries (dump_file);
1705
1706 nnodes = ipa_reverse_postorder (order);
1707
1708 FOR_EACH_FUNCTION (node)
1709 node->symbol.aux = 0;
1710
1711 if (dump_file)
1712 fprintf (dump_file, "\nFlattening functions:\n");
1713
1714 /* In the first pass handle functions to be flattened.  Do this first,
1715 so that none of our later choices can make it impossible. */
1716 for (i = nnodes - 1; i >= 0; i--)
1717 {
1718 node = order[i];
1719
1720 /* Handle nodes to be flattened.
1721 Ideally, when processing callees we would stop inlining at the
1722 entry of cycles, possibly cloning that entry point and
1723 trying to flatten it, turning it into a self-recursive
1724 function. */
1725 if (lookup_attribute ("flatten",
1726 DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
1727 {
1728 if (dump_file)
1729 fprintf (dump_file,
1730 "Flattening %s\n", cgraph_node_name (node));
1731 flatten_function (node, false);
1732 }
1733 }
1734
1735 inline_small_functions ();
1736 symtab_remove_unreachable_nodes (true, dump_file);
1737 free (order);
1738
1739 /* Inline functions with the property that after inlining into all callers
1740 the code size will shrink because the out-of-line copy is eliminated.
1741 We do this regardless of the callee size as long as function growth limits
1742 are met. */
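  /* Illustration (hypothetical user code): a static function with a single
     call site is the typical candidate here; once it is inlined, the
     out-of-line body has no callers left and can be removed, e.g.

       static int compute (int x) { return x * x + 1; }
       int entry (int x) { return compute (x); }

     After inlining compute into entry, the standalone copy of compute is
     eliminated, so total code size shrinks.  */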
1743 if (flag_inline_functions_called_once)
1744 {
1745 int cold;
1746 if (dump_file)
1747 fprintf (dump_file,
1748 "\nDeciding on functions to be inlined into all callers:\n");
1749
1750 /* Inlining one function called once has a good chance of preventing
1751 the inlining of another function into the same callee. Ideally we should
1752 work in priority order, but inlining hot functions first is probably
1753 a good cut without the extra pain of maintaining the queue.
1754
1755 ??? This does not really fit the bill perfectly: inlining a function
1756 into a callee often leads to better optimization of the callee due to
1757 increased context for optimization.
1758 For example, if main() calls a function that outputs help
1759 and then a function that does the main optimization, we should inline
1760 the second with priority even if both calls are cold by themselves.
1761
1762 We probably want to implement a new predicate replacing our use of
1763 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
1764 to be hot. */
1765 for (cold = 0; cold <= 1; cold ++)
1766 {
1767 FOR_EACH_DEFINED_FUNCTION (node)
1768 {
1769 if (want_inline_function_to_all_callers_p (node, cold))
1770 {
1771 int num_calls = 0;
1772 struct cgraph_edge *e;
1773 for (e = node->callers; e; e = e->next_caller)
1774 num_calls++;
1775 while (node->callers && !node->global.inlined_to)
1776 {
1777 struct cgraph_node *caller = node->callers->caller;
1778
1779 if (dump_file)
1780 {
1781 fprintf (dump_file,
1782 "\nInlining %s size %i.\n",
1783 cgraph_node_name (node),
1784 inline_summary (node)->size);
1785 fprintf (dump_file,
1786 " Called once from %s %i insns.\n",
1787 cgraph_node_name (node->callers->caller),
1788 inline_summary (node->callers->caller)->size);
1789 }
1790
1791 inline_call (node->callers, true, NULL, NULL, true);
1792 if (dump_file)
1793 fprintf (dump_file,
1794 " Inlined into %s which now has %i size\n",
1795 cgraph_node_name (caller),
1796 inline_summary (caller)->size);
1797 if (!num_calls--)
1798 {
1799 if (dump_file)
1800 fprintf (dump_file, "New calls found; giving up.\n");
1801 break;
1802 }
1803 }
1804 }
1805 }
1806 }
1807 }
1808
1809 /* Free ipa-prop structures if they are no longer needed. */
1810 if (optimize)
1811 ipa_free_all_structures_after_iinln ();
1812
1813 if (dump_file)
1814 fprintf (dump_file,
1815 "\nInlined %i calls, eliminated %i functions\n\n",
1816 ncalls_inlined, nfunctions_inlined);
1817
1818 if (dump_file)
1819 dump_inline_summaries (dump_file);
1820 /* In WPA we use the inline summaries for the partitioning process. */
1821 if (!flag_wpa)
1822 inline_free_summary ();
1823 return 0;
1824 }
1825
1826 /* Inline always-inline function calls in NODE. */
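
/* For illustration only (hypothetical user code): DECL_DISREGARD_INLINE_LIMITS
   is set for functions carrying the always_inline attribute, e.g.

     static inline int __attribute__ ((always_inline))
     add1 (int x) { return x + 1; }

   Calls to such functions are inlined by this routine even when the
   compiler is not otherwise inlining (see the !optimize / flag_no_inline
   handling in early_inliner below), except for recursive calls, which are
   rejected.  */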
1827
1828 static bool
1829 inline_always_inline_functions (struct cgraph_node *node)
1830 {
1831 struct cgraph_edge *e;
1832 bool inlined = false;
1833
1834 for (e = node->callees; e; e = e->next_callee)
1835 {
1836 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1837 if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
1838 continue;
1839
1840 if (cgraph_edge_recursive_p (e))
1841 {
1842 if (dump_file)
1843 fprintf (dump_file, " Not inlining recursive call to %s.\n",
1844 cgraph_node_name (e->callee));
1845 e->inline_failed = CIF_RECURSIVE_INLINING;
1846 continue;
1847 }
1848
1849 if (!can_early_inline_edge_p (e))
1850 continue;
1851
1852 if (dump_file)
1853 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
1854 xstrdup (cgraph_node_name (e->callee)),
1855 xstrdup (cgraph_node_name (e->caller)));
1856 inline_call (e, true, NULL, NULL, false);
1857 inlined = true;
1858 }
1859 if (inlined)
1860 inline_update_overall_summary (node);
1861
1862 return inlined;
1863 }
1864
1865 /* Decide on early inlining of small functions called by NODE.
1866 Return true if anything was inlined. */
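
/* For illustration only (hypothetical user code): a typical candidate at
   this point is a small function explicitly declared inline, e.g.

     static inline int min_int (int a, int b) { return a < b ? a : b; }

   Functions not declared inline are only considered when
   -finline-small-functions or -finline-functions is in effect, as the
   check below shows.  */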
1867
1868 static bool
1869 early_inline_small_functions (struct cgraph_node *node)
1870 {
1871 struct cgraph_edge *e;
1872 bool inlined = false;
1873
1874 for (e = node->callees; e; e = e->next_callee)
1875 {
1876 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1877 if (!inline_summary (callee)->inlinable
1878 || !e->inline_failed)
1879 continue;
1880
1881 /* Do not consider functions not declared inline. */
1882 if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
1883 && !flag_inline_small_functions
1884 && !flag_inline_functions)
1885 continue;
1886
1887 if (dump_file)
1888 fprintf (dump_file, "Considering inline candidate %s.\n",
1889 cgraph_node_name (callee));
1890
1891 if (!can_early_inline_edge_p (e))
1892 continue;
1893
1894 if (cgraph_edge_recursive_p (e))
1895 {
1896 if (dump_file)
1897 fprintf (dump_file, " Not inlining: recursive call.\n");
1898 continue;
1899 }
1900
1901 if (!want_early_inline_function_p (e))
1902 continue;
1903
1904 if (dump_file)
1905 fprintf (dump_file, " Inlining %s into %s.\n",
1906 xstrdup (cgraph_node_name (callee)),
1907 xstrdup (cgraph_node_name (e->caller)));
1908 inline_call (e, true, NULL, NULL, true);
1909 inlined = true;
1910 }
1911
1912 return inlined;
1913 }
1914
1915 /* Do inlining of small functions.  Doing so early helps profiling and other
1916 passes to be somewhat more effective and avoids some code duplication in
1917 the later real inlining pass for testcases with very many function calls. */
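
/* Purely as an illustration of how to observe this pass: it is registered
   below under the name "einline", so its per-function dump can be requested
   with the usual -fdump-tree-<pass> option, e.g.

     gcc -O2 -fdump-tree-einline foo.c

   which records the "Inlining ... into ..." messages emitted via dump_file
   in this file.  */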
1918 static unsigned int
1919 early_inliner (void)
1920 {
1921 struct cgraph_node *node = cgraph_get_node (current_function_decl);
1922 struct cgraph_edge *edge;
1923 unsigned int todo = 0;
1924 int iterations = 0;
1925 bool inlined = false;
1926
1927 if (seen_error ())
1928 return 0;
1929
1930 /* Do nothing if the data structures for the ipa-inliner are already computed.
1931 This happens when some pass decides to construct a new function and
1932 cgraph_add_new_function calls lowering passes and early optimization on
1933 it. This may confuse us when the early inliner decides to inline a call
1934 to a function clone, because function clones do not have a parameter list
1935 in ipa-prop matching their signature. */
1936 if (ipa_node_params_vector)
1937 return 0;
1938
1939 #ifdef ENABLE_CHECKING
1940 verify_cgraph_node (node);
1941 #endif
1942
1943 /* Even when not optimizing or not inlining, inline always-inline
1944 functions. */
1945 inlined = inline_always_inline_functions (node);
1946
1947 if (!optimize
1948 || flag_no_inline
1949 || !flag_early_inlining
1950 /* Never inline regular functions into always-inline functions
1951 during incremental inlining. This is unfortunate, as functions
1952 calling always-inline functions will get less optimized, but at
1953 the same time inlining a function that calls an always-inline
1954 function into an always-inline function might introduce
1955 cycles of edges to be always inlined in the callgraph.
1956
1957 We might want to be smarter and just avoid this type of inlining. */
1958 || DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl))
1959 ;
1960 else if (lookup_attribute ("flatten",
1961 DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
1962 {
1963 /* When the function is marked to be flattened, recursively inline
1964 all calls in it. */
1965 if (dump_file)
1966 fprintf (dump_file,
1967 "Flattening %s\n", cgraph_node_name (node));
1968 flatten_function (node, true);
1969 inlined = true;
1970 }
1971 else
1972 {
1973 /* We iterate incremental inlining to get trivial cases of indirect
1974 inlining. */
1975 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1976 && early_inline_small_functions (node))
1977 {
1978 timevar_push (TV_INTEGRATION);
1979 todo |= optimize_inline_calls (current_function_decl);
1980
1981 /* Technically we ought to recompute the inline parameters so the new
1982 iteration of the early inliner works as expected.  However, the values
1983 are approximately right, so we only need to update the edge
1984 info that might be cleared out for newly discovered edges. */
1985 for (edge = node->callees; edge; edge = edge->next_callee)
1986 {
1987 struct inline_edge_summary *es = inline_edge_summary (edge);
1988 es->call_stmt_size
1989 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
1990 es->call_stmt_time
1991 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
1992 if (edge->callee->symbol.decl
1993 && !gimple_check_call_matching_types (edge->call_stmt,
1994 edge->callee->symbol.decl))
1995 edge->call_stmt_cannot_inline_p = true;
1996 }
1997 timevar_pop (TV_INTEGRATION);
1998 iterations++;
1999 inlined = false;
2000 }
2001 if (dump_file)
2002 fprintf (dump_file, "Iterations: %i\n", iterations);
2003 }
2004
2005 if (inlined)
2006 {
2007 timevar_push (TV_INTEGRATION);
2008 todo |= optimize_inline_calls (current_function_decl);
2009 timevar_pop (TV_INTEGRATION);
2010 }
2011
2012 cfun->always_inline_functions_inlined = true;
2013
2014 return todo;
2015 }
2016
2017 struct gimple_opt_pass pass_early_inline =
2018 {
2019 {
2020 GIMPLE_PASS,
2021 "einline", /* name */
2022 OPTGROUP_INLINE, /* optinfo_flags */
2023 NULL, /* gate */
2024 early_inliner, /* execute */
2025 NULL, /* sub */
2026 NULL, /* next */
2027 0, /* static_pass_number */
2028 TV_EARLY_INLINING, /* tv_id */
2029 PROP_ssa, /* properties_required */
2030 0, /* properties_provided */
2031 0, /* properties_destroyed */
2032 0, /* todo_flags_start */
2033 0 /* todo_flags_finish */
2034 }
2035 };
2036
2037
2038 /* When to run IPA inlining. Inlining of always-inline functions
2039 happens during early inlining.
2040
2041 Enable inlining unconditionally at -flto. We need size estimates to
2042 drive partitioning. */
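
/* For example, "gcc -O0 -flto" still satisfies this gate because flag_lto
   is set; the pass then runs chiefly so the size estimates needed for LTO
   partitioning are available.  */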
2043
2044 static bool
2045 gate_ipa_inline (void)
2046 {
2047 return optimize || flag_lto || flag_wpa;
2048 }
2049
2050 struct ipa_opt_pass_d pass_ipa_inline =
2051 {
2052 {
2053 IPA_PASS,
2054 "inline", /* name */
2055 OPTGROUP_INLINE, /* optinfo_flags */
2056 gate_ipa_inline, /* gate */
2057 ipa_inline, /* execute */
2058 NULL, /* sub */
2059 NULL, /* next */
2060 0, /* static_pass_number */
2061 TV_IPA_INLINING, /* tv_id */
2062 0, /* properties_required */
2063 0, /* properties_provided */
2064 0, /* properties_destroyed */
2065 TODO_remove_functions, /* todo_flags_start */
2066 TODO_dump_symtab
2067 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2068 },
2069 inline_generate_summary, /* generate_summary */
2070 inline_write_summary, /* write_summary */
2071 inline_read_summary, /* read_summary */
2072 NULL, /* write_optimization_summary */
2073 NULL, /* read_optimization_summary */
2074 NULL, /* stmt_fixup */
2075 0, /* TODOs */
2076 inline_transform, /* function_transform */
2077 NULL, /* variable_transform */
2078 };