/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth and so
     on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       A simple local inlining pass inlining callees into the current
       function.  This pass makes no use of whole unit analysis and thus it
       can only make very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into
       SSA form just before this pass and optimized subsequently.  As a
       result, the callees of the function seen by the early inliner were
       already optimized and the results of early inlining add a lot of
       optimization opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve the quality of analysis done by the
       real IPA optimizers.

       Because of the lack of whole unit knowledge, the pass can not really
       make good code size/performance tradeoffs.  It however does very
       simple speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so; see the sketch following this comment.

       This heuristic is not very good at inlining recursive calls.
       Recursive calls can be inlined with results similar to loop unrolling.
       To do so, a special purpose recursive inliner is executed on a
       function when a recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modifications of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since
       after inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */
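/* For illustration only, step 1) above sketched in pseudocode; the real
   implementation is inline_small_functions below:

     put all inlinable edges into a heap keyed by badness;
     while (the heap is not empty and unit growth is within limits)
       inline the edge with the smallest badness, then recompute the
       badness of the edges whose size and time estimates changed.  */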

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "ggc.h"
#include "rtl.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
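/* For example (illustrative numbers only): if the largest function on the
   inline chain has self_size 1000 and --param large-function-growth is 100,
   the body size limit computed below becomes
   1000 + 1000 * 100 / 100 = 2000 weighted instructions after inlining.  */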

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large i/o data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     the stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
               xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
               xstrdup (cgraph_node_name (e->callee)), e->callee->uid,
               cgraph_inline_failed_string (e->inline_failed));
    }
}

/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->symbol.decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->symbol.decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->symbol.decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->symbol.decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->symbol.decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->analyzed)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      return false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
           && DECL_FUNCTION_PERSONALITY (callee->symbol.decl)
           && (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
               != DECL_FUNCTION_PERSONALITY (callee->symbol.decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->symbol.decl)
           && !is_tm_pure (e->caller->symbol.decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into the cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
           && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->symbol.decl,
                                                callee->symbol.decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl)
           && !lookup_attribute ("flatten",
                                 DECL_ATTRIBUTES
                                   (e->caller->global.inlined_to
                                    ? e->caller->global.inlined_to->symbol.decl
                                    : e->caller->symbol.decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
        = TREE_OPTIMIZATION ((caller_tree)
                             ? caller_tree
                             : optimization_default_node);

      struct cl_optimization *callee_opt
        = TREE_OPTIMIZATION ((callee_tree)
                             ? callee_tree
                             : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
           || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
          /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
          && !DECL_DISREGARD_INLINE_LIMITS (e->callee->symbol.decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}


/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
                                                              NULL);
  /* The early inliner might get called at WPA stage when an IPA pass adds
     new functions.  In this case we cannot really do any early inlining,
     because the function bodies are missing.  */
  if (!gimple_has_body_p (callee->symbol.decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we have not processed
     the callee by the early inliner yet).  We don't have a CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->symbol.decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
    {
      if (dump_file)
        fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}


/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->symbol.decl))
      return false;
  return true;
}


/* Return true if we are interested in inlining the small function early.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
           && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      if (growth <= 0)
        ;
      else if (!cgraph_maybe_hot_edge_p (e)
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "call is cold and code would grow by %i\n",
                     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
                     xstrdup (cgraph_node_name (callee)), callee->uid,
                     growth);
          want_inline = false;
        }
      else if (!leaf_node_p (callee)
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "callee is not leaf and code would grow by %i\n",
                     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
                     xstrdup (cgraph_node_name (callee)), callee->uid,
                     growth);
          want_inline = false;
        }
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns\n",
                     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
                     xstrdup (cgraph_node_name (callee)), callee->uid,
                     growth);
          want_inline = false;
        }
    }
  return want_inline;
}

/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
           && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);

      if (growth <= 0)
        ;
      else if (DECL_DECLARED_INLINE_P (callee->symbol.decl)
               && growth >= MAX_INLINE_INSNS_SINGLE)
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      /* Before giving up based on the fact that caller size will grow, allow
         functions that are called few times and where eliminating the
         offline copy will lead to overall code size reduction.
         Not all of these will be handled by subsequent inlining of functions
         called once: in particular weak functions are not handled, nor are
         functions that inline to multiple calls but whose bodies are mostly
         optimized out.
         Finally we want to inline earlier to allow inlining of callbacks.

         This is slightly wrong on the aggressive side: it is entirely
         possible that a function is called many times with a context where
         inlining reduces code size and few times with a context where
         inlining increases code size.  The resulting growth estimate will be
         negative even if it would make more sense to keep the offline copy
         and not inline into the call sites that make the code size grow.

         When badness orders the calls in a way that code reducing calls come
         first, this situation is not a problem at all: after inlining all
         "good" calls, we will realize that keeping the function around is
         better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
               /* Unlike for functions called once, we play unsafe with
                  COMDATs.  We can allow that since we know functions
                  in consideration are small (and thus risk is small) and
                  moreover the growth estimates already account for the fact
                  that COMDAT functions may or may not disappear when
                  eliminated from the current unit.  With good probability
                  making the aggressive choice in all units is going to make
                  the overall program smaller.

                  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
                  instead of
                  cgraph_will_be_removed_from_program_if_no_direct_calls  */
               && !DECL_EXTERNAL (callee->symbol.decl)
               && cgraph_can_remove_if_no_direct_calls_p (callee)
               && estimate_growth (callee) <= 0)
        ;
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
               && !flag_inline_functions)
        {
          e->inline_failed = CIF_NOT_DECLARED_INLINED;
          want_inline = false;
        }
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
               && growth >= MAX_INLINE_INSNS_AUTO)
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          want_inline = false;
        }
      /* If the call is cold, do not inline when the function body would
         grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
                                   bool peeling,
                                   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->symbol.decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
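  /* For example (illustrative): with CGRAPH_FREQ_BASE == 1000 and
     max_depth == 8, the code below accepts a recursion probability of at
     most (1000 - ceil (1000 / 8)) / 1000 = 0.875 at depth 1; each deeper
     level squares the bound, so depth 2 allows roughly 0.766, and so on.  */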
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
                                         / max_depth);
      int i;
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
          && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
              >= max_prob))
        {
          reason = "profile of recursive call is too large";
          want_inline = false;
        }
      if (!max_count
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
              >= max_prob))
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb-tree methods.  */
  else
    {
      if (max_count
          && (edge->count * 100 / outer_node->count
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "profile of recursive call is too small";
          want_inline = false;
        }
      else if (!max_count
               && (edge->frequency * 100 / caller_freq
                   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}

/* Return true when NODE has a caller other than EDGE.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_caller_edge (struct cgraph_node *node, void *edge)
{
  return (node->callers
          && node->callers != edge);
}


/* Decide if NODE is called once and if inlining it would eliminate the
   need for the offline copy of the function.  */

static bool
want_inline_function_called_once_p (struct cgraph_node *node)
{
   struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
   /* Already inlined?  */
   if (function->global.inlined_to)
     return false;
   /* Zero or more than one callers?  */
   if (!node->callers
       || node->callers->next_caller)
     return false;
   /* Maybe other aliases have more direct calls.  */
   if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
     return false;
   /* A recursive call makes no sense to inline.  */
   if (cgraph_edge_recursive_p (node->callers))
     return false;
   /* External functions are not really in the unit, so inlining
      them when called once would just increase the program size.  */
   if (DECL_EXTERNAL (function->symbol.decl))
     return false;
   /* The offline body must be optimized out.  */
   if (!cgraph_will_be_removed_from_program_if_no_direct_calls (function))
     return false;
   if (!can_inline_edge_p (node->callers, true))
     return false;
   return true;
}


/* Return the relative time improvement for inlining EDGE, in the range
   1...2^9.  */

static inline int
relative_time_benefit (struct inline_summary *callee_info,
                       struct cgraph_edge *edge,
                       int time_growth)
{
  int relbenefit;
  gcov_type uninlined_call_time;

  uninlined_call_time =
    ((gcov_type)
     (callee_info->time
      + inline_edge_summary (edge)->call_stmt_time) * edge->frequency
     + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    (uninlined_call_time - time_growth) * 256 / (uninlined_call_time);
  relbenefit = MIN (relbenefit, 512);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}

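/* For example (illustrative): a callee with estimated time 100 and
   call_stmt_time 10, called with frequency CGRAPH_FREQ_BASE, has an
   uninlined call time of 110.  If inlining grows the caller's time by 55,
   the relative benefit is (110 - 55) * 256 / 110 = 128, i.e. the call is
   estimated to become about twice as fast (256 would mean the entire call
   time is saved).  */
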
/* A cost model driving the inlining heuristics in a way so the edges with
   the smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, time_growth;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
                                                              NULL);
  struct inline_summary *callee_info = inline_summary (callee);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  time_growth = estimate_edge_time (edge);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
               xstrdup (cgraph_node_name (edge->caller)),
               xstrdup (cgraph_node_name (callee)));
      fprintf (dump_file, "      size growth %i, time growth %i\n",
               growth,
               time_growth);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
        fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
                 growth);
    }

  /* When profiling is available, compute badness as:

                relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
                             edge_growth
     badness = -goodness

     The fraction is upside down, because for edge counts and time benefits
     the bounds are known.  Edge growth is essentially unlimited.  */
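  /* For example (illustrative): an edge carrying half of max_count with a
     relative benefit of 128 and a growth of 16 yields roughly
     (INT_MIN / 2) * (1/2) * (128/512) / 16 = INT_MIN / 256; hotter, more
     beneficial or cheaper edges sort earlier in the heap.  */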

  else if (max_count)
    {
      int relbenefit = relative_time_benefit (callee_info, edge, time_growth);
      badness =
        ((int)
         ((double) edge->count * INT_MIN / 2 / max_count / 512) *
         relative_time_benefit (callee_info, edge, time_growth)) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
         in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
        {
          fprintf (dump_file,
                   "      %i (relative %f): profile info. Relative count %f"
                   " * Relative benefit %f\n",
                   (int) badness, (double) badness / INT_MIN,
                   (double) edge->count / max_count,
                   relbenefit * 100 / 256.0);
        }
    }

  /* When the function local profile is available, compute badness as:

                     growth_of_callee
     badness = -------------------------------------- + growth_for_all
               relative_time_benefit * edge_frequency
  */
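  /* For example (illustrative): an edge with frequency CGRAPH_FREQ_MAX and
     relative benefit 256 gets div == (1<<10) * 256 == 1<<18, so a growth of
     40 yields badness == 40 * 2^27 / 2^18 == 40 * 512.  Halving either the
     frequency or the benefit doubles the badness, making the edge less
     attractive.  */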
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * (1<<10) / CGRAPH_FREQ_MAX;

      div = MAX (div, 1);
      gcc_checking_assert (edge->frequency <= CGRAPH_FREQ_MAX);
      div *= relative_time_benefit (callee_info, edge, time_growth);

      /* frequency is normalized in range 1...2^10.
         relbenefit in range 1...2^9
         DIV should be in range 1....2^19.  */
      gcc_checking_assert (div >= 1 && div <= (1<<19));

      /* Result must be integer in range 0...INT_MAX.
         Set the base of the fixed point calculation so we don't lose much
         precision for small badnesses (those are interesting) yet we don't
         overflow for growths that are still in the interesting range.

         Fixed point arithmetic with point at the 8th bit.  */
      badness = ((gcov_type)growth) * (1<<(19+8));
      badness = (badness + div / 2) / div;

      /* Overall growth of inlining all calls of a function matters: we want
         to inline so the offline copy of the function is no longer needed.

         Additionally functions that can be fully inlined without much
         effort are better inline candidates than functions that can be fully
         inlined only after noticeable overall unit growth.  The latter
         are better in the sense of compressing code size by factoring out
         common code into a separate function shared by multiple code paths.

         We might mix the value into the fraction by taking into account
         the relative growth of the unit, but for now just add the number
         into the resulting fraction.  */
      if (badness > INT_MAX / 2)
        {
          badness = INT_MAX / 2;
          if (dump)
            fprintf (dump_file, "Badness overflow\n");
        }
      if (dump)
        {
          fprintf (dump_file,
                   "      %i: guessed profile. frequency %f,"
                   " benefit %f%%, divisor %i\n",
                   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
                   relative_time_benefit (callee_info, edge, time_growth)
                   * 100 / 256.0, div);
        }
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
      if (dump)
        fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
                 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}

/* Recompute the badness of EDGE and update its key in HEAP if needed.  */

static inline void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key can only decrease keys.
         When we increase the key, we do not update the heap
         and instead re-insert the element once it becomes
         the minimum of the heap.  */
      if (badness < n->key)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
                       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
                       xstrdup (cgraph_node_name (edge->caller)),
                       edge->caller->uid,
                       xstrdup (cgraph_node_name (edge->callee)),
                       edge->callee->uid,
                       (int)n->key,
                       badness);
            }
          fibheap_replace_key (heap, n, badness);
          gcc_checking_assert (n->key == badness);
        }
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
                   xstrdup (cgraph_node_name (edge->caller)),
                   edge->caller->uid,
                   xstrdup (cgraph_node_name (edge->callee)),
                   edge->callee->uid,
                   badness);
        }
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}


/* NODE was inlined.
   All caller edges need to be reset because
   their size estimates change.  Similarly callees need to be reset
   because a better context may now be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->symbol.ref_list,
                                              i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          reset_edge_growth_cache (e);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks the nodes we already visited, to avoid redundant
   work.  When CHECK_INLINABLITY_FOR is set, re-check for the specified edge
   that it is inlinable; otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes,
                    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list,
                                              i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
        struct cgraph_node *alias = ipa_ref_referring_node (ref);
        update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        if (!check_inlinablity_for
            || check_inlinablity_for == edge)
          {
            if (can_inline_edge_p (edge, false)
                && want_inline_small_function_p (edge, false))
              update_edge_key (heap, edge);
            else if (edge->aux)
              {
                report_inline_failed_reason (edge);
                fibheap_delete_node (heap, (fibnode_t) edge->aux);
                edge->aux = NULL;
              }
          }
        else if (edge->aux)
          update_edge_key (heap, edge);
      }
}

/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert the
   newly created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        enum availability avail;
        struct cgraph_node *callee;
        /* We do not reset the callee growth cache here.  Since we added a
           new call, the growth could only have increased and consequently
           the badness metric doesn't need updating.  */
        if (e->inline_failed
            && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
            && inline_summary (callee)->inlinable
            && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
            && !bitmap_bit_p (updated_nodes, callee->uid))
          {
            if (can_inline_edge_p (e, false)
                && want_inline_small_function_p (e, false))
              update_edge_key (heap, e);
            else if (e->aux)
              {
                report_inline_failed_reason (e);
                fibheap_delete_node (heap, (fibnode_t) e->aux);
                e->aux = NULL;
              }
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
        || (cgraph_function_or_thunk_node (e->callee, &avail) == node
            && avail > AVAIL_OVERWRITABLE))
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  */
        fibheap_insert (heap,
                        !max_count ? -e->frequency
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
                    VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->symbol.decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)
        break;

      if (!can_inline_edge_p (curr, true))
        continue;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->symbol.decl
            == cgraph_function_or_thunk_node (curr->callee, NULL)->symbol.decl)
          depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
        continue;

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      if (!master_clone)
        {
          /* We need the original clone to copy around.  */
          master_clone = cgraph_clone_node (node, node->symbol.decl,
                                            node->count, CGRAPH_FREQ_BASE,
                                            false, NULL, true);
          for (e = master_clone->callees; e; e = e->next_callee)
            if (!e->inline_failed)
              clone_inlined_nodes (e, true, false, NULL);
        }

      cgraph_redirect_edge_callee (curr, master_clone);
      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, "
             "body grown from size %i to %i, time %i to %i\n", n,
             inline_summary (master_clone)->size, inline_summary (node)->size,
             inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursion.  */
  for (node = cgraph_first_function (); node != master_clone;
       node = next)
    {
      next = cgraph_next_function (node);
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}


/* Given a whole-compilation-unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

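/* For example (illustrative): a unit of 50000 instructions compiled with
   --param inline-unit-growth=30 may grow to 50000 * 130 / 100 = 65000
   instructions.  Units smaller than --param large-unit-insns are rounded
   up to that size first, so tiny units get proportionally more headroom.  */
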

/* Compute the badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->inline_failed
          && can_inline_edge_p (edge, true)
          && want_inline_small_function_p (edge, true))
        edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}

ca31b95f | 1279 | /* We use greedy algorithm for inlining of small functions: |
09a2806f JH |
1280 | All inline candidates are put into prioritized heap ordered in |
1281 | increasing badness. | |
ca31b95f | 1282 | |
09a2806f | 1283 | The inlining of small functions is bounded by unit growth parameters. */ |
ca31b95f JH |
1284 | |
1285 | static void | |
4c0f7679 | 1286 | inline_small_functions (void) |
ca31b95f JH |
1287 | { |
1288 | struct cgraph_node *node; | |
670cd5c5 | 1289 | struct cgraph_edge *edge; |
0823efed | 1290 | fibheap_t edge_heap = fibheap_new (); |
670cd5c5 | 1291 | bitmap updated_nodes = BITMAP_ALLOC (NULL); |
85057983 | 1292 | int min_size, max_size; |
3e293154 | 1293 | VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL; |
09a2806f | 1294 | int initial_size = 0; |
3e293154 | 1295 | |
661e7330 | 1296 | if (flag_indirect_inlining) |
3e293154 | 1297 | new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8); |

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  initialize_growth_caches ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summary (node);

	    if (!DECL_EXTERNAL (node->symbol.decl))
	      initial_size += info->size;
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (edge_heap, edge);
	    }
      }
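
  /* update_edge_key computes the initial badness and, since aux is still
     NULL at this point, also inserts the edge, so after this loop every
     eligible call in the unit sits in EDGE_HEAP.  */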

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (edge_heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (edge_heap);
      int current_badness;
      int cached_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* Be sure that the caches are maintained consistently.
	 We cannot make this ENABLE_CHECKING-only because it would cause
	 different updates of the fibheap queue.  */
      cached_badness = edge_badness (edge, false);
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out-of-date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (edge_heap, current_badness, edge);
	  continue;
	}
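
      /* A stale key is therefore always an underestimate: keys are only
	 ever decreased in place, so an edge whose badness grew keeps its
	 old, smaller key until it is popped and re-inserted above.  */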

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (callee),
		   inline_summary (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* The heuristics for inlining small functions work poorly for
	 recursive calls, where the effect is similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision
	 to the dedicated recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  reset_edge_caches (where);
	  /* The recursive inliner inlines all recursive calls of the
	     function at once.  Consequently we need to update all callee
	     keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);
	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where a self-recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     has an effect similar to loop peeling and we might completely
	     optimize out the recursive call.  However, we must be extra
	     selective.  */
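	  /* Concretely, the walk below climbs from the call site through
	     the chain of inline clones toward the root of the inlined
	     body; DEPTH counts how many copies of the callee already sit
	     on that chain and OUTER_NODE remembers the outermost one.  */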

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->symbol.decl == callee->symbol.decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->symbol.decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  update_callee_keys (edge_heap, edge->callee, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
	 the number of inlinable calls and the size of the function body.
	 After inlining these properties might change for the function we
	 inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (edge_heap, where, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i, "
		   "net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   inline_summary (edge->caller)->time,
		   inline_summary (edge->caller)->size,
		   overall_size - old_size);
	}
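
      /* If inlining shrank the unit below its previous minimum, re-derive
	 the growth cap from the new, smaller size, so the limit ratchets
	 down together with the unit.  */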
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (edge_heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
  BITMAP_FREE (updated_nodes);
}
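
/* The loop above maintains two invariants: OVERALL_SIZE reflects the unit
   size after every inlining decision made so far, and no queued edge ever
   carries a key larger than its current badness.  */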

/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->symbol.aux == NULL);

  node->symbol.aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

      /* Have we hit a cycle?  Then it is time to give up.  */
      if (callee->symbol.aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup (cgraph_node_name (callee)),
		     xstrdup (cgraph_node_name (e->caller)));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality, however, we also do flattening during
	 early optimization.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->symbol.decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = NULL;
    }

  node->symbol.aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
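
/* The symbol.aux markings in flatten_function implement a simple DFS
   coloring: a node stays marked while it is on the flattening stack, so
   meeting a marked callee means the edge would close an inline cycle.  */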

/* Decide on the inlining.  We do so in the topological order to avoid
   the expense of updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->symbol.aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we would stop inlining at the
	 entry of cycles, possibly cloning that entry point and trying
	 to flatten the entry itself, turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();
  symtab_remove_unreachable_nodes (true, dump_file);
  free (order);

  /* We already perform some inlining of functions called once during
     inlining of small functions above.  After unreachable nodes are
     removed, we still might do a quick check that nothing new is found.  */
  if (flag_inline_functions_called_once)
    {
      int cold;
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining of another function into the same callee.  Ideally we
	 should work in priority order, but probably inlining hot functions
	 first is a good cut without the extra pain of maintaining the
	 queue.

	 ??? This does not really fit the bill perfectly: inlining a
	 function into a callee often leads to better optimization of the
	 callee due to increased context for optimization.
	 For example if the main() function calls a function that outputs
	 help and then a function that does the main optimization, we
	 should inline the second with priority even if both calls are
	 cold by themselves.

	 We probably want to implement a new predicate replacing our use
	 of maybe_hot_edge, interpreted as maybe_hot_edge || callee is
	 known to be hot.  */
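      /* Two passes: with COLD == 0 only nodes whose single call site is
	 maybe-hot are inlined; the COLD == 1 pass then picks up the
	 remaining, cold ones.  */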
      for (cold = 0; cold <= 1; cold++)
	{
	  FOR_EACH_DEFINED_FUNCTION (node)
	    {
	      if (want_inline_function_called_once_p (node)
		  && (cold
		      || cgraph_maybe_hot_edge_p (node->callers)))
		{
		  struct cgraph_node *caller = node->callers->caller;

		  if (dump_file)
		    {
		      fprintf (dump_file,
			       "\nInlining %s size %i.\n",
			       cgraph_node_name (node),
			       inline_summary (node)->size);
		      fprintf (dump_file,
			       " Called once from %s %i insns.\n",
			       cgraph_node_name (node->callers->caller),
			       inline_summary (node->callers->caller)->size);
		    }

		  inline_call (node->callers, true, NULL, NULL, true);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i size\n",
			     cgraph_node_name (caller),
			     inline_summary (caller)->size);
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}

/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup (cgraph_node_name (e->callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
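
/* For reference, the edges handled above are typically calls to functions
   declared along the lines of (a hypothetical example):

     static inline int f (int) __attribute__ ((always_inline));

   for which the front ends set DECL_DISREGARD_INLINE_LIMITS.  */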

/* Decide on the inlining.  We do so in the topological order to avoid
   the expense of updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
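
/* Note that inline_call above only updates the callgraph and the
   summaries; the actual statement rewriting happens when the caller
   (early_inliner below) runs optimize_inline_calls, and iterating the
   two is what lets trivial indirect-inlining opportunities be found.  */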

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the data structures for the ipa-inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner
     decides to inline a call to a function clone, because function clones
     do not have a parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_vector)
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks, as functions calling
	 always-inline functions will get less optimized, but at the
	 same time inlining functions calling always-inline functions
	 into an always-inline function might introduce cycles of edges
	 to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right and thus we only need to
	     update edge info that might be cleared out for newly
	     discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	      if (edge->callee->symbol.decl
		  && !gimple_check_call_matching_types (edge->call_stmt,
							edge->callee->symbol.decl))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
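
/* Note that cfun->always_inline_functions_inlined is set above even when
   nothing was inlined, so later passes can assume that always-inline
   calls in this function have already been taken care of.  */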

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};


/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally at -flto.  We need size estimates to
   drive partitioning.  */

static bool
gate_ipa_inline (void)
{
  return optimize || flag_lto || flag_wpa;
}

1965 | ||
7e5487a2 | 1966 | struct ipa_opt_pass_d pass_ipa_inline = |
873aa8f5 | 1967 | { |
8ddbbcae | 1968 | { |
17653c00 JH |
1969 | IPA_PASS, |
1970 | "inline", /* name */ | |
4c0f7679 JH |
1971 | gate_ipa_inline, /* gate */ |
1972 | ipa_inline, /* execute */ | |
873aa8f5 JH |
1973 | NULL, /* sub */ |
1974 | NULL, /* next */ | |
1975 | 0, /* static_pass_number */ | |
1976 | TV_INLINE_HEURISTICS, /* tv_id */ | |
1977 | 0, /* properties_required */ | |
535b544a | 1978 | 0, /* properties_provided */ |
873aa8f5 | 1979 | 0, /* properties_destroyed */ |
17653c00 | 1980 | TODO_remove_functions, /* todo_flags_finish */ |
8f940ee6 | 1981 | TODO_dump_symtab |
49ba8180 | 1982 | | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */ |
17653c00 | 1983 | }, |
1920df6c | 1984 | inline_generate_summary, /* generate_summary */ |
fb3f88cc JH |
1985 | inline_write_summary, /* write_summary */ |
1986 | inline_read_summary, /* read_summary */ | |
e792884f JH |
1987 | NULL, /* write_optimization_summary */ |
1988 | NULL, /* read_optimization_summary */ | |
e33c6cd6 | 1989 | NULL, /* stmt_fixup */ |
17653c00 JH |
1990 | 0, /* TODOs */ |
1991 | inline_transform, /* function_transform */ | |
1992 | NULL, /* variable_transform */ | |
ca31b95f | 1993 | }; |