/* Inlining decision heuristics.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth
     and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can make only
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result,
       the callees of the function seen by the early inliner were already
       optimized and the results of early inlining add a lot of optimization
       opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve the quality of analysis done by the
       real IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very simple
       speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so; a sketch of that loop follows this comment.

       This heuristic is not very good at inlining recursive calls.  Recursive
       calls can be inlined with results similar to loop unrolling.  To do so,
       a special purpose recursive inliner is executed on the function when a
       recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modification of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since after
       inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */
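
/* As an illustration only (not the actual control flow), the greedy
   small-function step of pass_ipa_inline conceptually amounts to:

     put every inlinable edge into a heap keyed by its badness;
     while the heap is non-empty and unit growth limits are not hit:
       extract the minimum-badness edge, inline it,
       then recompute the keys of the caller/callee edges it affected;

   This pseudo-code is a sketch under those assumptions; the real loop with
   all of its corner cases lives in the functions defined below.  */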

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "rtl.h"
#include "bitmap.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "cilk.h"

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static sreal max_count_real, max_relbenefit_real, half_int_min_real;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

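  /* Illustrative numbers only: with --param large-function-growth=100 and a
     largest relevant body of self_size 300, the computation above yields
     limit = 300 + 300 * 100 / 100 = 600, i.e. the combined function may at
     most double in size.  */
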
  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
               xstrdup (e->caller->name ()), e->caller->order,
               xstrdup (e->callee->name ()), e->callee->order,
               cgraph_inline_failed_string (e->inline_failed));
    }
}

/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
                   bool disregard_limits = false)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable
           || (caller_cfun && fn_contains_cilk_spawn_p (caller_cfun)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
        e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
           && DECL_FUNCTION_PERSONALITY (callee->decl)
           && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
               != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl)
           && !is_tm_pure (e->caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
           && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
                                                callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
           && !disregard_limits
           && !lookup_attribute ("flatten",
                                 DECL_ATTRIBUTES
                                 (e->caller->global.inlined_to
                                  ? e->caller->global.inlined_to->decl
                                  : e->caller->decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
        = TREE_OPTIMIZATION ((caller_tree)
                             ? caller_tree
                             : optimization_default_node);

      struct cl_optimization *callee_opt
        = TREE_OPTIMIZATION ((callee_tree)
                             ? callee_tree
                             : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
           || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
          /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
          && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}


/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
                                                              NULL);
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of the early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
        fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}


/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}


/* Return true if we are interested in inlining the small function E.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
        ;
      else if (!cgraph_maybe_hot_edge_p (e)
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "call is cold and code would grow by %i\n",
                     xstrdup (e->caller->name ()),
                     e->caller->order,
                     xstrdup (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns\n",
                     xstrdup (e->caller->name ()),
                     e->caller->order,
                     xstrdup (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns "
                     "divided by number of calls\n",
                     xstrdup (e->caller->name ()),
                     e->caller->order,
                     xstrdup (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
    }
  return want_inline;
}

/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline gcov_type
compute_uninlined_call_time (struct inline_summary *callee_info,
                             struct cgraph_edge *edge)
{
  gcov_type uninlined_call_time =
    RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
          CGRAPH_FREQ_BASE);
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
                                          ? edge->caller->global.inlined_to
                                          : edge->caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline gcov_type
compute_inlined_call_time (struct cgraph_edge *edge,
                           int edge_time)
{
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
                                          ? edge->caller->global.inlined_to
                                          : edge->caller)->time;
  gcov_type time = (caller_time
                    + RDIV (((gcov_type) edge_time
                             - inline_edge_summary (edge)->call_stmt_time)
                            * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
  /* Possibly one roundoff error, but watch for overflows.  */
  gcc_checking_assert (time >= INT_MIN / 2);
  if (time < 0)
    time = 0;
  return time;
}
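
/* A worked example with made-up numbers: for caller_time 1000, a callee
   whose time and edge_time are both 60, call_stmt_time 10 and an edge
   frequency of 2 * CGRAPH_FREQ_BASE, compute_uninlined_call_time returns
   60 * 2 + 1000 = 1120 while compute_inlined_call_time returns
   1000 + (60 - 10) * 2 = 1100, a saving of 20 time units.  */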

/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
                                                e);
  gcov_type inlined_time = compute_inlined_call_time (e,
                                                      estimate_edge_time (e));
  if (time - inlined_time
      > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
    return true;
  return false;
}

/* Return true if we are interested in inlining the small function E.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
        ;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggest that inlining the given function is very
         profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= MAX_INLINE_INSNS_SINGLE
               && !big_speedup
               && !(hints & (INLINE_HINT_indirect_call
                             | INLINE_HINT_loop_iterations
                             | INLINE_HINT_array_index
                             | INLINE_HINT_loop_stride)))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      /* Before giving up based on the fact that caller size will grow, allow
         functions that are called few times and where eliminating the offline
         copy will lead to overall code size reduction.
         Not all of these will be handled by subsequent inlining of functions
         called once: in particular weak functions are not handled, nor are
         functions that inline to multiple calls but whose bodies are largely
         optimized out.
         Finally we want to inline earlier to allow inlining of callbacks.

         This is slightly wrong on the aggressive side: it is entirely
         possible that the function is called many times with a context where
         inlining reduces code size and few times with a context where
         inlining increases code size.  The resulting growth estimate will be
         negative even if it would make more sense to keep the offline copy
         and not inline into the call sites that make the code size grow.

         When badness orders the calls in a way that code reducing calls come
         first, this situation is not a problem at all: after inlining all
         "good" calls, we will realize that keeping the function around is
         better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
               /* Unlike for functions called once, we play unsafe with
                  COMDATs.  We can allow that since we know functions
                  in consideration are small (and thus risk is small) and
                  moreover growth estimates already account that COMDAT
                  functions may or may not disappear when eliminated from
                  current unit.  With good probability making aggressive
                  choice in all units is going to make overall program
                  smaller.

                  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
                  instead of
                  cgraph_will_be_removed_from_program_if_no_direct_calls  */
               && !DECL_EXTERNAL (callee->decl)
               && cgraph_can_remove_if_no_direct_calls_p (callee)
               && estimate_growth (callee) <= 0)
        ;
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !flag_inline_functions)
        {
          e->inline_failed = CIF_NOT_DECLARED_INLINED;
          want_inline = false;
        }
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
         Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
         inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !big_speedup
               && growth >= ((hints & (INLINE_HINT_indirect_call
                                       | INLINE_HINT_loop_iterations
                                       | INLINE_HINT_array_index
                                       | INLINE_HINT_loop_stride))
                             ? MAX (MAX_INLINE_INSNS_AUTO,
                                    MAX_INLINE_INSNS_SINGLE)
                             : MAX_INLINE_INSNS_AUTO))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          want_inline = false;
        }
      /* If the call is cold, do not inline when function body would grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
                                   bool peeling,
                                   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of self recursive function into copy of itself within other
     function is transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of actual call to the self recursive function very small.
     Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
                                         / max_depth);
      int i;
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
          && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
              >= max_prob))
        {
          reason = "profile of recursive call is too large";
          want_inline = false;
        }
      if (!max_count
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
              >= max_prob))
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in hardware return predictor.

     Recursive inlining might however increase cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when
     probability of self recursion is low.

     Recursive inlining of self recursive call within loop also results in
     large loop depths that generally optimize badly.  We may want to throttle
     down inlining in those cases.  In particular this seems to happen in one
     of libstdc++ rb tree methods.  */
  else
    {
      if (max_count
          && (edge->count * 100 / outer_node->count
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "profile of recursive call is too small";
          want_inline = false;
        }
      else if (!max_count
               && (edge->frequency * 100 / caller_freq
                   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
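
/* An illustration of the peeling bound with made-up numbers: for max_depth 8,
   max_prob starts at CGRAPH_FREQ_BASE - ceil (CGRAPH_FREQ_BASE / 8), i.e. a
   recursion probability of at most 7/8; each extra level of depth squares it
   ((7/8)^2, (7/8)^4, ...), keeping the expected number of recursions near
   max_depth.  */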

/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!can_inline_edge_p (e, true))
        return true;
      if (!has_hot_call && cgraph_maybe_hot_edge_p (e))
        *(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}

/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
  bool has_hot_call = false;

  /* Does it have callers?  */
  if (!cgraph_for_node_and_aliases (node, has_caller_p, NULL, true))
    return false;
  /* Already inlined?  */
  if (function->global.inlined_to)
    return false;
  if (cgraph_function_or_thunk_node (node, NULL) != node)
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (cgraph_for_node_and_aliases (node, check_callers, &has_hot_call, true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}

#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Return relative time improvement for inlining EDGE in the range
   1...RELATIVE_TIME_BENEFIT_RANGE.  */

static inline int
relative_time_benefit (struct inline_summary *callee_info,
                       struct cgraph_edge *edge,
                       int edge_time)
{
  gcov_type relbenefit;
  gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
  gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);

  /* Inlining into an extern inline function is not a win.  */
  if (DECL_EXTERNAL (edge->caller->global.inlined_to
                     ? edge->caller->global.inlined_to->decl
                     : edge->caller->decl))
    return 1;

  /* Watch overflows.  */
  gcc_checking_assert (uninlined_call_time >= 0);
  gcc_checking_assert (inlined_call_time >= 0);
  gcc_checking_assert (uninlined_call_time >= inlined_call_time);

  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    RDIV (((gcov_type)uninlined_call_time - inlined_call_time)
          * RELATIVE_TIME_BENEFIT_RANGE,
          uninlined_call_time);
  relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
  gcc_checking_assert (relbenefit >= 0);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}
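
/* For instance (made-up numbers): if the call costs 200 time units without
   inlining and 150 with inlining, the benefit is
   (200 - 150) * RELATIVE_TIME_BENEFIT_RANGE / 200, i.e. a quarter of the
   available range.  */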


/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable
   callers of the function or function body size.  */

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, edge_time;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
                                                              NULL);
  struct inline_summary *callee_info = inline_summary (callee);
  inline_hints hints;

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
               xstrdup (edge->caller->name ()),
               edge->caller->order,
               xstrdup (callee->name ()),
               edge->callee->order);
      fprintf (dump_file, "      size growth %i, time %i ",
               growth,
               edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
        fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
                 growth);
    }

  /* When profiling is available, compute badness as:

                relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
                growth_f_caller
     badness = -goodness

     The fraction is upside down, because on edge counts and time benefits
     the bounds are known.  Edge growth is essentially unlimited.  */

  else if (max_count)
    {
      sreal tmp, relbenefit_real, growth_real;
      int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
      /* Capping edge->count to max_count.  edge->count can be larger than
         max_count if an inline adds new edges which increase max_count
         after max_count is computed.  */
      gcov_type edge_count = edge->count > max_count ? max_count : edge->count;

      sreal_init (&relbenefit_real, relbenefit, 0);
      sreal_init (&growth_real, growth, 0);

      /* relative_edge_count.  */
      sreal_init (&tmp, edge_count, 0);
      sreal_div (&tmp, &tmp, &max_count_real);

      /* relative_time_benefit.  */
      sreal_mul (&tmp, &tmp, &relbenefit_real);
      sreal_div (&tmp, &tmp, &max_relbenefit_real);

      /* growth_f_caller.  */
      sreal_mul (&tmp, &tmp, &half_int_min_real);
      sreal_div (&tmp, &tmp, &growth_real);

      badness = -1 * sreal_to_int (&tmp);

      if (dump)
        {
          fprintf (dump_file,
                   "      %i (relative %f): profile info. Relative count %f%s"
                   " * Relative benefit %f\n",
                   (int) badness, (double) badness / INT_MIN,
                   (double) edge_count / max_count,
                   edge->count > max_count ? " (capped to max_count)" : "",
                   relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
        }
    }

  /* When function local profile is available, compute badness as:

                 relative_time_benefit
     goodness = ---------------------------------
                 growth_of_caller * overall_growth

     badness = - goodness

     compensated by the inline hints.  */
  else if (flag_guess_branch_prob)
    {
      badness = (relative_time_benefit (callee_info, edge, edge_time)
                 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
      badness /= (MIN (65536/2, growth)
                  * MIN (65536/2, MAX (1, callee_info->growth)));
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 16);
      if ((hints & (INLINE_HINT_indirect_call
                    | INLINE_HINT_loop_iterations
                    | INLINE_HINT_array_index
                    | INLINE_HINT_loop_stride))
          || callee_info->growth <= 0)
        badness *= 8;
      if (hints & (INLINE_HINT_same_scc))
        badness /= 16;
      else if (hints & (INLINE_HINT_in_scc))
        badness /= 8;
      else if (hints & (INLINE_HINT_cross_module))
        badness /= 2;
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
      if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
        badness *= 16;
      if (dump)
        {
          fprintf (dump_file,
                   "      %i: guessed profile. frequency %f,"
                   " benefit %f%%, time w/o inlining %i, time w inlining %i"
                   " overall growth %i (current) %i (original)\n",
                   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
                   relative_time_benefit (callee_info, edge, edge_time) * 100.0
                   / RELATIVE_TIME_BENEFIT_RANGE,
                   (int)compute_uninlined_call_time (callee_info, edge),
                   (int)compute_inlined_call_time (edge, edge_time),
                   estimate_growth (callee),
                   callee_info->growth);
        }
    }
  /* When function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
      if (dump)
        fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
                 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}
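
/* A hypothetical reading of the guessed-profile branch above: with a
   relative_time_benefit of half the range, growth 10 and callee growth 20,
   badness is roughly (INT_MIN / 32) / (10 * 20) = INT_MIN / 6400 before the
   hint-based multiplications and divisions by powers of two are applied.  */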

/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static inline void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
         When we increase the key we do not update the heap
         and instead re-insert the element once it becomes
         a minimum of the heap.  */
      if (badness < n->key)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
                       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
                       xstrdup (edge->caller->name ()),
                       edge->caller->order,
                       xstrdup (edge->callee->name ()),
                       edge->callee->order,
                       (int)n->key,
                       badness);
            }
          fibheap_replace_key (heap, n, badness);
          gcc_checking_assert (n->key == badness);
        }
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
                   xstrdup (edge->caller->name ()),
                   edge->caller->order,
                   xstrdup (edge->callee->name ()),
                   edge->callee->order,
                   badness);
        }
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}


/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need resetting
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->ref_list,
                                              i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          reset_edge_growth_cache (e);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes,
                    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->ref_list,
                                              i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
        struct cgraph_node *alias = ipa_ref_referring_node (ref);
        update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        if (!check_inlinablity_for
            || check_inlinablity_for == edge)
          {
            if (can_inline_edge_p (edge, false)
                && want_inline_small_function_p (edge, false))
              update_edge_key (heap, edge);
            else if (edge->aux)
              {
                report_inline_failed_reason (edge);
                fibheap_delete_node (heap, (fibnode_t) edge->aux);
                edge->aux = NULL;
              }
          }
        else if (edge->aux)
          update_edge_key (heap, edge);
      }
}

/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        enum availability avail;
        struct cgraph_node *callee;
        /* We do not reset the callee growth cache here.  Since we added a new
           call, growth could have just increased and consequently the badness
           metric doesn't need updating.  */
        if (e->inline_failed
            && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
            && inline_summary (callee)->inlinable
            && avail >= AVAIL_AVAILABLE
            && !bitmap_bit_p (updated_nodes, callee->uid))
          {
            if (can_inline_edge_p (e, false)
                && want_inline_small_function_p (e, false))
              update_edge_key (heap, e);
            else if (e->aux)
              {
                report_inline_failed_reason (e);
                fibheap_delete_node (heap, (fibnode_t) e->aux);
                e->aux = NULL;
              }
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}
1231 | ||
670cd5c5 | 1232 | /* Enqueue all recursive calls from NODE into priority queue depending on |
0fa2e4df | 1233 | how likely we want to recursively inline the call. */ |
670cd5c5 | 1234 | |
ca31b95f JH |
1235 | static void |
1236 | lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where, | |
670cd5c5 | 1237 | fibheap_t heap) |
ca31b95f JH |
1238 | { |
1239 | struct cgraph_edge *e; | |
a5b1779f JH |
1240 | enum availability avail; |
1241 | ||
ca31b95f | 1242 | for (e = where->callees; e; e = e->next_callee) |
a5b1779f JH |
1243 | if (e->callee == node |
1244 | || (cgraph_function_or_thunk_node (e->callee, &avail) == node | |
1245 | && avail > AVAIL_OVERWRITABLE)) | |
ca31b95f | 1246 | { |
c5a4444c | 1247 | /* When profile feedback is available, prioritize by expected number |
09a2806f | 1248 | of calls. */ |
c5a4444c | 1249 | fibheap_insert (heap, |
09a2806f | 1250 | !max_count ? -e->frequency |
c5a4444c JH |
1251 | : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))), |
1252 | e); | |
ca31b95f JH |
1253 | } |
1254 | for (e = where->callees; e; e = e->next_callee) | |
1255 | if (!e->inline_failed) | |
670cd5c5 | 1256 | lookup_recursive_calls (node, e->callee, heap); |
ca31b95f JH |
1257 | } |
1258 | ||
1259 | /* Decide on recursive inlining: in the case function has recursive calls, | |
3e293154 | 1260 | inline until body size reaches given argument. If any new indirect edges |
e56f5f3e JJ |
1261 | are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES |
1262 | is NULL. */ | |
670cd5c5 JH |
1263 | |
1264 | static bool | |
4c0f7679 | 1265 | recursive_inlining (struct cgraph_edge *edge, |
9771b263 | 1266 | vec<cgraph_edge_p> *new_edges) |
ca31b95f JH |
1267 | { |
1268 | int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO); | |
670cd5c5 | 1269 | fibheap_t heap; |
d7d1d041 | 1270 | struct cgraph_node *node; |
ca31b95f | 1271 | struct cgraph_edge *e; |
4c0f7679 | 1272 | struct cgraph_node *master_clone = NULL, *next; |
ca31b95f JH |
1273 | int depth = 0; |
1274 | int n = 0; | |
1275 | ||
d7d1d041 RG |
1276 | node = edge->caller; |
1277 | if (node->global.inlined_to) | |
1278 | node = node->global.inlined_to; | |
1279 | ||
67348ccc | 1280 | if (DECL_DECLARED_INLINE_P (node->decl)) |
4c0f7679 | 1281 | limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE); |
ca31b95f JH |
1282 | |
1283 | /* Make sure that function is small enough to be considered for inlining. */ | |
4c0f7679 | 1284 | if (estimate_size_after_inlining (node, edge) >= limit) |
670cd5c5 JH |
1285 | return false; |
1286 | heap = fibheap_new (); | |
1287 | lookup_recursive_calls (node, node, heap); | |
1288 | if (fibheap_empty (heap)) | |
1289 | { | |
1290 | fibheap_delete (heap); | |
1291 | return false; | |
1292 | } | |
ca31b95f JH |
1293 | |
1294 | if (dump_file) | |
b8698a0f | 1295 | fprintf (dump_file, |
670cd5c5 | 1296 | " Performing recursive inlining on %s\n", |
fec39fa6 | 1297 | node->name ()); |
ca31b95f | 1298 | |
ca31b95f | 1299 | /* Do the inlining and update list of recursive call during process. */ |
d7d1d041 | 1300 | while (!fibheap_empty (heap)) |
ca31b95f | 1301 | { |
c5274326 TN |
1302 | struct cgraph_edge *curr |
1303 | = (struct cgraph_edge *) fibheap_extract_min (heap); | |
6ced940d | 1304 | struct cgraph_node *cnode, *dest = curr->callee; |
d7d1d041 | 1305 | |
4c0f7679 JH |
1306 | if (!can_inline_edge_p (curr, true)) |
1307 | continue; | |
1308 | ||
6ced940d JH |
1309 | /* MASTER_CLONE is produced in the case we already started modifying
1310 | the function. Be sure to redirect the edge to the original body before | |
1311 | estimating growth; otherwise we would be measuring growth after inlining | |
1312 | into the already modified body. */ | |
1313 | if (master_clone) | |
1314 | { | |
1315 | cgraph_redirect_edge_callee (curr, master_clone); | |
1316 | reset_edge_growth_cache (curr); | |
1317 | } | |
1318 | ||
1319 | if (estimate_size_after_inlining (node, curr) > limit) | |
1320 | { | |
1321 | cgraph_redirect_edge_callee (curr, dest); | |
1322 | reset_edge_growth_cache (curr); | |
1323 | break; | |
1324 | } | |
1325 | ||
c5a4444c JH |
1326 | depth = 1; |
1327 | for (cnode = curr->caller; | |
1328 | cnode->global.inlined_to; cnode = cnode->callers->caller) | |
67348ccc DM |
1329 | if (node->decl |
1330 | == cgraph_function_or_thunk_node (curr->callee, NULL)->decl) | |
f791d333 | 1331 | depth++; |
c5a4444c | 1332 | |
4c0f7679 | 1333 | if (!want_inline_self_recursive_call_p (curr, node, false, depth)) |
6ced940d JH |
1334 | { |
1335 | cgraph_redirect_edge_callee (curr, dest); | |
1336 | reset_edge_growth_cache (curr); | |
1337 | continue; | |
1338 | } | |
ca31b95f | 1339 | |
670cd5c5 | 1340 | if (dump_file) |
c5a4444c | 1341 | { |
b8698a0f | 1342 | fprintf (dump_file, |
c5a4444c JH |
1343 | " Inlining call of depth %i", depth); |
1344 | if (node->count) | |
1345 | { | |
1346 | fprintf (dump_file, " called approx. %.2f times per call", | |
1347 | (double)curr->count / node->count); | |
1348 | } | |
1349 | fprintf (dump_file, "\n"); | |
1350 | } | |
4c0f7679 JH |
1351 | if (!master_clone) |
1352 | { | |
1353 | /* We need the original clone to copy around. */ | |
67348ccc | 1354 | master_clone = cgraph_clone_node (node, node->decl, |
898b8927 | 1355 | node->count, CGRAPH_FREQ_BASE, |
44a60244 | 1356 | false, vNULL, true, NULL); |
4c0f7679 JH |
1357 | for (e = master_clone->callees; e; e = e->next_callee) |
1358 | if (!e->inline_failed) | |
fee8b6da | 1359 | clone_inlined_nodes (e, true, false, NULL); |
6ced940d JH |
1360 | cgraph_redirect_edge_callee (curr, master_clone); |
1361 | reset_edge_growth_cache (curr); | |
4c0f7679 JH |
1362 | } |
1363 | ||
c170d40f | 1364 | inline_call (curr, false, new_edges, &overall_size, true); |
670cd5c5 | 1365 | lookup_recursive_calls (node, curr->callee, heap); |
ca31b95f JH |
1366 | n++; |
1367 | } | |
4c0f7679 | 1368 | |
c5a4444c JH |
1369 | if (!fibheap_empty (heap) && dump_file) |
1370 | fprintf (dump_file, " Recursive inlining growth limit met.\n"); | |
670cd5c5 | 1371 | fibheap_delete (heap); |
4c0f7679 JH |
1372 | |
1373 | if (!master_clone) | |
1374 | return false; | |
1375 | ||
ca31b95f | 1376 | if (dump_file) |
b8698a0f | 1377 | fprintf (dump_file, |
4c0f7679 JH |
1378 | "\n Inlined %i times, " |
1379 | "body grown from size %i to %i, time %i to %i\n", n, | |
e7f23018 JH |
1380 | inline_summary (master_clone)->size, inline_summary (node)->size, |
1381 | inline_summary (master_clone)->time, inline_summary (node)->time); | |
ca31b95f JH |
1382 | |
1383 | /* Remove the master clone we used for inlining. We rely on the fact that | |
1384 | clones inlined into the master clone get queued just before the master | |
1385 | clone, so we don't need recursion. */ | |
2aae7680 | 1386 | for (node = cgraph_first_function (); node != master_clone; |
96fc428c JH |
1387 | node = next) |
1388 | { | |
2aae7680 | 1389 | next = cgraph_next_function (node); |
96fc428c JH |
1390 | if (node->global.inlined_to == master_clone) |
1391 | cgraph_remove_node (node); | |
1392 | } | |
ca31b95f | 1393 | cgraph_remove_node (master_clone); |
4c0f7679 | 1394 | return true; |
ca31b95f JH |
1395 | } |
1396 | ||
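/* What the recursive inlining above buys us, on a toy example (illustrative
   only, not from the original file): inlining f's recursive call into
   itself behaves like unrolling the recursion, which is why the code above
   bounds the process by body size and depth.  */
int f (int n) { return n ? n + f (n - 1) : 0; }

/* After two rounds of self-inlining, f conceptually becomes: */
int
f_unrolled (int n)
{
  if (n == 0) return 0;
  if (n == 1) return 1;
  return n + (n - 1) + f_unrolled (n - 2);   /* two levels peeled per call */
}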
09a2806f | 1397 | |
88512ba0 | 1398 | /* Given a whole-compilation-unit estimate of INSNS, compute how large we can |
b7c27d51 | 1399 | allow the unit to grow. */ |
09a2806f | 1400 | |
b7c27d51 JH |
1401 | static int |
1402 | compute_max_insns (int insns) | |
1403 | { | |
1404 | int max_insns = insns; | |
1405 | if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS)) | |
1406 | max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS); | |
1407 | ||
3e2a6e7b JH |
1408 | return ((HOST_WIDEST_INT) max_insns |
1409 | * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100); | |
b7c27d51 JH |
1410 | } |
1411 | ||
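/* A worked instance of compute_max_insns (illustrative; the concrete numbers
   assume --param large-unit-insns=10000 and --param inline-unit-growth=30,
   which I take to be this era's defaults - check params.def for your tree).  */
#include <stdio.h>

#define LARGE_UNIT_INSNS   10000   /* assumed default */
#define INLINE_UNIT_GROWTH 30      /* assumed default, in percent */

static long long
max_insns_sketch (int insns)
{
  int max_insns = insns < LARGE_UNIT_INSNS ? LARGE_UNIT_INSNS : insns;
  return (long long) max_insns * (100 + INLINE_UNIT_GROWTH) / 100;
}

int
main (void)
{
  printf ("%lld\n", max_insns_sketch (50000)); /* 65000: +30% growth */
  printf ("%lld\n", max_insns_sketch (100));   /* 13000: tiny units are first
                                                  rounded up to the floor */
  return 0;
}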
09a2806f | 1412 | |
3e293154 | 1413 | /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */ |
09a2806f | 1414 | |
3e293154 | 1415 | static void |
9771b263 | 1416 | add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges) |
3e293154 | 1417 | { |
9771b263 | 1418 | while (new_edges.length () > 0) |
3e293154 | 1419 | { |
9771b263 | 1420 | struct cgraph_edge *edge = new_edges.pop (); |
3e293154 MJ |
1421 | |
1422 | gcc_assert (!edge->aux); | |
a5b1779f | 1423 | if (edge->inline_failed |
4c0f7679 JH |
1424 | && can_inline_edge_p (edge, true) |
1425 | && want_inline_small_function_p (edge, true)) | |
1426 | edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge); | |
3e293154 MJ |
1427 | } |
1428 | } | |
1429 | ||
042ae7d2 JH |
1430 | /* Remove EDGE from the fibheap. */ |
1431 | ||
1432 | static void | |
1433 | heap_edge_removal_hook (struct cgraph_edge *e, void *data) | |
1434 | { | |
d0b66480 JH |
1435 | if (e->callee) |
1436 | reset_node_growth_cache (e->callee); | |
042ae7d2 JH |
1437 | if (e->aux) |
1438 | { | |
1439 | fibheap_delete_node ((fibheap_t)data, (fibnode_t)e->aux); | |
1440 | e->aux = NULL; | |
1441 | } | |
1442 | } | |
3e293154 | 1443 | |
09ce3660 JH |
1444 | /* Return true if speculation of edge E seems useful. |
1445 | If ANTICIPATE_INLINING is true, be conservative and hope that E | |
1446 | may get inlined. */ | |
1447 | ||
1448 | bool | |
1449 | speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining) | |
1450 | { | |
1451 | enum availability avail; | |
1452 | struct cgraph_node *target = cgraph_function_or_thunk_node (e->callee, &avail); | |
1453 | struct cgraph_edge *direct, *indirect; | |
1454 | struct ipa_ref *ref; | |
1455 | ||
1456 | gcc_assert (e->speculative && !e->indirect_unknown_callee); | |
1457 | ||
1458 | if (!cgraph_maybe_hot_edge_p (e)) | |
1459 | return false; | |
1460 | ||
1461 | /* See if IP optimizations found something potentially useful about the | |
1462 | function. For now we look only for CONST/PURE flags. Almost everything | |
1463 | else we propagate is useless. */ | |
1464 | if (avail >= AVAIL_AVAILABLE) | |
1465 | { | |
67348ccc | 1466 | int ecf_flags = flags_from_decl_or_type (target->decl); |
09ce3660 JH |
1467 | if (ecf_flags & ECF_CONST) |
1468 | { | |
1469 | cgraph_speculative_call_info (e, direct, indirect, ref); | |
1470 | if (!(indirect->indirect_info->ecf_flags & ECF_CONST)) | |
1471 | return true; | |
1472 | } | |
1473 | else if (ecf_flags & ECF_PURE) | |
1474 | { | |
1475 | cgraph_speculative_call_info (e, direct, indirect, ref); | |
1476 | if (!(indirect->indirect_info->ecf_flags & ECF_PURE)) | |
1477 | return true; | |
1478 | } | |
1479 | } | |
1480 | /* If we did not manage to inline the function nor redirect | |
1481 | to an ipa-cp clone (which can be recognized by the local flag being set), | |
1482 | keeping the speculation is probably pointless unless the hardware is | |
1483 | missing an indirect call predictor. */ | |
1484 | if (!anticipate_inlining && e->inline_failed && !target->local.local) | |
1485 | return false; | |
1486 | /* For overwritable targets there is not much to do. */ | |
1487 | if (e->inline_failed && !can_inline_edge_p (e, false, true)) | |
1488 | return false; | |
1489 | /* OK, speculation seems interesting. */ | |
1490 | return true; | |
1491 | } | |
1492 | ||
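/* For orientation, the shape a speculative edge describes, written as plain
   C (a sketch, not GCC internals): the indirect call is guarded by a
   comparison against the likely target, so the direct half can be inlined
   or benefit from the CONST/PURE information checked above.  Names here are
   illustrative.  */
void
speculative_call_shape (void (*fn) (void), void (*likely_target) (void))
{
  if (fn == likely_target)
    likely_target ();   /* direct edge: candidate for inlining */
  else
    fn ();              /* indirect edge kept for correctness */
}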
1493 | /* We know that EDGE is not going to be inlined. | |
1494 | See if we can remove speculation. */ | |
1495 | ||
1496 | static void | |
1497 | resolve_noninline_speculation (fibheap_t edge_heap, struct cgraph_edge *edge) | |
1498 | { | |
1499 | if (edge->speculative && !speculation_useful_p (edge, false)) | |
1500 | { | |
1501 | struct cgraph_node *node = edge->caller; | |
1502 | struct cgraph_node *where = node->global.inlined_to | |
1503 | ? node->global.inlined_to : node; | |
1504 | bitmap updated_nodes = BITMAP_ALLOC (NULL); | |
1505 | ||
1506 | cgraph_resolve_speculation (edge, NULL); | |
09ce3660 JH |
1507 | reset_edge_caches (where); |
1508 | inline_update_overall_summary (where); | |
1509 | update_caller_keys (edge_heap, where, | |
1510 | updated_nodes, NULL); | |
d0b66480 JH |
1511 | update_callee_keys (edge_heap, where, |
1512 | updated_nodes); | |
09ce3660 JH |
1513 | BITMAP_FREE (updated_nodes); |
1514 | } | |
1515 | } | |
1516 | ||
ca31b95f | 1517 | /* We use a greedy algorithm for inlining small functions:
09a2806f JH |
1518 | all inline candidates are put into a prioritized heap ordered by | |
1519 | increasing badness. | |
ca31b95f | 1520 | |
09a2806f | 1521 | The inlining of small functions is bounded by unit growth parameters. */ |
ca31b95f JH |
1522 | |
1523 | static void | |
4c0f7679 | 1524 | inline_small_functions (void) |
ca31b95f JH |
1525 | { |
1526 | struct cgraph_node *node; | |
670cd5c5 | 1527 | struct cgraph_edge *edge; |
0823efed | 1528 | fibheap_t edge_heap = fibheap_new (); |
670cd5c5 | 1529 | bitmap updated_nodes = BITMAP_ALLOC (NULL); |
85057983 | 1530 | int min_size, max_size; |
ef062b13 | 1531 | auto_vec<cgraph_edge_p> new_indirect_edges; |
09a2806f | 1532 | int initial_size = 0; |
68cc8feb | 1533 | struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes); |
042ae7d2 | 1534 | struct cgraph_edge_hook_list *edge_removal_hook_holder; |
3e293154 | 1535 | |
661e7330 | 1536 | if (flag_indirect_inlining) |
9771b263 | 1537 | new_indirect_edges.create (8); |
670cd5c5 | 1538 | |
042ae7d2 JH |
1539 | edge_removal_hook_holder |
1540 | = cgraph_add_edge_removal_hook (&heap_edge_removal_hook, edge_heap); | |
1541 | ||
1a3118e9 JH |
1542 | /* Compute overall unit size and other global parameters used by badness |
1543 | metrics. */ | |
ca31b95f | 1544 | |
09a2806f | 1545 | max_count = 0; |
68cc8feb JH |
1546 | ipa_reduced_postorder (order, true, true, NULL); |
1547 | free (order); | |
1a3118e9 | 1548 | |
c47d0034 JH |
1549 | FOR_EACH_DEFINED_FUNCTION (node) |
1550 | if (!node->global.inlined_to) | |
e7f23018 | 1551 | { |
a5b1779f JH |
1552 | if (cgraph_function_with_gimple_body_p (node) |
1553 | || node->thunk.thunk_p) | |
1554 | { | |
1555 | struct inline_summary *info = inline_summary (node); | |
67348ccc | 1556 | struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux; |
ca31b95f | 1557 | |
67348ccc | 1558 | if (!DECL_EXTERNAL (node->decl)) |
a5b1779f | 1559 | initial_size += info->size; |
d59171da | 1560 | info->growth = estimate_growth (node); |
bf3f6510 JH |
1561 | if (dfs && dfs->next_cycle) |
1562 | { | |
1563 | struct cgraph_node *n2; | |
1564 | int id = dfs->scc_no + 1; | |
1565 | for (n2 = node; n2; | |
67348ccc | 1566 | n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle) |
bf3f6510 JH |
1567 | { |
1568 | struct inline_summary *info2 = inline_summary (n2); | |
1569 | if (info2->scc_no) | |
1570 | break; | |
1571 | info2->scc_no = id; | |
1572 | } | |
1573 | } | |
a5b1779f | 1574 | } |
09a2806f | 1575 | |
e7f23018 | 1576 | for (edge = node->callers; edge; edge = edge->next_caller) |
632b4f8e JH |
1577 | if (max_count < edge->count) |
1578 | max_count = edge->count; | |
e7f23018 | 1579 | } |
1b08b734 DC |
1580 | sreal_init (&max_count_real, max_count, 0); |
1581 | sreal_init (&max_relbenefit_real, RELATIVE_TIME_BENEFIT_RANGE, 0); | |
1582 | sreal_init (&half_int_min_real, INT_MAX / 2, 0); | |
b48ccf0d JH |
1583 | ipa_free_postorder_info (); |
1584 | initialize_growth_caches (); | |
1585 | ||
1586 | if (dump_file) | |
1587 | fprintf (dump_file, | |
1588 | "\nDeciding on inlining of small functions. Starting with size %i.\n", | |
1589 | initial_size); | |
b7c27d51 | 1590 | |
8a8dccb2 | 1591 | overall_size = initial_size; |
85057983 JH |
1592 | max_size = compute_max_insns (overall_size); |
1593 | min_size = overall_size; | |
1a3118e9 JH |
1594 | |
1595 | /* Populate the heap with all edges we might inline. */ |
1596 | ||
c47d0034 | 1597 | FOR_EACH_DEFINED_FUNCTION (node) |
09ce3660 JH |
1598 | { |
1599 | bool update = false; | |
1600 | struct cgraph_edge *next; | |
1a3118e9 | 1601 | |
09ce3660 JH |
1602 | if (dump_file) |
1603 | fprintf (dump_file, "Enqueueing calls in %s/%i.\n", | |
fec39fa6 | 1604 | node->name (), node->order); |
09ce3660 JH |
1605 | |
1606 | for (edge = node->callees; edge; edge = next) | |
1607 | { | |
1608 | next = edge->next_callee; | |
1a3118e9 | 1609 | if (edge->inline_failed |
09ce3660 | 1610 | && !edge->aux |
1a3118e9 JH |
1611 | && can_inline_edge_p (edge, true) |
1612 | && want_inline_small_function_p (edge, true)) | |
1614 | { | |
1615 | gcc_assert (!edge->aux); | |
0823efed | 1616 | update_edge_key (edge_heap, edge); |
1a3118e9 | 1617 | } |
09ce3660 JH |
1618 | if (edge->speculative && !speculation_useful_p (edge, edge->aux != NULL)) |
1619 | { | |
1620 | cgraph_resolve_speculation (edge, NULL); | |
1621 | update = true; | |
1622 | } | |
1623 | } | |
1624 | if (update) | |
1625 | { | |
1626 | struct cgraph_node *where = node->global.inlined_to | |
1627 | ? node->global.inlined_to : node; | |
1628 | inline_update_overall_summary (where); | |
1629 | reset_node_growth_cache (where); | |
1630 | reset_edge_caches (where); | |
1631 | update_caller_keys (edge_heap, where, | |
1632 | updated_nodes, NULL); | |
1633 | bitmap_clear (updated_nodes); | |
1634 | } | |
1635 | } | |
1a3118e9 | 1636 | |
09a2806f JH |
1637 | gcc_assert (in_lto_p |
1638 | || !max_count | |
1639 | || (profile_info && flag_branch_probabilities)); | |
b7c27d51 | 1640 | |
0823efed | 1641 | while (!fibheap_empty (edge_heap)) |
ca31b95f | 1642 | { |
85057983 | 1643 | int old_size = overall_size; |
1ce18dc8 | 1644 | struct cgraph_node *where, *callee; |
0823efed | 1645 | int badness = fibheap_min_key (edge_heap); |
cdc029b9 | 1646 | int current_badness; |
8ec802d7 | 1647 | int cached_badness; |
1ce18dc8 | 1648 | int growth; |
670cd5c5 | 1649 | |
0823efed | 1650 | edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap); |
1ce18dc8 JH |
1651 | gcc_assert (edge->aux); |
1652 | edge->aux = NULL; | |
1653 | if (!edge->inline_failed) | |
1654 | continue; | |
cdc029b9 | 1655 | |
8ec802d7 | 1656 | /* Be sure that the caches are maintained consistently. |
073a8998 | 1657 | We cannot make this ENABLE_CHECKING-only because it would cause different |
8ec802d7 JH |
1658 | updates of the fibheap queue. */ | |
1659 | cached_badness = edge_badness (edge, false); | |
40fda55b JH |
1660 | reset_edge_growth_cache (edge); |
1661 | reset_node_growth_cache (edge->callee); | |
40fda55b | 1662 | |
cdc029b9 | 1663 | /* When updating the edge costs, we only decrease badness in the keys. |
09a2806f JH |
1664 | Increases of badness are handled lazily; when we see a key with an |
1665 | out-of-date value on it, we re-insert it now. */ | |
4c0f7679 | 1666 | current_badness = edge_badness (edge, false); |
8ec802d7 | 1667 | gcc_assert (cached_badness == current_badness); |
cdc029b9 JH |
1668 | gcc_assert (current_badness >= badness); |
1669 | if (current_badness != badness) | |
1670 | { | |
0823efed | 1671 | edge->aux = fibheap_insert (edge_heap, current_badness, edge); |
cdc029b9 JH |
1672 | continue; |
1673 | } | |
4c0f7679 JH |
1674 | |
1675 | if (!can_inline_edge_p (edge, true)) | |
09ce3660 JH |
1676 | { |
1677 | resolve_noninline_speculation (edge_heap, edge); | |
1678 | continue; | |
1679 | } | |
cdc029b9 | 1680 | |
a5b1779f | 1681 | callee = cgraph_function_or_thunk_node (edge->callee, NULL); |
03dfc36d | 1682 | growth = estimate_edge_growth (edge); |
ca31b95f | 1683 | if (dump_file) |
ca31b95f | 1684 | { |
b8698a0f | 1685 | fprintf (dump_file, |
9de04252 | 1686 | "\nConsidering %s/%i with %i size\n", |
fec39fa6 | 1687 | callee->name (), callee->order, |
a5b1779f | 1688 | inline_summary (callee)->size); |
b8698a0f | 1689 | fprintf (dump_file, |
9de04252 | 1690 | " to be inlined into %s/%i in %s:%i\n" |
4c0f7679 | 1691 | " Estimated growth after inlined into all is %+i insns.\n" |
45a80bb9 | 1692 | " Estimated badness is %i, frequency %.2f.\n", |
fec39fa6 | 1693 | edge->caller->name (), edge->caller->order, |
0115e6c7 JH |
1694 | flag_wpa ? "unknown" |
1695 | : gimple_filename ((const_gimple) edge->call_stmt), | |
4c0f7679 JH |
1696 | flag_wpa ? -1 |
1697 | : gimple_lineno ((const_gimple) edge->call_stmt), | |
a5b1779f | 1698 | estimate_growth (callee), |
1ce18dc8 | 1699 | badness, |
45a80bb9 | 1700 | edge->frequency / (double)CGRAPH_FREQ_BASE); |
670cd5c5 | 1701 | if (edge->count) |
4c0f7679 JH |
1702 | fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", |
1703 | edge->count); | |
1ce18dc8 | 1704 | if (dump_flags & TDF_DETAILS) |
4c0f7679 | 1705 | edge_badness (edge, true); |
ca31b95f JH |
1706 | } |
1707 | ||
4c0f7679 | 1708 | if (overall_size + growth > max_size |
67348ccc | 1709 | && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)) |
670cd5c5 | 1710 | { |
4c0f7679 JH |
1711 | edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT; |
1712 | report_inline_failed_reason (edge); | |
09ce3660 | 1713 | resolve_noninline_speculation (edge_heap, edge); |
670cd5c5 JH |
1714 | continue; |
1715 | } | |
4c0f7679 JH |
1716 | |
1717 | if (!want_inline_small_function_p (edge, true)) | |
09ce3660 JH |
1718 | { |
1719 | resolve_noninline_speculation (edge_heap, edge); | |
1720 | continue; | |
1721 | } | |
09a2806f JH |
1722 | |
1723 | /* The heuristics for inlining small functions work poorly for | |
1724 | recursive calls, where we achieve an effect similar to loop unrolling. | |
1725 | When inlining such an edge seems profitable, leave the decision to | |
1726 | the specialized recursive inliner. */ | |
d7d1d041 | 1727 | if (cgraph_edge_recursive_p (edge)) |
670cd5c5 JH |
1728 | { |
1729 | where = edge->caller; | |
1730 | if (where->global.inlined_to) | |
1731 | where = where->global.inlined_to; | |
4c0f7679 JH |
1732 | if (!recursive_inlining (edge, |
1733 | flag_indirect_inlining | |
1734 | ? &new_indirect_edges : NULL)) | |
d7d1d041 RG |
1735 | { |
1736 | edge->inline_failed = CIF_RECURSIVE_INLINING; | |
09ce3660 | 1737 | resolve_noninline_speculation (edge_heap, edge); |
d7d1d041 RG |
1738 | continue; |
1739 | } | |
40fda55b | 1740 | reset_edge_caches (where); |
09a2806f JH |
1741 | /* The recursive inliner inlines all recursive calls of the function | |
1742 | at once. Consequently we need to update all callee keys. */ | |
661e7330 | 1743 | if (flag_indirect_inlining) |
0823efed DN |
1744 | add_new_edges_to_heap (edge_heap, new_indirect_edges); |
1745 | update_callee_keys (edge_heap, where, updated_nodes); | |
09ce3660 | 1746 | bitmap_clear (updated_nodes); |
670cd5c5 JH |
1747 | } |
1748 | else | |
1749 | { | |
4c0f7679 JH |
1750 | struct cgraph_node *outer_node = NULL; |
1751 | int depth = 0; | |
1752 | ||
1753 | /* Consider the case where the self-recursive function A is inlined into B. | |
1754 | This is a desired optimization in some cases, since it leads to an effect | |
1755 | similar to loop peeling and we might completely optimize out the | |
1756 | recursive call. However we must be extra selective. */ | |
1757 | ||
1758 | where = edge->caller; | |
1759 | while (where->global.inlined_to) | |
670cd5c5 | 1760 | { |
67348ccc | 1761 | if (where->decl == callee->decl) |
4c0f7679 JH |
1762 | outer_node = where, depth++; |
1763 | where = where->callers->caller; | |
1764 | } | |
1765 | if (outer_node | |
1766 | && !want_inline_self_recursive_call_p (edge, outer_node, | |
1767 | true, depth)) | |
1768 | { | |
1769 | edge->inline_failed | |
67348ccc | 1770 | = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl) |
4c0f7679 | 1771 | ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED); |
09ce3660 | 1772 | resolve_noninline_speculation (edge_heap, edge); |
670cd5c5 JH |
1773 | continue; |
1774 | } | |
4c0f7679 JH |
1775 | else if (depth && dump_file) |
1776 | fprintf (dump_file, " Peeling recursion with depth %i\n", depth); | |
1777 | ||
9b8051b4 | 1778 | gcc_checking_assert (!callee->global.inlined_to); |
c170d40f | 1779 | inline_call (edge, true, &new_indirect_edges, &overall_size, true); |
661e7330 | 1780 | if (flag_indirect_inlining) |
0823efed | 1781 | add_new_edges_to_heap (edge_heap, new_indirect_edges); |
f8e2a1ed | 1782 | |
40fda55b JH |
1783 | reset_edge_caches (edge->callee); |
1784 | reset_node_growth_cache (callee); | |
1785 | ||
b48ccf0d | 1786 | update_callee_keys (edge_heap, where, updated_nodes); |
670cd5c5 JH |
1787 | } |
1788 | where = edge->caller; | |
1789 | if (where->global.inlined_to) | |
1790 | where = where->global.inlined_to; | |
1791 | ||
1792 | /* Our profitability metric can depend on local properties | |
1793 | such as the number of inlinable calls and the size of the function body. | |
1794 | After inlining these properties might change for the function we | |
1795 | inlined into (since its body size changed) and for the functions | |
1796 | called by the function we inlined (since the number of their inlinable | |
1797 | callers might change). */ | |
0823efed | 1798 | update_caller_keys (edge_heap, where, updated_nodes, NULL); |
670cd5c5 | 1799 | bitmap_clear (updated_nodes); |
ca31b95f | 1800 | |
670cd5c5 | 1801 | if (dump_file) |
50fe876d | 1802 | { |
b8698a0f | 1803 | fprintf (dump_file, |
2f2935b6 | 1804 | " Inlined into %s which now has time %i and size %i," |
85057983 | 1805 | "net change of %+i.\n", |
fec39fa6 | 1806 | edge->caller->name (), |
e7f23018 JH |
1807 | inline_summary (edge->caller)->time, |
1808 | inline_summary (edge->caller)->size, | |
85057983 | 1809 | overall_size - old_size); |
50fe876d | 1810 | } |
85057983 | 1811 | if (min_size > overall_size) |
b7c27d51 | 1812 | { |
85057983 JH |
1813 | min_size = overall_size; |
1814 | max_size = compute_max_insns (min_size); | |
b7c27d51 JH |
1815 | |
1816 | if (dump_file) | |
85057983 | 1817 | fprintf (dump_file, "New minimal size reached: %i\n", min_size); |
b7c27d51 | 1818 | } |
ca31b95f | 1819 | } |
3e293154 | 1820 | |
632b4f8e | 1821 | free_growth_caches (); |
0823efed | 1822 | fibheap_delete (edge_heap); |
09a2806f JH |
1823 | if (dump_file) |
1824 | fprintf (dump_file, | |
1825 | "Unit growth for small function inlining: %i->%i (%i%%)\n", | |
632b4f8e JH |
1826 | initial_size, overall_size, |
1827 | initial_size ? overall_size * 100 / (initial_size) - 100: 0); | |
670cd5c5 | 1828 | BITMAP_FREE (updated_nodes); |
042ae7d2 | 1829 | cgraph_remove_edge_removal_hook (edge_removal_hook_holder); |
ca31b95f JH |
1830 | } |
1831 | ||
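/* The queue discipline of inline_small_functions in miniature (illustrative,
   with a toy array standing in for the fibheap): keys are allowed to go
   stale when badness increases, so after extracting the minimum we recompute
   the badness and lazily re-insert the element if its key was out of date.  */
#include <stdio.h>

#define N 4
static int key[N];                    /* cached badness, may be stale */
static int badness[N];                /* current badness */

int
main (void)
{
  int done[N] = { 0 };
  for (int i = 0; i < N; i++)
    {
      key[i] = i;                       /* enqueue with initial badness */
      badness[i] = (i == 1) ? 5 : i;    /* element 1 got worse meanwhile */
    }
  for (;;)
    {
      int best = -1;
      for (int i = 0; i < N; i++)       /* extract-min substitute */
	if (!done[i] && (best < 0 || key[i] < key[best]))
	  best = i;
      if (best < 0)
	break;
      if (key[best] != badness[best])   /* stale key: re-insert lazily */
	{
	  key[best] = badness[best];
	  continue;
	}
      done[best] = 1;
      printf ("inlining candidate %d (badness %d)\n", best, badness[best]);
    }
  return 0;                             /* prints 0, 2, 3, then 1 */
}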
09a2806f JH |
1832 | /* Flatten NODE. Performed both during early inlining and |
1833 | at IPA inlining time. */ | |
af961c7f RG |
1834 | |
1835 | static void | |
632b4f8e | 1836 | flatten_function (struct cgraph_node *node, bool early) |
af961c7f RG |
1837 | { |
1838 | struct cgraph_edge *e; | |
1839 | ||
1840 | /* We shouldn't be called recursively when we are being processed. */ | |
67348ccc | 1841 | gcc_assert (node->aux == NULL); |
af961c7f | 1842 | |
67348ccc | 1843 | node->aux = (void *) node; |
af961c7f RG |
1844 | |
1845 | for (e = node->callees; e; e = e->next_callee) | |
1846 | { | |
1847 | struct cgraph_node *orig_callee; | |
a5b1779f | 1848 | struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL); |
af961c7f | 1849 | |
af961c7f | 1850 | /* Have we hit a cycle? Then it is time to give up. */ |
67348ccc | 1851 | if (callee->aux) |
af961c7f RG |
1852 | { |
1853 | if (dump_file) | |
1854 | fprintf (dump_file, | |
1855 | "Not inlining %s into %s to avoid cycle.\n", | |
fec39fa6 TS |
1856 | xstrdup (callee->name ()), |
1857 | xstrdup (e->caller->name ())); | |
af961c7f RG |
1858 | e->inline_failed = CIF_RECURSIVE_INLINING; |
1859 | continue; | |
1860 | } | |
1861 | ||
1862 | /* When the edge is already inlined, we just need to recurse into | |
1863 | it in order to fully flatten the leaves. */ | |
1864 | if (!e->inline_failed) | |
1865 | { | |
a5b1779f | 1866 | flatten_function (callee, early); |
af961c7f RG |
1867 | continue; |
1868 | } | |
1869 | ||
4c0f7679 JH |
1870 | /* The flatten attribute needs to be processed during late inlining. For | |
1871 | extra code quality, however, we also do flattening during early | |
1872 | optimization. */ | |
632b4f8e | 1873 | if (!early |
4c0f7679 JH |
1874 | ? !can_inline_edge_p (e, true) |
1875 | : !can_early_inline_edge_p (e)) | |
1876 | continue; | |
1877 | ||
d7d1d041 | 1878 | if (cgraph_edge_recursive_p (e)) |
af961c7f RG |
1879 | { |
1880 | if (dump_file) | |
1881 | fprintf (dump_file, "Not inlining: recursive call.\n"); | |
1882 | continue; | |
1883 | } | |
1884 | ||
67348ccc DM |
1885 | if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl)) |
1886 | != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl))) | |
59e0c6b7 RG |
1887 | { |
1888 | if (dump_file) | |
1889 | fprintf (dump_file, "Not inlining: SSA form does not match.\n"); | |
1890 | continue; | |
1891 | } | |
1892 | ||
af961c7f RG |
1893 | /* Inline the edge and flatten the inline clone. Avoid |
1894 | recursing through the original node if the node was cloned. */ | |
1895 | if (dump_file) | |
1896 | fprintf (dump_file, " Inlining %s into %s.\n", | |
fec39fa6 TS |
1897 | xstrdup (callee->name ()), |
1898 | xstrdup (e->caller->name ())); | |
a5b1779f | 1899 | orig_callee = callee; |
c170d40f | 1900 | inline_call (e, true, NULL, NULL, false); |
af961c7f | 1901 | if (e->callee != orig_callee) |
67348ccc | 1902 | orig_callee->aux = (void *) node; |
632b4f8e | 1903 | flatten_function (e->callee, early); |
af961c7f | 1904 | if (e->callee != orig_callee) |
67348ccc | 1905 | orig_callee->aux = NULL; |
af961c7f RG |
1906 | } |
1907 | ||
67348ccc | 1908 | node->aux = NULL; |
c170d40f JH |
1909 | if (!node->global.inlined_to) |
1910 | inline_update_overall_summary (node); | |
af961c7f RG |
1911 | } |
1912 | ||
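/* The AUX-based cycle guard of flatten_function in isolation (a sketch with
   toy types, not GCC's cgraph): AUX doubles as an "on the current flattening
   path" mark, so a cycle is detected the moment we reach a marked callee and
   that edge is skipped.  */
#include <stddef.h>

struct toy_edge;
struct toy_node { struct toy_edge *callees; void *aux; };
struct toy_edge { struct toy_node *callee; struct toy_edge *next_callee; };

static void
toy_flatten (struct toy_node *node)
{
  node->aux = node;                            /* mark: being processed */
  for (struct toy_edge *e = node->callees; e; e = e->next_callee)
    {
      if (e->callee->aux)                      /* marked: inlining would cycle */
	continue;
      toy_flatten (e->callee);                 /* recurse to flatten the leaves */
    }
  node->aux = NULL;                            /* unmark on the way out */
}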
a81b0a3d JH |
1913 | /* Count the number of callers of NODE and store it into DATA (which | |
1914 | points to an int). Worker for cgraph_for_node_and_aliases. */ | |
1915 | ||
1916 | static bool | |
1917 | sum_callers (struct cgraph_node *node, void *data) | |
1918 | { | |
1919 | struct cgraph_edge *e; | |
1920 | int *num_calls = (int *)data; | |
1921 | ||
1922 | for (e = node->callers; e; e = e->next_caller) | |
1923 | (*num_calls)++; | |
1924 | return false; | |
1925 | } | |
1926 | ||
1927 | /* Inline NODE to all callers. Worker for cgraph_for_node_and_aliases. | |
1928 | DATA points to the number of calls originally found so we avoid infinite | |
1929 | recursion. */ | |
1930 | ||
1931 | static bool | |
1932 | inline_to_all_callers (struct cgraph_node *node, void *data) | |
1933 | { | |
1934 | int *num_calls = (int *)data; | |
1935 | while (node->callers && !node->global.inlined_to) | |
1936 | { | |
1937 | struct cgraph_node *caller = node->callers->caller; | |
1938 | ||
1939 | if (dump_file) | |
1940 | { | |
1941 | fprintf (dump_file, | |
1942 | "\nInlining %s size %i.\n", | |
fec39fa6 | 1943 | node->name (), |
a81b0a3d JH |
1944 | inline_summary (node)->size); |
1945 | fprintf (dump_file, | |
1946 | " Called once from %s %i insns.\n", | |
fec39fa6 | 1947 | node->callers->caller->name (), |
a81b0a3d JH |
1948 | inline_summary (node->callers->caller)->size); |
1949 | } | |
1950 | ||
1951 | inline_call (node->callers, true, NULL, NULL, true); | |
1952 | if (dump_file) | |
1953 | fprintf (dump_file, | |
1954 | " Inlined into %s which now has %i size\n", | |
fec39fa6 | 1955 | caller->name (), |
a81b0a3d JH |
1956 | inline_summary (caller)->size); |
1957 | if (!(*num_calls)--) | |
1958 | { | |
1959 | if (dump_file) | |
1960 | fprintf (dump_file, "New calls found; giving up.\n"); | |
19ba6aab | 1961 | return true; |
a81b0a3d JH |
1962 | } |
1963 | } | |
1964 | return false; | |
1965 | } | |
1966 | ||
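/* Back-of-envelope form of the size reasoning behind inline_to_all_callers
   above and the "inlined into all callers" phase of ipa_inline below
   (illustrative only; the real test is want_inline_function_to_all_callers_p,
   and call_overhead is a hypothetical stand-in): each of the N call sites
   grows roughly by the callee body minus the call-sequence overhead, while
   the out-of-line copy disappears entirely.  */
static int
shrinks_when_inlined_everywhere_p (int callee_size, int call_overhead,
				   int ncallers)
{
  int growth = ncallers * (callee_size - call_overhead) - callee_size;
  return growth < 0;   /* e.g. a single caller gives -call_overhead, so
			  functions called once always shrink the unit.  */
}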
ca31b95f JH |
1967 | /* Decide on the inlining. We do so in the topological order to avoid | |
1968 | the expense of updating data structures. */ | |
1969 | ||
c2924966 | 1970 | static unsigned int |
4c0f7679 | 1971 | ipa_inline (void) |
ca31b95f JH |
1972 | { |
1973 | struct cgraph_node *node; | |
1974 | int nnodes; | |
b591a8b7 | 1975 | struct cgraph_node **order; |
ca31b95f | 1976 | int i; |
09ce3660 | 1977 | int cold; |
8a41354f JH |
1978 | bool remove_functions = false; |
1979 | ||
1980 | if (!optimize) | |
1981 | return 0; | |
ca31b95f | 1982 | |
b591a8b7 DM |
1983 | order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes); |
1984 | ||
5ee53a06 | 1985 | if (in_lto_p && optimize) |
fb3f88cc | 1986 | ipa_update_after_lto_read (); |
0dbca537 | 1987 | |
10a5dd5d JH |
1988 | if (dump_file) |
1989 | dump_inline_summaries (dump_file); | |
670cd5c5 | 1990 | |
af8bca3c | 1991 | nnodes = ipa_reverse_postorder (order); |
ca31b95f | 1992 | |
65c70e6b | 1993 | FOR_EACH_FUNCTION (node) |
67348ccc | 1994 | node->aux = 0; |
ca31b95f JH |
1995 | |
1996 | if (dump_file) | |
af961c7f | 1997 | fprintf (dump_file, "\nFlattening functions:\n"); |
ca31b95f | 1998 | |
af961c7f RG |
1999 | /* In the first pass handle functions to be flattened. Do this with |
2000 | a priority so none of our later choices will make this impossible. */ | |
2001 | for (i = nnodes - 1; i >= 0; i--) | |
ca31b95f | 2002 | { |
af961c7f RG |
2003 | node = order[i]; |
2004 | ||
09a2806f | 2005 | /* Handle nodes to be flattened. |
af961c7f RG |
2006 | Ideally when processing callees we stop inlining at the | |
2007 | entry of cycles, possibly cloning that entry point and | |
2008 | trying to flatten it, turning it into a self-recursive | |
2009 | function. */ | |
2010 | if (lookup_attribute ("flatten", | |
67348ccc | 2011 | DECL_ATTRIBUTES (node->decl)) != NULL) |
f8e2a1ed | 2012 | { |
ca31b95f | 2013 | if (dump_file) |
b8698a0f | 2014 | fprintf (dump_file, |
fec39fa6 | 2015 | "Flattening %s\n", node->name ()); |
632b4f8e | 2016 | flatten_function (node, false); |
ca31b95f | 2017 | } |
ca31b95f JH |
2018 | } |
2019 | ||
4c0f7679 | 2020 | inline_small_functions (); |
e70670cf JH |
2021 | |
2022 | /* Do the first after-inlining removal. We want to remove all "stale" extern | |
2023 | inline functions and virtual functions so we really know what is called once. */ | |
a19b1432 | 2024 | symtab_remove_unreachable_nodes (false, dump_file); |
4c0f7679 | 2025 | free (order); |
ca31b95f | 2026 | |
100411f8 JH |
2027 | /* Inline functions with a property that after inlining into all callers the |
2028 | code size will shrink because the out-of-line copy is eliminated. | |
2029 | We do this regardless on the callee size as long as function growth limits | |
2030 | are met. */ | |
09ce3660 JH |
2031 | if (dump_file) |
2032 | fprintf (dump_file, | |
2033 | "\nDeciding on functions to be inlined into all callers and removing useless speculations:\n"); | |
2034 | ||
2035 | /* Inlining one function called once has a good chance of preventing | |
2036 | inlining of another function into the same callee. Ideally we should | |
2037 | work in priority order, but probably inlining hot functions first | |
2038 | is a good cut without the extra pain of maintaining the queue. | |
2039 | ||
2040 | ??? This is not really fitting the bill perfectly: inlining a function | |
2041 | into a callee often leads to better optimization of the callee due to | |
2042 | increased context for optimization. | |
2043 | For example if the main() function calls a function that outputs help | |
2044 | and then a function that does the main optimization, we should inline | |
2045 | the second with priority even if both calls are cold by themselves. | |
2046 | ||
2047 | We probably want to implement a new predicate replacing our use of | |
2048 | maybe_hot_edge interpreted as maybe_hot_edge || callee is known | |
2049 | to be hot. */ | |
2050 | for (cold = 0; cold <= 1; cold++) | |
355866de | 2051 | { |
09ce3660 | 2052 | FOR_EACH_DEFINED_FUNCTION (node) |
ca31b95f | 2053 | { |
09ce3660 JH |
2054 | struct cgraph_edge *edge, *next; |
2055 | bool update = false; | |
2056 | ||
2057 | for (edge = node->callees; edge; edge = next) | |
ca31b95f | 2058 | { |
09ce3660 JH |
2059 | next = edge->next_callee; |
2060 | if (edge->speculative && !speculation_useful_p (edge, false)) | |
e3c7b49c | 2061 | { |
09ce3660 JH |
2062 | cgraph_resolve_speculation (edge, NULL); |
2063 | update = true; | |
8a41354f | 2064 | remove_functions = true; |
09ce3660 JH |
2065 | } |
2066 | } | |
2067 | if (update) | |
2068 | { | |
2069 | struct cgraph_node *where = node->global.inlined_to | |
2070 | ? node->global.inlined_to : node; | |
2071 | reset_node_growth_cache (where); | |
2072 | reset_edge_caches (where); | |
2073 | inline_update_overall_summary (where); | |
2074 | } | |
2075 | if (flag_inline_functions_called_once | |
2076 | && want_inline_function_to_all_callers_p (node, cold)) | |
2077 | { | |
2078 | int num_calls = 0; | |
a81b0a3d JH |
2079 | cgraph_for_node_and_aliases (node, sum_callers, |
2080 | &num_calls, true); | |
2081 | cgraph_for_node_and_aliases (node, inline_to_all_callers, | |
2082 | &num_calls, true); | |
2083 | remove_functions = true; | |
ca31b95f JH |
2084 | } |
2085 | } | |
2086 | } | |
2087 | ||
f8e2a1ed | 2088 | /* Free ipa-prop structures if they are no longer needed. */ |
5ee53a06 | 2089 | if (optimize) |
e33c6cd6 | 2090 | ipa_free_all_structures_after_iinln (); |
f8e2a1ed | 2091 | |
ca31b95f JH |
2092 | if (dump_file) |
2093 | fprintf (dump_file, | |
09a2806f JH |
2094 | "\nInlined %i calls, eliminated %i functions\n\n", |
2095 | ncalls_inlined, nfunctions_inlined); | |
2096 | ||
898b8927 JH |
2097 | if (dump_file) |
2098 | dump_inline_summaries (dump_file); | |
10a5dd5d JH |
2099 | /* In WPA we use inline summaries for partitioning process. */ |
2100 | if (!flag_wpa) | |
2101 | inline_free_summary (); | |
8a41354f | 2102 | return remove_functions ? TODO_remove_functions : 0; |
ca31b95f JH |
2103 | } |
2104 | ||
275b4baa RG |
2105 | /* Inline always-inline function calls in NODE. */ |
2106 | ||
2107 | static bool | |
4c0f7679 | 2108 | inline_always_inline_functions (struct cgraph_node *node) |
275b4baa RG |
2109 | { |
2110 | struct cgraph_edge *e; | |
2111 | bool inlined = false; | |
2112 | ||
2113 | for (e = node->callees; e; e = e->next_callee) | |
2114 | { | |
a5b1779f | 2115 | struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL); |
67348ccc | 2116 | if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)) |
275b4baa RG |
2117 | continue; |
2118 | ||
275b4baa RG |
2119 | if (cgraph_edge_recursive_p (e)) |
2120 | { | |
2121 | if (dump_file) | |
4c0f7679 | 2122 | fprintf (dump_file, " Not inlining recursive call to %s.\n", |
fec39fa6 | 2123 | e->callee->name ()); |
275b4baa RG |
2124 | e->inline_failed = CIF_RECURSIVE_INLINING; |
2125 | continue; | |
2126 | } | |
2127 | ||
4c0f7679 | 2128 | if (!can_early_inline_edge_p (e)) |
bef8491a ST |
2129 | { |
2130 | /* Set inlined to true if the callee is marked "always_inline" but | |
2131 | is not inlinable. This will allow flagging an error later in | |
2132 | expand_call_inline in tree-inline.c. */ | |
2133 | if (lookup_attribute ("always_inline", | |
67348ccc | 2134 | DECL_ATTRIBUTES (callee->decl)) != NULL) |
bef8491a ST |
2135 | inlined = true; |
2136 | continue; | |
2137 | } | |
275b4baa RG |
2138 | |
2139 | if (dump_file) | |
4c0f7679 | 2140 | fprintf (dump_file, " Inlining %s into %s (always_inline).\n", |
fec39fa6 TS |
2141 | xstrdup (e->callee->name ()), |
2142 | xstrdup (e->caller->name ())); | |
c170d40f | 2143 | inline_call (e, true, NULL, NULL, false); |
275b4baa RG |
2144 | inlined = true; |
2145 | } | |
c170d40f JH |
2146 | if (inlined) |
2147 | inline_update_overall_summary (node); | |
275b4baa RG |
2148 | |
2149 | return inlined; | |
2150 | } | |
2151 | ||
ca31b95f | 2152 | /* Decide on the inlining. We do so in the topological order to avoid |
af961c7f | 2153 | the expense of updating data structures. */ |
ca31b95f | 2154 | |
7fa49e7b | 2155 | static bool |
4c0f7679 | 2156 | early_inline_small_functions (struct cgraph_node *node) |
ca31b95f JH |
2157 | { |
2158 | struct cgraph_edge *e; | |
d63db217 | 2159 | bool inlined = false; |
7fa49e7b | 2160 | |
275b4baa | 2161 | for (e = node->callees; e; e = e->next_callee) |
c3056c2d | 2162 | { |
a5b1779f JH |
2163 | struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL); |
2164 | if (!inline_summary (callee)->inlinable | |
4c0f7679 | 2165 | || !e->inline_failed) |
275b4baa RG |
2166 | continue; |
2167 | ||
2168 | /* Do not consider functions not declared inline. */ | |
67348ccc | 2169 | if (!DECL_DECLARED_INLINE_P (callee->decl) |
275b4baa RG |
2170 | && !flag_inline_small_functions |
2171 | && !flag_inline_functions) | |
2172 | continue; | |
2173 | ||
c3056c2d | 2174 | if (dump_file) |
275b4baa | 2175 | fprintf (dump_file, "Considering inline candidate %s.\n", |
fec39fa6 | 2176 | callee->name ()); |
ca31b95f | 2177 | |
4c0f7679 JH |
2178 | if (!can_early_inline_edge_p (e)) |
2179 | continue; | |
2180 | ||
275b4baa RG |
2181 | if (cgraph_edge_recursive_p (e)) |
2182 | { | |
2183 | if (dump_file) | |
4c0f7679 | 2184 | fprintf (dump_file, " Not inlining: recursive call.\n"); |
22ad64b6 | 2185 | continue; |
275b4baa | 2186 | } |
af961c7f | 2187 | |
4c0f7679 | 2188 | if (!want_early_inline_function_p (e)) |
275b4baa | 2189 | continue; |
ca31b95f | 2190 | |
4c0f7679 JH |
2191 | if (dump_file) |
2192 | fprintf (dump_file, " Inlining %s into %s.\n", | |
fec39fa6 TS |
2193 | xstrdup (callee->name ()), |
2194 | xstrdup (e->caller->name ())); | |
c170d40f | 2195 | inline_call (e, true, NULL, NULL, true); |
4c0f7679 | 2196 | inlined = true; |
38bc76da | 2197 | } |
275b4baa | 2198 | |
7fa49e7b | 2199 | return inlined; |
ca31b95f JH |
2200 | } |
2201 | ||
d63db217 JH |
2202 | /* Do inlining of small functions. Doing so early helps profiling and other | |
2203 | passes to be somewhat more effective and avoids some code duplication in | |
2204 | the later real inlining pass for testcases with very many function calls. */ | |
c2924966 | 2205 | static unsigned int |
4c0f7679 | 2206 | early_inliner (void) |
d63db217 | 2207 | { |
581985d7 | 2208 | struct cgraph_node *node = cgraph_get_node (current_function_decl); |
10a5dd5d | 2209 | struct cgraph_edge *edge; |
7fa49e7b | 2210 | unsigned int todo = 0; |
796bda22 | 2211 | int iterations = 0; |
275b4baa | 2212 | bool inlined = false; |
d63db217 | 2213 | |
1da2ed5f | 2214 | if (seen_error ()) |
c2924966 | 2215 | return 0; |
af961c7f | 2216 | |
ecb62563 JH |
2217 | /* Do nothing if the data structures for the ipa-inliner are already | |
2218 | computed. This happens when some pass decides to construct a new function | |
2219 | and cgraph_add_new_function calls lowering passes and early optimization on | |
2220 | it. This may confuse us when the early inliner decides to inline a call | |
2221 | to a function clone, because function clones don't have parameter lists | |
2222 | in ipa-prop matching their signatures. */ | |
9771b263 | 2223 | if (ipa_node_params_vector.exists ()) |
ecb62563 JH |
2224 | return 0; |
2225 | ||
275b4baa RG |
2226 | #ifdef ENABLE_CHECKING |
2227 | verify_cgraph_node (node); | |
2228 | #endif | |
67348ccc | 2229 | ipa_remove_all_references (&node->ref_list); |
275b4baa RG |
2230 | |
2231 | /* Even when not optimizing or not inlining, inline always-inline | |
2232 | functions. */ | |
4c0f7679 | 2233 | inlined = inline_always_inline_functions (node); |
275b4baa | 2234 | |
af961c7f RG |
2235 | if (!optimize |
2236 | || flag_no_inline | |
4c0f7679 JH |
2237 | || !flag_early_inlining |
2238 | /* Never inline regular functions into always-inline functions | |
2239 | during incremental inlining. This sucks, as functions calling | |
2240 | always-inline functions will get less optimized, but at the | |
2241 | same time inlining functions that call always-inline | |
09a2806f | 2242 | functions into an always-inline function might introduce | |
4c0f7679 JH |
2243 | cycles of edges to be always inlined in the callgraph. |
2244 | ||
2245 | We might want to be smarter and just avoid this type of inlining. */ | |
67348ccc | 2246 | || DECL_DISREGARD_INLINE_LIMITS (node->decl)) |
275b4baa RG |
2247 | ; |
2248 | else if (lookup_attribute ("flatten", | |
67348ccc | 2249 | DECL_ATTRIBUTES (node->decl)) != NULL) |
7fa49e7b | 2250 | { |
275b4baa RG |
2251 | /* When the function is marked to be flattened, recursively inline |
2252 | all calls in it. */ | |
2253 | if (dump_file) | |
2254 | fprintf (dump_file, | |
fec39fa6 | 2255 | "Flattening %s\n", node->name ()); |
632b4f8e | 2256 | flatten_function (node, true); |
275b4baa | 2257 | inlined = true; |
7fa49e7b | 2258 | } |
af961c7f RG |
2259 | else |
2260 | { | |
2261 | /* We iterate incremental inlining to get trivial cases of indirect | |
2262 | inlining. */ | |
2263 | while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) | |
4c0f7679 | 2264 | && early_inline_small_functions (node)) |
af961c7f RG |
2265 | { |
2266 | timevar_push (TV_INTEGRATION); | |
2267 | todo |= optimize_inline_calls (current_function_decl); | |
4c0f7679 JH |
2268 | |
2269 | /* Technically we ought to recompute inline parameters so the new | |
2270 | iteration of the early inliner works as expected. However, the values | |
2271 | we have are approximately right, and thus we only need to update edge | |
2272 | info that might be cleared out for newly discovered edges. */ | |
2273 | for (edge = node->callees; edge; edge = edge->next_callee) | |
2274 | { | |
898b8927 JH |
2275 | struct inline_edge_summary *es = inline_edge_summary (edge); |
2276 | es->call_stmt_size | |
4c0f7679 | 2277 | = estimate_num_insns (edge->call_stmt, &eni_size_weights); |
898b8927 | 2278 | es->call_stmt_time |
4c0f7679 | 2279 | = estimate_num_insns (edge->call_stmt, &eni_time_weights); |
67348ccc | 2280 | if (edge->callee->decl |
4de09b85 | 2281 | && !gimple_check_call_matching_types ( |
67348ccc | 2282 | edge->call_stmt, edge->callee->decl, false)) |
89faf322 | 2283 | edge->call_stmt_cannot_inline_p = true; |
4c0f7679 | 2284 | } |
af961c7f | 2285 | timevar_pop (TV_INTEGRATION); |
275b4baa RG |
2286 | iterations++; |
2287 | inlined = false; | |
af961c7f RG |
2288 | } |
2289 | if (dump_file) | |
2290 | fprintf (dump_file, "Iterations: %i\n", iterations); | |
2291 | } | |
2292 | ||
275b4baa RG |
2293 | if (inlined) |
2294 | { | |
2295 | timevar_push (TV_INTEGRATION); | |
2296 | todo |= optimize_inline_calls (current_function_decl); | |
2297 | timevar_pop (TV_INTEGRATION); | |
2298 | } | |
2299 | ||
0889e9bc | 2300 | cfun->always_inline_functions_inlined = true; |
d63db217 | 2301 | |
af961c7f | 2302 | return todo; |
d63db217 JH |
2303 | } |
2304 | ||
27a4cd48 DM |
2305 | namespace { |
2306 | ||
2307 | const pass_data pass_data_early_inline = | |
d63db217 | 2308 | { |
27a4cd48 DM |
2309 | GIMPLE_PASS, /* type */ |
2310 | "einline", /* name */ | |
2311 | OPTGROUP_INLINE, /* optinfo_flags */ | |
2312 | false, /* has_gate */ | |
2313 | true, /* has_execute */ | |
2314 | TV_EARLY_INLINING, /* tv_id */ | |
2315 | PROP_ssa, /* properties_required */ | |
2316 | 0, /* properties_provided */ | |
2317 | 0, /* properties_destroyed */ | |
2318 | 0, /* todo_flags_start */ | |
2319 | 0, /* todo_flags_finish */ | |
873aa8f5 JH |
2320 | }; |
2321 | ||
27a4cd48 DM |
2322 | class pass_early_inline : public gimple_opt_pass |
2323 | { | |
2324 | public: | |
c3284718 RS |
2325 | pass_early_inline (gcc::context *ctxt) |
2326 | : gimple_opt_pass (pass_data_early_inline, ctxt) | |
27a4cd48 DM |
2327 | {} |
2328 | ||
2329 | /* opt_pass methods: */ | |
2330 | unsigned int execute () { return early_inliner (); } | |
2331 | ||
2332 | }; // class pass_early_inline | |
2333 | ||
2334 | } // anon namespace | |
2335 | ||
2336 | gimple_opt_pass * | |
2337 | make_pass_early_inline (gcc::context *ctxt) | |
2338 | { | |
2339 | return new pass_early_inline (ctxt); | |
2340 | } | |
2341 | ||
873aa8f5 | 2342 | |
af961c7f | 2343 | /* When to run IPA inlining. Inlining of always-inline functions |
febb1302 JH |
2344 | happens during early inlining. |
2345 | ||
8a41354f JH |
2346 | Enable inlining unconditionally, because callgraph redirection | |
2347 | happens here. */ | |
af961c7f RG |
2348 | |
2349 | static bool | |
4c0f7679 | 2350 | gate_ipa_inline (void) |
af961c7f | 2351 | { |
8a41354f | 2352 | return true; |
af961c7f RG |
2353 | } |
2354 | ||
27a4cd48 DM |
2355 | namespace { |
2356 | ||
2357 | const pass_data pass_data_ipa_inline = | |
873aa8f5 | 2358 | { |
27a4cd48 DM |
2359 | IPA_PASS, /* type */ |
2360 | "inline", /* name */ | |
2361 | OPTGROUP_INLINE, /* optinfo_flags */ | |
2362 | true, /* has_gate */ | |
2363 | true, /* has_execute */ | |
2364 | TV_IPA_INLINING, /* tv_id */ | |
2365 | 0, /* properties_required */ | |
2366 | 0, /* properties_provided */ | |
2367 | 0, /* properties_destroyed */ | |
2368 | TODO_remove_functions, /* todo_flags_start */ | |
8a41354f | 2369 | ( TODO_dump_symtab ), /* todo_flags_finish */ |
ca31b95f | 2370 | }; |
27a4cd48 DM |
2371 | |
2372 | class pass_ipa_inline : public ipa_opt_pass_d | |
2373 | { | |
2374 | public: | |
c3284718 RS |
2375 | pass_ipa_inline (gcc::context *ctxt) |
2376 | : ipa_opt_pass_d (pass_data_ipa_inline, ctxt, | |
2377 | inline_generate_summary, /* generate_summary */ | |
2378 | inline_write_summary, /* write_summary */ | |
2379 | inline_read_summary, /* read_summary */ | |
2380 | NULL, /* write_optimization_summary */ | |
2381 | NULL, /* read_optimization_summary */ | |
2382 | NULL, /* stmt_fixup */ | |
2383 | 0, /* function_transform_todo_flags_start */ | |
2384 | inline_transform, /* function_transform */ | |
2385 | NULL) /* variable_transform */ | |
27a4cd48 DM |
2386 | {} |
2387 | ||
2388 | /* opt_pass methods: */ | |
2389 | bool gate () { return gate_ipa_inline (); } | |
2390 | unsigned int execute () { return ipa_inline (); } | |
2391 | ||
2392 | }; // class pass_ipa_inline | |
2393 | ||
2394 | } // anon namespace | |
2395 | ||
2396 | ipa_opt_pass_d * | |
2397 | make_pass_ipa_inline (gcc::context *ctxt) | |
2398 | { | |
2399 | return new pass_ipa_inline (ctxt); | |
2400 | } |