/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation about the particular representation of inline plans in
   the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks the given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed at modifying the callgraph and verifying
     that the constraints on code size growth are met.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis, and thus,
     when allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought in
     via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures that
     this first round of inlining happens before instrumentation.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm for
     inlining of small functions (which may result in overall growth of the
     compilation unit) and for inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.  */
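
/* For orientation, the greedy loop in
   cgraph_decide_inlining_of_small_functions below boils down to the
   following sketch (illustrative pseudo-code only, not part of the
   implementation):

       put every inlinable edge into a fibheap keyed by cgraph_edge_badness;
       while (overall_size <= max_size && heap is not empty)
         {
           edge = fibheap_extract_min (heap);
           if (edge still passes the limit checks)
             inline it and re-key the affected caller/callee edges;
         }

   Edges still in the heap once the unit growth budget is exhausted get
   CIF_INLINE_UNIT_GROWTH_LIMIT recorded as their inline_failed reason.  */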

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
#include "except.h"

#define MAX_TIME 1000000000

/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
                     - inline_summary (what)->time_inlining_benefit)
                    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
                    + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}
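
/* A worked example for the scaling above (illustrative numbers only):
   if WHAT has global.time 1000 and time_inlining_benefit 200, and the
   call site has frequency CGRAPH_FREQ_BASE / 2 (executed on average half
   a time per call of TO), the inlined body is estimated to add roughly
   (1000 - 200) / 2 = 400 time units on top of TO's own time, with the
   result clamped into [0, MAX_TIME].  */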

/* Estimate self size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
              * times + to->global.size);
  gcc_assert (size >= 0);
  return size;
}
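
/* A worked example (illustrative numbers only): inlining WHAT with
   global.size 30 and size_inlining_benefit 10 into TO of size 50 at
   TIMES == 2 call sites yields (30 - 10) * 2 + 50 = 90 size units;
   the per-call overhead accounted in size_inlining_benefit disappears
   at every inlined call site.  */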

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && cgraph_can_remove_if_no_direct_calls_p (e->callee)
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            {
              overall_size -= e->callee->global.size;
              nfunctions_inlined++;
            }
          duplicate = false;
          e->callee->local.externally_visible = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                                 update_original, NULL);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
    + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;
  bool duplicate = false;
  int orig_size = e->callee->global.size;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  if (e->callee->callers->next_caller
      || !cgraph_can_remove_if_no_direct_calls_p (e->callee))
    duplicate = true;
  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (1, to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  if (!duplicate)
    overall_size -= orig_size;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!edge->call_stmt_cannot_inline_p);
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self-recursive functions or cases where
     we decide to not inline for different reasons, but it is not a big
     deal, as in that case we will keep the body around but also avoid
     some inlining.  */
  if (cgraph_only_called_directly_p (node)
      && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;

  node->global.estimated_growth = growth;
  return growth;
}
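
/* A worked example (illustrative numbers, assuming a zero
   size_inlining_benefit): a function of size 20 with two call sites
   contributes 2 * 20 = 40 size units of growth; if it is static, not
   self-recursive and all calls get inlined, the offline body goes away,
   so the net growth is 40 - 20 = 20.  A negative result means that
   inlining the function everywhere is expected to shrink the unit.  */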

/* Return false when inlining WHAT into TO is not a good idea, as it would
   cause too large a growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going to be
   inlined; otherwise figure out how many call sites in TO call WHAT and
   verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary (what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
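
/* A worked example (illustrative numbers, assuming the historical default
   of PARAM_LARGE_FUNCTION_GROWTH == 100): with self_size 300 for the
   larger of TO and WHAT, the limit becomes 300 + 300 * 100 / 100 == 600
   size units, so a post-inlining size that does not shrink TO, exceeds
   PARAM_LARGE_FUNCTION_INSNS and exceeds 600 fails with
   CIF_LARGE_FUNCTION_GROWTH_LIMIT.  The stack check is analogous, with
   PARAM_STACK_FRAME_GROWTH applied to TO's own frame size.  */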

/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!n->analyzed)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}
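
/* Note that MAX_INLINE_INSNS_SINGLE and MAX_INLINE_INSNS_AUTO above
   expand to PARAM_VALUE lookups, so both thresholds can be tuned with
   --param max-inline-insns-single=N and --param max-inline-insns-auto=N
   on the command line.  */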

/* Return true when inlining WHAT would create a recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once on a single recursion-nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics, and thus we
     should not warn about it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in a way so that edges with
   the smallest badness are inlined first.  After each inlining is
   performed, the costs of all caller edges of the affected nodes are
   recomputed, so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  gcov_type badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.size;

  /* Always prefer inlining that saves code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
               * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;

  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of
     inlined calls.  This is not too accurate since, while the call might
     be frequent within the function, the function itself may be
     infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so less frequently
     called functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      badness = growth * 10000;
      div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
                  / (edge->callee->global.time + 1) + 1, 100);

      /* Compress the range so we don't overflow.  */
      if (div > 10000)
        div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
      if (badness > INT_MAX)
        badness = INT_MAX;
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined into the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
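
/* A worked example for the guessed-profile branch above (illustrative
   numbers only): an edge with frequency == CGRAPH_FREQ_BASE (executed
   once per call of the caller) starts with div == 101; a growth of 12
   gives badness 120000 before the division, so hotter call sites and
   larger time_inlining_benefit values increase div, shrink the final
   badness, and float the edge towards the top of the min-heap.  */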

/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only decreases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for the caller edges of each callee of NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue depending
   on how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue to order candidates by recursion depth.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
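
/* The scaling above compresses profile counts into a fibheap key
   (illustrative numbers only): with max_count == 2^32 the divisor is
   about 2^8, so an edge executed 2^30 times gets a key of roughly -2^22
   and is extracted before colder recursive edges, while staying
   comfortably inside the range of the key type.  */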

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
                                    false, NULL);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (cnode->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls to the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given a whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
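
/* A worked example (illustrative numbers, assuming the historical
   defaults of PARAM_INLINE_UNIT_GROWTH == 30 and
   PARAM_LARGE_UNIT_INSNS == 10000): a unit of size 10000 may grow up to
   10000 * 130 / 100 == 13000 size units, while smaller units are first
   rounded up to PARAM_LARGE_UNIT_INSNS, giving tiny units a
   proportionally larger budget.  */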

/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then we
   update the estimates after each inlining performed.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_size = overall_size;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

      growth -= edge->caller->global.size;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlining into all callers is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   gimple_filename ((const_gimple) edge->call_stmt),
                   gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When we do not have profile info ready, we do not weight the
         position of the call within the procedure in any way.  This means
         that if a call of function A from function B seems profitable to
         inline, the recursive call of function A in the inline copy of A
         in B will look profitable too, and we end up inlining until we
         reach the maximal function growth.  This is not a good idea, so we
         prohibit recursive inlining here.

         ??? When the frequencies are taken into account we might not need
         this restriction.

         We need to be careful here; in some testcases, e.g. directives.c
         in libcpp, we can estimate a self-recursive function to have
         negative growth for inlining it completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge))
        {
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (edge->call_stmt_cannot_inline_p
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining, these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has time %i and size %i,"
                   " net change of %+i.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.time,
                   edge->caller->global.size,
                   overall_size - old_size);
        }
      if (min_size > overall_size)
        {
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

          if (dump_file)
            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  bool redo_always_inline = true;
  int initial_size = 0;

  /* FIXME lto.  We need to rethink how to coordinate different passes.  */
  if (flag_ltrans)
    return 0;

  /* FIXME lto.  We need to re-think about how the passes get invoked.  */
  if (!flag_wpa)
    cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
        struct cgraph_edge *e;

        gcc_assert (inline_summary (node)->self_size == node->global.size);
        initial_size += node->global.size;
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
        if (max_benefit < inline_summary (node)->time_inlining_benefit)
          max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (in_lto_p
              || !max_count
              || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with size %i.\n",
             initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a
     priority so none of our later choices will make this impossible.  */
  while (redo_always_inline)
    {
      redo_always_inline = false;
      for (i = nnodes - 1; i >= 0; i--)
        {
          struct cgraph_edge *e, *next;

          node = order[i];

          /* Handle nodes to be flattened, but don't update the overall
             unit size.  */
          if (lookup_attribute ("flatten",
                                DECL_ATTRIBUTES (node->decl)) != NULL)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Flattening %s\n", cgraph_node_name (node));
              cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
            }

          if (!node->local.disregard_inline_limits)
            continue;
          if (dump_file)
            fprintf (dump_file,
                     "\nConsidering %s size:%i (always inline)\n",
                     cgraph_node_name (node), node->global.size);
          old_size = overall_size;
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
              if (!e->inline_failed || e->call_stmt_cannot_inline_p)
                continue;
              if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                               &e->inline_failed))
                continue;
              if (!tree_can_inline_p (e))
                continue;
              if (cgraph_mark_inline_edge (e, true, NULL))
                redo_always_inline = true;
              if (dump_file)
                fprintf (dump_file,
                         " Inlined into %s which now has size %i.\n",
                         cgraph_node_name (e->caller),
                         e->caller->global.size);
            }
          /* Inlining a self-recursive function might introduce new calls
             to itself we didn't see in the loop above.  Fill in the proper
             reason why inlining failed.  */
          for (e = node->callers; e; e = e->next_caller)
            if (e->inline_failed)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file,
                     " Inlined for a net change of %+i size.\n",
                     overall_size - old_size);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && cgraph_only_called_directly_p (node)
              && node->local.inlinable
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
              && !node->callers->call_stmt_cannot_inline_p
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              old_size = overall_size;
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s size %i.\n",
                           cgraph_node_name (node), node->global.size);
                  fprintf (dump_file,
                           " Called once from %s (size %i).\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.size);
                }

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has size %i"
                             " for a net change of %+i size.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.size,
                             overall_size - old_size);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "size %i turned into %i.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_size,
             overall_size);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We are detecting cycles by storing the mode of the inliner into
   cgraph_node the last time we visited it in the recursion.  In general,
   when the mode is set, we have recursive inlining, but as a special
   case, we want to try harder to inline ALWAYS_INLINE functions: consider
   callgraph a->b->c->b, with a marked flatten and b marked always_inline.
   Flattening 'a' will collapse a->b->c before hitting the cycle.  To
   accommodate always_inline, we however need to inline a->b->c->b.

   So after hitting a cycle the first time, we switch into ALWAYS_INLINE
   mode and stop inlining only after hitting ALWAYS_INLINE in
   ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;
  bool inlined = false;

  /* Have we hit a cycle?  */
  if (callee_mode)
    {
      /* It is the first time we see it, we are not in ALWAYS_INLINE-only
         mode yet, and the function in question is always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    {
      cgraph_mark_inline (e);

      /* In order to fully inline always_inline functions, we need to
         recurse here, since the inlined functions might not be processed
         by incremental inlining at all yet.

         Also flattening needs to be done recursively.  */

      if (mode == INLINE_ALL || always_inline)
        cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
      inlined = true;
    }
  callee->aux = (void *)(size_t) callee_mode;
  return inlined;
}

/* Return true when N is a leaf function.  Accept cheap (pure or const)
   builtins in leaf functions.  */
static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!DECL_BUILT_IN (e->callee->decl)
        || (!TREE_READONLY (e->callee->decl)
            && !DECL_PURE_P (e->callee->decl)))
      return false;
  return true;
}

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
        if (e->call_stmt_cannot_inline_p)
          continue;
        /* When the edge is already inlined, we just need to recurse into
           it in order to fully flatten the leaves.  */
        if (!e->inline_failed && mode == INLINE_ALL)
          {
            inlined |= try_inline (e, mode, depth);
            continue;
          }
        if (dump_file)
          {
            indent_to (dump_file, depth);
            fprintf (dump_file,
                     "Considering to always inline inline candidate %s.\n",
                     cgraph_node_name (e->callee));
          }
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (e))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        inlined |= try_inline (e, mode, depth);
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        int allowed_growth = 0;
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }

        if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
            && optimize_function_for_speed_p (cfun))
          allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

        /* When the function body would grow and inlining the function
           won't eliminate the need for an offline copy of the function,
           don't inline.  */
        if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.size + allowed_growth)
            && cgraph_estimate_growth (e->callee) > allowed_growth)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.size);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || e->call_stmt_cannot_inline_p)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (e))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}
1577
f4ec5ce1 1578/* Because inlining might remove no-longer reachable nodes, we need to
1579 keep the array visible to garbage collector to avoid reading collected
1580 out nodes. */
1581static int nnodes;
1582static GTY ((length ("nnodes"))) struct cgraph_node **order;
1583
9e0baf4d 1584/* Do inlining of small functions. Doing so early helps profiling and other
1585 passes to be somewhat more effective and avoids some code duplication in
1586 later real inlining pass for testcases with very many function calls. */
2a1990e9 1587static unsigned int
9e0baf4d 1588cgraph_early_inlining (void)
1589{
09a2e412 1590 struct cgraph_node *node = cgraph_node (current_function_decl);
436a2379 1591 unsigned int todo = 0;
a7b61d8c 1592 int iterations = 0;
9e0baf4d 1593
1594 if (sorrycount || errorcount)
2a1990e9 1595 return 0;
a7b61d8c 1596 while (cgraph_decide_inlining_incrementally (node,
1597 iterations
1598 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
1599 && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
436a2379 1600 {
1601 timevar_push (TV_INTEGRATION);
a7b61d8c 1602 todo |= optimize_inline_calls (current_function_decl);
1603 iterations++;
436a2379 1604 timevar_pop (TV_INTEGRATION);
1605 }
a7b61d8c 1606 if (dump_file)
1607 fprintf (dump_file, "Iterations: %i\n", iterations);
198622c0 1608 cfun->always_inline_functions_inlined = true;
436a2379 1609 return todo;
9e0baf4d 1610}
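
/* Illustrative sketch (hypothetical example, not from the GCC sources):
   why the loop above iterates up to PARAM_EARLY_INLINER_MAX_ITERATIONS
   times.  Inlining one level of a wrapper chain exposes new call sites
   in the caller, so a further round of
   cgraph_decide_inlining_incrementally may be needed to collapse the
   chain completely.  */
#if 0
static int get (const int *p) { return *p; }
static int sum2 (const int *p) { return get (p) + get (p + 1); }
int user (const int *p) { return sum2 (p); }	/* Two wrapper levels.  */
#endif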
1611
 1612/* Return true when early inlining shall be performed.  */
1613static bool
1614cgraph_gate_early_inlining (void)
1615{
6329636b 1616 return flag_early_inlining;
9e0baf4d 1617}
1618
20099e35 1619struct gimple_opt_pass pass_early_inline =
9e0baf4d 1620{
20099e35 1621 {
1622 GIMPLE_PASS,
9e0baf4d 1623 "einline", /* name */
1624 cgraph_gate_early_inlining, /* gate */
1625 cgraph_early_inlining, /* execute */
1626 NULL, /* sub */
1627 NULL, /* next */
1628 0, /* static_pass_number */
f37a5008 1629 TV_INLINE_HEURISTICS, /* tv_id */
9e0baf4d 1630 0, /* properties_required */
41709826 1631 0, /* properties_provided */
65c1a668 1632 0, /* properties_destroyed */
1633 0, /* todo_flags_start */
20099e35 1634 TODO_dump_func /* todo_flags_finish */
1635 }
09a2e412 1636};
1637
 1638/* Return true when IPA early inlining shall be performed.  */
1639static bool
1640cgraph_gate_ipa_early_inlining (void)
1641{
6329636b 1642 return (flag_early_inlining
7bfefa9d 1643 && !in_lto_p
09a2e412 1644 && (flag_branch_probabilities || flag_test_coverage
1645 || profile_arc_flag));
1646}
1647
 1648/* IPA pass wrapper for the early inlining pass.  We need to run early
 1649   inlining before tree profiling, so we have a stand-alone IPA pass for it.  */
20099e35 1650struct simple_ipa_opt_pass pass_ipa_early_inline =
09a2e412 1651{
20099e35 1652 {
1653 SIMPLE_IPA_PASS,
09a2e412 1654 "einline_ipa", /* name */
1655 cgraph_gate_ipa_early_inlining, /* gate */
1656 NULL, /* execute */
1657 NULL, /* sub */
1658 NULL, /* next */
1659 0, /* static_pass_number */
1660 TV_INLINE_HEURISTICS, /* tv_id */
1661 0, /* properties_required */
41709826 1662 0, /* properties_provided */
09a2e412 1663 0, /* properties_destroyed */
1664 0, /* todo_flags_start */
20099e35 1665 TODO_dump_cgraph /* todo_flags_finish */
1666 }
09a2e412 1667};
1668
97343302 1669/* See if a statement might disappear after inlining.  We are not terribly
 1670   sophisticated; basically we look for simple abstraction-penalty wrappers.  */
1671
1672static bool
1673likely_eliminated_by_inlining_p (gimple stmt)
1674{
1675 enum gimple_code code = gimple_code (stmt);
1676 switch (code)
1677 {
1678 case GIMPLE_RETURN:
1679 return true;
1680 case GIMPLE_ASSIGN:
1681 if (gimple_num_ops (stmt) != 2)
1682 return false;
1683
 1684	  /* Casts of parameters, loads from parameters passed by reference,
 1685	     and stores to the return value or to parameters are probably free
 1686	     after inlining.  */
1687 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1688 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1689 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1690 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1691 {
1692 tree rhs = gimple_assign_rhs1 (stmt);
1693 tree lhs = gimple_assign_lhs (stmt);
1694 tree inner_rhs = rhs;
1695 tree inner_lhs = lhs;
1696 bool rhs_free = false;
1697 bool lhs_free = false;
1698
1699 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1700 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1701 while (handled_component_p (inner_rhs)
1702 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1703 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1704
1705
1706 if (TREE_CODE (inner_rhs) == PARM_DECL
1707 || (TREE_CODE (inner_rhs) == SSA_NAME
1708 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1709 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1710 rhs_free = true;
1711 if (rhs_free && is_gimple_reg (lhs))
1712 lhs_free = true;
1713 if (((TREE_CODE (inner_lhs) == PARM_DECL
1714 || (TREE_CODE (inner_lhs) == SSA_NAME
1715 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1716 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1717 && inner_lhs != lhs)
1718 || TREE_CODE (inner_lhs) == RESULT_DECL
1719 || (TREE_CODE (inner_lhs) == SSA_NAME
1720 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1721 lhs_free = true;
1722 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1723 rhs_free = true;
1724 if (lhs_free && rhs_free)
1725 return true;
1726 }
1727 return false;
1728 default:
1729 return false;
1730 }
1731}
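
/* Illustrative sketch (hypothetical example, not from the GCC sources):
   a typical abstraction-penalty wrapper whose statements the predicate
   above flags.  After inlining, the load through the parameter and the
   GIMPLE_RETURN both disappear, so the call is effectively free.  */
#if 0
struct point { int x, y; };

static int point_x (const struct point *p)
{
  return p->x;	/* Load from a parameter feeding the return value.  */
}
#endif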
1732
1733/* Compute function body size parameters for NODE. */
1734
1735static void
1736estimate_function_body_sizes (struct cgraph_node *node)
1737{
1738 gcov_type time = 0;
1739 gcov_type time_inlining_benefit = 0;
1740 int size = 0;
1741 int size_inlining_benefit = 0;
1742 basic_block bb;
1743 gimple_stmt_iterator bsi;
1744 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1745 tree arg;
1746 int freq;
1747 tree funtype = TREE_TYPE (node->decl);
97343302 1748
1749 if (dump_file)
479e7f00 1750 fprintf (dump_file, "Analyzing function body size: %s\n",
1751 cgraph_node_name (node));
97343302 1752
1753 gcc_assert (my_function && my_function->cfg);
1754 FOR_EACH_BB_FN (bb, my_function)
1755 {
1756 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1757 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1758 {
e38def9c 1759 gimple stmt = gsi_stmt (bsi);
1760 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1761 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1762
479e7f00 1763 if (dump_file && (dump_flags & TDF_DETAILS))
97343302 1764 {
e38def9c 1765 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1766 freq, this_size, this_time);
1767 print_gimple_stmt (dump_file, stmt, 0, 0);
97343302 1768 }
1769 this_time *= freq;
1770 time += this_time;
1771 size += this_size;
e38def9c 1772 if (likely_eliminated_by_inlining_p (stmt))
97343302 1773 {
1774 size_inlining_benefit += this_size;
1775 time_inlining_benefit += this_time;
479e7f00 1776 if (dump_file && (dump_flags & TDF_DETAILS))
97343302 1777 fprintf (dump_file, " Likely eliminated\n");
1778 }
1779 gcc_assert (time >= 0);
1780 gcc_assert (size >= 0);
1781 }
1782 }
1783 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1784 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1785 / CGRAPH_FREQ_BASE);
1786 if (dump_file)
479e7f00 1787 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1788 (int)time, (int)time_inlining_benefit,
1789 size, size_inlining_benefit);
97343302 1790 time_inlining_benefit += eni_time_weights.call_cost;
1791 size_inlining_benefit += eni_size_weights.call_cost;
1792 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1793 {
1794 int cost = estimate_move_cost (TREE_TYPE (funtype));
1795 time_inlining_benefit += cost;
1796 size_inlining_benefit += cost;
1797 }
1798 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1799 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1800 {
1801 int cost = estimate_move_cost (TREE_TYPE (arg));
1802 time_inlining_benefit += cost;
1803 size_inlining_benefit += cost;
1804 }
1805 if (time_inlining_benefit > MAX_TIME)
1806 time_inlining_benefit = MAX_TIME;
1807 if (time > MAX_TIME)
1808 time = MAX_TIME;
1809 inline_summary (node)->self_time = time;
1810 inline_summary (node)->self_size = size;
1811 if (dump_file)
479e7f00 1812 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1813 (int)time, (int)time_inlining_benefit,
1814 size, size_inlining_benefit);
97343302 1815 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1816 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
97343302 1817}
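
/* Worked example with hypothetical numbers: suppose a body contains two
   statements, one of size and time 3 at frequency 2 * CGRAPH_FREQ_BASE
   and one of size and time 2 at frequency CGRAPH_FREQ_BASE.  Then
   size = 3 + 2 = 5 and, after the rounding division above,
   time = 3 * 2 + 2 * 1 = 8.  If only the second statement is likely
   eliminated by inlining, size_inlining_benefit = 2 and
   time_inlining_benefit = 2, before call_cost and the per-argument and
   return-value move costs are added on top.  */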
1818
09a2e412 1819/* Compute parameters of functions used by the inliner.  */
9c1bff7a 1820unsigned int
1821compute_inline_parameters (struct cgraph_node *node)
09a2e412 1822{
77fa068f 1823 HOST_WIDE_INT self_stack_size;
1824
09a2e412 1825 gcc_assert (!node->global.inlined_to);
77fa068f 1826
1827 /* Estimate the stack size for the function. But not at -O0
1828 because estimated_stack_frame_size is a quadratic problem. */
1829 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1830 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1831 node->global.estimated_stack_size = self_stack_size;
09a2e412 1832 node->global.stack_frame_offset = 0;
77fa068f 1833
1834 /* Can this function be inlined at all? */
09a2e412 1835 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
2de29097 1836 if (node->local.inlinable && !node->local.disregard_inline_limits)
09a2e412 1837 node->local.disregard_inline_limits
ebb7d626 1838 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
97343302 1839 estimate_function_body_sizes (node);
09a2e412 1840  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
97343302 1841 node->global.time = inline_summary (node)->self_time;
1842 node->global.size = inline_summary (node)->self_size;
09a2e412 1843 return 0;
1844}
1845
9c1bff7a 1846
 1847/* Compute parameters of functions used by the inliner using
 1848   current_function_decl.  */
1849static unsigned int
1850compute_inline_parameters_for_current (void)
1851{
1852 compute_inline_parameters (cgraph_node (current_function_decl));
1853 return 0;
1854}
1855
20099e35 1856struct gimple_opt_pass pass_inline_parameters =
09a2e412 1857{
20099e35 1858 {
1859 GIMPLE_PASS,
97343302 1860 "inline_param", /* name */
6329636b 1861 NULL, /* gate */
9c1bff7a 1862 compute_inline_parameters_for_current,/* execute */
09a2e412 1863 NULL, /* sub */
1864 NULL, /* next */
1865 0, /* static_pass_number */
1866 TV_INLINE_HEURISTICS, /* tv_id */
1867 0, /* properties_required */
41709826 1868 0, /* properties_provided */
09a2e412 1869 0, /* properties_destroyed */
1870 0, /* todo_flags_start */
20099e35 1871 0 /* todo_flags_finish */
1872 }
09a2e412 1873};
1874
f8daee9b 1875/* This function performs the intraprocedural analysis of NODE that is
 1876   required to inline indirect calls.  */
1877static void
1878inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1879{
1880 struct cgraph_edge *cs;
1881
50828ed8 1882 if (!flag_ipa_cp)
1883 {
3f2ff969 1884 ipa_initialize_node_params (node);
50828ed8 1885 ipa_detect_param_modifications (node);
1886 }
f8daee9b 1887 ipa_analyze_params_uses (node);
1888
50828ed8 1889 if (!flag_ipa_cp)
1890 for (cs = node->callees; cs; cs = cs->next_callee)
1891 {
1892 ipa_count_arguments (cs);
1893 ipa_compute_jump_functions (cs);
1894 }
f8daee9b 1895
1896 if (dump_file)
11b73810 1897 {
1898 ipa_print_node_params (dump_file, node);
1899 ipa_print_node_jump_functions (dump_file, node);
1900 }
f8daee9b 1901}
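
/* Illustrative sketch (hypothetical example, not from the GCC sources):
   what the recorded jump functions buy us.  They describe how each
   actual argument is computed from the caller's formal parameters,
   letting indirect inlining discover that fn below always targets
   square once apply is inlined into caller.  */
#if 0
static int square (int x) { return x * x; }

static int apply (int (*fn) (int), int v)
{
  return fn (v);	/* Indirect call; target arrives as an argument.  */
}

int caller (int v)
{
  return apply (square, v);
}
#endif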
1902
9ca785fc 1903/* Compute inline parameters and, if enabled, indirect inlining analysis of NODE.  */
1904static void
1905analyze_function (struct cgraph_node *node)
1906{
1907 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1908 current_function_decl = node->decl;
1909
1910 compute_inline_parameters (node);
1911 if (flag_indirect_inlining)
1912 inline_indirect_intraprocedural_analysis (node);
1913
1914 current_function_decl = NULL;
1915 pop_cfun ();
1916}
1917
1918/* Called when new function is inserted to callgraph late. */
1919static void
1920add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1921{
1922 analyze_function (node);
1923}
1924
68e3904e 1925/* Compute inline parameters for all analyzed functions in the callgraph.  */
fba7ae09 1926static void
9c1bff7a 1927inline_generate_summary (void)
68e3904e 1928{
9ca785fc 1929 struct cgraph_node *node;
1930
7bfefa9d 1931 /* FIXME lto. We should not run any IPA-summary pass in LTRANS mode. */
1932 if (flag_ltrans)
1933 return;
1934
9ca785fc 1935 function_insertion_hook_holder =
1936 cgraph_add_function_insertion_hook (&add_new_function, NULL);
9c1bff7a 1937
f8daee9b 1938 if (flag_indirect_inlining)
1939 {
1940 ipa_register_cgraph_hooks ();
1941 ipa_check_create_node_params ();
1942 ipa_check_create_edge_args ();
1943 }
1944
9ca785fc 1945 for (node = cgraph_nodes; node; node = node->next)
1946 if (node->analyzed)
1947 analyze_function (node);
9c1bff7a 1948
68e3904e 1949 return;
1950}
1951
 1952/* Apply the inline plan to the function.  */
fba7ae09 1953static unsigned int
68e3904e 1954inline_transform (struct cgraph_node *node)
09a2e412 1955{
1956 unsigned int todo = 0;
1957 struct cgraph_edge *e;
09a2e412 1958
09a2e412 1959 /* We might need the body of this function so that we can expand
1960 it inline somewhere else. */
1a1a827a 1961 if (cgraph_preserve_function_body_p (node->decl))
09a2e412 1962 save_inline_function_body (node);
1963
1964 for (e = node->callees; e; e = e->next_callee)
1965 if (!e->inline_failed || warn_inline)
1966 break;
75a70cf9 1967
09a2e412 1968 if (e)
1969 {
1970 timevar_push (TV_INTEGRATION);
1971 todo = optimize_inline_calls (current_function_decl);
1972 timevar_pop (TV_INTEGRATION);
1973 }
6cb239e5 1974 cfun->always_inline_functions_inlined = true;
1975 cfun->after_inlining = true;
09a2e412 1976 return todo | execute_fixup_cfg ();
1977}
1978
26dbec0a 1979struct ipa_opt_pass_d pass_ipa_inline =
09a2e412 1980{
20099e35 1981 {
68e3904e 1982 IPA_PASS,
1983 "inline", /* name */
6329636b 1984 NULL, /* gate */
68e3904e 1985 cgraph_decide_inlining, /* execute */
09a2e412 1986 NULL, /* sub */
1987 NULL, /* next */
1988 0, /* static_pass_number */
1989 TV_INLINE_HEURISTICS, /* tv_id */
1990 0, /* properties_required */
41709826 1991 0, /* properties_provided */
09a2e412 1992 0, /* properties_destroyed */
68e3904e 1993 TODO_remove_functions,		/* todo_flags_start */
 1994 TODO_dump_cgraph | TODO_dump_func
 1995 | TODO_remove_functions		/* todo_flags_finish */
1996 },
9c1bff7a 1997 inline_generate_summary, /* generate_summary */
1998 NULL, /* write_summary */
1999 NULL, /* read_summary */
68e3904e 2000 NULL, /* function_read_summary */
68e3904e 2001 0, /* TODOs */
2002 inline_transform, /* function_transform */
2003 NULL, /* variable_transform */
65c1a668 2004};
f4ec5ce1 2005
7309a3bb 2006
f4ec5ce1 2007#include "gt-ipa-inline.h"