1 /* Callgraph transformations to handle inlining
2 Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* The inline decisions are stored in the callgraph as an "inline plan" and
22 applied later.
23
24 To mark a given call inlined, use the inline_call function.
25 The function marks the edge as inlined and, if necessary, produces a
26 virtual clone in the callgraph representing the new copy of the callee's
27 function body.
28
29 The inline plan is applied to a given function body by inline_transform. */
30
31 #include "config.h"
32 #include "system.h"
33 #include "coretypes.h"
34 #include "tm.h"
35 #include "function.h"
36 #include "tree.h"
37 #include "alloc-pool.h"
38 #include "tree-pass.h"
39 #include "cgraph.h"
40 #include "tree-cfg.h"
41 #include "symbol-summary.h"
42 #include "tree-vrp.h"
43 #include "ipa-prop.h"
44 #include "ipa-fnsummary.h"
45 #include "ipa-inline.h"
46 #include "tree-inline.h"
47 #include "function.h"
48 #include "cfg.h"
49 #include "basic-block.h"
50
51 int ncalls_inlined;
52 int nfunctions_inlined;
53
54 /* Scale counts of NODE edges by NUM/DEN. */
55
56 static void
57 update_noncloned_counts (struct cgraph_node *node,
58 profile_count num, profile_count den)
59 {
60 struct cgraph_edge *e;
61
62 profile_count::adjust_for_ipa_scaling (&num, &den);
63
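   /* Scale all outgoing edge counts; recurse into callees already inlined
      into NODE so that their edges and counts are rescaled as well.  */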
64 for (e = node->callees; e; e = e->next_callee)
65 {
66 if (!e->inline_failed)
67 update_noncloned_counts (e->callee, num, den);
68 e->count = e->count.apply_scale (num, den);
69 }
70 for (e = node->indirect_calls; e; e = e->next_callee)
71 e->count = e->count.apply_scale (num, den);
72 node->count = node->count.apply_scale (num, den);
73 }
74
75 /* We removed or are going to remove the last call to NODE.
76 Return true if we can and want to proactively remove NODE now.
77 This is important to do, since we want the inliner to know when the
78 offline copy of a function was removed. */
79
80 static bool
81 can_remove_node_now_p_1 (struct cgraph_node *node, struct cgraph_edge *e)
82 {
83 ipa_ref *ref;
84
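   /* NODE can only go away if every alias of it can go away as well and the
      only caller of each alias is the edge E being eliminated.  */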
85 FOR_EACH_ALIAS (node, ref)
86 {
87 cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
88 if ((alias->callers && alias->callers != e)
89 || !can_remove_node_now_p_1 (alias, e))
90 return false;
91 }
92 /* FIXME: When the address of a DECL_EXTERNAL function is taken, we can
93 still remove its offline copy, but we would need to keep an unanalyzed node
94 in the callgraph so references can point to it.
95
96 Also for a comdat group we can ignore references inside the group, as we
97 want to prove the group as a whole to be dead. */
98 return (!node->address_taken
99 && node->can_remove_if_no_direct_calls_and_refs_p ()
100 /* Inlining might enable more devirtualization, so we want to remove
101 those only after all devirtualizable virtual calls are processed.
102 Lacking may-edges in the callgraph, we just preserve them past
103 inlining. */
104 && (!DECL_VIRTUAL_P (node->decl)
105 || !opt_for_fn (node->decl, flag_devirtualize))
106 /* During early inlining some unanalyzed cgraph nodes might be in the
107 callgraph and they might refer to the function in question. */
108 && !cgraph_new_nodes.exists ());
109 }
110
111 /* We are going to eliminate the last direct call to NODE (or an alias of it)
112 via edge E. Verify that NODE can be removed from the unit, and if it is
113 contained in a comdat group, that the whole comdat group is removable. */
114
115 static bool
116 can_remove_node_now_p (struct cgraph_node *node, struct cgraph_edge *e)
117 {
118 struct cgraph_node *next;
119 if (!can_remove_node_now_p_1 (node, e))
120 return false;
121
122 /* When the node is part of a comdat group, we need to be sure that all
123 items in the group can be removed. */
124 if (!node->same_comdat_group || !node->externally_visible)
125 return true;
126 for (next = dyn_cast<cgraph_node *> (node->same_comdat_group);
127 next != node; next = dyn_cast<cgraph_node *> (next->same_comdat_group))
128 {
129 if (next->alias)
130 continue;
131 if ((next->callers && next->callers != e)
132 || !can_remove_node_now_p_1 (next, e))
133 return false;
134 }
135 return true;
136 }
137
138 /* Return true if NODE is a master clone with non-inline clones. */
139
140 static bool
141 master_clone_with_noninline_clones_p (struct cgraph_node *node)
142 {
143 if (node->clone_of)
144 return false;
145
146 for (struct cgraph_node *n = node->clones; n; n = n->next_sibling_clone)
147 if (n->decl != node->decl)
148 return true;
149
150 return false;
151 }
152
153 /* E is expected to be an edge being inlined. Clone the destination node of
154 the edge and redirect it to the new clone.
155 DUPLICATE is used for bookkeeping on whether we are actually creating new
156 clones or re-using the node originally representing the out-of-line function call.
157 By default the offline copy is removed when it appears dead after inlining.
158 UPDATE_ORIGINAL prevents this transformation.
159 If OVERALL_SIZE is non-NULL, the size is updated to reflect the
160 transformation. */
161
162 void
163 clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
164 bool update_original, int *overall_size)
165 {
166 struct cgraph_node *inlining_into;
167 struct cgraph_edge *next;
168
169 if (e->caller->inlined_to)
170 inlining_into = e->caller->inlined_to;
171 else
172 inlining_into = e->caller;
173
174 if (duplicate)
175 {
176 /* We may eliminate the need for the out-of-line copy to be output.
177 In that case just go ahead and re-use it. This is not just a
178 memory optimization: making the offline copy of the function disappear
179 from the program will improve future decisions on inlining. */
180 if (!e->callee->callers->next_caller
181 /* Recursive inlining never wants the master clone to
182 be overwritten. */
183 && update_original
184 && can_remove_node_now_p (e->callee, e)
185 /* We cannot overwrite a master clone with non-inline clones
186 until after these clones are materialized. */
187 && !master_clone_with_noninline_clones_p (e->callee))
188 {
189 /* TODO: When the callee is in a comdat group, we could remove all of it,
190 including all inline clones inlined into it. That would however
191 require small function inlining to register an edge removal hook to
192 maintain the priority queue.
193
194 For now we keep the other functions in the group in the program until
195 cgraph_remove_unreachable_functions gets rid of them. */
196 gcc_assert (!e->callee->inlined_to);
197 e->callee->remove_from_same_comdat_group ();
198 if (e->callee->definition
199 && inline_account_function_p (e->callee))
200 {
201 gcc_assert (!e->callee->alias);
202 if (overall_size)
203 *overall_size -= ipa_size_summaries->get (e->callee)->size;
204 nfunctions_inlined++;
205 }
206 duplicate = false;
207 e->callee->externally_visible = false;
208 update_noncloned_counts (e->callee, e->count, e->callee->count);
209
210 dump_callgraph_transformation (e->callee, inlining_into,
211 "inlining to");
212 }
213 else
214 {
215 struct cgraph_node *n;
216
217 n = e->callee->create_clone (e->callee->decl,
218 e->count,
219 update_original, vNULL, true,
220 inlining_into,
221 NULL);
222 n->used_as_abstract_origin = e->callee->used_as_abstract_origin;
223 e->redirect_callee (n);
224 }
225 }
226 else
227 e->callee->remove_from_same_comdat_group ();
228
229 e->callee->inlined_to = inlining_into;
230
231 /* Recursively clone all bodies. */
232 for (e = e->callee->callees; e; e = next)
233 {
234 next = e->next_callee;
235 if (!e->inline_failed)
236 clone_inlined_nodes (e, duplicate, update_original, overall_size);
237 }
238 }
239
240 /* Check all speculations in N and if any seem useless, resolve them. When the
241 first edge is resolved, pop all edges from NEW_EDGES and insert them into
242 EDGE_SET. Then remove each resolved edge from EDGE_SET, if it is there. */
243
244 static bool
245 check_speculations_1 (cgraph_node *n, vec<cgraph_edge *> *new_edges,
246 hash_set <cgraph_edge *> *edge_set)
247 {
248 bool speculation_removed = false;
249 cgraph_edge *next;
250
251 for (cgraph_edge *e = n->callees; e; e = next)
252 {
253 next = e->next_callee;
254 if (e->speculative && !speculation_useful_p (e, true))
255 {
256 while (new_edges && !new_edges->is_empty ())
257 edge_set->add (new_edges->pop ());
258 edge_set->remove (e);
259
260 e->resolve_speculation (NULL);
261 speculation_removed = true;
262 }
263 else if (!e->inline_failed)
264 speculation_removed |= check_speculations_1 (e->callee, new_edges,
265 edge_set);
266 }
267 return speculation_removed;
268 }
269
270 /* Push E to NEW_EDGES. Called from hash_set traverse method, which
271 unfortunately means this function has to have external linkage, otherwise
272 the code will not compile with gcc 4.8. */
273
274 bool
275 push_all_edges_in_set_to_vec (cgraph_edge * const &e,
276 vec<cgraph_edge *> *new_edges)
277 {
278 new_edges->safe_push (e);
279 return true;
280 }
281
282 /* Check all speculations in N and if any seem useless, resolve them and remove
283 them from NEW_EDGES. */
284
285 static bool
286 check_speculations (cgraph_node *n, vec<cgraph_edge *> *new_edges)
287 {
288 hash_set <cgraph_edge *> edge_set;
289 bool res = check_speculations_1 (n, new_edges, &edge_set);
290 if (!edge_set.is_empty ())
291 edge_set.traverse <vec<cgraph_edge *> *,
292 push_all_edges_in_set_to_vec> (new_edges);
293 return res;
294 }
295
296 /* Mark all call graph edges coming out of NODE, and out of all nodes that
297 have been inlined into it, as in_polymorphic_cdtor. */
298
299 static void
300 mark_all_inlined_calls_cdtor (cgraph_node *node)
301 {
302 for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
303 {
304 cs->in_polymorphic_cdtor = true;
305 if (!cs->inline_failed)
306 mark_all_inlined_calls_cdtor (cs->callee);
307 }
308 for (cgraph_edge *cs = node->indirect_calls; cs; cs = cs->next_callee)
309 cs->in_polymorphic_cdtor = true;
310 }
311
312
313 /* Mark edge E as inlined and update the callgraph accordingly. UPDATE_ORIGINAL
314 specifies whether the profile of the original function should be updated. If any new
315 indirect edges are discovered in the process, add them to NEW_EDGES, unless
316 it is NULL. If UPDATE_OVERALL_SUMMARY is false, do not bother to recompute the overall
317 size of the caller after inlining; the caller is required to eventually do it via
318 ipa_update_overall_fn_summary.
319 If CALLEE_REMOVED is non-NULL, set it to true if we removed the callee node.
320
321 Return true iff any new callgraph edges were discovered as a
322 result of inlining. */
323
324 bool
325 inline_call (struct cgraph_edge *e, bool update_original,
326 vec<cgraph_edge *> *new_edges,
327 int *overall_size, bool update_overall_summary,
328 bool *callee_removed)
329 {
330 int old_size = 0, new_size = 0;
331 struct cgraph_node *to = NULL;
332 struct cgraph_edge *curr = e;
333 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
334 bool new_edges_found = false;
335
336 int estimated_growth = 0;
337 if (! update_overall_summary)
338 estimated_growth = estimate_edge_growth (e);
339 /* This is used only for the assert below. */
340 #if 0
341 bool predicated = inline_edge_summary (e)->predicate != NULL;
342 #endif
343
344 /* Don't inline inlined edges. */
345 gcc_assert (e->inline_failed);
346 /* Don't even think of inlining inline clone. */
347 gcc_assert (!callee->inlined_to);
348
349 to = e->caller;
350 if (to->inlined_to)
351 to = to->inlined_to;
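   /* If we are inlining through a thunk, first expand the thunk into gimple
      so that there is a real call statement to redirect, then locate the
      edge to the original target among the expanded callees.  */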
352 if (to->thunk.thunk_p)
353 {
354 struct cgraph_node *target = to->callees->callee;
355 symtab->call_cgraph_removal_hooks (to);
356 if (in_lto_p)
357 to->get_untransformed_body ();
358 to->expand_thunk (false, true);
359 /* When thunk is instrumented we may have multiple callees. */
360 for (e = to->callees; e && e->callee != target; e = e->next_callee)
361 ;
362 symtab->call_cgraph_insertion_hooks (to);
363 gcc_assert (e);
364 }
365
366
367 e->inline_failed = CIF_OK;
368 DECL_POSSIBLY_INLINED (callee->decl) = true;
369
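   /* The inlined body may contain EH constructs that need the callee's
      personality routine, so make the merged function use it.  */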
370 if (DECL_FUNCTION_PERSONALITY (callee->decl))
371 DECL_FUNCTION_PERSONALITY (to->decl)
372 = DECL_FUNCTION_PERSONALITY (callee->decl);
373
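   /* If the callee was built without -fstrict-aliasing but the caller was
      not, its memory accesses must not suddenly become subject to strict
      aliasing rules; drop flag_strict_aliasing on the caller instead.  */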
374 bool reload_optimization_node = false;
375 if (!opt_for_fn (callee->decl, flag_strict_aliasing)
376 && opt_for_fn (to->decl, flag_strict_aliasing))
377 {
378 struct gcc_options opts = global_options;
379
380 cl_optimization_restore (&opts, opts_for_fn (to->decl));
381 opts.x_flag_strict_aliasing = false;
382 if (dump_file)
383 fprintf (dump_file, "Dropping flag_strict_aliasing on %s\n",
384 to->dump_name ());
385 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (to->decl)
386 = build_optimization_node (&opts);
387 reload_optimization_node = true;
388 }
389
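   /* If the callee brings floating-point expressions into a caller that had
      none, copy the callee's FP-environment flags to the caller so that the
      inlined FP code keeps its original semantics.  */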
390 ipa_fn_summary *caller_info = ipa_fn_summaries->get (to);
391 ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
392 if (!caller_info->fp_expressions && callee_info->fp_expressions)
393 {
394 caller_info->fp_expressions = true;
395 if (opt_for_fn (callee->decl, flag_rounding_math)
396 != opt_for_fn (to->decl, flag_rounding_math)
397 || opt_for_fn (callee->decl, flag_trapping_math)
398 != opt_for_fn (to->decl, flag_trapping_math)
399 || opt_for_fn (callee->decl, flag_unsafe_math_optimizations)
400 != opt_for_fn (to->decl, flag_unsafe_math_optimizations)
401 || opt_for_fn (callee->decl, flag_finite_math_only)
402 != opt_for_fn (to->decl, flag_finite_math_only)
403 || opt_for_fn (callee->decl, flag_signaling_nans)
404 != opt_for_fn (to->decl, flag_signaling_nans)
405 || opt_for_fn (callee->decl, flag_cx_limited_range)
406 != opt_for_fn (to->decl, flag_cx_limited_range)
407 || opt_for_fn (callee->decl, flag_signed_zeros)
408 != opt_for_fn (to->decl, flag_signed_zeros)
409 || opt_for_fn (callee->decl, flag_associative_math)
410 != opt_for_fn (to->decl, flag_associative_math)
411 || opt_for_fn (callee->decl, flag_reciprocal_math)
412 != opt_for_fn (to->decl, flag_reciprocal_math)
413 || opt_for_fn (callee->decl, flag_fp_int_builtin_inexact)
414 != opt_for_fn (to->decl, flag_fp_int_builtin_inexact)
415 || opt_for_fn (callee->decl, flag_errno_math)
416 != opt_for_fn (to->decl, flag_errno_math))
417 {
418 struct gcc_options opts = global_options;
419
420 cl_optimization_restore (&opts, opts_for_fn (to->decl));
421 opts.x_flag_rounding_math
422 = opt_for_fn (callee->decl, flag_rounding_math);
423 opts.x_flag_trapping_math
424 = opt_for_fn (callee->decl, flag_trapping_math);
425 opts.x_flag_unsafe_math_optimizations
426 = opt_for_fn (callee->decl, flag_unsafe_math_optimizations);
427 opts.x_flag_finite_math_only
428 = opt_for_fn (callee->decl, flag_finite_math_only);
429 opts.x_flag_signaling_nans
430 = opt_for_fn (callee->decl, flag_signaling_nans);
431 opts.x_flag_cx_limited_range
432 = opt_for_fn (callee->decl, flag_cx_limited_range);
433 opts.x_flag_signed_zeros
434 = opt_for_fn (callee->decl, flag_signed_zeros);
435 opts.x_flag_associative_math
436 = opt_for_fn (callee->decl, flag_associative_math);
437 opts.x_flag_reciprocal_math
438 = opt_for_fn (callee->decl, flag_reciprocal_math);
439 opts.x_flag_fp_int_builtin_inexact
440 = opt_for_fn (callee->decl, flag_fp_int_builtin_inexact);
441 opts.x_flag_errno_math
442 = opt_for_fn (callee->decl, flag_errno_math);
443 if (dump_file)
444 fprintf (dump_file, "Copying FP flags from %s to %s\n",
445 callee->dump_name (), to->dump_name ());
446 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (to->decl)
447 = build_optimization_node (&opts);
448 reload_optimization_node = true;
449 }
450 }
451
452 /* Reload global optimization flags. */
453 if (reload_optimization_node && DECL_STRUCT_FUNCTION (to->decl) == cfun)
454 set_cfun (cfun, true);
455
456 /* If aliases are involved, redirect edge to the actual destination and
457 possibly remove the aliases. */
458 if (e->callee != callee)
459 {
460 struct cgraph_node *alias = e->callee, *next_alias;
461 e->redirect_callee (callee);
462 while (alias && alias != callee)
463 {
464 if (!alias->callers
465 && can_remove_node_now_p (alias,
466 !e->next_caller && !e->prev_caller ? e : NULL))
467 {
468 next_alias = alias->get_alias_target ();
469 alias->remove ();
470 if (callee_removed)
471 *callee_removed = true;
472 alias = next_alias;
473 }
474 else
475 break;
476 }
477 }
478
479 clone_inlined_nodes (e, true, update_original, overall_size);
480
481 gcc_assert (curr->callee->inlined_to == to);
482
483 old_size = ipa_size_summaries->get (to)->size;
484 ipa_merge_fn_summary_after_inlining (e);
485 if (e->in_polymorphic_cdtor)
486 mark_all_inlined_calls_cdtor (e->callee);
487 if (opt_for_fn (e->caller->decl, optimize))
488 new_edges_found = ipa_propagate_indirect_call_infos (curr, new_edges);
489 check_speculations (e->callee, new_edges);
490 if (update_overall_summary)
491 ipa_update_overall_fn_summary (to);
492 else
493 /* Update self size by the estimate so that overall function growth limits
494 work for further inlining into this function. Before inlining
495 into this function again, the caller is expected to update
496 the overall summary. */
497 ipa_size_summaries->get (to)->size += estimated_growth;
498 new_size = ipa_size_summaries->get (to)->size;
499
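   /* Keep TO->calls_comdat_local up to date: inherit it from the callee, and
      if the inlined callee was itself comdat-local, re-check whether any
      non-inlined call to a comdat-local function remains.  */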
500 if (callee->calls_comdat_local)
501 to->calls_comdat_local = true;
502 else if (to->calls_comdat_local && callee->comdat_local_p ())
503 {
504 struct cgraph_edge *se = to->callees;
505 for (; se; se = se->next_callee)
506 if (se->inline_failed && se->callee->comdat_local_p ())
507 break;
508 if (se == NULL)
509 to->calls_comdat_local = false;
510 }
511
512 /* FIXME: This assert suffers from roundoff errors, disable it for GCC 5
513 and revisit it after conversion to sreals in GCC 6.
514 See PR 65654. */
515 #if 0
516 /* Verify that the estimated growth matches the real growth. Allow an off-by-one
517 error due to ipa_fn_summary::size_scale roundoff errors. */
518 gcc_assert (!update_overall_summary || !overall_size || new_edges_found
519 || abs (estimated_growth - (new_size - old_size)) <= 1
520 || speculation_removed
521 /* FIXME: a hack. Edges with a false predicate are accounted
522 wrongly; we should remove them from the callgraph. */
523 || predicated);
524 #endif
525
526 /* Account the change of overall unit size; external functions will be
527 removed and are thus not accounted. */
528 if (overall_size && inline_account_function_p (to))
529 *overall_size += new_size - old_size;
530 ncalls_inlined++;
531
532 /* This must happen after ipa_merge_fn_summary_after_inlining, which relies on the
533 jump functions of the callee not being updated. */
534 return new_edges_found;
535 }
536
537
538 /* Copy the function body of NODE and redirect all inline clones to it.
539 This is done before the inline plan is applied to NODE when there are
540 still some inline clones of it.
541
542 This is necessary because inline decisions are not really transitive
543 and the other inline clones may have different bodies. */
544
545 static struct cgraph_node *
546 save_inline_function_body (struct cgraph_node *node)
547 {
548 struct cgraph_node *first_clone, *n;
549
550 if (dump_file)
551 fprintf (dump_file, "\nSaving body of %s for later reuse\n",
552 node->name ());
553
554 gcc_assert (node == cgraph_node::get (node->decl));
555
556 /* first_clone will be turned into a real function. */
557 first_clone = node->clones;
558
559 /* Arrange for the first clone not to be a thunk, as thunks do not have bodies. */
560 if (first_clone->thunk.thunk_p)
561 {
562 while (first_clone->thunk.thunk_p)
563 first_clone = first_clone->next_sibling_clone;
564 first_clone->prev_sibling_clone->next_sibling_clone
565 = first_clone->next_sibling_clone;
566 if (first_clone->next_sibling_clone)
567 first_clone->next_sibling_clone->prev_sibling_clone
568 = first_clone->prev_sibling_clone;
569 first_clone->next_sibling_clone = node->clones;
570 first_clone->prev_sibling_clone = NULL;
571 node->clones->prev_sibling_clone = first_clone;
572 node->clones = first_clone;
573 }
574 first_clone->decl = copy_node (node->decl);
575 first_clone->decl->decl_with_vis.symtab_node = first_clone;
576 gcc_assert (first_clone == cgraph_node::get (first_clone->decl));
577
578 /* Now reshape the clone tree, so that all other clones descend from
579 first_clone. */
580 if (first_clone->next_sibling_clone)
581 {
582 for (n = first_clone->next_sibling_clone; n->next_sibling_clone;
583 n = n->next_sibling_clone)
584 n->clone_of = first_clone;
585 n->clone_of = first_clone;
586 n->next_sibling_clone = first_clone->clones;
587 if (first_clone->clones)
588 first_clone->clones->prev_sibling_clone = n;
589 first_clone->clones = first_clone->next_sibling_clone;
590 first_clone->next_sibling_clone->prev_sibling_clone = NULL;
591 first_clone->next_sibling_clone = NULL;
592 gcc_assert (!first_clone->prev_sibling_clone);
593 }
594 first_clone->clone_of = NULL;
595
596 /* Now node in question has no clones. */
597 node->clones = NULL;
598
599 /* Inline clones share decl with the function they are cloned
600 from. Walk the whole clone tree and redirect them all to the
601 new decl. */
602 if (first_clone->clones)
603 for (n = first_clone->clones; n != first_clone;)
604 {
605 gcc_assert (n->decl == node->decl);
606 n->decl = first_clone->decl;
607 if (n->clones)
608 n = n->clones;
609 else if (n->next_sibling_clone)
610 n = n->next_sibling_clone;
611 else
612 {
613 while (n != first_clone && !n->next_sibling_clone)
614 n = n->clone_of;
615 if (n != first_clone)
616 n = n->next_sibling_clone;
617 }
618 }
619
620 /* Copy the OLD_VERSION_NODE function tree to the new version. */
621 tree_function_versioning (node->decl, first_clone->decl,
622 NULL, NULL, true, NULL, NULL);
623
624 /* The function will be short-lived and removed after we inline all the clones,
625 but make it internal so we won't confuse ourselves. */
626 DECL_EXTERNAL (first_clone->decl) = 0;
627 TREE_PUBLIC (first_clone->decl) = 0;
628 DECL_COMDAT (first_clone->decl) = 0;
629 first_clone->ipa_transforms_to_apply.release ();
630
631 /* When doing recursive inlining, the clone may become unnecessary.
632 This is possible e.g. in the case when the recursive function is proved to be
633 non-throwing and the recursion happens only in the EH landing pad.
634 We cannot remove the clone until we are done with saving the body.
635 Remove it now. */
636 if (!first_clone->callers)
637 {
638 first_clone->remove_symbol_and_inline_clones ();
639 first_clone = NULL;
640 }
641 else if (flag_checking)
642 first_clone->verify ();
643
644 return first_clone;
645 }
646
647 /* Return true when the function body of NODE still needs to be kept around
648 for later re-use. */
649 static bool
650 preserve_function_body_p (struct cgraph_node *node)
651 {
652 gcc_assert (symtab->global_info_ready);
653 gcc_assert (!node->alias && !node->thunk.thunk_p);
654
655 /* Look if there is any non-thunk clone around. */
656 for (node = node->clones; node; node = node->next_sibling_clone)
657 if (!node->thunk.thunk_p)
658 return true;
659 return false;
660 }
661
662 /* Apply inline plan to function. */
663
664 unsigned int
665 inline_transform (struct cgraph_node *node)
666 {
667 unsigned int todo = 0;
668 struct cgraph_edge *e, *next;
669 bool has_inline = false;
670
671 /* FIXME: Currently the pass manager is adding inline transform more than
672 once to some clones. This needs revisiting after WPA cleanups. */
673 if (cfun->after_inlining)
674 return 0;
675
676 /* We might need the body of this function so that we can expand
677 it inline somewhere else. */
678 if (preserve_function_body_p (node))
679 save_inline_function_body (node);
680
681 for (e = node->callees; e; e = next)
682 {
683 if (!e->inline_failed)
684 has_inline = true;
685 next = e->next_callee;
686 e->redirect_call_stmt_to_callee ();
687 }
688 node->remove_all_references ();
689
690 timevar_push (TV_INTEGRATION);
691 if (node->callees && (opt_for_fn (node->decl, optimize) || has_inline))
692 {
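   /* If the profile count of this node no longer matches the count recorded
      in its body (e.g. after cloning), rescale all basic-block counts so the
      body is consistent with the node before expanding the inline calls.  */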
693 profile_count num = node->count;
694 profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
695 bool scale = num.initialized_p () && !(num == den);
696 if (scale)
697 {
698 profile_count::adjust_for_ipa_scaling (&num, &den);
699 if (dump_file)
700 {
701 fprintf (dump_file, "Applying count scale ");
702 num.dump (dump_file);
703 fprintf (dump_file, "/");
704 den.dump (dump_file);
705 fprintf (dump_file, "\n");
706 }
707
708 basic_block bb;
709 cfun->cfg->count_max = profile_count::uninitialized ();
710 FOR_ALL_BB_FN (bb, cfun)
711 {
712 bb->count = bb->count.apply_scale (num, den);
713 cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
714 }
715 ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
716 }
717 todo = optimize_inline_calls (current_function_decl);
718 }
719 timevar_pop (TV_INTEGRATION);
720
721 cfun->always_inline_functions_inlined = true;
722 cfun->after_inlining = true;
723 todo |= execute_fixup_cfg ();
724
725 if (!(todo & TODO_update_ssa_any))
726 /* Redirecting edges might lead to a need for vops to be recomputed. */
727 todo |= TODO_update_ssa_only_virtuals;
728
729 return todo;
730 }