/* Inlining decision heuristics.
- Copyright (C) 2003-2019 Free Software Foundation, Inc.
+ Copyright (C) 2003-2021 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
if (limit < what_size_info->self_size)
limit = what_size_info->self_size;
- limit += limit * param_large_function_growth / 100;
+ limit += limit * opt_for_fn (to->decl, param_large_function_growth) / 100;
/* Check the size after inlining against the function limits. But allow
the function to shrink if it went over the limits by forced inlining. */
newsize = estimate_size_after_inlining (to, e);
if (newsize >= ipa_size_summaries->get (what)->size
- && newsize > param_large_function_insns
+ && newsize > opt_for_fn (to->decl, param_large_function_insns)
&& newsize > limit)
{
e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
on every invocation of the caller (i.e. its call statement dominates
exit block). We do not track this information, yet. */
stack_size_limit += ((gcov_type)stack_size_limit
- * param_stack_frame_growth / 100);
+ * opt_for_fn (to->decl, param_stack_frame_growth)
+ / 100);
inlined_stack = (ipa_get_stack_frame_offset (to)
+ outer_info->estimated_self_stack_size
This bit overoptimistically assume that we are good at stack
packing. */
&& inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
- && inlined_stack > param_large_stack_frame)
+ && inlined_stack > opt_for_fn (to->decl, param_large_stack_frame))
{
e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
return false;
if (!caller || !callee)
return true;
- /* Allow inlining always_inline functions into no_sanitize_address
- functions. */
- if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
- && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
+ /* Follow clang and allow inlining for always_inline functions. */
+ if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
return true;
- return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
- == sanitize_flags_p (SANITIZE_ADDRESS, callee))
- && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
- == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
- && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
- == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
+ const sanitize_code codes[] =
+ {
+ SANITIZE_ADDRESS,
+ SANITIZE_THREAD,
+ SANITIZE_UNDEFINED,
+ SANITIZE_UNDEFINED_NONDEFAULT,
+ SANITIZE_POINTER_COMPARE,
+ SANITIZE_POINTER_SUBTRACT
+ };
+
+ for (unsigned i = 0; i < sizeof (codes) / sizeof (codes[0]); i++)
+ if (sanitize_flags_p (codes[i], caller)
+ != sanitize_flags_p (codes[i], callee))
+ return false;
+
+ if (sanitize_coverage_p (caller) != sanitize_coverage_p (callee))
+ return false;
+
+ return true;
}
/* Used for flags where it is safe to inline when caller's value is
/* Don't inline a function with mismatched sanitization attributes. */
else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
{
- e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
+ e->inline_failed = CIF_SANITIZE_ATTRIBUTE_MISMATCH;
inlinable = false;
}
+ else if (profile_arc_flag
+ && (lookup_attribute ("no_profile_instrument_function",
+ DECL_ATTRIBUTES (caller->decl)) == NULL_TREE)
+ != (lookup_attribute ("no_profile_instrument_function",
+ DECL_ATTRIBUTES (callee->decl)) == NULL_TREE))
+ {
+ cgraph_node *origin = caller;
+ while (origin->clone_of)
+ origin = origin->clone_of;
+
+ if (!DECL_STRUCT_FUNCTION (origin->decl)->always_inline_functions_inlined)
+ {
+ e->inline_failed = CIF_UNSPECIFIED;
+ inlinable = false;
+ }
+ }
+
if (!inlinable && report)
report_inline_failed_reason (e);
return inlinable;
}
-/* Return inlining_insns_single limit for function N. If HINT is true
+/* Return inlining_insns_single limit for function N. If HINT or HINT2 is true
scale up the bound. */
static int
-inline_insns_single (cgraph_node *n, bool hint)
+inline_insns_single (cgraph_node *n, bool hint, bool hint2)
{
- if (opt_for_fn (n->decl, optimize) >= 3)
+ if (hint && hint2)
{
- if (hint)
- return param_max_inline_insns_single
- * param_inline_heuristics_hint_percent / 100;
- return param_max_inline_insns_single;
- }
- else
- {
- if (hint)
- return param_max_inline_insns_single_o2
- * param_inline_heuristics_hint_percent_o2 / 100;
- return param_max_inline_insns_single_o2;
+ int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
+ spd = spd * spd;
+ if (spd > 1000000)
+ spd = 1000000;
+ return opt_for_fn (n->decl, param_max_inline_insns_single) * spd / 100;
}
+ if (hint || hint2)
+ return opt_for_fn (n->decl, param_max_inline_insns_single)
+ * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
+ return opt_for_fn (n->decl, param_max_inline_insns_single);
}
-/* Return inlining_insns_auto limit for function N. If HINT is true
+/* Return inlining_insns_auto limit for function N. If HINT or HINT2 is true
scale up the bound. */
static int
-inline_insns_auto (cgraph_node *n, bool hint)
+inline_insns_auto (cgraph_node *n, bool hint, bool hint2)
{
int max_inline_insns_auto = opt_for_fn (n->decl, param_max_inline_insns_auto);
- if (hint)
- return max_inline_insns_auto * param_inline_heuristics_hint_percent / 100;
+ if (hint && hint2)
+ {
+ int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
+ spd = spd * spd;
+ if (spd > 1000000)
+ spd = 1000000;
+ return max_inline_insns_auto * spd / 100;
+ }
+ if (hint || hint2)
+ return max_inline_insns_auto
+ * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
return max_inline_insns_auto;
}
else if (check_match (flag_wrapv)
|| check_match (flag_trapv)
|| check_match (flag_pcc_struct_return)
+ || check_maybe_down (optimize_debug)
/* When caller or callee does FP math, be sure FP codegen flags
compatible. */
|| ((caller_info->fp_expressions && callee_info->fp_expressions)
> opt_for_fn (caller->decl, optimize_size))
{
int growth = estimate_edge_growth (e);
- if (growth > param_max_inline_insns_size
+ if (growth > opt_for_fn (caller->decl, param_max_inline_insns_size)
&& (!DECL_DECLARED_INLINE_P (callee->decl)
- && growth >= MAX (inline_insns_single (caller, false),
- inline_insns_auto (caller, false))))
+ && growth >= MAX (inline_insns_single (caller, false, false),
+ inline_insns_auto (caller, false, false))))
{
e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
inlinable = false;
/* First take care of very large functions. */
int min_growth = estimate_min_edge_growth (e), growth = 0;
int n;
- int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
- ? param_early_inlining_insns
- : param_early_inlining_insns_o2;
+ int early_inlining_insns = param_early_inlining_insns;
if (min_growth > early_inlining_insns)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
" will not early inline: %C->%C, "
- "growth %i exceeds --param early-inlining-insns%s\n",
- e->caller, callee, growth,
- opt_for_fn (e->caller->decl, optimize) >= 3
- ? "" : "-O2");
+ "growth %i exceeds --param early-inlining-insns\n",
+ e->caller, callee, growth);
want_inline = false;
}
else if ((n = num_calls (callee)) != 0
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
" will not early inline: %C->%C, "
- "growth %i exceeds --param early-inlining-insns%s "
+ "growth %i exceeds --param early-inlining-insns "
"divided by number of calls\n",
- e->caller, callee, growth,
- opt_for_fn (e->caller->decl, optimize) >= 3
- ? "" : "-O2");
+ e->caller, callee, growth);
want_inline = false;
}
}
}
/* Return true if the speedup for inlining E is bigger than
- PARAM_MAX_INLINE_MIN_SPEEDUP. */
+ param_inline_min_speedup. */
static bool
big_speedup_p (struct cgraph_edge *e)
cgraph_node *caller = (e->caller->inlined_to
? e->caller->inlined_to
: e->caller);
- int limit = opt_for_fn (caller->decl, optimize) >= 3
- ? param_inline_min_speedup
- : param_inline_min_speedup_o2;
+ int limit = opt_for_fn (caller->decl, param_inline_min_speedup);
if ((time - inlined_time) * 100 > time * limit)
return true;
{
bool want_inline = true;
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
+ cgraph_node *to = (e->caller->inlined_to
+ ? e->caller->inlined_to : e->caller);
/* Allow this function to be called before can_inline_edge_p,
since it's usually cheaper. */
&& (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
&& ipa_fn_summaries->get (callee)->min_size
- ipa_call_summaries->get (e)->call_stmt_size
- > inline_insns_auto (e->caller, true))
+ > inline_insns_auto (e->caller, true, true))
{
e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
want_inline = false;
|| e->count.ipa ().nonzero_p ())
&& ipa_fn_summaries->get (callee)->min_size
- ipa_call_summaries->get (e)->call_stmt_size
- > inline_insns_single (e->caller, true))
+ > inline_insns_single (e->caller, true, true))
{
- if (opt_for_fn (e->caller->decl, optimize) >= 3)
- e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
- ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
- : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
- else
- e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
- ? CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT
- : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
+ e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
+ ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
+ : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
want_inline = false;
}
else
{
int growth = estimate_edge_growth (e);
ipa_hints hints = estimate_edge_hints (e);
+ /* We have two independent groups of hints.  If a hint in each
+ group matches, the limits are increased.  If both groups match,
+ the limit is increased even more.  */
bool apply_hints = (hints & (INLINE_HINT_indirect_call
| INLINE_HINT_known_hot
| INLINE_HINT_loop_iterations
| INLINE_HINT_loop_stride));
+ bool apply_hints2 = (hints & INLINE_HINT_builtin_constant_p);
- if (growth <= param_max_inline_insns_size)
+ if (growth <= opt_for_fn (to->decl,
+ param_max_inline_insns_size))
;
/* Apply param_max_inline_insns_single limit. Do not do so when
hints suggests that inlining given function is very profitable.
Avoid computation of big_speedup_p when not necessary to change
outcome of decision. */
else if (DECL_DECLARED_INLINE_P (callee->decl)
- && growth >= inline_insns_single (e->caller, apply_hints)
- && (apply_hints
- || growth >= inline_insns_single (e->caller, true)
+ && growth >= inline_insns_single (e->caller, apply_hints,
+ apply_hints2)
+ && (apply_hints || apply_hints2
+ || growth >= inline_insns_single (e->caller, true,
+ apply_hints2)
|| !big_speedup_p (e)))
{
- if (opt_for_fn (e->caller->decl, optimize) >= 3)
- e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
- else
- e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT;
+ e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
want_inline = false;
}
else if (!DECL_DECLARED_INLINE_P (callee->decl)
&& !opt_for_fn (e->caller->decl, flag_inline_functions)
- && growth >= param_max_inline_insns_small)
+ && growth >= opt_for_fn (to->decl,
+ param_max_inline_insns_small))
{
/* growth_positive_p is expensive, always test it last. */
- if (growth >= inline_insns_single (e->caller, false)
+ if (growth >= inline_insns_single (e->caller, false, false)
|| growth_positive_p (callee, e, growth))
{
e->inline_failed = CIF_NOT_DECLARED_INLINED;
/* Apply param_max_inline_insns_auto limit for functions not declared
inline. Bypass the limit when speedup seems big. */
else if (!DECL_DECLARED_INLINE_P (callee->decl)
- && growth >= inline_insns_auto (e->caller, apply_hints)
- && (apply_hints
- || growth >= inline_insns_auto (e->caller, true)
+ && growth >= inline_insns_auto (e->caller, apply_hints,
+ apply_hints2)
+ && (apply_hints || apply_hints2
+ || growth >= inline_insns_auto (e->caller, true,
+ apply_hints2)
|| !big_speedup_p (e)))
{
/* growth_positive_p is expensive, always test it last. */
- if (growth >= inline_insns_single (e->caller, false)
+ if (growth >= inline_insns_single (e->caller, false, false)
|| growth_positive_p (callee, e, growth))
{
e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
}
/* If call is cold, do not inline when function body would grow. */
else if (!e->maybe_hot_p ()
- && (growth >= inline_insns_single (e->caller, false)
+ && (growth >= inline_insns_single (e->caller, false, false)
|| growth_positive_p (callee, e, growth)))
{
e->inline_failed = CIF_UNLIKELY_CALL;
}
/* EDGE is self recursive edge.
- We hand two cases - when function A is inlining into itself
+ We handle two cases - when function A is inlining into itself
or when function A is being inlined into another inliner copy of function
A within function B.
char const *reason = NULL;
bool want_inline = true;
sreal caller_freq = 1;
- int max_depth = param_max_inline_recursive_depth_auto;
+ int max_depth = opt_for_fn (outer_node->decl,
+ param_max_inline_recursive_depth_auto);
if (DECL_DECLARED_INLINE_P (edge->caller->decl))
- max_depth = param_max_inline_recursive_depth;
+ max_depth = opt_for_fn (outer_node->decl,
+ param_max_inline_recursive_depth);
if (!edge->maybe_hot_p ())
{
{
if (edge->sreal_frequency () * 100
<= caller_freq
- * param_min_inline_recursive_probability)
+ * opt_for_fn (outer_node->decl,
+ param_min_inline_recursive_probability))
{
reason = "frequency of recursive call is too small";
want_inline = false;
wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
{
return size < (DECL_DECLARED_INLINE_P (where->decl)
- ? inline_insns_single (where, false)
- : inline_insns_auto (where, false));
+ ? inline_insns_single (where, false, false)
+ : inline_insns_auto (where, false, false));
}
/* A cost model driving the inlining heuristics in a way so the edges with
/* ... or when early optimizers decided to split and edge
frequency still indicates splitting is a win ... */
|| (callee->split_part && !caller->split_part
- && freq * 100 < param_partial_inlining_entry_probability
+ && freq * 100
+ < opt_for_fn (caller->decl,
+ param_partial_inlining_entry_probability)
/* ... and do not overwrite user specified hints. */
&& (!DECL_DECLARED_INLINE_P (edge->callee->decl)
|| DECL_DECLARED_INLINE_P (caller->decl)))))
| INLINE_HINT_loop_stride))
|| callee_info->growth <= 0)
badness = badness.shift (badness > 0 ? -2 : 2);
+ if (hints & INLINE_HINT_builtin_constant_p)
+ badness = badness.shift (badness > 0 ? -4 : 4);
if (hints & (INLINE_HINT_same_scc))
badness = badness.shift (badness > 0 ? 3 : -3);
else if (hints & (INLINE_HINT_in_scc))
recursive_inlining (struct cgraph_edge *edge,
vec<cgraph_edge *> *new_edges)
{
- int limit = param_max_inline_insns_recursive_auto;
+ cgraph_node *to = (edge->caller->inlined_to
+ ? edge->caller->inlined_to : edge->caller);
+ int limit = opt_for_fn (to->decl,
+ param_max_inline_insns_recursive_auto);
edge_heap_t heap (sreal::min ());
struct cgraph_node *node;
struct cgraph_edge *e;
node = node->inlined_to;
if (DECL_DECLARED_INLINE_P (node->decl))
- limit = param_max_inline_insns_recursive;
+ limit = opt_for_fn (to->decl, param_max_inline_insns_recursive);
/* Make sure that function is small enough to be considered for inlining. */
if (estimate_size_after_inlining (node, edge) >= limit)
if (dump_file)
fprintf (dump_file,
- " Performing recursive inlining on %s\n",
- node->name ());
+ " Performing recursive inlining on %s\n", node->dump_name ());
/* Do the inlining and update list of recursive call during process. */
while (!heap.empty ())
/* Given whole compilation unit estimate of INSNS, compute how large we can
allow the unit to grow. */
-static int
-compute_max_insns (int insns)
+static int64_t
+compute_max_insns (cgraph_node *node, int insns)
{
int max_insns = insns;
- if (max_insns < param_large_unit_insns)
- max_insns = param_large_unit_insns;
+ if (max_insns < opt_for_fn (node->decl, param_large_unit_insns))
+ max_insns = opt_for_fn (node->decl, param_large_unit_insns);
return ((int64_t) max_insns
- * (100 + param_inline_unit_growth) / 100);
+ * (100 + opt_for_fn (node->decl, param_inline_unit_growth)) / 100);
}
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
static void
-add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
+add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> &new_edges)
{
while (new_edges.length () > 0)
{
enum availability avail;
struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
e->caller);
- struct cgraph_edge *direct, *indirect;
- struct ipa_ref *ref;
gcc_assert (e->speculative && !e->indirect_unknown_callee);
int ecf_flags = flags_from_decl_or_type (target->decl);
if (ecf_flags & ECF_CONST)
{
- e->speculative_call_info (direct, indirect, ref);
- if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
+ if (!(e->speculative_call_indirect_edge ()->indirect_info
+ ->ecf_flags & ECF_CONST))
return true;
}
else if (ecf_flags & ECF_PURE)
{
- e->speculative_call_info (direct, indirect, ref);
- if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
+ if (!(e->speculative_call_indirect_edge ()->indirect_info
+ ->ecf_flags & ECF_PURE))
return true;
}
}
if (edge->count.ipa ().initialized_p ())
spec_rem += edge->count.ipa ();
- edge->resolve_speculation ();
+ cgraph_edge::resolve_speculation (edge);
reset_edge_caches (where);
ipa_update_overall_fn_summary (where);
update_caller_keys (edge_heap, where,
struct cgraph_edge *edge;
edge_heap_t edge_heap (sreal::min ());
auto_bitmap updated_nodes;
- int min_size, max_size;
+ int min_size;
auto_vec<cgraph_edge *> new_indirect_edges;
int initial_size = 0;
struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
if (!node->inlined_to)
{
if (!node->alias && node->analyzed
- && (node->has_gimple_body_p () || node->thunk.thunk_p)
+ && (node->has_gimple_body_p () || node->thunk)
&& opt_for_fn (node->decl, optimize))
{
class ipa_fn_summary *info = ipa_fn_summaries->get (node);
initial_size);
overall_size = initial_size;
- max_size = compute_max_insns (overall_size);
min_size = overall_size;
/* Populate the heap with all edges we might inline. */
if (dump_file)
fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
- for (edge = node->callees; edge; edge = next)
+ for (edge = node->callees; edge; edge = edge->next_callee)
{
- next = edge->next_callee;
if (edge->inline_failed
&& !edge->aux
&& can_inline_edge_p (edge, true)
if (edge->speculative
&& !speculation_useful_p (edge, edge->aux != NULL))
{
- edge->resolve_speculation ();
+ cgraph_edge::resolve_speculation (edge);
update = true;
}
}
edge_badness (edge, true);
}
- if (overall_size + growth > max_size
+ where = edge->caller;
+
+ if (overall_size + growth > compute_max_insns (where, min_size)
&& !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
{
edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
specific inliner. */
if (edge->recursive_p ())
{
- where = edge->caller;
if (where->inlined_to)
where = where->inlined_to;
if (!recursive_inlining (edge,
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
" Inlined %C into %C which now has time %f and "
- "size %i, net change of %s.\n",
+ "size %i, net change of %s%s.\n",
edge->callee, edge->caller,
s->time.to_double (),
ipa_size_summaries->get (edge->caller)->size,
- buf_net_change);
+ buf_net_change,
+ cross_module_call_p (edge) ? " (cross module)":"");
}
if (min_size > overall_size)
{
min_size = overall_size;
- max_size = compute_max_insns (min_size);
if (dump_file)
fprintf (dump_file, "New minimal size reached: %i\n", min_size);
cgraph_node *ultimate = node->ultimate_alias_target ();
fprintf (dump_file,
"\nInlining %s size %i.\n",
- ultimate->name (),
+ ultimate->dump_name (),
ipa_size_summaries->get (ultimate)->size);
fprintf (dump_file,
" Called once from %s %i insns.\n",
- node->callers->caller->name (),
+ node->callers->caller->dump_name (),
ipa_size_summaries->get (node->callers->caller)->size);
}
if (dump_file)
fprintf (dump_file,
" Inlined into %s which now has %i size\n",
- caller->name (),
+ caller->dump_name (),
ipa_size_summaries->get (caller)->size);
if (!(*num_calls)--)
{
{
node = order[i];
if (node->definition
+ /* Do not try to flatten aliases. These may happen for example when
+ creating local aliases. */
+ && !node->alias
&& lookup_attribute ("flatten",
DECL_ATTRIBUTES (node->decl)) != NULL)
order[j--] = order[i];
try to flatten itself turning it into a self-recursive
function. */
if (dump_file)
- fprintf (dump_file, "Flattening %s\n", node->name ());
+ fprintf (dump_file, "Flattening %s\n", node->dump_name ());
flatten_function (node, false, true);
}
{
if (edge->count.ipa ().initialized_p ())
spec_rem += edge->count.ipa ();
- edge->resolve_speculation ();
+ cgraph_edge::resolve_speculation (edge);
update = true;
remove_functions = true;
}
}
}
- /* Free ipa-prop structures if they are no longer needed. */
- ipa_free_all_structures_after_iinln ();
-
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"\nInlined %i calls, eliminated %i functions\n\n",
}
/* We iterate incremental inlining to get trivial cases of indirect
inlining. */
- while (iterations < param_early_inliner_max_iterations
+ while (iterations < opt_for_fn (node->decl,
+ param_early_inliner_max_iterations)
&& early_inline_small_functions (node))
{
timevar_push (TV_INTEGRATION);
es->call_stmt_time
= estimate_num_insns (edge->call_stmt, &eni_time_weights);
}
- if (iterations < param_early_inliner_max_iterations - 1)
+ if (iterations < opt_for_fn (node->decl,
+ param_early_inliner_max_iterations) - 1)
ipa_update_overall_fn_summary (node);
timevar_pop (TV_INTEGRATION);
iterations++;