return callee->cannot_return_p ();
}
-/* Return true if the edge may be considered hot. */
+/* Return true if the edge after scaling its profile by SCALE
+ may be considered hot. */
bool
-cgraph_edge::maybe_hot_p (void)
+cgraph_edge::maybe_hot_p (sreal scale)
{
- if (!maybe_hot_count_p (NULL, count.ipa ()))
+ /* Never consider calls in functions optimized for size hot. */
+ if (opt_for_fn (caller->decl, optimize_size))
return false;
+
+ /* If reliable IPA count is available, just use it. */
+ profile_count c = count.ipa ();
+ if (c.reliable_p ())
+ return maybe_hot_count_p (NULL, c * scale);
+
+ /* See if we can determine hotness using caller frequency. */
if (caller->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
|| (callee
&& callee->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED))
&& (callee
&& callee->frequency <= NODE_FREQUENCY_EXECUTED_ONCE))
return false;
- if (opt_for_fn (caller->decl, optimize_size))
- return false;
+ /* ??? This may make sense for hot functions determined by
+ user attribute, but if function is hot by profile, it may
+ contain non-hot calls. In most practical cases this case
+ is handled by the reliable ipa count above, but e.g. after
+ inlining function with no profile to function with profile
+ we get here. */
if (caller->frequency == NODE_FREQUENCY_HOT)
return true;
+
+ /* Use IPA count and if it is not available apply local heuristics. */
+ if (c.initialized_p ())
+ return maybe_hot_count_p (NULL, c * scale);
if (!count.initialized_p ())
return true;
cgraph_node *where = caller->inlined_to ? caller->inlined_to : caller;
if (!where->count.initialized_p ())
- return false;
+ return true;
+ c = count * scale;
if (caller->frequency == NODE_FREQUENCY_EXECUTED_ONCE)
{
- if (count * 2 < where->count * 3)
+ if (c * 2 < where->count * 3)
return false;
}
- else if (count * param_hot_bb_frequency_fraction < where->count)
+ else if (c * param_hot_bb_frequency_fraction < where->count)
return false;
return true;
}
+/* Return true if the edge may be considered hot. */
+
+bool
+cgraph_edge::maybe_hot_p ()
+{
+ return maybe_hot_p (1);
+}
+
/* Worker for cgraph_can_remove_if_no_direct_calls_p. */
static bool
/* Return true when the edge represents a direct recursion. */
bool recursive_p (void);
- /* Return true if the edge may be considered hot. */
- bool maybe_hot_p (void);
+ /* Return true if the edge may be considered hot. */
+ bool maybe_hot_p ();
+
+ /* Return true if the edge may be considered hot after scaling its count
+ (i.e. assume that optimization would reduce runtime for callee,
+ possibly significantly). */
+ bool maybe_hot_p (sreal scale);
/* Get unique identifier of the edge. */
inline int get_uid ()
return speedup;
}
+/* Return expected speedup of the callee function alone
+ (i.e. no estimate of call overhead and also no scaling
+ by call frequency). */
+
+static sreal
+callee_speedup (struct cgraph_edge *e)
+{
+ sreal unspec_time;
+ sreal spec_time = estimate_edge_time (e, &unspec_time);
+ return unspec_time - spec_time;
+}
+
/* Return true if the speedup for inlining E is bigger than
param_inline_min_speedup. */
if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
want_inline = false;
else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
- ;
+ return true;
else if (!DECL_DECLARED_INLINE_P (callee->decl)
&& !opt_for_fn (e->caller->decl, flag_inline_small_functions))
{
e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
want_inline = false;
}
+
+ /* Early return before lookup of summaries. */
+ if (!want_inline)
+ {
+ if (report)
+ report_inline_failed_reason (e);
+ return false;
+ }
+
+ ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ ipa_call_summary *call_info = ipa_call_summaries->get (e);
+
/* Do fast and conservative check if the function can be good
inline candidate. */
- else if ((!DECL_DECLARED_INLINE_P (callee->decl)
- && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
- && ipa_fn_summaries->get (callee)->min_size
- - ipa_call_summaries->get (e)->call_stmt_size
- > inline_insns_auto (e->caller, true, true))
+ if ((!DECL_DECLARED_INLINE_P (callee->decl)
+ && (!e->count.ipa ().initialized_p ()
+ || !e->maybe_hot_p (callee_info->time)))
+ && callee_info->min_size - call_info->call_stmt_size
+ > inline_insns_auto (e->caller, true, true))
{
e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
want_inline = false;
}
else if ((DECL_DECLARED_INLINE_P (callee->decl)
|| e->count.ipa ().nonzero_p ())
- && ipa_fn_summaries->get (callee)->min_size
- - ipa_call_summaries->get (e)->call_stmt_size
+ && callee_info->min_size - call_info->call_stmt_size
> inline_insns_single (e->caller, true, true))
{
e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
}
}
/* If call is cold, do not inline when function body would grow. */
- else if (!e->maybe_hot_p ()
+ else if (!e->maybe_hot_p (callee_speedup (e))
&& (growth >= inline_insns_single (e->caller, false, false)
|| growth_positive_p (callee, e, growth)))
{