diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index f13f983dfdde83700e836e8f47036df866748ff1..f37cd9da26d85d610584a97632e9d0460652c08b 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -1,5 +1,5 @@
 /* Inlining decision heuristics.
-   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   Copyright (C) 2003-2019 Free Software Foundation, Inc.
    Contributed by Jan Hubicka
 
 This file is part of GCC.
@@ -59,7 +59,7 @@ along with GCC; see the file COPYING3.  If not see
         optimization) and thus improve quality of analysis done by real IPA
         optimizers.
 
-        Because of lack of whole unit knowledge, the pass can not really make
+        Because of lack of whole unit knowledge, the pass cannot really make
         good code size/performance tradeoffs.  It however does very simple
         speculative inlining allowing code size to grow by
         EARLY_INLINING_INSNS when callee is leaf function.  In this case the
@@ -110,23 +110,24 @@ along with GCC; see the file COPYING3.  If not see
 #include "symbol-summary.h"
 #include "tree-vrp.h"
 #include "ipa-prop.h"
+#include "ipa-fnsummary.h"
 #include "ipa-inline.h"
 #include "ipa-utils.h"
 #include "sreal.h"
 #include "auto-profile.h"
 #include "builtins.h"
 #include "fibonacci_heap.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "asan.h"
 
 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
 
 /* Statistics we collect about inlining algorithm.  */
 static int overall_size;
-static gcov_type max_count;
-static gcov_type spec_rem;
-
-/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
-static sreal cgraph_freq_base_rec, percent_rec;
+static profile_count max_count;
+static profile_count spec_rem;
 
 /* Return false when inlining edge E would lead to violating
    limits on function unit growth or stack usage growth.  
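
Aside: the switch from gcov_type to profile_count matters because profile_count distinguishes "no profile data" from "count of zero" instead of overloading 0 for both. A rough standalone illustration of that distinction, using std::optional as a hypothetical stand-in for the GCC type:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>

/* Hypothetical stand-in for profile_count: an execution count that can
   also be "uninitialized" (no profile data), unlike a raw gcov_type
   where 0 must mean both "never executed" and "unknown".  */
struct count_t
{
  std::optional<int64_t> value;
  static count_t uninitialized () { return {std::nullopt}; }
  bool initialized_p () const { return value.has_value (); }
  bool nonzero_p () const { return value && *value > 0; }
  /* Keep the larger known count, mirroring how max_count is
     accumulated over all caller edges.  */
  count_t max (count_t other) const
  {
    if (!value) return other;
    if (!other.value) return *this;
    return {std::max (*value, *other.value)};
  }
};

int main ()
{
  count_t max_count = count_t::uninitialized ();
  for (int64_t edge_count : {0, 120, 7})
    max_count = max_count.max (count_t{edge_count});
  std::cout << (max_count.nonzero_p () ? "profiled hot" : "no/zero profile")
            << "\n";
}
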
@@ -149,7 +150,8 @@ caller_growth_limits (struct cgraph_edge *e)
   int newsize;
   int limit = 0;
   HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
-  inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
+  ipa_fn_summary *info, *what_info;
+  ipa_fn_summary *outer_info = ipa_fn_summaries->get (to);
 
   /* Look for function e->caller is inlined to.  While doing
      so work out the largest function body on the way.  As
@@ -161,7 +163,7 @@ caller_growth_limits (struct cgraph_edge *e)
      too much in order to prevent compiler from exploding".  */
   while (true)
     {
-      info = inline_summaries->get (to);
+      info = ipa_fn_summaries->get (to);
       if (limit < info->self_size)
        limit = info->self_size;
       if (stack_size_limit < info->estimated_self_stack_size)
@@ -172,7 +174,7 @@ caller_growth_limits (struct cgraph_edge *e)
        break;
     }
 
-  what_info = inline_summaries->get (what);
+  what_info = ipa_fn_summaries->get (what);
 
   if (limit < what_info->self_size)
     limit = what_info->self_size;
@@ -225,29 +227,32 @@ caller_growth_limits (struct cgraph_edge *e)
 static void
 report_inline_failed_reason (struct cgraph_edge *e)
 {
-  if (dump_file)
+  if (dump_enabled_p ())
     {
-      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
-              xstrdup_for_dump (e->caller->name ()), e->caller->order,
-              xstrdup_for_dump (e->callee->name ()), e->callee->order,
-              cgraph_inline_failed_string (e->inline_failed));
+      dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                      "  not inlinable: %C -> %C, %s\n",
+                      e->caller, e->callee,
+                      cgraph_inline_failed_string (e->inline_failed));
       if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
           || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
          && e->caller->lto_file_data
          && e->callee->ultimate_alias_target ()->lto_file_data)
        {
-         fprintf (dump_file, "  LTO objects: %s, %s\n",
-                  e->caller->lto_file_data->file_name,
-                  e->callee->ultimate_alias_target ()->lto_file_data->file_name);
+         dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                          "  LTO objects: %s, %s\n",
+                          e->caller->lto_file_data->file_name,
+                          e->callee->ultimate_alias_target ()->lto_file_data->file_name);
        }
       if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
-       cl_target_option_print_diff
-        (dump_file, 2, target_opts_for_fn (e->caller->decl),
-          target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
+       if (dump_file)
+         cl_target_option_print_diff
+           (dump_file, 2, target_opts_for_fn (e->caller->decl),
+            target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
       if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
-       cl_optimization_print_diff
-         (dump_file, 2, opts_for_fn (e->caller->decl),
-          opts_for_fn (e->callee->ultimate_alias_target ()->decl));
+       if (dump_file)
+         cl_optimization_print_diff
+           (dump_file, 2, opts_for_fn (e->caller->decl),
+            opts_for_fn (e->callee->ultimate_alias_target ()->decl));
     }
 }
 
@@ -256,17 +261,21 @@ report_inline_failed_reason (struct cgraph_edge *e)
 static bool
 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
 {
-  /* Don't care if sanitizer is disabled */
-  if (!(flag_sanitize & SANITIZE_ADDRESS))
+  if (!caller || !callee)
     return true;
 
-  if (!caller || !callee)
+  /* Allow inlining always_inline functions into no_sanitize_address
+     functions.  */
+  if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
+      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
     return true;
 
-  return !!lookup_attribute ("no_sanitize_address",
-      DECL_ATTRIBUTES (caller)) == 
-      !!lookup_attribute ("no_sanitize_address",
-      DECL_ATTRIBUTES (callee));
+  return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
+          == sanitize_flags_p (SANITIZE_ADDRESS, callee))
+         && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
+             == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
+         && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
+             == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
 }
 
 /* Used for flags where it is safe to inline when caller's value is
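
Aside: the rewritten predicate requires caller and callee to agree on each relevant sanitizer, with one escape hatch for always_inline callees entering unsanitized callers. A compact sketch of that agreement test with a plain bitmask (illustrative only, not the GCC sanitize_flags_p API):

#include <iostream>

enum sanitize_bits
{
  SAN_ADDRESS = 1 << 0,
  SAN_PTR_CMP = 1 << 1,
  SAN_PTR_SUB = 1 << 2
};

struct fn { unsigned sanitize; bool always_inline; };

/* Caller and callee must agree on every sanitizer bit we care about,
   unless the callee is always_inline and the caller is not
   address-sanitized.  */
static bool
sanitize_match_p (const fn &caller, const fn &callee)
{
  if (!(caller.sanitize & SAN_ADDRESS) && callee.always_inline)
    return true;
  const unsigned mask = SAN_ADDRESS | SAN_PTR_CMP | SAN_PTR_SUB;
  return (caller.sanitize & mask) == (callee.sanitize & mask);
}

int main ()
{
  fn asan_fn {SAN_ADDRESS, false}, ai_fn {0, true}, plain_fn {0, false};
  std::cout << sanitize_match_p (plain_fn, ai_fn) << "\n";   /* 1 */
  std::cout << sanitize_match_p (asan_fn, plain_fn) << "\n"; /* 0 */
}
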
@@ -290,18 +299,16 @@ sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
       (opts_for_fn (caller->decl)->x_##flag            \
        != opts_for_fn (callee->decl)->x_##flag)
 
- /* Decide if we can inline the edge and possibly update
+/* Decide if we can inline the edge and possibly update
    inline_failed reason.  
    We check whether inlining is possible at all and whether
    caller growth limits allow doing so.  
 
-   if REPORT is true, output reason to the dump file.  
-
-   if DISREGARD_LIMITS is true, ignore size limits.*/
+   if REPORT is true, output reason to the dump file. */
 
 static bool
 can_inline_edge_p (struct cgraph_edge *e, bool report,
-                  bool disregard_limits = false, bool early = false)
+                  bool early = false)
 {
   gcc_checking_assert (e->inline_failed);
 
@@ -317,15 +324,18 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
   cgraph_node *caller = e->caller->global.inlined_to
                        ? e->caller->global.inlined_to : e->caller;
   cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
-  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
-  tree callee_tree
-    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
 
   if (!callee->definition)
     {
       e->inline_failed = CIF_BODY_NOT_AVAILABLE;
       inlinable = false;
     }
+  if (!early && (!opt_for_fn (callee->decl, optimize)
+                || !opt_for_fn (caller->decl, optimize)))
+    {
+      e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
+      inlinable = false;
+    }
   else if (callee->calls_comdat_local)
     {
       e->inline_failed = CIF_USES_COMDAT_LOCAL;
@@ -363,7 +373,8 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
       e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
       inlinable = false;
     }
-  else if (!inline_summaries->get (callee)->inlinable)
+  else if (ipa_fn_summaries->get (callee) == NULL
+          || !ipa_fn_summaries->get (callee)->inlinable)
     {
       e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
       inlinable = false;
@@ -374,13 +385,55 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
       e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
       inlinable = false;
     }
+  if (!inlinable && report)
+    report_inline_failed_reason (e);
+  return inlinable;
+}
+
+/* Decide if we can inline the edge and possibly update
+   inline_failed reason.  
+   We check whether inlining is possible at all and whether
+   caller growth limits allow doing so.  
+
+   if REPORT is true, output reason to the dump file.
+
+   if DISREGARD_LIMITS is true, ignore size limits.  */
+
+static bool
+can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
+                            bool disregard_limits = false, bool early = false)
+{
+  gcc_checking_assert (e->inline_failed);
+
+  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
+    {
+      if (report)
+        report_inline_failed_reason (e);
+      return false;
+    }
+
+  bool inlinable = true;
+  enum availability avail;
+  cgraph_node *caller = e->caller->global.inlined_to
+                       ? e->caller->global.inlined_to : e->caller;
+  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
+  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
+  tree callee_tree
+    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
   /* Check if caller growth allows the inlining.  */
-  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
-          && !disregard_limits
-          && !lookup_attribute ("flatten",
-                                DECL_ATTRIBUTES (caller->decl))
-           && !caller_growth_limits (e))
+  if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
+      && !disregard_limits
+      && !lookup_attribute ("flatten",
+                DECL_ATTRIBUTES (caller->decl))
+      && !caller_growth_limits (e))
     inlinable = false;
+  else if (callee->externally_visible
+          && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)
+          && flag_live_patching == LIVE_PATCHING_INLINE_ONLY_STATIC)
+    {
+      e->inline_failed = CIF_EXTERN_LIVE_ONLY_STATIC;
+      inlinable = false;
+    }
   /* Don't inline a function with a higher optimization level than the
      caller.  FIXME: this is really just tip of iceberg of handling
      optimization attribute.  */
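
Aside: this hunk completes the split of the old monolithic check into can_inline_edge_p (hard correctness tests) and can_inline_edge_by_limits_p (growth/limit tests), so call sites can run the cheap test first and pay for the expensive one only when it still matters. A minimal sketch of the resulting call-site pattern, with hypothetical names:

#include <iostream>

struct edge { bool body_available; int growth; };

/* Cheap, always-required correctness check.  */
static bool
can_inline_p (const edge &e)
{
  return e.body_available;
}

/* More expensive growth/limit check, split out so paths that must
   inline regardless of size (flatten, always_inline) can skip it.  */
static bool
can_inline_by_limits_p (const edge &e, bool disregard_limits = false)
{
  return disregard_limits || e.growth <= 40;
}

int main ()
{
  edge e {true, 100};
  /* Typical call site: both predicates must pass.  */
  std::cout << (can_inline_p (e) && can_inline_by_limits_p (e)
                ? "inline" : "skip") << "\n";
}
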
@@ -390,12 +443,12 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
             (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
              && lookup_attribute ("always_inline",
                                   DECL_ATTRIBUTES (callee->decl)));
-      inline_summary *caller_info = inline_summaries->get (caller);
-      inline_summary *callee_info = inline_summaries->get (callee);
+      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
+      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
 
-     /* Until GCC 4.9 we did not check the semantics alterning flags
-       bellow and inline across optimization boundry.
-       Enabling checks bellow breaks several packages by refusing
+     /* Until GCC 4.9 we did not check the semantics-altering flags
+       below and inlined across optimization boundaries.
+       Enabling checks below breaks several packages by refusing
        to inline library always_inline functions. See PR65873.
        Disable the check for early inlining for now until better solution
        is found.  */
@@ -406,6 +459,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
         Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
              || check_match (flag_trapv)
+             || check_match (flag_pcc_struct_return)
              /* When caller or callee does FP math, be sure FP codegen flags
                 compatible.  */
              || ((caller_info->fp_expressions && callee_info->fp_expressions)
@@ -476,7 +530,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
-         if (growth > 0
+         if (growth > PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE)
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
                                    MAX_INLINE_INSNS_AUTO)))
@@ -515,7 +569,7 @@ can_early_inline_edge_p (struct cgraph_edge *e)
 {
   struct cgraph_node *callee = e->callee->ultimate_alias_target ();
   /* Early inliner might get called at WPA stage when IPA pass adds new
-     function.  In this case we can not really do any of early inlining
+     function.  In this case we cannot really do any of early inlining
      because function bodies are missing.  */
   if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
     return false;
@@ -531,11 +585,13 @@ can_early_inline_edge_p (struct cgraph_edge *e)
   if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
       || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
     {
-      if (dump_file)
-       fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                        "  edge not inlinable: not in SSA form\n");
       return false;
     }
-  if (!can_inline_edge_p (e, true, false, true))
+  if (!can_inline_edge_p (e, true, true)
+      || !can_inline_edge_by_limits_p (e, true, false, true))
     return false;
   return true;
 }
@@ -586,42 +642,38 @@ want_early_inline_function_p (struct cgraph_edge *e)
       int growth = estimate_edge_growth (e);
       int n;
 
-      if (growth <= 0)
+      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
        ;
-      else if (!e->maybe_hot_p ()
-              && growth > 0)
+      else if (!e->maybe_hot_p ())
        {
-         if (dump_file)
-           fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
-                    "call is cold and code would grow by %i\n",
-                    xstrdup_for_dump (e->caller->name ()),
-                    e->caller->order,
-                    xstrdup_for_dump (callee->name ()), callee->order,
-                    growth);
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "  will not early inline: %C->%C, "
+                            "call is cold and code would grow by %i\n",
+                            e->caller, callee,
+                            growth);
          want_inline = false;
        }
       else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
-         if (dump_file)
-           fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
-                    "growth %i exceeds --param early-inlining-insns\n",
-                    xstrdup_for_dump (e->caller->name ()),
-                    e->caller->order,
-                    xstrdup_for_dump (callee->name ()), callee->order,
-                    growth);
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "  will not early inline: %C->%C, "
+                            "growth %i exceeds --param early-inlining-insns\n",
+                            e->caller, callee,
+                            growth);
          want_inline = false;
        }
       else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
-         if (dump_file)
-           fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
-                    "growth %i exceeds --param early-inlining-insns "
-                    "divided by number of calls\n",
-                    xstrdup_for_dump (e->caller->name ()),
-                    e->caller->order,
-                    xstrdup_for_dump (callee->name ()), callee->order,
-                    growth);
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "  will not early inline: %C->%C, "
+                            "growth %i exceeds --param early-inlining-insns "
+                            "divided by number of calls\n",
+                            e->caller, callee,
+                            growth);
          want_inline = false;
        }
     }
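
Aside: the last test charges the growth once per remaining call of the callee: with growth g and n other calls, inlining everywhere would cost roughly g * (n + 1), and that total is what must fit under --param early-inlining-insns. For example, a callee called from 3 other sites whose inlining grows each caller by 10 insns is charged 10 * 4 = 40 against the budget.
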
@@ -639,14 +691,13 @@ compute_uninlined_call_time (struct cgraph_edge *edge,
                         ? edge->caller->global.inlined_to
                         : edge->caller);
 
-  if (edge->count && caller->count)
-    uninlined_call_time *= (sreal)edge->count / caller->count;
-  if (edge->frequency)
-    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
+  sreal freq = edge->sreal_frequency ();
+  if (freq > 0)
+    uninlined_call_time *= freq;
   else
     uninlined_call_time = uninlined_call_time >> 11;
 
-  sreal caller_time = inline_summaries->get (caller)->time;
+  sreal caller_time = ipa_fn_summaries->get (caller)->time;
   return uninlined_call_time + caller_time;
 }
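
Aside: with sreal_frequency () the expected cost of the call site is just the per-call time scaled by how often the edge runs per caller invocation; a zero frequency is treated as "very cold" (scaled down by 2^11) rather than free. A small sketch of the arithmetic with plain doubles (illustrative only):

#include <iostream>

/* Expected time contributed by one call site: per-call time scaled by
   the edge frequency; zero frequency falls back to dividing by 2^11,
   matching the >> 11 fallback above.  */
static double
uninlined_call_time (double call_time, double freq, double caller_time)
{
  if (freq > 0)
    call_time *= freq;
  else
    call_time /= 1 << 11;
  return call_time + caller_time;
}

int main ()
{
  std::cout << uninlined_call_time (10.0, 0.25, 100.0) << "\n"; /* 102.5 */
  std::cout << uninlined_call_time (10.0, 0.0, 100.0) << "\n";  /* ~100.005 */
}
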
 
@@ -660,19 +711,17 @@ compute_inlined_call_time (struct cgraph_edge *edge,
   cgraph_node *caller = (edge->caller->global.inlined_to 
                         ? edge->caller->global.inlined_to
                         : edge->caller);
-  sreal caller_time = inline_summaries->get (caller)->time;
+  sreal caller_time = ipa_fn_summaries->get (caller)->time;
 
-  if (edge->count && caller->count)
-    time *= (sreal)edge->count / caller->count;
-  if (edge->frequency)
-    time *= cgraph_freq_base_rec * edge->frequency;
+  sreal freq = edge->sreal_frequency ();
+  if (freq > 0)
+    time *= freq;
   else
     time = time >> 11;
 
   /* This calculation should match one in ipa-inline-analysis.c
      (estimate_edge_size_and_time).  */
-  time -= (sreal) edge->frequency
-          * ipa_call_summaries->get (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
+  time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
   time += caller_time;
   if (time <= 0)
     time = ((sreal) 1) >> 8;
@@ -691,9 +740,8 @@ big_speedup_p (struct cgraph_edge *e)
   sreal time = compute_uninlined_call_time (e, unspec_time);
   sreal inlined_time = compute_inlined_call_time (e, spec_time);
 
-  if (time - inlined_time
-      > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP))
-        * percent_rec)
+  if ((time - inlined_time) * 100
+      > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)))
     return true;
   return false;
 }
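
Aside: dropping the pre-computed percent_rec constant works because the test is simply cross-multiplied: time - inlined_time > time * inline-min-speedup / 100 becomes (time - inlined_time) * 100 > time * inline-min-speedup. For example, with time = 200, inlined_time = 150 and --param inline-min-speedup=20, the left side is 50 * 100 = 5000 and the right side 200 * 20 = 4000, so the edge qualifies as a big speedup.
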
@@ -707,7 +755,11 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
   bool want_inline = true;
   struct cgraph_node *callee = e->callee->ultimate_alias_target ();
 
-  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
+  /* Allow this function to be called before can_inline_edge_p,
+     since it's usually cheaper.  */
+  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
+    want_inline = false;
+  else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
     ;
   else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
@@ -720,16 +772,17 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
      promote non-inline functions to inline and we increase
      MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
   else if ((!DECL_DECLARED_INLINE_P (callee->decl)
-          && (!e->count || !e->maybe_hot_p ()))
-          && inline_summaries->get (callee)->min_size
+          && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
+          && ipa_fn_summaries->get (callee)->min_size
                - ipa_call_summaries->get (e)->call_stmt_size
              > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
     {
       e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
       want_inline = false;
     }
-  else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
-          && inline_summaries->get (callee)->min_size
+  else if ((DECL_DECLARED_INLINE_P (callee->decl)
+           || e->count.ipa ().nonzero_p ())
+          && ipa_fn_summaries->get (callee)->min_size
                - ipa_call_summaries->get (e)->call_stmt_size
              > 16 * MAX_INLINE_INSNS_SINGLE)
     {
@@ -741,28 +794,29 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
   else
     {
       int growth = estimate_edge_growth (e);
-      inline_hints hints = estimate_edge_hints (e);
-      bool big_speedup = big_speedup_p (e);
+      ipa_hints hints = estimate_edge_hints (e);
+      int big_speedup = -1; /* compute this lazily */
 
-      if (growth <= 0)
+      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
        ;
       /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggests that inlining given function is very profitable.  */
       else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= MAX_INLINE_INSNS_SINGLE
-              && ((!big_speedup
-                   && !(hints & (INLINE_HINT_indirect_call
+              && (growth >= MAX_INLINE_INSNS_SINGLE * 16
+                  || (!(hints & (INLINE_HINT_indirect_call
                                  | INLINE_HINT_known_hot
                                  | INLINE_HINT_loop_iterations
                                  | INLINE_HINT_array_index
-                                 | INLINE_HINT_loop_stride)))
-                  || growth >= MAX_INLINE_INSNS_SINGLE * 16))
+                                 | INLINE_HINT_loop_stride))
+                      && !(big_speedup = big_speedup_p (e)))))
        {
           e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
-              && !opt_for_fn (e->caller->decl, flag_inline_functions))
+              && !opt_for_fn (e->caller->decl, flag_inline_functions)
+              && growth >= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SMALL))
        {
          /* growth_likely_positive is expensive, always test it last.  */
           if (growth >= MAX_INLINE_INSNS_SINGLE
@@ -776,7 +830,6 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
         Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggests that
         inlining given function is very profitable.  */
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
-              && !big_speedup
               && !(hints & INLINE_HINT_known_hot)
               && growth >= ((hints & (INLINE_HINT_indirect_call
                                       | INLINE_HINT_loop_iterations
@@ -784,7 +837,8 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
                                       | INLINE_HINT_loop_stride))
                             ? MAX (MAX_INLINE_INSNS_AUTO,
                                    MAX_INLINE_INSNS_SINGLE)
-                            : MAX_INLINE_INSNS_AUTO))
+                            : MAX_INLINE_INSNS_AUTO)
+              && !(big_speedup == -1 ? big_speedup_p (e) : big_speedup))
        {
          /* growth_likely_positive is expensive, always test it last.  */
           if (growth >= MAX_INLINE_INSNS_SINGLE
@@ -827,7 +881,7 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
 {
   char const *reason = NULL;
   bool want_inline = true;
-  int caller_freq = CGRAPH_FREQ_BASE;
+  sreal caller_freq = 1;
   int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
 
   if (DECL_DECLARED_INLINE_P (edge->caller->decl))
@@ -838,62 +892,44 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
       reason = "recursive call is cold";
       want_inline = false;
     }
-  else if (max_count && !outer_node->count)
-    {
-      reason = "not executed in profile";
-      want_inline = false;
-    }
   else if (depth > max_depth)
     {
       reason = "--param max-inline-recursive-depth exceeded.";
       want_inline = false;
     }
-
-  if (outer_node->global.inlined_to)
-    caller_freq = outer_node->callers->frequency;
-
-  if (!caller_freq)
+  else if (outer_node->global.inlined_to
+          && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
     {
-      reason = "function is inlined and unlikely";
+      reason = "caller frequency is 0";
       want_inline = false;
     }
 
   if (!want_inline)
     ;
-  /* Inlining of self recursive function into copy of itself within other function
-     is transformation similar to loop peeling.
+  /* Inlining of self recursive function into copy of itself within other
+     function is transformation similar to loop peeling.
 
      Peeling is profitable if we can inline enough copies to make probability
      of actual call to the self recursive function very small.  Be sure that
      the probability of recursion is small.
 
      We ensure that the frequency of recursing is at most 1 - (1/max_depth).
-     This way the expected number of recision is at most max_depth.  */
+     This way the expected number of recursions is at most max_depth.  */
   else if (peeling)
     {
-      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
-                                        / max_depth);
+      sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
       int i;
       for (i = 1; i < depth; i++)
-       max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
-      if (max_count
-         && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
-             >= max_prob))
-       {
-         reason = "profile of recursive call is too large";
-         want_inline = false;
-       }
-      if (!max_count
-         && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
-             >= max_prob))
+       max_prob = max_prob * max_prob;
+      if (edge->sreal_frequency () >= max_prob * caller_freq)
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
     }
-  /* Recursive inlining, i.e. equivalent of unrolling, is profitable if recursion
-     depth is large.  We reduce function call overhead and increase chances that
-     things fit in hardware return predictor.
+  /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
+     recursion depth is large.  We reduce function call overhead and increase
+     chances that things fit in hardware return predictor.
 
      Recursive inlining might however increase cost of stack frame setup
      actually slowing down functions whose recursion tree is wide rather than
@@ -903,29 +939,23 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
      is tricky.  For now we disable recursive inlining when probability of self
      recursion is low. 
 
-     Recursive inlining of self recursive call within loop also results in large loop
-     depths that generally optimize badly.  We may want to throttle down inlining
-     in those cases.  In particular this seems to happen in one of libstdc++ rb tree
-     methods.  */
+     Recursive inlining of self recursive call within loop also results in
+     large loop depths that generally optimize badly.  We may want to throttle
+     down inlining in those cases.  In particular this seems to happen in one
+     of libstdc++ rb tree methods.  */
   else
     {
-      if (max_count
-         && (edge->count * 100 / outer_node->count
-             <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
-       {
-         reason = "profile of recursive call is too small";
-         want_inline = false;
-       }
-      else if (!max_count
-              && (edge->frequency * 100 / caller_freq
-                  <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
+      if (edge->sreal_frequency () * 100
+          <= caller_freq
+            * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
     }
-  if (!want_inline && dump_file)
-    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
+  if (!want_inline && dump_enabled_p ())
+    dump_printf_loc (MSG_MISSED_OPTIMIZATION, edge->call_stmt,
+                    "   not inlining recursively: %s\n", reason);
   return want_inline;
 }
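
Aside: the peeling bound is now computed directly in real-valued frequencies: the recursion probability must stay below max_prob = 1 - 1/max_depth, squared once per additional inlined depth, which keeps the expected recursion depth near max_depth. A tiny sketch of the thresholds this produces, assuming max_depth = 8:

#include <cstdio>

int main ()
{
  const int max_depth = 8;  /* --param max-inline-recursive-depth-auto */
  double max_prob = 1.0 - 1.0 / max_depth;  /* 0.875 */
  for (int depth = 1; depth <= 4; depth++)
    {
      printf ("depth %d: recursive frequency must stay below %.4f"
              " * caller_freq\n", depth, max_prob);
      max_prob *= max_prob;  /* squared per extra depth, as in the
                                loop above */
    }
}
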
 
@@ -939,12 +969,15 @@ check_callers (struct cgraph_node *node, void *has_hot_call)
   struct cgraph_edge *e;
    for (e = node->callers; e; e = e->next_caller)
      {
-       if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
+       if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
+          || !opt_for_fn (e->caller->decl, optimize))
         return true;
        if (!can_inline_edge_p (e, true))
          return true;
        if (e->recursive_p ())
         return true;
+       if (!can_inline_edge_by_limits_p (e, true))
+         return true;
        if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
         *(bool *)has_hot_call = true;
      }
@@ -1004,8 +1037,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
   int growth;
   sreal edge_time, unspec_edge_time;
   struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
-  struct inline_summary *callee_info = inline_summaries->get (callee);
-  inline_hints hints;
+  struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+  ipa_hints hints;
   cgraph_node *caller = (edge->caller->global.inlined_to 
                         ? edge->caller->global.inlined_to
                         : edge->caller);
@@ -1014,22 +1047,24 @@ edge_badness (struct cgraph_edge *edge, bool dump)
   edge_time = estimate_edge_time (edge, &unspec_edge_time);
   hints = estimate_edge_hints (edge);
   gcc_checking_assert (edge_time >= 0);
-  /* Check that inlined time is better, but tolerate some roundoff issues.  */
-  gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
+  /* Check that inlined time is better, but tolerate some roundoff issues.
+     FIXME: When callee profile drops to 0 we account calls more.  This
+     should be fixed by never doing that.  */
+  gcc_checking_assert ((edge_time * 100
+                       - callee_info->time * 101).to_int () <= 0
+                       || callee->count.ipa ().initialized_p ());
   gcc_checking_assert (growth <= callee_info->size);
 
   if (dump)
     {
-      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
-              xstrdup_for_dump (edge->caller->name ()),
-              edge->caller->order,
-              xstrdup_for_dump (callee->name ()),
-              edge->callee->order);
+      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
+              edge->caller->dump_name (),
+              edge->callee->dump_name ());
       fprintf (dump_file, "      size growth %i, time %f unspec %f ",
               growth,
               edge_time.to_double (),
               unspec_edge_time.to_double ());
-      dump_inline_hints (dump_file, hints);
+      ipa_dump_hints (dump_file, hints);
       if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
       fprintf (dump_file, "\n");
@@ -1062,7 +1097,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
      Again use negative value to make calls with profile appear hotter
      then calls without.
   */
-  else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
+  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
+          || caller->count.ipa ().nonzero_p ())
     {
       sreal numerator, denominator;
       int overall_growth;
@@ -1070,11 +1106,11 @@ edge_badness (struct cgraph_edge *edge, bool dump)
 
       numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
                   - inlined_time);
-      if (numerator == 0)
+      if (numerator <= 0)
        numerator = ((sreal) 1 >> 8);
-      if (caller->count)
-       numerator *= caller->count;
-      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
+      if (caller->count.ipa ().nonzero_p ())
+       numerator *= caller->count.ipa ().to_gcov_type ();
+      else if (caller->count.ipa ().initialized_p ())
        numerator = numerator >> 11;
       denominator = growth;
 
@@ -1100,22 +1136,21 @@ edge_badness (struct cgraph_edge *edge, bool dump)
          && callee_info->single_caller
          && !edge->caller->global.inlined_to
          /* ... and edges executed only conditionally ... */
-         && edge->frequency < CGRAPH_FREQ_BASE
+         && edge->sreal_frequency () < 1
          /* ... consider case where callee is not inline but caller is ... */
          && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
               && DECL_DECLARED_INLINE_P (caller->decl))
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
-                 && edge->frequency
-                    < CGRAPH_FREQ_BASE
-                      * PARAM_VALUE
-                         (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
+                 && edge->sreal_frequency () * 100
+                    < PARAM_VALUE
+                         (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
                  /* ... and do not overwrite user specified hints.   */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
        {
-         struct inline_summary *caller_info = inline_summaries->get (caller);
+         ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
          int caller_growth = caller_info->growth;
 
          /* Only apply the penalty when caller looks like inline candidate,
@@ -1145,7 +1180,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
            overall_growth += 256 * 256 - 256;
          denominator *= overall_growth;
         }
-      denominator *= inlined_time;
+      denominator *= ipa_fn_summaries->get (caller)->self_size + growth;
 
       badness = - numerator / denominator;
 
@@ -1158,11 +1193,12 @@ edge_badness (struct cgraph_edge *edge, bool dump)
                   " overall growth %i (current) %i (original)"
                   " %i (compensated)\n",
                   badness.to_double (),
-                 (double)edge->frequency / CGRAPH_FREQ_BASE,
-                  edge->count, caller->count,
+                  edge->sreal_frequency ().to_double (),
+                  edge->count.ipa ().initialized_p () ? edge->count.ipa ().to_gcov_type () : -1,
+                  caller->count.ipa ().initialized_p () ? caller->count.ipa ().to_gcov_type () : -1,
                   compute_uninlined_call_time (edge,
                                                unspec_edge_time).to_double (),
-                  compute_inlined_call_time (edge, edge_time).to_double (),
+                  inlined_time.to_double (),
                   estimate_growth (callee),
                   callee_info->growth, overall_growth);
        }
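
Aside: across the edge_badness hunks above, the badness of an edge is essentially -benefit/cost: the numerator is the time saved (scaled by the IPA profile count when one exists) and the denominator is growth times a size-based term (caller self size plus growth), replacing the old time-based denominator. A rough sketch of the shape of the metric with plain doubles and hypothetical values (not the exact GCC computation, which also folds in overall growth):

#include <cstdio>

/* Shape of the badness metric: more negative == more attractive,
   since the priority queue extracts the minimum key first.  */
static double
badness (double uninlined_time, double inlined_time, double count,
         int growth, int caller_self_size)
{
  double numerator = uninlined_time - inlined_time;
  if (numerator <= 0)
    numerator = 1.0 / 256;  /* keep a tiny positive benefit */
  numerator *= count;
  double denominator = (double) growth * (caller_self_size + growth);
  return -numerator / denominator;
}

int main ()
{
  /* A hot, small call beats a cold, large one.  */
  printf ("%g\n", badness (12.0, 10.0, 1000, 4, 120));  /* ~-4.03 */
  printf ("%g\n", badness (12.0, 10.0, 10, 60, 120));   /* ~-0.0019 */
}
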
@@ -1230,12 +1266,9 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
-                      "  decreasing badness %s/%i -> %s/%i, %f"
-                      " to %f\n",
-                      xstrdup_for_dump (edge->caller->name ()),
-                      edge->caller->order,
-                      xstrdup_for_dump (edge->callee->name ()),
-                      edge->callee->order,
+                      "  decreasing badness %s -> %s, %f to %f\n",
+                      edge->caller->dump_name (),
+                      edge->callee->dump_name (),
                       n->get_key ().to_double (),
                       badness.to_double ());
            }
@@ -1247,11 +1280,9 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
        if (dump_file && (dump_flags & TDF_DETAILS))
         {
           fprintf (dump_file,
-                   "  enqueuing call %s/%i -> %s/%i, badness %f\n",
-                   xstrdup_for_dump (edge->caller->name ()),
-                   edge->caller->order,
-                   xstrdup_for_dump (edge->callee->name ()),
-                   edge->callee->order,
+                   "  enqueuing call %s -> %s, badness %f\n",
+                   edge->caller->dump_name (),
+                   edge->callee->dump_name (),
                    badness.to_double ());
         }
       edge->aux = heap->insert (badness, edge);
@@ -1275,9 +1306,10 @@ reset_edge_caches (struct cgraph_node *node)
   if (where->global.inlined_to)
     where = where->global.inlined_to;
 
-  for (edge = where->callers; edge; edge = edge->next_caller)
-    if (edge->inline_failed)
-      reset_edge_growth_cache (edge);
+  if (edge_growth_cache != NULL)
+    for (edge = where->callers; edge; edge = edge->next_caller)
+      if (edge->inline_failed)
+       edge_growth_cache->remove (edge);
 
   FOR_EACH_ALIAS (where, ref)
     reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
@@ -1290,8 +1322,8 @@ reset_edge_caches (struct cgraph_node *node)
       e = e->callee->callees;
     else
       {
-       if (e->inline_failed)
-         reset_edge_growth_cache (e);
+       if (edge_growth_cache != NULL && e->inline_failed)
+         edge_growth_cache->remove (e);
        if (e->next_callee)
          e = e->next_callee;
        else
@@ -1321,10 +1353,10 @@ update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
   struct cgraph_edge *edge;
   struct ipa_ref *ref;
 
-  if ((!node->alias && !inline_summaries->get (node)->inlinable)
+  if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
       || node->global.inlined_to)
     return;
-  if (!bitmap_set_bit (updated_nodes, node->uid))
+  if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
     return;
 
   FOR_EACH_ALIAS (node, ref)
@@ -1340,7 +1372,8 @@ update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
            || check_inlinablity_for == edge)
          {
            if (can_inline_edge_p (edge, false)
-               && want_inline_small_function_p (edge, false))
+               && want_inline_small_function_p (edge, false)
+               && can_inline_edge_by_limits_p (edge, false))
              update_edge_key (heap, edge);
            else if (edge->aux)
              {
@@ -1379,12 +1412,14 @@ update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
            don't need updating.  */
        if (e->inline_failed
            && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
-           && inline_summaries->get (callee)->inlinable
+           && ipa_fn_summaries->get (callee) != NULL
+           && ipa_fn_summaries->get (callee)->inlinable
            && avail >= AVAIL_AVAILABLE
-           && !bitmap_bit_p (updated_nodes, callee->uid))
+           && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
          {
            if (can_inline_edge_p (e, false)
-               && want_inline_small_function_p (e, false))
+               && want_inline_small_function_p (e, false)
+               && can_inline_edge_by_limits_p (e, false))
              update_edge_key (heap, e);
            else if (e->aux)
              {
@@ -1423,13 +1458,7 @@ lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
     if (e->callee == node
        || (e->callee->ultimate_alias_target (&avail, e->caller) == node
            && avail > AVAIL_INTERPOSABLE))
-      {
-       /* When profile feedback is available, prioritize by expected number
-          of calls.  */
-        heap->insert (!max_count ? -e->frequency
-                     : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
-                     e);
-      }
+      heap->insert (-e->sreal_frequency (), e);
   for (e = where->callees; e; e = e->next_callee)
     if (!e->inline_failed)
       lookup_recursive_calls (node, e->callee, heap);
@@ -1477,7 +1506,8 @@ recursive_inlining (struct cgraph_edge *edge,
       struct cgraph_edge *curr = heap.extract_min ();
       struct cgraph_node *cnode, *dest = curr->callee;
 
-      if (!can_inline_edge_p (curr, true))
+      if (!can_inline_edge_p (curr, true)
+         || !can_inline_edge_by_limits_p (curr, true))
        continue;
 
       /* MASTER_CLONE is produced in the case we already started modified
@@ -1487,13 +1517,15 @@ recursive_inlining (struct cgraph_edge *edge,
       if (master_clone)
        {
          curr->redirect_callee (master_clone);
-         reset_edge_growth_cache (curr);
+         if (edge_growth_cache != NULL)
+           edge_growth_cache->remove (curr);
        }
 
       if (estimate_size_after_inlining (node, curr) > limit)
        {
          curr->redirect_callee (dest);
-         reset_edge_growth_cache (curr);
+         if (edge_growth_cache != NULL)
+           edge_growth_cache->remove (curr);
          break;
        }
 
@@ -1507,7 +1539,8 @@ recursive_inlining (struct cgraph_edge *edge,
       if (!want_inline_self_recursive_call_p (curr, node, false, depth))
        {
          curr->redirect_callee (dest);
-         reset_edge_growth_cache (curr);
+         if (edge_growth_cache != NULL)
+           edge_growth_cache->remove (curr);
          continue;
        }
 
@@ -1515,10 +1548,11 @@ recursive_inlining (struct cgraph_edge *edge,
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
-         if (node->count)
+         if (node->count.nonzero_p ())
            {
              fprintf (dump_file, " called approx. %.2f times per call",
-                      (double)curr->count / node->count);
+                      (double)curr->count.to_gcov_type ()
+                      / node->count.to_gcov_type ());
            }
          fprintf (dump_file, "\n");
        }
@@ -1526,13 +1560,13 @@ recursive_inlining (struct cgraph_edge *edge,
        {
          /* We need original clone to copy around.  */
          master_clone = node->create_clone (node->decl, node->count,
-           CGRAPH_FREQ_BASE, false, vNULL,
-           true, NULL, NULL);
+           false, vNULL, true, NULL, NULL);
          for (e = master_clone->callees; e; e = e->next_callee)
            if (!e->inline_failed)
-             clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
+             clone_inlined_nodes (e, true, false, NULL);
          curr->redirect_callee (master_clone);
-          reset_edge_growth_cache (curr);
+         if (edge_growth_cache != NULL)
+           edge_growth_cache->remove (curr);
        }
 
       inline_call (curr, false, new_edges, &overall_size, true);
@@ -1546,14 +1580,14 @@ recursive_inlining (struct cgraph_edge *edge,
   if (!master_clone)
     return false;
 
-  if (dump_file)
-    fprintf (dump_file,
-            "\n   Inlined %i times, "
-            "body grown from size %i to %i, time %f to %f\n", n,
-            inline_summaries->get (master_clone)->size,
-            inline_summaries->get (node)->size,
-            inline_summaries->get (master_clone)->time.to_double (),
-            inline_summaries->get (node)->time.to_double ());
+  if (dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, edge->call_stmt,
+                    "\n   Inlined %i times, "
+                    "body grown from size %i to %i, time %f to %f\n", n,
+                    ipa_fn_summaries->get (master_clone)->size,
+                    ipa_fn_summaries->get (node)->size,
+                    ipa_fn_summaries->get (master_clone)->time.to_double (),
+                    ipa_fn_summaries->get (node)->time.to_double ());
 
   /* Remove master clone we used for inlining.  We rely that clones inlined
      into master clone gets queued just before master clone so we don't
@@ -1597,7 +1631,8 @@ add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
       gcc_assert (!edge->aux);
       if (edge->inline_failed
          && can_inline_edge_p (edge, true)
-         && want_inline_small_function_p (edge, true))
+         && want_inline_small_function_p (edge, true)
+         && can_inline_edge_by_limits_p (edge, true))
         edge->aux = heap->insert (edge_badness (edge, false), edge);
     }
 }
@@ -1658,7 +1693,9 @@ speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
   if (!anticipate_inlining && e->inline_failed && !target->local.local)
     return false;
   /* For overwritable targets there is not much to do.  */
-  if (e->inline_failed && !can_inline_edge_p (e, false, true))
+  if (e->inline_failed
+      && (!can_inline_edge_p (e, false)
+         || !can_inline_edge_by_limits_p (e, false, true)))
     return false;
   /* OK, speculation seems interesting.  */
   return true;
@@ -1677,10 +1714,11 @@ resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
                                  ? node->global.inlined_to : node;
       auto_bitmap updated_nodes;
 
-      spec_rem += edge->count;
+      if (edge->count.ipa ().initialized_p ())
+        spec_rem += edge->count.ipa ();
       edge->resolve_speculation ();
       reset_edge_caches (where);
-      inline_update_overall_summary (where);
+      ipa_update_overall_fn_summary (where);
       update_caller_keys (edge_heap, where,
                          updated_nodes, NULL);
       update_callee_keys (edge_heap, where,
@@ -1740,17 +1778,18 @@ inline_small_functions (void)
   /* Compute overall unit size and other global parameters used by badness
      metrics.  */
 
-  max_count = 0;
-  ipa_reduced_postorder (order, true, true, NULL);
+  max_count = profile_count::uninitialized ();
+  ipa_reduced_postorder (order, true, NULL);
   free (order);
 
   FOR_EACH_DEFINED_FUNCTION (node)
     if (!node->global.inlined_to)
       {
        if (!node->alias && node->analyzed
-           && (node->has_gimple_body_p () || node->thunk.thunk_p))
+           && (node->has_gimple_body_p () || node->thunk.thunk_p)
+           && opt_for_fn (node->decl, optimize))
          {
-           struct inline_summary *info = inline_summaries->get (node);
+           struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
            struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
 
            /* Do not account external functions, they will be optimized out
@@ -1769,22 +1808,23 @@ inline_small_functions (void)
                struct cgraph_node *n2;
                int id = dfs->scc_no + 1;
                for (n2 = node; n2;
-                    n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
-                 {
-                   struct inline_summary *info2 = inline_summaries->get (n2);
-                   if (info2->scc_no)
-                     break;
-                   info2->scc_no = id;
-                 }
+                    n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
+                 if (opt_for_fn (n2->decl, optimize))
+                   {
+                     ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
+                     if (info2->scc_no)
+                       break;
+                     info2->scc_no = id;
+                   }
              }
          }
 
        for (edge = node->callers; edge; edge = edge->next_caller)
-         if (max_count < edge->count)
-           max_count = edge->count;
+         max_count = max_count.max (edge->count.ipa ());
       }
   ipa_free_postorder_info ();
-  initialize_growth_caches ();
+  edge_growth_cache
+    = new call_summary<edge_growth_cache_entry *> (symtab, false);
 
   if (dump_file)
     fprintf (dump_file,
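
Aside: initialize_growth_caches () is gone; the growth cache is now an ordinary call summary allocated on demand, and every reset site invalidates entries through edge_growth_cache->remove (edge) behind a NULL guard. A generic sketch of that cache-with-explicit-invalidation pattern, with std::unordered_map standing in for GCC's call_summary:

#include <iostream>
#include <unordered_map>

struct edge { int uid; };
struct growth_entry { int size; double time; };

/* Per-edge cache that callers must invalidate explicitly when the
   underlying bodies change; a missing entry means "recompute".  */
static std::unordered_map<int, growth_entry> *edge_growth_cache;

static growth_entry
estimate_growth (const edge &e)
{
  if (edge_growth_cache)
    {
      auto it = edge_growth_cache->find (e.uid);
      if (it != edge_growth_cache->end ())
        return it->second;
    }
  growth_entry fresh {e.uid * 3, e.uid * 1.5};  /* stand-in computation */
  if (edge_growth_cache)
    (*edge_growth_cache)[e.uid] = fresh;
  return fresh;
}

int main ()
{
  edge_growth_cache = new std::unordered_map<int, growth_entry>;
  edge e {7};
  estimate_growth (e);               /* fills the cache */
  edge_growth_cache->erase (e.uid);  /* analogue of ...->remove (edge) */
  std::cout << estimate_growth (e).size << "\n";
  delete edge_growth_cache;
}
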
@@ -1803,9 +1843,11 @@ inline_small_functions (void)
       struct cgraph_edge *next = NULL;
       bool has_speculative = false;
 
+      if (!opt_for_fn (node->decl, optimize))
+       continue;
+
       if (dump_file)
-       fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
-                node->name (), node->order);
+       fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
 
       for (edge = node->callees; edge; edge = next)
        {
@@ -1814,6 +1856,7 @@ inline_small_functions (void)
              && !edge->aux
              && can_inline_edge_p (edge, true)
              && want_inline_small_function_p (edge, true)
+             && can_inline_edge_by_limits_p (edge, true)
              && edge->inline_failed)
            {
              gcc_assert (!edge->aux);
@@ -1834,7 +1877,7 @@ inline_small_functions (void)
        {
          struct cgraph_node *where = node->global.inlined_to
                                      ? node->global.inlined_to : node;
-         inline_update_overall_summary (where);
+         ipa_update_overall_fn_summary (where);
          reset_edge_caches (where);
           update_caller_keys (&edge_heap, where,
                              updated_nodes, NULL);
@@ -1845,7 +1888,7 @@ inline_small_functions (void)
     }
 
   gcc_assert (in_lto_p
-             || !max_count
+             || !(max_count > 0)
              || (profile_info && flag_branch_probabilities));
 
   while (!edge_heap.empty ())
@@ -1863,37 +1906,44 @@ inline_small_functions (void)
        continue;
 
 #if CHECKING_P
-      /* Be sure that caches are maintained consistent.  */
-      sreal cached_badness = edge_badness (edge, false);
-      int old_size_est = estimate_edge_size (edge);
-      sreal old_time_est = estimate_edge_time (edge);
-      int old_hints_est = estimate_edge_hints (edge);
-
-      reset_edge_growth_cache (edge);
-      gcc_assert (old_size_est == estimate_edge_size (edge));
-      gcc_assert (old_time_est == estimate_edge_time (edge));
-      /* FIXME:
-
-         gcc_assert (old_hints_est == estimate_edge_hints (edge));
-
-        fails with profile feedback because some hints depends on
-        maybe_hot_edge_p predicate and because callee gets inlined to other
-        calls, the edge may become cold.
-        This ought to be fixed by computing relative probabilities
-        for given invocation but that will be better done once whole
-        code is converted to sreals.  Disable for now and revert to "wrong"
-        value so enable/disable checking paths agree.  */
-      edge_growth_cache[edge->uid].hints = old_hints_est + 1;
-
-      /* When updating the edge costs, we only decrease badness in the keys.
-        Increases of badness are handled lazilly; when we see key with out
-        of date value on it, we re-insert it now.  */
-      current_badness = edge_badness (edge, false);
-      /* Disable checking for profile because roundoff errors may cause slight
-         deviations in the order.  */
-      gcc_assert (max_count || cached_badness == current_badness);
-      gcc_assert (current_badness >= badness);
+      /* Be sure that caches are maintained consistent.
+        This check is affected by scaling roundoff errors when compiling for
+        IPA, so we skip it in that case.  */
+      if (!edge->callee->count.ipa_p ()
+         && (!max_count.initialized_p () || !max_count.nonzero_p ()))
+       {
+         sreal cached_badness = edge_badness (edge, false);
+
+         int old_size_est = estimate_edge_size (edge);
+         sreal old_time_est = estimate_edge_time (edge);
+         int old_hints_est = estimate_edge_hints (edge);
+
+         if (edge_growth_cache != NULL)
+           edge_growth_cache->remove (edge);
+         gcc_assert (old_size_est == estimate_edge_size (edge));
+         gcc_assert (old_time_est == estimate_edge_time (edge));
+         /* FIXME:
+
+            gcc_assert (old_hints_est == estimate_edge_hints (edge));
+
+            fails with profile feedback because some hints depend on
+            maybe_hot_edge_p predicate and because callee gets inlined to other
+            calls, the edge may become cold.
+            This ought to be fixed by computing relative probabilities
+            for given invocation but that will be better done once whole
+            code is converted to sreals.  Disable for now and revert to "wrong"
+            value so enable/disable checking paths agree.  */
+         edge_growth_cache->get (edge)->hints = old_hints_est + 1;
+
+         /* When updating the edge costs, we only decrease badness in the keys.
+            Increases of badness are handled lazily; when we see a key with
+            an out-of-date value on it, we re-insert it now.  */
+         current_badness = edge_badness (edge, false);
+         gcc_assert (cached_badness == current_badness);
+         gcc_assert (current_badness >= badness);
+       }
+      else
+        current_badness = edge_badness (edge, false);
 #else
       current_badness = edge_badness (edge, false);
 #endif
@@ -1908,7 +1958,8 @@ inline_small_functions (void)
            badness = current_badness;
        }
 
-      if (!can_inline_edge_p (edge, true))
+      if (!can_inline_edge_p (edge, true)
+         || !can_inline_edge_by_limits_p (edge, true))
        {
          resolve_noninline_speculation (&edge_heap, edge);
          continue;
@@ -1919,13 +1970,13 @@ inline_small_functions (void)
       if (dump_file)
        {
          fprintf (dump_file,
-                  "\nConsidering %s/%i with %i size\n",
-                  callee->name (), callee->order,
-                  inline_summaries->get (callee)->size);
+                  "\nConsidering %s with %i size\n",
+                  callee->dump_name (),
+                  ipa_fn_summaries->get (callee)->size);
          fprintf (dump_file,
-                  " to be inlined into %s/%i in %s:%i\n"
+                  " to be inlined into %s in %s:%i\n"
                   " Estimated badness is %f, frequency %.2f.\n",
-                  edge->caller->name (), edge->caller->order,
+                  edge->caller->dump_name (),
                   edge->call_stmt
                   && (LOCATION_LOCUS (gimple_location ((const gimple *)
                                                        edge->call_stmt))
@@ -1936,10 +1987,13 @@ inline_small_functions (void)
                   ? gimple_lineno ((const gimple *) edge->call_stmt)
                   : -1,
                   badness.to_double (),
-                  edge->frequency / (double)CGRAPH_FREQ_BASE);
-         if (edge->count)
-           fprintf (dump_file," Called %" PRId64"x\n",
-                    edge->count);
+                  edge->sreal_frequency ().to_double ());
+         if (edge->count.ipa ().initialized_p ())
+           {
+             fprintf (dump_file, " Called ");
+             edge->count.ipa ().dump (dump_file);
+             fprintf (dump_file, " times\n");
+            }
          if (dump_flags & TDF_DETAILS)
            edge_badness (edge, true);
        }
@@ -2037,24 +2091,25 @@ inline_small_functions (void)
       update_caller_keys (&edge_heap, where, updated_nodes, NULL);
       /* Offline copy count has possibly changed, recompute if profile is
         available.  */
-      if (max_count)
-        {
-         struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
-         if (n != edge->callee && n->analyzed)
-           update_callee_keys (&edge_heap, n, updated_nodes);
-        }
+      struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
+      if (n != edge->callee && n->analyzed && n->count.ipa ().initialized_p ())
+       update_callee_keys (&edge_heap, n, updated_nodes);
       bitmap_clear (updated_nodes);
 
-      if (dump_file)
+      if (dump_enabled_p ())
        {
-         fprintf (dump_file,
-                  " Inlined %s into %s which now has time %f and size %i, "
-                  "net change of %+i.\n",
-                  edge->callee->name (),
-                  edge->caller->name (),
-                  inline_summaries->get (edge->caller)->time.to_double (),
-                  inline_summaries->get (edge->caller)->size,
-                  overall_size - old_size);
+         ipa_fn_summary *s = ipa_fn_summaries->get (edge->caller);
+
+         /* dump_printf can't handle %+i.  */
+         char buf_net_change[100];
+         snprintf (buf_net_change, sizeof buf_net_change, "%+i",
+                   overall_size - old_size);
+
+         dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
+                          " Inlined %C into %C which now has time %f and "
+                          "size %i, net change of %s.\n",
+                          edge->callee, edge->caller,
+                          s->time.to_double (), s->size, buf_net_change);
        }
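
The snprintf detour above exists because, as the comment notes, the dump_printf machinery lacks a "%+i" directive, so the signed size delta is pre-formatted into an ordinary string. A standalone illustration of the workaround (plain C, not GCC code):

  #include <stdio.h>

  int
  main (void)
  {
    int overall_size = 120, old_size = 150;
    char buf_net_change[100];

    /* "%+i" forces a sign even for positive deltas.  */
    snprintf (buf_net_change, sizeof buf_net_change, "%+i",
              overall_size - old_size);
    printf ("net change of %s.\n", buf_net_change);  /* prints "net change of -30." */
    return 0;
  }
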
       if (min_size > overall_size)
        {
@@ -2067,11 +2122,11 @@ inline_small_functions (void)
     }
 
   free_growth_caches ();
-  if (dump_file)
-    fprintf (dump_file,
-            "Unit growth for small function inlining: %i->%i (%i%%)\n",
-            initial_size, overall_size,
-            initial_size ? overall_size * 100 / (initial_size) - 100: 0);
+  if (dump_enabled_p ())
+    dump_printf (MSG_NOTE,
+                "Unit growth for small function inlining: %i->%i (%i%%)\n",
+                initial_size, overall_size,
+                initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
   symtab->remove_edge_removal_hook (edge_removal_hook_holder);
 }
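
The percentage in the message above is plain integer arithmetic, overall_size * 100 / initial_size - 100, with a guard against division by zero: growth from 1000 to 1100 size units reports 10%. A self-contained check (plain C, not GCC code):

  #include <stdio.h>

  int
  main (void)
  {
    int initial_size = 1000, overall_size = 1100;

    printf ("Unit growth for small function inlining: %i->%i (%i%%)\n",
            initial_size, overall_size,
            initial_size ? overall_size * 100 / initial_size - 100 : 0);
    return 0;
  }
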
 
@@ -2096,12 +2151,12 @@ flatten_function (struct cgraph_node *node, bool early)
       /* We've hit cycle?  It is time to give up.  */
       if (callee->aux)
        {
-         if (dump_file)
-           fprintf (dump_file,
-                    "Not inlining %s into %s to avoid cycle.\n",
-                    xstrdup_for_dump (callee->name ()),
-                    xstrdup_for_dump (e->caller->name ()));
-         e->inline_failed = CIF_RECURSIVE_INLINING;
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "Not inlining %C into %C to avoid cycle.\n",
+                            callee, e->caller);
+         if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
+           e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }
 
@@ -2118,30 +2173,33 @@ flatten_function (struct cgraph_node *node, bool early)
         too.  */
       if (!early
          ? !can_inline_edge_p (e, true)
+           && !can_inline_edge_by_limits_p (e, true)
          : !can_early_inline_edge_p (e))
        continue;
 
       if (e->recursive_p ())
        {
-         if (dump_file)
-           fprintf (dump_file, "Not inlining: recursive call.\n");
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "Not inlining: recursive call.\n");
          continue;
        }
 
       if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
        {
-         if (dump_file)
-           fprintf (dump_file, "Not inlining: SSA form does not match.\n");
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "Not inlining: SSA form does not match.\n");
          continue;
        }
 
       /* Inline the edge and flatten the inline clone.  Avoid
          recursing through the original node if the node was cloned.  */
-      if (dump_file)
-       fprintf (dump_file, " Inlining %s into %s.\n",
-                xstrdup_for_dump (callee->name ()),
-                xstrdup_for_dump (e->caller->name ()));
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
+                        " Inlining %C into %C.\n",
+                        callee, e->caller);
       orig_callee = callee;
       inline_call (e, true, NULL, NULL, false);
       if (e->callee != orig_callee)
@@ -2153,7 +2211,7 @@ flatten_function (struct cgraph_node *node, bool early)
 
   node->aux = NULL;
   if (!node->global.inlined_to)
-    inline_update_overall_summary (node);
+    ipa_update_overall_fn_summary (node);
 }
 
 /* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
@@ -2172,6 +2230,7 @@ inline_to_all_callers_1 (struct cgraph_node *node, void *data,
       struct cgraph_node *caller = node->callers->caller;
 
       if (!can_inline_edge_p (node->callers, true)
+         || !can_inline_edge_by_limits_p (node->callers, true)
          || node->callers->recursive_p ())
        {
          if (dump_file)
@@ -2182,14 +2241,15 @@ inline_to_all_callers_1 (struct cgraph_node *node, void *data,
 
       if (dump_file)
        {
+         cgraph_node *ultimate = node->ultimate_alias_target ();
          fprintf (dump_file,
                   "\nInlining %s size %i.\n",
-                  node->name (),
-                  inline_summaries->get (node)->size);
+                  ultimate->name (),
+                  ipa_fn_summaries->get (ultimate)->size);
          fprintf (dump_file,
                   " Called once from %s %i insns.\n",
                   node->callers->caller->name (),
-                  inline_summaries->get (node->callers->caller)->size);
+                  ipa_fn_summaries->get (node->callers->caller)->size);
        }
 
       /* Remember which callers we inlined to, delaying updating the
@@ -2200,7 +2260,7 @@ inline_to_all_callers_1 (struct cgraph_node *node, void *data,
        fprintf (dump_file,
                 " Inlined into %s which now has %i size\n",
                 caller->name (),
-                inline_summaries->get (caller)->size);
+                ipa_fn_summaries->get (caller)->size);
       if (!(*num_calls)--)
        {
          if (dump_file)
@@ -2226,7 +2286,7 @@ inline_to_all_callers (struct cgraph_node *node, void *data)
      we have a lot of calls to the same function.  */
   for (hash_set<cgraph_node *>::iterator i = callers.begin ();
        i != callers.end (); ++i)
-    inline_update_overall_summary (*i);
+    ipa_update_overall_fn_summary (*i);
   return res;
 }
 
@@ -2241,9 +2301,13 @@ dump_overall_stats (void)
     if (!node->global.inlined_to
        && !node->alias)
       {
-       sreal time = inline_summaries->get (node)->time;
-       sum += time;
-       sum_weighted += time * node->count;
+       ipa_fn_summary *s = ipa_fn_summaries->get (node);
+       if (s != NULL)
+         {
+           sum += s->time;
+           if (node->count.ipa ().initialized_p ())
+             sum_weighted += s->time * node->count.ipa ().to_gcov_type ();
+         }
       }
   fprintf (dump_file, "Overall time estimate: "
           "%f weighted by profile: "
@@ -2261,11 +2325,14 @@ dump_inline_stats (void)
   int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
   int64_t  inlined_speculative = 0, inlined_speculative_ply = 0;
   int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
-  int64_t reason[CIF_N_REASONS][3];
+  int64_t reason[CIF_N_REASONS][2];
+  sreal reason_freq[CIF_N_REASONS];
   int i;
   struct cgraph_node *node;
 
   memset (reason, 0, sizeof (reason));
+  for (i = 0; i < CIF_N_REASONS; i++)
+    reason_freq[i] = 0;
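
The per-reason statistics are reorganized here: the old three integer columns shrink to two (profile count in [0], number of calls in [1]), while the summed frequency moves to a parallel sreal array because edge frequencies are no longer integral. Schematically (declarations as in this diff; the comments are editorial):

  int64_t reason[CIF_N_REASONS][2];  /* [0] = profile count, [1] = number of calls */
  sreal reason_freq[CIF_N_REASONS];  /* sum of edge->sreal_frequency () per reason */
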
   FOR_EACH_DEFINED_FUNCTION (node)
   {
     struct cgraph_edge *e;
@@ -2273,56 +2340,59 @@ dump_inline_stats (void)
       {
        if (e->inline_failed)
          {
-           reason[(int) e->inline_failed][0] += e->count;
-           reason[(int) e->inline_failed][1] += e->frequency;
-           reason[(int) e->inline_failed][2] ++;
-           if (DECL_VIRTUAL_P (e->callee->decl))
+           if (e->count.ipa ().initialized_p ())
+             reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
+           reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
+           reason[(int) e->inline_failed][1] ++;
+           if (DECL_VIRTUAL_P (e->callee->decl)
+               && e->count.ipa ().initialized_p ())
              {
                if (e->indirect_inlining_edge)
-                 noninlined_virt_indir_cnt += e->count;
+                 noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
                else
-                 noninlined_virt_cnt += e->count;
+                 noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
              }
-           else
+           else if (e->count.ipa ().initialized_p ())
              {
                if (e->indirect_inlining_edge)
-                 noninlined_indir_cnt += e->count;
+                 noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
                else
-                 noninlined_cnt += e->count;
+                 noninlined_cnt += e->count.ipa ().to_gcov_type ();
              }
          }
-       else
+       else if (e->count.ipa ().initialized_p ())
          {
            if (e->speculative)
              {
                if (DECL_VIRTUAL_P (e->callee->decl))
-                 inlined_speculative_ply += e->count;
+                 inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
                else
-                 inlined_speculative += e->count;
+                 inlined_speculative += e->count.ipa ().to_gcov_type ();
              }
            else if (DECL_VIRTUAL_P (e->callee->decl))
              {
                if (e->indirect_inlining_edge)
-                 inlined_virt_indir_cnt += e->count;
+                 inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
                else
-                 inlined_virt_cnt += e->count;
+                 inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
              }
            else
              {
                if (e->indirect_inlining_edge)
-                 inlined_indir_cnt += e->count;
+                 inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
                else
-                 inlined_cnt += e->count;
+                 inlined_cnt += e->count.ipa ().to_gcov_type ();
              }
          }
       }
     for (e = node->indirect_calls; e; e = e->next_callee)
-      if (e->indirect_info->polymorphic)
-       indirect_poly_cnt += e->count;
-      else
-       indirect_cnt += e->count;
+      if (e->indirect_info->polymorphic
+         && e->count.ipa ().initialized_p ())
+       indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
+      else if (e->count.ipa ().initialized_p ())
+       indirect_cnt += e->count.ipa ().to_gcov_type ();
   }
-  if (max_count)
+  if (max_count.initialized_p ())
     {
       fprintf (dump_file,
               "Inlined %" PRId64 " + speculative "
@@ -2341,17 +2411,30 @@ dump_inline_stats (void)
               inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
               noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
               noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
-      fprintf (dump_file,
-              "Removed speculations %" PRId64 "\n",
-              spec_rem);
+      fprintf (dump_file, "Removed speculations ");
+      spec_rem.dump (dump_file);
+      fprintf (dump_file, "\n");
     }
   dump_overall_stats ();
   fprintf (dump_file, "\nWhy inlining failed?\n");
   for (i = 0; i < CIF_N_REASONS; i++)
-    if (reason[i][2])
-      fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
+    if (reason[i][1])
+      fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64" count\n",
               cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
-              (int) reason[i][2], (int) reason[i][1], reason[i][0]);
+              (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
+}
+
+/* Called when NODE is removed.  Record it in the set passed through DATA
+   if it carries the "flatten" attribute.  */
+
+static void
+flatten_remove_node_hook (struct cgraph_node *node, void *data)
+{
+  if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
+    return;
+
+  hash_set<struct cgraph_node *> *removed
+    = (hash_set<struct cgraph_node *> *) data;
+  removed->add (node);
 }
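
This hook feeds the PR82801 workaround below: flattening one function can delete other flatten candidates still waiting in the worklist, so the hook records the victims and the main loop skips them. A minimal standalone sketch of the pattern (plain C++, not GCC code; all names are illustrative):

  #include <cstdio>
  #include <set>
  #include <vector>

  static std::set<int> removed;   /* stands in for flatten_removed_nodes */

  static void removal_hook (int node) { removed.insert (node); }

  int
  main ()
  {
    std::vector<int> worklist = { 3, 2, 1 };

    for (int node : worklist)
      {
        if (removed.count (node))
          continue;               /* deleted as a side effect of an earlier step */
        std::printf ("processing %d\n", node);
        if (node == 3)
          removal_hook (1);       /* processing 3 happens to remove 1 */
      }
    return 0;
  }
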
 
 /* Decide on the inlining.  We do so in the topological order to avoid
@@ -2363,22 +2446,17 @@ ipa_inline (void)
   struct cgraph_node *node;
   int nnodes;
   struct cgraph_node **order;
-  int i;
+  int i, j;
   int cold;
   bool remove_functions = false;
 
-  if (!optimize)
-    return 0;
-
-  cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
-  percent_rec = (sreal) 1 / (sreal) 100;
-
   order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
 
   if (dump_file)
-    dump_inline_summaries (dump_file);
+    ipa_dump_fn_summaries (dump_file);
 
   nnodes = ipa_reverse_postorder (order);
+  spec_rem = profile_count::zero ();
 
   FOR_EACH_FUNCTION (node)
     {
@@ -2401,26 +2479,56 @@ ipa_inline (void)
   if (dump_file)
     fprintf (dump_file, "\nFlattening functions:\n");
 
+  /* First compact the order array so that its tail contains only the
+     nodes with the flatten attribute.  */
+  for (i = nnodes - 1, j = i; i >= 0; i--)
+    {
+      node = order[i];
+      if (lookup_attribute ("flatten",
+                           DECL_ATTRIBUTES (node->decl)) != NULL)
+       order[j--] = order[i];
+    }
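
The loop above compacts matching entries toward the end of the array in place; since j starts equal to i and falls no faster, order[j--] = order[i] only ever writes to the current slot or to one already visited, and the relative order of the matches is preserved. A standalone sketch with a stand-in predicate (plain C, not GCC code):

  #include <stdbool.h>
  #include <stdio.h>

  static bool has_flatten (int v) { return v % 2 != 0; }  /* stand-in for the attribute test */

  int
  main (void)
  {
    int order[] = { 10, 11, 12, 13, 14 };
    int n = 5, i, j;

    for (i = n - 1, j = i; i >= 0; i--)
      if (has_flatten (order[i]))
        order[j--] = order[i];

    /* order[j + 1] .. order[n - 1] now hold the matches: 11, 13.  */
    for (i = j + 1; i < n; i++)
      printf ("%d\n", order[i]);
    return 0;
  }
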
+
+  /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
+     the nodes with the flatten attribute.  If there is more than one
+     such node, we need to register a node removal hook, as
+     flatten_function could remove other nodes with the flatten
+     attribute.  See PR82801.  */
+  struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
+  hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
+  if (j < nnodes - 2)
+    {
+      flatten_removed_nodes = new hash_set<struct cgraph_node *>;
+      node_removal_hook_holder
+       = symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
+                                          flatten_removed_nodes);
+    }
+
   /* In the first pass handle functions to be flattened.  Do this with
      a priority so none of our later choices will make this impossible.  */
-  for (i = nnodes - 1; i >= 0; i--)
+  for (i = nnodes - 1; i > j; i--)
     {
       node = order[i];
+      if (flatten_removed_nodes
+         && flatten_removed_nodes->contains (node))
+       continue;
 
       /* Handle nodes to be flattened.
         Ideally when processing callees we stop inlining at the
         entry of cycles, possibly cloning that entry point and
         try to flatten itself turning it into a self-recursive
         function.  */
-      if (lookup_attribute ("flatten",
-                           DECL_ATTRIBUTES (node->decl)) != NULL)
-       {
-         if (dump_file)
-           fprintf (dump_file,
-                    "Flattening %s\n", node->name ());
-         flatten_function (node, false);
-       }
+      if (dump_file)
+       fprintf (dump_file, "Flattening %s\n", node->name ());
+      flatten_function (node, false);
     }
+
+  if (j < nnodes - 2)
+    {
+      symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
+      delete flatten_removed_nodes;
+    }
+  free (order);
+
   if (dump_file)
     dump_overall_stats ();
 
@@ -2432,7 +2540,6 @@ ipa_inline (void)
      inline functions and virtual functions so we really know what is called
      once.  */
   symtab->remove_unreachable_nodes (dump_file);
-  free (order);
 
   /* Inline functions with a property that after inlining into all callers the
      code size will shrink because the out-of-line copy is eliminated. 
@@ -2465,13 +2572,18 @@ ipa_inline (void)
          struct cgraph_edge *edge, *next;
          bool update=false;
 
+         if (!opt_for_fn (node->decl, optimize)
+             || !opt_for_fn (node->decl, flag_inline_functions_called_once))
+           continue;
+
          for (edge = node->callees; edge; edge = next)
            {
              next = edge->next_callee;
              if (edge->speculative && !speculation_useful_p (edge, false))
                {
+                 if (edge->count.ipa ().initialized_p ())
+                   spec_rem += edge->count.ipa ();
                  edge->resolve_speculation ();
-                 spec_rem += edge->count;
                  update = true;
                  remove_functions = true;
                }
@@ -2481,7 +2593,7 @@ ipa_inline (void)
              struct cgraph_node *where = node->global.inlined_to
                                          ? node->global.inlined_to : node;
              reset_edge_caches (where);
-             inline_update_overall_summary (where);
+             ipa_update_overall_fn_summary (where);
            }
          if (want_inline_function_to_all_callers_p (node, cold))
            {
@@ -2497,22 +2609,17 @@ ipa_inline (void)
     }
 
   /* Free ipa-prop structures if they are no longer needed.  */
-  if (optimize)
-    ipa_free_all_structures_after_iinln ();
+  ipa_free_all_structures_after_iinln ();
 
+  if (dump_enabled_p ())
+    dump_printf (MSG_NOTE,
+                "\nInlined %i calls, eliminated %i functions\n\n",
+                ncalls_inlined, nfunctions_inlined);
   if (dump_file)
-    {
-      fprintf (dump_file,
-              "\nInlined %i calls, eliminated %i functions\n\n",
-              ncalls_inlined, nfunctions_inlined);
-      dump_inline_stats ();
-    }
+    dump_inline_stats ();
 
   if (dump_file)
-    dump_inline_summaries (dump_file);
-  /* In WPA we use inline summaries for partitioning process.  */
-  if (!flag_wpa)
-    inline_free_summary ();
+    ipa_dump_fn_summaries (dump_file);
   return remove_functions ? TODO_remove_functions : 0;
 }
 
@@ -2532,9 +2639,10 @@ inline_always_inline_functions (struct cgraph_node *node)
 
       if (e->recursive_p ())
        {
-         if (dump_file)
-           fprintf (dump_file, "  Not inlining recursive call to %s.\n",
-                    e->callee->name ());
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "  Not inlining recursive call to %C.\n",
+                            e->callee);
          e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }
@@ -2550,15 +2658,15 @@ inline_always_inline_functions (struct cgraph_node *node)
          continue;
        }
 
-      if (dump_file)
-       fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
-                xstrdup_for_dump (e->callee->name ()),
-                xstrdup_for_dump (e->caller->name ()));
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
+                        "  Inlining %C into %C (always_inline).\n",
+                        e->callee, e->caller);
       inline_call (e, true, NULL, NULL, false);
       inlined = true;
     }
   if (inlined)
-    inline_update_overall_summary (node);
+    ipa_update_overall_fn_summary (node);
 
   return inlined;
 }
@@ -2575,8 +2683,12 @@ early_inline_small_functions (struct cgraph_node *node)
   for (e = node->callees; e; e = e->next_callee)
     {
       struct cgraph_node *callee = e->callee->ultimate_alias_target ();
-      if (!inline_summaries->get (callee)->inlinable
-         || !e->inline_failed)
+
+      /* We can encounter a not-yet-analyzed function during
+        early inlining on callgraphs with strongly
+        connected components.  */
+      ipa_fn_summary *s = ipa_fn_summaries->get (callee);
+      if (s == NULL || !s->inlinable || !e->inline_failed)
        continue;
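
The NULL check above relies on a distinction this patch introduces consistently: in the reworked summary tables, get () returns NULL when no summary has been computed yet (possible here because early inlining can walk strongly connected components), while get_create (), used further down in this file, allocates a blank summary on demand. Schematically, with the two calls as they appear in this diff:

  ipa_fn_summary *s = ipa_fn_summaries->get (callee);           /* may be NULL */
  ipa_call_summary *es = ipa_call_summaries->get_create (edge); /* never NULL */
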
 
       /* Do not consider functions not declared inline.  */
@@ -2585,33 +2697,35 @@ early_inline_small_functions (struct cgraph_node *node)
          && !opt_for_fn (node->decl, flag_inline_functions))
        continue;
 
-      if (dump_file)
-       fprintf (dump_file, "Considering inline candidate %s.\n",
-                callee->name ());
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_NOTE, e->call_stmt,
+                        "Considering inline candidate %C.\n",
+                        callee);
 
       if (!can_early_inline_edge_p (e))
        continue;
 
       if (e->recursive_p ())
        {
-         if (dump_file)
-           fprintf (dump_file, "  Not inlining: recursive call.\n");
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
+                            "  Not inlining: recursive call.\n");
          continue;
        }
 
       if (!want_early_inline_function_p (e))
        continue;
 
-      if (dump_file)
-       fprintf (dump_file, " Inlining %s into %s.\n",
-                xstrdup_for_dump (callee->name ()),
-                xstrdup_for_dump (e->caller->name ()));
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
+                        " Inlining %C into %C.\n",
+                        callee, e->caller);
       inline_call (e, true, NULL, NULL, false);
       inlined = true;
     }
 
   if (inlined)
-    inline_update_overall_summary (node);
+    ipa_update_overall_fn_summary (node);
 
   return inlined;
 }
@@ -2641,13 +2755,6 @@ early_inliner (function *fun)
     node->verify ();
   node->remove_all_references ();
 
-  /* Rebuild this reference because it dosn't depend on
-     function's body and it's required to pass cgraph_node
-     verification.  */
-  if (node->instrumented_version
-      && !node->instrumentation_clone)
-    node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
-
   /* Even when not optimizing or not inlining inline always-inline
      functions.  */
   inlined = inline_always_inline_functions (node);
@@ -2672,9 +2779,9 @@ early_inliner (function *fun)
     {
       /* When the function is marked to be flattened, recursively inline
         all calls in it.  */
-      if (dump_file)
-       fprintf (dump_file,
-                "Flattening %s\n", node->name ());
+      if (dump_enabled_p ())
+       dump_printf (MSG_OPTIMIZED_LOCATIONS,
+                    "Flattening %C\n", node);
       flatten_function (node, true);
       inlined = true;
     }
@@ -2692,13 +2799,16 @@ early_inliner (function *fun)
             statements that don't have inline parameters computed.  */
          for (edge = node->callees; edge; edge = edge->next_callee)
            {
-             struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+             /* We can encounter a not-yet-analyzed function during
+                early inlining on callgraphs with strongly
+                connected components.  */
+             ipa_call_summary *es = ipa_call_summaries->get_create (edge);
              es->call_stmt_size
                = estimate_num_insns (edge->call_stmt, &eni_size_weights);
              es->call_stmt_time
                = estimate_num_insns (edge->call_stmt, &eni_time_weights);
            }
-         inline_update_overall_summary (node);
+         ipa_update_overall_fn_summary (node);
          inlined = false;
          timevar_pop (TV_INTEGRATION);
        }
@@ -2717,7 +2827,7 @@ early_inliner (function *fun)
          for (edge = node->callees; edge; edge = edge->next_callee)
            {
              /* We have no summary for new bound store calls yet.  */
-             struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+             ipa_call_summary *es = ipa_call_summaries->get_create (edge);
              es->call_stmt_size
                = estimate_num_insns (edge->call_stmt, &eni_size_weights);
              es->call_stmt_time
@@ -2732,7 +2842,7 @@ early_inliner (function *fun)
                }
            }
          if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
-           inline_update_overall_summary (node);
+           ipa_update_overall_fn_summary (node);
          timevar_pop (TV_INTEGRATION);
          iterations++;
          inlined = false;
@@ -2818,9 +2928,9 @@ class pass_ipa_inline : public ipa_opt_pass_d
 public:
   pass_ipa_inline (gcc::context *ctxt)
     : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
-                     inline_generate_summary, /* generate_summary */
-                     inline_write_summary, /* write_summary */
-                     inline_read_summary, /* read_summary */
+                     NULL, /* generate_summary */
+                     NULL, /* write_summary */
+                     NULL, /* read_summary */
                      NULL, /* write_optimization_summary */
                      NULL, /* read_optimization_summary */
                      NULL, /* stmt_fixup */