Adjust by-value function vec arguments to by-reference.

diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 4dd4de157f16603b66ee4e8db77acee233d2012b..413446bcc46cd22b42ae90b7024135b8210a38e3 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -1,5 +1,5 @@
 /* Inlining decision heuristics.
-   Copyright (C) 2003-2019 Free Software Foundation, Inc.
+   Copyright (C) 2003-2021 Free Software Foundation, Inc.
    Contributed by Jan Hubicka
 
 This file is part of GCC.
@@ -178,13 +178,13 @@ caller_growth_limits (struct cgraph_edge *e)
   if (limit < what_size_info->self_size)
     limit = what_size_info->self_size;
 
-  limit += limit * param_large_function_growth / 100;
+  limit += limit * opt_for_fn (to->decl, param_large_function_growth) / 100;
 
   /* Check the size after inlining against the function limits.  But allow
      the function to shrink if it went over the limits by forced inlining.  */
   newsize = estimate_size_after_inlining (to, e);
   if (newsize >= ipa_size_summaries->get (what)->size
-      && newsize > param_large_function_insns
+      && newsize > opt_for_fn (to->decl, param_large_function_insns)
       && newsize > limit)
     {
       e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
@@ -200,7 +200,8 @@ caller_growth_limits (struct cgraph_edge *e)
      on every invocation of the caller (i.e. its call statement dominates
      exit block).  We do not track this information, yet.  */
   stack_size_limit += ((gcov_type)stack_size_limit
-                      * param_stack_frame_growth / 100);
+                      * opt_for_fn (to->decl, param_stack_frame_growth)
+                      / 100);
 
   inlined_stack = (ipa_get_stack_frame_offset (to)
                   + outer_info->estimated_self_stack_size
@@ -213,7 +214,7 @@ caller_growth_limits (struct cgraph_edge *e)
         This bit overoptimistically assume that we are good at stack
         packing.  */
       && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
-      && inlined_stack > param_large_stack_frame)
+      && inlined_stack > opt_for_fn (to->decl, param_large_stack_frame))
     {
       e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
       return false;
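
The hunks above switch the growth caps from global parameters to per-caller lookups via opt_for_fn, so the limits follow the optimization settings of the function being inlined into; the arithmetic itself is unchanged.  A minimal standalone sketch of that arithmetic in plain C++ (the parameter values below are invented for illustration and stand in for --param large-function-growth and --param stack-frame-growth):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical parameter values, for illustration only.
    const int large_function_growth = 100;  // percent
    const int stack_frame_growth = 1000;    // percent

    int main ()
    {
      int64_t limit = 400;              // pretend caller size in "insns"
      limit += limit * large_function_growth / 100;             // 400 -> 800

      int64_t stack_size_limit = 256;   // pretend caller stack estimate
      stack_size_limit += stack_size_limit * stack_frame_growth / 100;  // 256 -> 2816

      printf ("size limit %lld, stack limit %lld\n",
              (long long) limit, (long long) stack_size_limit);
      return 0;
    }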
@@ -263,18 +264,29 @@ sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
   if (!caller || !callee)
     return true;
 
-  /* Allow inlining always_inline functions into no_sanitize_address
-     functions.  */
-  if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
-      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
+  /* Follow clang and allow inlining for always_inline functions.  */
+  if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
     return true;
 
-  return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
-          == sanitize_flags_p (SANITIZE_ADDRESS, callee))
-         && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
-             == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
-         && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
-             == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
+  const sanitize_code codes[] =
+    {
+      SANITIZE_ADDRESS,
+      SANITIZE_THREAD,
+      SANITIZE_UNDEFINED,
+      SANITIZE_UNDEFINED_NONDEFAULT,
+      SANITIZE_POINTER_COMPARE,
+      SANITIZE_POINTER_SUBTRACT
+    };
+
+  for (unsigned i = 0; i < sizeof (codes) / sizeof (codes[0]); i++)
+    if (sanitize_flags_p (codes[i], caller)
+       != sanitize_flags_p (codes[i], callee))
+      return false;
+
+  if (sanitize_coverage_p (caller) != sanitize_coverage_p (callee))
+    return false;
+
+  return true;
 }
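
The rewrite of sanitize_attrs_match_for_inline_p above replaces the address-sanitizer-only comparison with a loop over several sanitizer kinds, adds a sanitizer-coverage check, and (following clang) accepts always_inline callees unconditionally.  Below is a minimal standalone sketch of the same mismatch test using a plain bitmask; the enum values and the helper name are invented stand-ins for GCC's sanitize_flags_p machinery:

    #include <cstdio>

    // Illustrative flag bits standing in for the sanitizer kinds checked above.
    enum sanitize_bits {
      SAN_ADDRESS = 1 << 0, SAN_THREAD = 1 << 1, SAN_UNDEFINED = 1 << 2,
      SAN_UNDEFINED_NONDEFAULT = 1 << 3, SAN_PTR_COMPARE = 1 << 4,
      SAN_PTR_SUBTRACT = 1 << 5,
    };

    // True when caller and callee agree on every checked sanitizer, or the
    // callee is always_inline (which wins, as in the new code above).
    static bool sanitize_flags_compatible (unsigned caller, unsigned callee,
                                           bool callee_always_inline)
    {
      if (callee_always_inline)
        return true;
      const unsigned codes[] = { SAN_ADDRESS, SAN_THREAD, SAN_UNDEFINED,
                                 SAN_UNDEFINED_NONDEFAULT, SAN_PTR_COMPARE,
                                 SAN_PTR_SUBTRACT };
      for (unsigned code : codes)
        if ((caller & code) != (callee & code))
          return false;
      return true;
    }

    int main ()
    {
      printf ("%d\n", sanitize_flags_compatible (SAN_ADDRESS, 0, false)); // 0
      printf ("%d\n", sanitize_flags_compatible (SAN_ADDRESS, 0, true));  // 1
      return 0;
    }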
 
 /* Used for flags where it is safe to inline when caller's value is
@@ -381,7 +393,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
   /* Don't inline a function with mismatched sanitization attributes. */
   else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
     {
-      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
+      e->inline_failed = CIF_SANITIZE_ATTRIBUTE_MISMATCH;
       inlinable = false;
     }
   if (!inlinable && report)
@@ -389,37 +401,44 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
   return inlinable;
 }
 
-/* Return inlining_insns_single limit for function N. If HINT is true
+/* Return inlining_insns_single limit for function N.  If HINT or HINT2 is true
    scale up the bound.  */
 
 static int
-inline_insns_single (cgraph_node *n, bool hint)
+inline_insns_single (cgraph_node *n, bool hint, bool hint2)
 {
-  if (opt_for_fn (n->decl, optimize) >= 3)
+  if (hint && hint2)
     {
-      if (hint)
-       return param_max_inline_insns_single
-              * param_inline_heuristics_hint_percent / 100;
-      return param_max_inline_insns_single;
-    }
-  else
-    {
-      if (hint)
-       return param_max_inline_insns_single_o2
-              * param_inline_heuristics_hint_percent_o2 / 100;
-      return param_max_inline_insns_single_o2;
+      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
+      spd = spd * spd;
+      if (spd > 1000000)
+       spd = 1000000;
+      return opt_for_fn (n->decl, param_max_inline_insns_single) * spd / 100;
     }
+  if (hint || hint2)
+    return opt_for_fn (n->decl, param_max_inline_insns_single)
+          * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
+  return opt_for_fn (n->decl, param_max_inline_insns_single);
 }
 
-/* Return inlining_insns_auto limit for function N. If HINT is true
+/* Return inlining_insns_auto limit for function N.  If HINT or HINT2 is true
    scale up the bound.   */
 
 static int
-inline_insns_auto (cgraph_node *n, bool hint)
+inline_insns_auto (cgraph_node *n, bool hint, bool hint2)
 {
   int max_inline_insns_auto = opt_for_fn (n->decl, param_max_inline_insns_auto);
-  if (hint)
-    return max_inline_insns_auto * param_inline_heuristics_hint_percent / 100;
+  if (hint && hint2)
+    {
+      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
+      spd = spd * spd;
+      if (spd > 1000000)
+       spd = 1000000;
+      return max_inline_insns_auto * spd / 100;
+    }
+  if (hint || hint2)
+    return max_inline_insns_auto
+          * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
   return max_inline_insns_auto;
 }
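
Both helpers above now take a second hint flag for the new builtin_constant_p hint group: one matching group scales the base limit by param_inline_heuristics_hint_percent, and both groups together scale it by the square of that percentage, capped at 1,000,000.  The sketch below mirrors the branch structure of the hunk above in a standalone form; the parameter values are invented (the real ones are read with opt_for_fn) and it only illustrates the scaling:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical values standing in for --param max-inline-insns-single
    // and --param inline-heuristics-hint-percent.
    const int max_insns_single = 200;
    const int hint_percent = 200;

    // Mirrors the branch structure of inline_insns_single above.
    static int insns_limit (bool hint, bool hint2)
    {
      if (hint && hint2)
        {
          int64_t spd = hint_percent;
          spd = spd * spd;      // both hint groups: square the percentage
          if (spd > 1000000)
            spd = 1000000;      // cap the combined scale factor
          return max_insns_single * spd / 100;
        }
      if (hint || hint2)
        return max_insns_single * hint_percent / 100;
      return max_insns_single;
    }

    int main ()
    {
      printf ("plain %d, one hint %d, both hints %d\n",
              insns_limit (false, false), insns_limit (true, false),
              insns_limit (true, true));
      return 0;
    }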
 
@@ -493,6 +512,7 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
      else if (check_match (flag_wrapv)
              || check_match (flag_trapv)
              || check_match (flag_pcc_struct_return)
+             || check_maybe_down (optimize_debug)
              /* When caller or callee does FP math, be sure FP codegen flags
                 compatible.  */
              || ((caller_info->fp_expressions && callee_info->fp_expressions)
@@ -563,10 +583,10 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
-         if (growth > param_max_inline_insns_size
+         if (growth > opt_for_fn (caller->decl, param_max_inline_insns_size)
              && (!DECL_DECLARED_INLINE_P (callee->decl)
-                 && growth >= MAX (inline_insns_single (caller, false),
-                                   inline_insns_auto (caller, false))))
+                 && growth >= MAX (inline_insns_single (caller, false, false),
+                                   inline_insns_auto (caller, false, false))))
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
@@ -675,9 +695,7 @@ want_early_inline_function_p (struct cgraph_edge *e)
       /* First take care of very large functions.  */
       int min_growth = estimate_min_edge_growth (e), growth = 0;
       int n;
-      int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
-                                ? param_early_inlining_insns
-                                : param_early_inlining_insns_o2;
+      int early_inlining_insns = param_early_inlining_insns;
 
       if (min_growth > early_inlining_insns)
        {
@@ -711,10 +729,8 @@ want_early_inline_function_p (struct cgraph_edge *e)
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
-                            "growth %i exceeds --param early-inlining-insns%s\n",
-                            e->caller, callee, growth,
-                            opt_for_fn (e->caller->decl, optimize) >= 3
-                            ? "" : "-O2");
+                            "growth %i exceeds --param early-inlining-insns\n",
+                            e->caller, callee, growth);
          want_inline = false;
        }
       else if ((n = num_calls (callee)) != 0
@@ -723,11 +739,9 @@ want_early_inline_function_p (struct cgraph_edge *e)
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
-                            "growth %i exceeds --param early-inlining-insns%s "
+                            "growth %i exceeds --param early-inlining-insns "
                             "divided by number of calls\n",
-                            e->caller, callee, growth,
-                            opt_for_fn (e->caller->decl, optimize) >= 3
-                            ? "" : "-O2");
+                            e->caller, callee, growth);
          want_inline = false;
        }
     }
@@ -811,7 +825,7 @@ inlining_speedup (struct cgraph_edge *edge,
 }
 
 /* Return true if the speedup for inlining E is bigger than
-   PARAM_MAX_INLINE_MIN_SPEEDUP.  */
+   param_inline_min_speedup.  */
 
 static bool
 big_speedup_p (struct cgraph_edge *e)
@@ -824,9 +838,7 @@ big_speedup_p (struct cgraph_edge *e)
   cgraph_node *caller = (e->caller->inlined_to
                         ? e->caller->inlined_to
                         : e->caller);
-  int limit = opt_for_fn (caller->decl, optimize) >= 3
-             ? param_inline_min_speedup
-             : param_inline_min_speedup_o2;
+  int limit = opt_for_fn (caller->decl, param_inline_min_speedup);
 
   if ((time - inlined_time) * 100 > time * limit)
     return true;
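
big_speedup_p above now reads param_inline_min_speedup from the caller instead of picking between the -O3 and -O2 variants.  The test itself, (time - inlined_time) * 100 > time * limit, asks whether inlining removes more than limit percent of the caller's estimated time.  A tiny standalone illustration of that comparison, with invented numbers:

    #include <cstdio>

    // True when inlining saves more than min_speedup percent of TIME.
    static bool big_speedup (double time, double inlined_time, int min_speedup)
    {
      return (time - inlined_time) * 100 > time * min_speedup;
    }

    int main ()
    {
      // With a hypothetical min_speedup of 15%: saving 200 of 1000 time units
      // (20%) qualifies, saving 100 (10%) does not.
      printf ("%d %d\n", big_speedup (1000, 800, 15), big_speedup (1000, 900, 15));
      return 0;
    }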
@@ -841,6 +853,8 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
 {
   bool want_inline = true;
   struct cgraph_node *callee = e->callee->ultimate_alias_target ();
+  cgraph_node *to  = (e->caller->inlined_to
+                     ? e->caller->inlined_to : e->caller);
 
   /* Allow this function to be called before can_inline_edge_p,
      since it's usually cheaper.  */
@@ -860,7 +874,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
           && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
           && ipa_fn_summaries->get (callee)->min_size
                - ipa_call_summaries->get (e)->call_stmt_size
-             > inline_insns_auto (e->caller, true))
+             > inline_insns_auto (e->caller, true, true))
     {
       e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
       want_inline = false;
@@ -869,51 +883,51 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
            || e->count.ipa ().nonzero_p ())
           && ipa_fn_summaries->get (callee)->min_size
                - ipa_call_summaries->get (e)->call_stmt_size
-             > inline_insns_single (e->caller, true))
+             > inline_insns_single (e->caller, true, true))
     {
-      if (opt_for_fn (e->caller->decl, optimize) >= 3)
-       e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
-                           ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
-                           : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
-      else
-       e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
-                             ? CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT
-                             : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
+      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
+                         ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
+                         : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
       want_inline = false;
     }
   else
     {
       int growth = estimate_edge_growth (e);
       ipa_hints hints = estimate_edge_hints (e);
+      /* We have two independent groups of hints.  If one matches in each
+        of the groups, the limits are increased.  If both groups match, the
+        limit is increased even more.  */
       bool apply_hints = (hints & (INLINE_HINT_indirect_call
                                   | INLINE_HINT_known_hot
                                   | INLINE_HINT_loop_iterations
                                   | INLINE_HINT_loop_stride));
+      bool apply_hints2 = (hints & INLINE_HINT_builtin_constant_p);
 
-      if (growth <= param_max_inline_insns_size)
+      if (growth <= opt_for_fn (to->decl,
+                               param_max_inline_insns_size))
        ;
       /* Apply param_max_inline_insns_single limit.  Do not do so when
         hints suggests that inlining given function is very profitable.
         Avoid computation of big_speedup_p when not necessary to change
         outcome of decision.  */
       else if (DECL_DECLARED_INLINE_P (callee->decl)
-              && growth >= inline_insns_single (e->caller, apply_hints)
-              && (apply_hints
-                  || growth >= inline_insns_single (e->caller, true)
+              && growth >= inline_insns_single (e->caller, apply_hints,
+                                                apply_hints2)
+              && (apply_hints || apply_hints2
+                  || growth >= inline_insns_single (e->caller, true,
+                                                    apply_hints2)
                   || !big_speedup_p (e)))
        {
-         if (opt_for_fn (e->caller->decl, optimize) >= 3)
-            e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
-         else
-            e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_O2_LIMIT;
+          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions)
-              && growth >= param_max_inline_insns_small)
+              && growth >= opt_for_fn (to->decl,
+                                       param_max_inline_insns_small))
        {
          /* growth_positive_p is expensive, always test it last.  */
-          if (growth >= inline_insns_single (e->caller, false)
+         if (growth >= inline_insns_single (e->caller, false, false)
              || growth_positive_p (callee, e, growth))
            {
               e->inline_failed = CIF_NOT_DECLARED_INLINED;
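
The hunk above splits the inlining hints into two independent groups, as described in the new comment: indirect_call, known_hot, loop_iterations and loop_stride form the first group, and the new builtin_constant_p hint forms the second.  A minimal standalone sketch of that classification (the enum bits are invented stand-ins for the INLINE_HINT_* flags):

    #include <cstdio>

    // Illustrative hint bits standing in for the INLINE_HINT_* flags.
    enum hint_bits {
      HINT_INDIRECT_CALL      = 1 << 0,
      HINT_KNOWN_HOT          = 1 << 1,
      HINT_LOOP_ITERATIONS    = 1 << 2,
      HINT_LOOP_STRIDE        = 1 << 3,
      HINT_BUILTIN_CONSTANT_P = 1 << 4,
    };

    int main ()
    {
      unsigned hints = HINT_LOOP_STRIDE | HINT_BUILTIN_CONSTANT_P;

      // Group 1: the profitability hints honored before this patch.
      bool apply_hints = hints & (HINT_INDIRECT_CALL | HINT_KNOWN_HOT
                                  | HINT_LOOP_ITERATIONS | HINT_LOOP_STRIDE);
      // Group 2: the new builtin_constant_p hint.
      bool apply_hints2 = hints & HINT_BUILTIN_CONSTANT_P;

      // One matching group relaxes the size limits once; both groups relax
      // them further, via inline_insns_single/inline_insns_auto above.
      printf ("group1=%d group2=%d\n", apply_hints, apply_hints2);
      return 0;
    }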
@@ -923,13 +937,15 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
       /* Apply param_max_inline_insns_auto limit for functions not declared
         inline.  Bypass the limit when speedup seems big.  */
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
-              && growth >= inline_insns_auto (e->caller, apply_hints)
-              && (apply_hints
-                  || growth >= inline_insns_auto (e->caller, true)
+              && growth >= inline_insns_auto (e->caller, apply_hints,
+                                              apply_hints2)
+              && (apply_hints || apply_hints2
+                  || growth >= inline_insns_auto (e->caller, true,
+                                                  apply_hints2)
                   || !big_speedup_p (e)))
        {
          /* growth_positive_p is expensive, always test it last.  */
-          if (growth >= inline_insns_single (e->caller, false)
+         if (growth >= inline_insns_single (e->caller, false, false)
              || growth_positive_p (callee, e, growth))
            {
              e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
@@ -938,7 +954,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
        }
       /* If call is cold, do not inline when function body would grow. */
       else if (!e->maybe_hot_p ()
-              && (growth >= inline_insns_single (e->caller, false)
+              && (growth >= inline_insns_single (e->caller, false, false)
                   || growth_positive_p (callee, e, growth)))
        {
           e->inline_failed = CIF_UNLIKELY_CALL;
@@ -951,7 +967,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
 }
 
 /* EDGE is self recursive edge.
-   We hand two cases - when function A is inlining into itself
+   We handle two cases - when function A is inlining into itself
    or when function A is being inlined into another inliner copy of function
    A within function B.  
 
@@ -970,10 +986,12 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
   char const *reason = NULL;
   bool want_inline = true;
   sreal caller_freq = 1;
-  int max_depth = param_max_inline_recursive_depth_auto;
+  int max_depth = opt_for_fn (outer_node->decl,
+                             param_max_inline_recursive_depth_auto);
 
   if (DECL_DECLARED_INLINE_P (edge->caller->decl))
-    max_depth = param_max_inline_recursive_depth;
+    max_depth = opt_for_fn (outer_node->decl,
+                           param_max_inline_recursive_depth);
 
   if (!edge->maybe_hot_p ())
     {
@@ -1035,7 +1053,8 @@ want_inline_self_recursive_call_p (struct cgraph_edge *edge,
     {
       if (edge->sreal_frequency () * 100
           <= caller_freq
-            * param_min_inline_recursive_probability)
+            * opt_for_fn (outer_node->decl,
+                          param_min_inline_recursive_probability))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
@@ -1119,8 +1138,8 @@ static bool
 wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
 {
   return size < (DECL_DECLARED_INLINE_P (where->decl)
-                ? inline_insns_single (where, false)
-                : inline_insns_auto (where, false));
+                ? inline_insns_single (where, false, false)
+                : inline_insns_auto (where, false, false));
 }
 
 /* A cost model driving the inlining heuristics in a way so the edges with
@@ -1241,7 +1260,9 @@ edge_badness (struct cgraph_edge *edge, bool dump)
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
-                 && freq * 100 < param_partial_inlining_entry_probability
+                 && freq * 100
+                        < opt_for_fn (caller->decl,
+                                      param_partial_inlining_entry_probability)
                  /* ... and do not overwrite user specified hints.   */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
@@ -1323,6 +1344,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
                | INLINE_HINT_loop_stride))
       || callee_info->growth <= 0)
     badness = badness.shift (badness > 0 ? -2 : 2);
+  if (hints & INLINE_HINT_builtin_constant_p)
+    badness = badness.shift (badness > 0 ? -4 : 4);
   if (hints & (INLINE_HINT_same_scc))
     badness = badness.shift (badness > 0 ? 3 : -3);
   else if (hints & (INLINE_HINT_in_scc))
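
The new lines above give edges whose callee benefits from a known __builtin_constant_p argument a stronger priority boost than the loop/indirect-call hints: the badness is shifted by 4 binary digits (a factor of 16) instead of 2 (a factor of 4), in whichever direction makes the edge look more profitable.  A plain-double illustration of the relative weighting (GCC's sreal type and its shift method are not used here):

    #include <cstdio>

    // Scale a positive badness score down by 2^bits; smaller badness means
    // the edge is considered earlier, mimicking the effect of the shifts above.
    static double favor (double badness, int bits)
    {
      return badness / (1 << bits);
    }

    int main ()
    {
      double badness = 1024.0;
      printf ("loop hints: %g, builtin_constant_p hint: %g\n",
              favor (badness, 2),   // shift by 2 -> 4x more attractive
              favor (badness, 4));  // shift by 4 -> 16x more attractive
      return 0;
    }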
@@ -1599,7 +1622,10 @@ static bool
 recursive_inlining (struct cgraph_edge *edge,
                    vec<cgraph_edge *> *new_edges)
 {
-  int limit = param_max_inline_insns_recursive_auto;
+  cgraph_node *to  = (edge->caller->inlined_to
+                     ? edge->caller->inlined_to : edge->caller);
+  int limit = opt_for_fn (to->decl,
+                         param_max_inline_insns_recursive_auto);
   edge_heap_t heap (sreal::min ());
   struct cgraph_node *node;
   struct cgraph_edge *e;
@@ -1612,7 +1638,7 @@ recursive_inlining (struct cgraph_edge *edge,
     node = node->inlined_to;
 
   if (DECL_DECLARED_INLINE_P (node->decl))
-    limit = param_max_inline_insns_recursive;
+    limit = opt_for_fn (to->decl, param_max_inline_insns_recursive);
 
   /* Make sure that function is small enough to be considered for inlining.  */
   if (estimate_size_after_inlining (node, edge)  >= limit)
@@ -1623,8 +1649,7 @@ recursive_inlining (struct cgraph_edge *edge,
 
   if (dump_file)
     fprintf (dump_file,
-            "  Performing recursive inlining on %s\n",
-            node->name ());
+            "  Performing recursive inlining on %s\n", node->dump_name ());
 
   /* Do the inlining and update list of recursive call during process.  */
   while (!heap.empty ())
@@ -1734,22 +1759,22 @@ recursive_inlining (struct cgraph_edge *edge,
 /* Given whole compilation unit estimate of INSNS, compute how large we can
    allow the unit to grow.  */
 
-static int
-compute_max_insns (int insns)
+static int64_t
+compute_max_insns (cgraph_node *node, int insns)
 {
   int max_insns = insns;
-  if (max_insns < param_large_unit_insns)
-    max_insns = param_large_unit_insns;
+  if (max_insns < opt_for_fn (node->decl, param_large_unit_insns))
+    max_insns = opt_for_fn (node->decl, param_large_unit_insns);
 
   return ((int64_t) max_insns
-         * (100 + param_inline_unit_growth) / 100);
+         * (100 + opt_for_fn (node->decl, param_inline_unit_growth)) / 100);
 }
 
 
 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
 
 static void
-add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
+add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> &new_edges)
 {
   while (new_edges.length () > 0)
     {
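
compute_max_insns above now takes the node so that param_large_unit_insns and param_inline_unit_growth are read per function, and it returns int64_t so the percentage scaling cannot overflow.  The formula is unchanged: the unit may grow to max(insns, large_unit_insns) plus inline-unit-growth percent on top.  A standalone sketch with invented parameter values:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical values standing in for --param large-unit-insns and
    // --param inline-unit-growth.
    const int large_unit_insns = 10000;
    const int inline_unit_growth = 40;  // percent

    static int64_t max_unit_size (int insns)
    {
      int max_insns = insns;
      if (max_insns < large_unit_insns)
        max_insns = large_unit_insns;   // small units get the floor value
      // Allow inline-unit-growth percent of growth on top of the base size.
      return (int64_t) max_insns * (100 + inline_unit_growth) / 100;
    }

    int main ()
    {
      printf ("%lld %lld\n", (long long) max_unit_size (2000),
              (long long) max_unit_size (50000));   // 14000 and 70000
      return 0;
    }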
@@ -1791,8 +1816,6 @@ speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
   enum availability avail;
   struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
                                                                 e->caller);
-  struct cgraph_edge *direct, *indirect;
-  struct ipa_ref *ref;
 
   gcc_assert (e->speculative && !e->indirect_unknown_callee);
 
@@ -1807,14 +1830,14 @@ speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
       int ecf_flags = flags_from_decl_or_type (target->decl);
       if (ecf_flags & ECF_CONST)
         {
-         e->speculative_call_info (direct, indirect, ref);
-         if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
+         if (!(e->speculative_call_indirect_edge ()->indirect_info
+               ->ecf_flags & ECF_CONST))
            return true;
         }
       else if (ecf_flags & ECF_PURE)
         {
-         e->speculative_call_info (direct, indirect, ref);
-         if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
+         if (!(e->speculative_call_indirect_edge ()->indirect_info
+               ->ecf_flags & ECF_PURE))
            return true;
         }
     }
@@ -1847,7 +1870,7 @@ resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
 
       if (edge->count.ipa ().initialized_p ())
         spec_rem += edge->count.ipa ();
-      edge->resolve_speculation ();
+      cgraph_edge::resolve_speculation (edge);
       reset_edge_caches (where);
       ipa_update_overall_fn_summary (where);
       update_caller_keys (edge_heap, where,
@@ -1906,7 +1929,7 @@ inline_small_functions (void)
   struct cgraph_edge *edge;
   edge_heap_t edge_heap (sreal::min ());
   auto_bitmap updated_nodes;
-  int min_size, max_size;
+  int min_size;
   auto_vec<cgraph_edge *> new_indirect_edges;
   int initial_size = 0;
   struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
@@ -1927,7 +1950,7 @@ inline_small_functions (void)
     if (!node->inlined_to)
       {
        if (!node->alias && node->analyzed
-           && (node->has_gimple_body_p () || node->thunk.thunk_p)
+           && (node->has_gimple_body_p () || node->thunk)
            && opt_for_fn (node->decl, optimize))
          {
            class ipa_fn_summary *info = ipa_fn_summaries->get (node);
@@ -1973,7 +1996,6 @@ inline_small_functions (void)
             initial_size);
 
   overall_size = initial_size;
-  max_size = compute_max_insns (overall_size);
   min_size = overall_size;
 
   /* Populate the heap with all edges we might inline.  */
@@ -1990,9 +2012,8 @@ inline_small_functions (void)
       if (dump_file)
        fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
 
-      for (edge = node->callees; edge; edge = next)
+      for (edge = node->callees; edge; edge = edge->next_callee)
        {
-         next = edge->next_callee;
          if (edge->inline_failed
              && !edge->aux
              && can_inline_edge_p (edge, true)
@@ -2013,7 +2034,7 @@ inline_small_functions (void)
            if (edge->speculative
                && !speculation_useful_p (edge, edge->aux != NULL))
              {
-               edge->resolve_speculation ();
+               cgraph_edge::resolve_speculation (edge);
                update = true;
              }
          }
@@ -2141,7 +2162,9 @@ inline_small_functions (void)
            edge_badness (edge, true);
        }
 
-      if (overall_size + growth > max_size
+      where = edge->caller;
+
+      if (overall_size + growth > compute_max_insns (where, min_size)
          && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
        {
          edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
@@ -2164,7 +2187,6 @@ inline_small_functions (void)
         specific inliner.  */
       if (edge->recursive_p ())
        {
-         where = edge->caller;
          if (where->inlined_to)
            where = where->inlined_to;
          if (!recursive_inlining (edge,
@@ -2269,16 +2291,16 @@ inline_small_functions (void)
 
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
                           " Inlined %C into %C which now has time %f and "
-                          "size %i, net change of %s.\n",
+                          "size %i, net change of %s%s.\n",
                           edge->callee, edge->caller,
                           s->time.to_double (),
                           ipa_size_summaries->get (edge->caller)->size,
-                          buf_net_change);
+                          buf_net_change,
+                          cross_module_call_p (edge) ? " (cross module)":"");
        }
       if (min_size > overall_size)
        {
          min_size = overall_size;
-         max_size = compute_max_insns (min_size);
 
          if (dump_file)
            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
@@ -2409,11 +2431,11 @@ inline_to_all_callers_1 (struct cgraph_node *node, void *data,
          cgraph_node *ultimate = node->ultimate_alias_target ();
          fprintf (dump_file,
                   "\nInlining %s size %i.\n",
-                  ultimate->name (),
+                  ultimate->dump_name (),
                   ipa_size_summaries->get (ultimate)->size);
          fprintf (dump_file,
                   " Called once from %s %i insns.\n",
-                  node->callers->caller->name (),
+                  node->callers->caller->dump_name (),
                   ipa_size_summaries->get (node->callers->caller)->size);
        }
 
@@ -2424,7 +2446,7 @@ inline_to_all_callers_1 (struct cgraph_node *node, void *data,
       if (dump_file)
        fprintf (dump_file,
                 " Inlined into %s which now has %i size\n",
-                caller->name (),
+                caller->dump_name (),
                 ipa_size_summaries->get (caller)->size);
       if (!(*num_calls)--)
        {
@@ -2650,6 +2672,9 @@ ipa_inline (void)
     {
       node = order[i];
       if (node->definition
+         /* Do not try to flatten aliases.  These may happen for example when
+            creating local aliases.  */
+         && !node->alias
          && lookup_attribute ("flatten",
                               DECL_ATTRIBUTES (node->decl)) != NULL)
        order[j--] = order[i];
@@ -2684,7 +2709,7 @@ ipa_inline (void)
         try to flatten itself turning it into a self-recursive
         function.  */
       if (dump_file)
-       fprintf (dump_file, "Flattening %s\n", node->name ());
+       fprintf (dump_file, "Flattening %s\n", node->dump_name ());
       flatten_function (node, false, true);
     }
 
@@ -2749,7 +2774,7 @@ ipa_inline (void)
                {
                  if (edge->count.ipa ().initialized_p ())
                    spec_rem += edge->count.ipa ();
-                 edge->resolve_speculation ();
+                 cgraph_edge::resolve_speculation (edge);
                  update = true;
                  remove_functions = true;
                }
@@ -2774,9 +2799,6 @@ ipa_inline (void)
        }
     }
 
-  /* Free ipa-prop structures if they are no longer needed.  */
-  ipa_free_all_structures_after_iinln ();
-
   if (dump_enabled_p ())
     dump_printf (MSG_NOTE,
                 "\nInlined %i calls, eliminated %i functions\n\n",
@@ -2980,7 +3002,8 @@ early_inliner (function *fun)
        }
       /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
-      while (iterations < param_early_inliner_max_iterations
+      while (iterations < opt_for_fn (node->decl,
+                                     param_early_inliner_max_iterations)
             && early_inline_small_functions (node))
        {
          timevar_push (TV_INTEGRATION);
@@ -2999,7 +3022,8 @@ early_inliner (function *fun)
              es->call_stmt_time
                = estimate_num_insns (edge->call_stmt, &eni_time_weights);
            }
-         if (iterations < param_early_inliner_max_iterations - 1)
+         if (iterations < opt_for_fn (node->decl,
+                                      param_early_inliner_max_iterations) - 1)
            ipa_update_overall_fn_summary (node);
          timevar_pop (TV_INTEGRATION);
          iterations++;