Update copyright years.
[thirdparty/gcc.git] / gcc / ipa-fnsummary.c
index 7b8182766eed7850f15253913d1fb739e8bfa6d0..fa01cb6c0836f59a7fd2250d84e5858087eae2ec 100644
@@ -1,5 +1,5 @@
 /* Function summary pass.
-   Copyright (C) 2003-2019 Free Software Foundation, Inc.
+   Copyright (C) 2003-2020 Free Software Foundation, Inc.
    Contributed by Jan Hubicka
 
 This file is part of GCC.
@@ -67,7 +67,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "print-tree.h"
 #include "tree-inline.h"
 #include "gimple-pretty-print.h"
-#include "params.h"
 #include "cfganal.h"
 #include "gimple-iterator.h"
 #include "tree-cfg.h"
@@ -83,6 +82,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "gimplify.h"
 #include "stringpool.h"
 #include "attribs.h"
+#include "tree-into-ssa.h"
 
 /* Summaries.  */
 fast_function_summary <ipa_fn_summary *, va_gc> *ipa_fn_summaries;
@@ -146,18 +146,23 @@ ipa_dump_hints (FILE *f, ipa_hints hints)
 
 /* Record SIZE and TIME to SUMMARY.
    The accounted code will be executed when EXEC_PRED is true.
-   When NONCONST_PRED is false the code will evaulate to constant and
-   will get optimized out in specialized clones of the function.   */
+   When NONCONST_PRED is false the code will evaluate to constant and
+   will get optimized out in specialized clones of the function.
+   If CALL is true account to call_size_time_table rather than
+   size_time_table.   */
 
 void
 ipa_fn_summary::account_size_time (int size, sreal time,
                                   const predicate &exec_pred,
-                                  const predicate &nonconst_pred_in)
+                                  const predicate &nonconst_pred_in,
+                                  bool call)
 {
   size_time_entry *e;
   bool found = false;
   int i;
   predicate nonconst_pred;
+  vec<size_time_entry, va_gc> *table = call
+                                      ? call_size_time_table : size_time_table;
 
   if (exec_pred == false)
     return;
@@ -167,25 +172,26 @@ ipa_fn_summary::account_size_time (int size, sreal time,
   if (nonconst_pred == false)
     return;
 
-  /* We need to create initial empty unconitional clause, but otherwie
+  /* We need to create initial empty unconditional clause, but otherwise
      we don't need to account empty times and sizes.  */
-  if (!size && time == 0 && size_time_table)
+  if (!size && time == 0 && table)
     return;
 
-  gcc_assert (time >= 0);
+  /* Only for calls do we unaccount what we previously recorded.  */
+  gcc_checking_assert (time >= 0 || call);
 
-  for (i = 0; vec_safe_iterate (size_time_table, i, &e); i++)
+  for (i = 0; vec_safe_iterate (table, i, &e); i++)
     if (e->exec_predicate == exec_pred
        && e->nonconst_predicate == nonconst_pred)
       {
        found = true;
        break;
       }
-  if (i == 256)
+  if (i == max_size_time_table_size)
     {
       i = 0;
       found = true;
-      e = &(*size_time_table)[0];
+      e = &(*table)[0];
       if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "\t\tReached limit on number of entries, "
@@ -213,16 +219,23 @@ ipa_fn_summary::account_size_time (int size, sreal time,
       new_entry.time = time;
       new_entry.exec_predicate = exec_pred;
       new_entry.nonconst_predicate = nonconst_pred;
-      vec_safe_push (size_time_table, new_entry);
+      if (call)
+        vec_safe_push (call_size_time_table, new_entry);
+      else
+        vec_safe_push (size_time_table, new_entry);
     }
   else
     {
       e->size += size;
       e->time += time;
+      /* FIXME: PR bootstrap/92653 gcc_checking_assert (e->time >= -1); */
+      /* Tolerate small roundoff issues.  */
+      if (e->time < 0)
+       e->time = 0;
     }
 }
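
The change above lets account_size_time feed either size_time_table or the new call_size_time_table: entries are keyed by a pair of predicates, the table is capped at max_size_time_table_size, and once the cap is hit everything is folded into the first, unconditional slot.  A minimal standalone sketch of that bookkeeping, in plain C with invented names rather than GCC's vec/sreal/predicate types:

#include <stdio.h>

#define MAX_ENTRIES 256		/* stands in for max_size_time_table_size */

/* Predicates are reduced to plain integers here; equal integers play the
   role of equal predicate pairs.  */
struct entry { unsigned exec_pred, nonconst_pred; int size; double time; };

static struct entry table[MAX_ENTRIES];
static int n_entries;

static void
account (int size, double time, unsigned exec_pred, unsigned nonconst_pred)
{
  int i;

  /* Reuse an existing entry with the same predicate pair.  */
  for (i = 0; i < n_entries; i++)
    if (table[i].exec_pred == exec_pred
        && table[i].nonconst_pred == nonconst_pred)
      break;

  if (i == MAX_ENTRIES)
    /* Table is full: fold into the first (unconditional) entry; this
       loses precision but bounds memory, as the patch does.  */
    i = 0;
  else if (i == n_entries)
    {
      table[i].exec_pred = exec_pred;
      table[i].nonconst_pred = nonconst_pred;
      n_entries++;
    }

  table[i].size += size;
  table[i].time += time;
  if (table[i].time < 0)	/* tolerate small roundoff, as above */
    table[i].time = 0;
}

int
main (void)
{
  account (3, 1.5, 0, 0);
  account (2, 0.5, 0, 0);	/* merged into the previous entry */
  account (4, 2.0, 1, 1);	/* new predicate pair, new entry */
  for (int i = 0; i < n_entries; i++)
    printf ("entry %d: size=%d time=%g\n", i, table[i].size, table[i].time);
  return 0;
}
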
 
-/* We proved E to be unreachable, redirect it to __bultin_unreachable.  */
+/* We proved E to be unreachable, redirect it to __builtin_unreachable.  */
 
 static struct cgraph_edge *
 redirect_to_unreachable (struct cgraph_edge *e)
@@ -297,9 +310,9 @@ set_hint_predicate (predicate **p, predicate new_predicate)
 }
 
 
-/* Compute what conditions may or may not hold given invormation about
+/* Compute what conditions may or may not hold given information about
    parameters.  RET_CLAUSE returns truths that may hold in a specialized copy,
-   whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
+   while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
    copy when called in a given context.  It is a bitmask of conditions. Bit
    0 means that condition is known to be false, while bit 1 means that condition
    may or may not be true.  These differs - for example NOT_INLINED condition
@@ -307,9 +320,9 @@ set_hint_predicate (predicate **p, predicate new_predicate)
    the fact that parameter is indeed a constant.
 
    KNOWN_VALS is partial mapping of parameters of NODE to constant values.
-   KNOWN_AGGS is a vector of aggreggate jump functions for each parameter.
-   Return clause of possible truths. When INLINE_P is true, assume that we are
-   inlining.
+   KNOWN_AGGS is a vector of aggregate known offset/value set for each
+   parameter.  Return clause of possible truths.  When INLINE_P is true, assume
+   that we are inlining.
 
    ERROR_MARK means compile time invariant.  */
 
@@ -317,8 +330,8 @@ static void
 evaluate_conditions_for_known_args (struct cgraph_node *node,
                                    bool inline_p,
                                    vec<tree> known_vals,
-                                   vec<ipa_agg_jump_function_p>
-                                   known_aggs,
+                                   vec<value_range> known_value_ranges,
+                                   vec<ipa_agg_value_set> known_aggs,
                                    clause_t *ret_clause,
                                    clause_t *ret_nonspec_clause)
 {
@@ -330,7 +343,7 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
 
   for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
     {
-      tree val;
+      tree val = NULL;
       tree res;
       int j;
       struct expr_eval_op *op;
@@ -339,41 +352,42 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
          (especially for K&R style programs).  So bound check here (we assume
          known_aggs vector, if non-NULL, has the same length as
          known_vals).  */
-      gcc_checking_assert (!known_aggs.exists ()
+      gcc_checking_assert (!known_aggs.length () || !known_vals.length ()
                           || (known_vals.length () == known_aggs.length ()));
-      if (c->operand_num >= (int) known_vals.length ())
-       {
-         clause |= 1 << (i + predicate::first_dynamic_condition);
-         nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
-         continue;
-       }
 
       if (c->agg_contents)
        {
-         struct ipa_agg_jump_function *agg;
+         struct ipa_agg_value_set *agg;
 
          if (c->code == predicate::changed
              && !c->by_ref
+             && c->operand_num < (int)known_vals.length ()
              && (known_vals[c->operand_num] == error_mark_node))
            continue;
 
-         if (known_aggs.exists ())
+         if (c->operand_num < (int)known_aggs.length ())
            {
-             agg = known_aggs[c->operand_num];
-             val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
+             agg = &known_aggs[c->operand_num];
+             val = ipa_find_agg_cst_for_param (agg,
+                                               c->operand_num
+                                                  < (int) known_vals.length ()
+                                               ? known_vals[c->operand_num]
+                                               : NULL,
                                                c->offset, c->by_ref);
            }
          else
            val = NULL_TREE;
        }
-      else
+      else if (c->operand_num < (int) known_vals.length ())
        {
          val = known_vals[c->operand_num];
          if (val == error_mark_node && c->code != predicate::changed)
            val = NULL_TREE;
        }
 
-      if (!val)
+      if (!val
+         && (c->code == predicate::changed
+             || c->code == predicate::is_not_constant))
        {
          clause |= 1 << (i + predicate::first_dynamic_condition);
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
@@ -385,48 +399,101 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
          continue;
        }
 
-      if (TYPE_SIZE (c->type) != TYPE_SIZE (TREE_TYPE (val)))
-       {
-         clause |= 1 << (i + predicate::first_dynamic_condition);
-         nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
-         continue;
-       }
       if (c->code == predicate::is_not_constant)
        {
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }
 
-      val = fold_unary (VIEW_CONVERT_EXPR, c->type, val);
-      for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
+      if (val && TYPE_SIZE (c->type) == TYPE_SIZE (TREE_TYPE (val)))
        {
-         if (!val)
-           break;
-         if (!op->val[0])
-           val = fold_unary (op->code, op->type, val);
-         else if (!op->val[1])
-           val = fold_binary (op->code, op->type,
-                              op->index ? op->val[0] : val,
-                              op->index ? val : op->val[0]);
-         else if (op->index == 0)
-           val = fold_ternary (op->code, op->type,
-                               val, op->val[0], op->val[1]);
-         else if (op->index == 1)
-           val = fold_ternary (op->code, op->type,
-                               op->val[0], val, op->val[1]);
-         else if (op->index == 2)
-           val = fold_ternary (op->code, op->type,
-                               op->val[0], op->val[1], val);
-         else
-           val = NULL_TREE;
+         if (c->type != TREE_TYPE (val))
+           val = fold_unary (VIEW_CONVERT_EXPR, c->type, val);
+         for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
+           {
+             if (!val)
+               break;
+             if (!op->val[0])
+               val = fold_unary (op->code, op->type, val);
+             else if (!op->val[1])
+               val = fold_binary (op->code, op->type,
+                                  op->index ? op->val[0] : val,
+                                  op->index ? val : op->val[0]);
+             else if (op->index == 0)
+               val = fold_ternary (op->code, op->type,
+                                   val, op->val[0], op->val[1]);
+             else if (op->index == 1)
+               val = fold_ternary (op->code, op->type,
+                                   op->val[0], val, op->val[1]);
+             else if (op->index == 2)
+               val = fold_ternary (op->code, op->type,
+                                   op->val[0], op->val[1], val);
+             else
+               val = NULL_TREE;
+           }
+
+         res = val
+           ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
+           : NULL;
+
+         if (res && integer_zerop (res))
+           continue;
+         if (res && integer_onep (res))
+           {
+             clause |= 1 << (i + predicate::first_dynamic_condition);
+             nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
+             continue;
+           }
        }
+      if (c->operand_num < (int) known_value_ranges.length ()
+         && !c->agg_contents
+         && !known_value_ranges[c->operand_num].undefined_p ()
+         && !known_value_ranges[c->operand_num].varying_p ()
+         && TYPE_SIZE (c->type)
+                == TYPE_SIZE (known_value_ranges[c->operand_num].type ())
+         && (!val || TREE_CODE (val) != INTEGER_CST))
+       {
+         value_range vr = known_value_ranges[c->operand_num];
+         if (!useless_type_conversion_p (c->type, vr.type ()))
+           {
+             value_range res;
+             range_fold_unary_expr (&res, NOP_EXPR,
+                                    c->type, &vr, vr.type ());
+             vr = res;
+           }
+         tree type = c->type;
 
-      res = val
-       ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
-       : NULL;
+         for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
+           {
+             if (vr.varying_p () || vr.undefined_p ())
+               break;
 
-      if (res && integer_zerop (res))
-       continue;
+             value_range res;
+             if (!op->val[0])
+               range_fold_unary_expr (&res, op->code, op->type, &vr, type);
+             else if (!op->val[1])
+               {
+                 value_range op0 (op->val[0], op->val[0]);
+                 range_fold_binary_expr (&res, op->code, op->type,
+                                         op->index ? &op0 : &vr,
+                                         op->index ? &vr : &op0);
+               }
+             else
+               gcc_unreachable ();
+             type = op->type;
+             vr = res;
+           }
+         if (!vr.varying_p () && !vr.undefined_p ())
+           {
+             value_range res;
+             value_range val_vr (c->val, c->val);
+             range_fold_binary_expr (&res, c->code, boolean_type_node,
+                                     &vr,
+                                     &val_vr);
+             if (res.zero_p ())
+               continue;
+           }
+       }
 
       clause |= 1 << (i + predicate::first_dynamic_condition);
       nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
@@ -437,7 +504,18 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
 }
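
The rewritten loop above also consults known value ranges: when no usable constant is available, a range propagated from the caller can still prove a condition false via range folding.  A toy illustration of the underlying idea, in plain C and without GCC's value_range machinery:

#include <stdbool.h>
#include <stdio.h>

/* A closed integer interval [lo, hi] standing in for a value range.  */
struct range { long lo, hi; };

/* Return true when "x > bound" is impossible for every x in R, i.e. the
   condition guarding some code can be proven false from the range alone.  */
static bool
gt_is_always_false (struct range r, long bound)
{
  return r.hi <= bound;
}

int
main (void)
{
  struct range r = { 0, 10 };	/* value known to lie in [0, 10] */
  printf ("param > 100 provably false: %d\n", gt_is_always_false (r, 100));
  printf ("param > 5   provably false: %d\n", gt_is_always_false (r, 5));
  return 0;
}
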
 
 
-/* Work out what conditions might be true at invocation of E.  */
+/* Work out what conditions might be true at invocation of E.
+   Compute costs for inlined edge if INLINE_P is true.
+
+   Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
+   (if non-NULL) conditions evaluated for nonspecialized clone called
+   in a given context.
+
+   KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by
+   known constant and aggregate values of parameters.
+
+   KNOWN_CONTEXTS_PTR, if non-NULL, will be filled by polymorphic call contexts
+   of parameters used by polymorphic calls.  */
 
 void
 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
@@ -446,104 +524,145 @@ evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
                              vec<tree> *known_vals_ptr,
                              vec<ipa_polymorphic_call_context>
                              *known_contexts_ptr,
-                             vec<ipa_agg_jump_function_p> *known_aggs_ptr)
+                             vec<ipa_agg_value_set> *known_aggs_ptr)
 {
   struct cgraph_node *callee = e->callee->ultimate_alias_target ();
   class ipa_fn_summary *info = ipa_fn_summaries->get (callee);
-  vec<tree> known_vals = vNULL;
-  vec<ipa_agg_jump_function_p> known_aggs = vNULL;
+  auto_vec<value_range, 32> known_value_ranges;
   class ipa_edge_args *args;
 
   if (clause_ptr)
     *clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
-  if (known_vals_ptr)
-    known_vals_ptr->create (0);
-  if (known_contexts_ptr)
-    known_contexts_ptr->create (0);
 
   if (ipa_node_params_sum
       && !e->call_stmt_cannot_inline_p
-      && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr)
+      && (info->conds || known_contexts_ptr)
       && (args = IPA_EDGE_REF (e)) != NULL)
     {
-      class ipa_node_params *caller_parms_info, *callee_pi;
+      struct cgraph_node *caller;
+      class ipa_node_params *caller_parms_info, *callee_pi = NULL;
       class ipa_call_summary *es = ipa_call_summaries->get (e);
       int i, count = ipa_get_cs_argument_count (args);
 
-      if (e->caller->inlined_to)
-       caller_parms_info = IPA_NODE_REF (e->caller->inlined_to);
-      else
-       caller_parms_info = IPA_NODE_REF (e->caller);
-      callee_pi = IPA_NODE_REF (callee);
+      if (count)
+       {
+         if (e->caller->inlined_to)
+           caller = e->caller->inlined_to;
+         else
+           caller = e->caller;
+         caller_parms_info = IPA_NODE_REF (caller);
+          callee_pi = IPA_NODE_REF (callee);
+
+         /* Watch for thunks.  */
+         if (callee_pi)
+           /* Watch for variadic functions.  */
+           count = MIN (count, ipa_get_param_count (callee_pi));
+       }
 
-      if (count && (info->conds || known_vals_ptr))
-       known_vals.safe_grow_cleared (count);
-      if (count && (info->conds || known_aggs_ptr))
-       known_aggs.safe_grow_cleared (count);
-      if (count && known_contexts_ptr)
-       known_contexts_ptr->safe_grow_cleared (count);
+      if (callee_pi)
+       for (i = 0; i < count; i++)
+         {
+           struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
 
-      for (i = 0; i < count; i++)
-       {
-         struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
-         tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
-                                          ipa_get_type (callee_pi, i));
+           if (ipa_is_param_used_by_indirect_call (callee_pi, i)
+               || ipa_is_param_used_by_ipa_predicates (callee_pi, i))
+             {
+               /* Determine if we know constant value of the parameter.  */
+               tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
+                                                ipa_get_type (callee_pi, i));
 
-         if (!cst && e->call_stmt
-             && i < (int)gimple_call_num_args (e->call_stmt))
-           {
-             cst = gimple_call_arg (e->call_stmt, i);
-             if (!is_gimple_min_invariant (cst))
-               cst = NULL;
-           }
-         if (cst)
-           {
-             gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
-             if (known_vals.exists ())
-               known_vals[i] = cst;
-           }
-         else if (inline_p && !es->param[i].change_prob)
-           known_vals[i] = error_mark_node;
-
-         if (known_contexts_ptr)
-           (*known_contexts_ptr)[i]
-             = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
-         /* TODO: When IPA-CP starts propagating and merging aggregate jump
-            functions, use its knowledge of the caller too, just like the
-            scalar case above.  */
-         known_aggs[i] = &jf->agg;
-       }
+               if (!cst && e->call_stmt
+                   && i < (int)gimple_call_num_args (e->call_stmt))
+                 {
+                   cst = gimple_call_arg (e->call_stmt, i);
+                   if (!is_gimple_min_invariant (cst))
+                     cst = NULL;
+                 }
+               if (cst)
+                 {
+                   gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
+                   if (!known_vals_ptr->length ())
+                     vec_safe_grow_cleared (known_vals_ptr, count);
+                   (*known_vals_ptr)[i] = cst;
+                 }
+               else if (inline_p && !es->param[i].change_prob)
+                 {
+                   if (!known_vals_ptr->length ())
+                     vec_safe_grow_cleared (known_vals_ptr, count);
+                   (*known_vals_ptr)[i] = error_mark_node;
+                 }
+
+               /* If we failed to get simple constant, try value range.  */
+               if ((!cst || TREE_CODE (cst) != INTEGER_CST)
+                   && ipa_is_param_used_by_ipa_predicates (callee_pi, i))
+                 {
+                   value_range vr 
+                      = ipa_value_range_from_jfunc (caller_parms_info, e, jf,
+                                                    ipa_get_type (callee_pi,
+                                                                  i));
+                   if (!vr.undefined_p () && !vr.varying_p ())
+                     {
+                       if (!known_value_ranges.length ())
+                         known_value_ranges.safe_grow_cleared (count);
+                       known_value_ranges[i] = vr;
+                     }
+                 }
+
+               /* Determine known aggregate values.  */
+               ipa_agg_value_set agg
+                   = ipa_agg_value_set_from_jfunc (caller_parms_info,
+                                                   caller, &jf->agg);
+               if (agg.items.length ())
+                 {
+                   if (!known_aggs_ptr->length ())
+                     vec_safe_grow_cleared (known_aggs_ptr, count);
+                   (*known_aggs_ptr)[i] = agg;
+                 }
+             }
+
+           /* For calls used in polymorphic calls we further determine
+              polymorphic call context.  */
+           if (known_contexts_ptr
+               && ipa_is_param_used_by_polymorphic_call (callee_pi, i))
+             {
+               ipa_polymorphic_call_context
+                  ctx = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
+               if (!ctx.useless_p ())
+                 {
+                   if (!known_contexts_ptr->length ())
+                     known_contexts_ptr->safe_grow_cleared (count);
+                   (*known_contexts_ptr)[i]
+                     = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
+                 }
+              }
+         }
+       else
+         gcc_assert (!count || callee->thunk.thunk_p);
     }
-  else if (e->call_stmt && !e->call_stmt_cannot_inline_p
-          && ((clause_ptr && info->conds) || known_vals_ptr))
+  else if (e->call_stmt && !e->call_stmt_cannot_inline_p && info->conds)
     {
       int i, count = (int)gimple_call_num_args (e->call_stmt);
 
-      if (count && (info->conds || known_vals_ptr))
-       known_vals.safe_grow_cleared (count);
       for (i = 0; i < count; i++)
        {
          tree cst = gimple_call_arg (e->call_stmt, i);
          if (!is_gimple_min_invariant (cst))
            cst = NULL;
          if (cst)
-           known_vals[i] = cst;
+           {
+             if (!known_vals_ptr->length ())
+               vec_safe_grow_cleared (known_vals_ptr, count);
+             (*known_vals_ptr)[i] = cst;
+           }
        }
     }
 
   evaluate_conditions_for_known_args (callee, inline_p,
-                                     known_vals, known_aggs, clause_ptr,
+                                     *known_vals_ptr,
+                                     known_value_ranges,
+                                     *known_aggs_ptr,
+                                     clause_ptr,
                                      nonspec_clause_ptr);
-
-  if (known_vals_ptr)
-    *known_vals_ptr = known_vals;
-  else
-    known_vals.release ();
-
-  if (known_aggs_ptr)
-    *known_aggs_ptr = known_aggs;
-  else
-    known_aggs.release ();
 }
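
In the reworked evaluate_properties_for_edge above, per-argument information (constants, value ranges, aggregate values, polymorphic contexts) is only gathered for parameters the callee summary actually consults, and the output vectors are grown lazily on first use.  A rough standalone sketch of that filtering, with made-up types instead of GCC's jump-function machinery:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Usage flags per parameter, standing in for ipa_is_param_used_by_*.  */
struct param_info { bool used_by_predicates; bool used_by_indirect_call; };

struct known_val { bool known; int val; };

/* Record caller-provided constants, but only for parameters something will
   actually look at; allocate the result vector lazily.  */
static struct known_val *
gather_known_vals (const struct param_info *params, const int *args,
                   const bool *arg_is_constant, int count)
{
  struct known_val *known = NULL;

  for (int i = 0; i < count; i++)
    {
      if (!params[i].used_by_predicates && !params[i].used_by_indirect_call)
        continue;			/* no predicate or indirect call cares */
      if (!arg_is_constant[i])
        continue;			/* nothing useful to record */
      if (!known)
        known = calloc (count, sizeof *known);	/* lazy allocation */
      known[i].known = true;
      known[i].val = args[i];
    }
  return known;
}

int
main (void)
{
  struct param_info params[3]
    = { { true, false }, { false, false }, { true, true } };
  int args[3] = { 7, 8, 9 };
  bool constant[3] = { true, true, true };

  struct known_val *known = gather_known_vals (params, args, constant, 3);
  for (int i = 0; i < 3; i++)
    printf ("param %d: %s\n", i,
            known && known[i].known ? "recorded" : "skipped");
  free (known);
  return 0;
}
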
 
 
@@ -553,8 +672,7 @@ static void
 ipa_fn_summary_alloc (void)
 {
   gcc_checking_assert (!ipa_fn_summaries);
-  ipa_size_summaries = new fast_function_summary <ipa_size_summary *, va_heap>
-                                                        (symtab);
+  ipa_size_summaries = new ipa_size_summary_t (symtab);
   ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
   ipa_call_summaries = new ipa_call_summary_t (symtab);
 }
@@ -575,6 +693,7 @@ ipa_fn_summary::~ipa_fn_summary ()
     edge_predicate_pool.remove (loop_stride);
   vec_free (conds);
   vec_free (size_time_table);
+  vec_free (call_size_time_table);
 }
 
 void
@@ -655,6 +774,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
       evaluate_conditions_for_known_args (dst, false,
                                          known_vals,
                                          vNULL,
+                                         vNULL,
                                          &possible_truths,
                                          /* We are going to specialize,
                                             so ignore nonspec truths.  */
@@ -664,7 +784,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
       info->account_size_time (0, 0, true_pred, true_pred);
 
       /* Remap size_time vectors.
-         Simplify the predicate by prunning out alternatives that are known
+         Simplify the predicate by pruning out alternatives that are known
          to be false.
          TODO: as on optimization, we can also eliminate conditions known
          to be true.  */
@@ -702,7 +822,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
          edge_set_predicate (edge, &new_predicate);
        }
 
-      /* Remap indirect edge predicates with the same simplificaiton as above. 
+      /* Remap indirect edge predicates with the same simplification as above.
          Also copy constantness arrays.   */
       for (edge = dst->indirect_calls; edge; edge = next)
        {
@@ -727,7 +847,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
       /* If inliner or someone after inliner will ever start producing
          non-trivial clones, we will get trouble with lack of information
          about updating self sizes, because size vectors already contains
-         sizes of the calees.  */
+         sizes of the callees.  */
       gcc_assert (!inlined_to_p || !optimized_out_size);
     }
   else
@@ -793,6 +913,9 @@ dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
               ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
               indent, "", edge->sreal_frequency ().to_double ());
 
+      if (cross_module_call_p (edge))
+       fprintf (f, " cross module");
+
       if (es)
        fprintf (f, " loop depth:%2i size:%2i time: %2i",
                 es->loop_depth, es->call_stmt_size, es->call_stmt_time);
@@ -1082,7 +1205,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 
       /* Casts of parameters, loads from parameters passed by reference
          and stores to return value or parameters are often free after
-         inlining dua to SRA and further combining.
+         inlining due to SRA and further combining.
          Assume that half of statements goes away.  */
       if (CONVERT_EXPR_CODE_P (rhs_code)
          || rhs_code == VIEW_CONVERT_EXPR
@@ -1136,12 +1259,12 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
            lhs_free = true;
 
          /* Writes to parameters, parameters passed by value and return value
-            (either dirrectly or passed via invisible reference) are free.  
+            (either directly or passed via invisible reference) are free.  
 
             TODO: We ought to handle testcase like
             struct a {int a,b;};
             struct a
-            retrurnsturct (void)
+            returnstruct (void)
             {
             struct a a ={1,2};
             return a;
@@ -1149,7 +1272,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 
             This translate into:
 
-            retrurnsturct ()
+            returnstruct ()
             {
             int a$b;
             int a$a;
@@ -1201,7 +1324,7 @@ decompose_param_expr (struct ipa_func_body_info *fbi,
                      struct agg_position_info *aggpos,
                      expr_eval_ops *param_ops_p = NULL)
 {
-  int op_limit = PARAM_VALUE (PARAM_IPA_MAX_PARAM_EXPR_OPS);
+  int op_limit = param_ipa_max_param_expr_ops;
   int op_count = 0;
 
   if (param_ops_p)
@@ -1347,7 +1470,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
          enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
                                      ? code : inverted_code);
          /* invert_tree_comparison will return ERROR_MARK on FP
-            comparsions that are not EQ/NE instead of returning proper
+            comparisons that are not EQ/NE instead of returning proper
             unordered one.  Be sure it is not confused with NON_CONSTANT.
 
             And if the edge's target is the final block of diamond CFG graph
@@ -1378,7 +1501,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
      Here we can predicate nonconstant_code.  We can't
      really handle constant_code since we have no predicate
      for this and also the constant code is not known to be
-     optimized away when inliner doen't see operand is constant.
+     optimized away when inliner doesn't see operand is constant.
      Other optimizers might think otherwise.  */
   if (gimple_cond_code (last) != NE_EXPR
       || !integer_zerop (gimple_cond_rhs (last)))
@@ -1432,7 +1555,7 @@ set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
 
   auto_vec<std::pair<tree, tree> > ranges;
   tree type = TREE_TYPE (op);
-  int bound_limit = PARAM_VALUE (PARAM_IPA_MAX_SWITCH_PREDICATE_BOUNDS);
+  int bound_limit = param_ipa_max_switch_predicate_bounds;
   int bound_count = 0;
   wide_int vr_wmin, vr_wmax;
   value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
@@ -1801,7 +1924,7 @@ will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
   int base_index;
   struct agg_position_info aggpos;
 
-  /* What statments might be optimized away
+  /* What statements might be optimized away
      when their arguments are constant.  */
   if (gimple_code (stmt) != GIMPLE_ASSIGN
       && gimple_code (stmt) != GIMPLE_COND
@@ -1884,7 +2007,7 @@ struct record_modified_bb_info
   gimple *stmt;
 };
 
-/* Value is initialized in INIT_BB and used in USE_BB.  We want to copute
+/* Value is initialized in INIT_BB and used in USE_BB.  We want to compute
    probability how often it changes between USE_BB.
    INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
    is in different loop nest, we can do better.
@@ -2213,7 +2336,7 @@ find_foldable_builtin_expect (basic_block bb)
    presence of EH and will be optimized out by optimize_clobbers later in the
    game. 
 
-   NEED_EH is used to recurse in case the clobber has non-EH predecestors
+   NEED_EH is used to recurse in case the clobber has non-EH predecessors
    that can be clobber only, too.. When it is false, the RESX is not necessary
    on the end of basic block.  */
 
@@ -2247,7 +2370,7 @@ clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
       return false;
     }
 
-  /* See if all predecestors are either throws or clobber only BBs.  */
+  /* See if all predecessors are either throws or clobber only BBs.  */
   FOR_EACH_EDGE (e, ei, bb->preds)
     if (!(e->flags & EDGE_EH)
        && !clobber_only_eh_bb_p (e->src, false))
@@ -2277,9 +2400,9 @@ fp_expression_p (gimple *stmt)
 static void
 analyze_function_body (struct cgraph_node *node, bool early)
 {
-  sreal time = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME);
+  sreal time = opt_for_fn (node->decl, param_uninlined_function_time);
   /* Estimate static overhead for function prologue/epilogue and alignment. */
-  int size = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS);
+  int size = opt_for_fn (node->decl, param_uninlined_function_insns);
   /* Benefits are scaled by probability of elimination that is in range
      <0,2>.  */
   basic_block bb;
@@ -2328,7 +2451,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
          fbi.bb_infos = vNULL;
          fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
          fbi.param_count = count_formal_params (node->decl);
-         fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+         fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
          nonconstant_names.safe_grow_cleared
            (SSANAMES (my_function)->length ());
@@ -2345,9 +2468,11 @@ analyze_function_body (struct cgraph_node *node, bool early)
   info->account_size_time (0, 0, bb_predicate, bb_predicate);
 
   bb_predicate = predicate::not_inlined ();
-  info->account_size_time (PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS)
+  info->account_size_time (opt_for_fn (node->decl,
+                               param_uninlined_function_insns)
                           * ipa_fn_summary::size_scale,
-                          PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME),
+                          opt_for_fn (node->decl,
+                               param_uninlined_function_time),
                           bb_predicate,
                           bb_predicate);
 
@@ -2423,7 +2548,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
          predicate will_be_nonconstant;
 
           /* This relation stmt should be folded after we remove
-             buildin_expect call. Adjust the cost here.  */
+             __builtin_expect call. Adjust the cost here.  */
          if (stmt == fix_builtin_expect_stmt)
             {
               this_size--;
@@ -2489,7 +2614,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
                }
            }
 
-         /* TODO: When conditional jump or swithc is known to be constant, but
+         /* TODO: When conditional jump or switch is known to be constant, but
             we did not translate it into the predicates, we really can account
             just maximum of the possible paths.  */
          if (fbi.info)
@@ -2745,10 +2870,10 @@ compute_fn_summary (struct cgraph_node *node, bool early)
       es->call_stmt_size = eni_size_weights.call_cost;
       es->call_stmt_time = eni_time_weights.call_cost;
       info->account_size_time (ipa_fn_summary::size_scale
-                              * PARAM_VALUE
-                                (PARAM_UNINLINED_FUNCTION_THUNK_INSNS),
-                              PARAM_VALUE
-                                (PARAM_UNINLINED_FUNCTION_THUNK_TIME), t, t);
+                              * opt_for_fn (node->decl,
+                                param_uninlined_function_thunk_insns),
+                              opt_for_fn (node->decl,
+                                param_uninlined_function_thunk_time), t, t);
       t = predicate::not_inlined ();
       info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
       ipa_update_overall_fn_summary (node);
@@ -2766,6 +2891,10 @@ compute_fn_summary (struct cgraph_node *node, bool early)
        /* Even is_gimple_min_invariant rely on current_function_decl.  */
        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
 
+       /* During IPA profile merging we may be called w/o virtual SSA form
+         built.  */
+       update_ssa (TODO_update_ssa_only_virtuals);
+
        /* Can this function be inlined at all?  */
        if (!opt_for_fn (node->decl, optimize)
           && !lookup_attribute ("always_inline",
@@ -2838,7 +2967,7 @@ estimate_edge_devirt_benefit (struct cgraph_edge *ie,
                              int *size, int *time,
                              vec<tree> known_vals,
                              vec<ipa_polymorphic_call_context> known_contexts,
-                             vec<ipa_agg_jump_function_p> known_aggs)
+                             vec<ipa_agg_value_set> known_aggs)
 {
   tree target;
   struct cgraph_node *callee;
@@ -2846,7 +2975,7 @@ estimate_edge_devirt_benefit (struct cgraph_edge *ie,
   enum availability avail;
   bool speculative;
 
-  if (!known_vals.exists () && !known_contexts.exists ())
+  if (!known_vals.length () && !known_contexts.length ())
     return false;
   if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
     return false;
@@ -2884,16 +3013,16 @@ estimate_edge_devirt_benefit (struct cgraph_edge *ie,
 static inline void
 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
                             sreal *time,
-                            int prob,
                             vec<tree> known_vals,
                             vec<ipa_polymorphic_call_context> known_contexts,
-                            vec<ipa_agg_jump_function_p> known_aggs,
+                            vec<ipa_agg_value_set> known_aggs,
                             ipa_hints *hints)
 {
   class ipa_call_summary *es = ipa_call_summaries->get (e);
   int call_size = es->call_stmt_size;
   int call_time = es->call_stmt_time;
   int cur_size;
+
   if (!e->callee && hints && e->maybe_hot_p ()
       && estimate_edge_devirt_benefit (e, &call_size, &call_time,
                                       known_vals, known_contexts, known_aggs))
@@ -2902,28 +3031,26 @@ estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
   *size += cur_size;
   if (min_size)
     *min_size += cur_size;
-  if (!time)
-    ;
-  else if (prob == REG_BR_PROB_BASE)
+  if (time)
     *time += ((sreal)call_time) * e->sreal_frequency ();
-  else
-    *time += ((sreal)call_time * prob) * e->sreal_frequency ();
 }
 
 
-
 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
    calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
-   describe context of the call site.  */
+   describe context of the call site.
+   Helper for estimate_calls_size_and_time which does the same but
+   (in most cases) faster.  */
 
 static void
-estimate_calls_size_and_time (struct cgraph_node *node, int *size,
-                             int *min_size, sreal *time,
-                             ipa_hints *hints,
-                             clause_t possible_truths,
-                             vec<tree> known_vals,
-                             vec<ipa_polymorphic_call_context> known_contexts,
-                             vec<ipa_agg_jump_function_p> known_aggs)
+estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
+                               int *min_size, sreal *time,
+                               ipa_hints *hints,
+                               clause_t possible_truths,
+                               vec<tree> known_vals,
+                               vec<ipa_polymorphic_call_context> known_contexts,
+                               vec<ipa_agg_value_set> known_aggs)
 {
   struct cgraph_edge *e;
   for (e = node->callees; e; e = e->next_callee)
@@ -2931,11 +3058,11 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
       if (!e->inline_failed)
        {
          gcc_checking_assert (!ipa_call_summaries->get (e));
-         estimate_calls_size_and_time (e->callee, size, min_size, time,
-                                       hints,
-                                       possible_truths,
-                                       known_vals, known_contexts,
-                                       known_aggs);
+         estimate_calls_size_and_time_1 (e->callee, size, min_size, time,
+                                         hints,
+                                         possible_truths,
+                                         known_vals, known_contexts,
+                                         known_aggs);
          continue;
        }
       class ipa_call_summary *es = ipa_call_summaries->get (e);
@@ -2950,10 +3077,10 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
          || es->predicate->evaluate (possible_truths))
        {
          /* Predicates of calls shall not use NOT_CHANGED codes,
-            sowe do not need to compute probabilities.  */
+            so we do not need to compute probabilities.  */
          estimate_edge_size_and_time (e, size,
                                       es->predicate ? NULL : min_size,
-                                      time, REG_BR_PROB_BASE,
+                                      time,
                                       known_vals, known_contexts,
                                       known_aggs, hints);
        }
@@ -2965,14 +3092,165 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
          || es->predicate->evaluate (possible_truths))
        estimate_edge_size_and_time (e, size,
                                     es->predicate ? NULL : min_size,
-                                    time, REG_BR_PROB_BASE,
+                                    time,
                                     known_vals, known_contexts, known_aggs,
                                     hints);
     }
 }
 
+/* Populate sum->call_size_time_table for edges from NODE.  */
+
+static void
+summarize_calls_size_and_time (struct cgraph_node *node,
+                              ipa_fn_summary *sum)
+{
+  struct cgraph_edge *e;
+  for (e = node->callees; e; e = e->next_callee)
+    {
+      if (!e->inline_failed)
+       {
+         gcc_checking_assert (!ipa_call_summaries->get (e));
+         summarize_calls_size_and_time (e->callee, sum);
+         continue;
+       }
+      int size = 0;
+      sreal time = 0;
+
+      estimate_edge_size_and_time (e, &size, NULL, &time,
+                                  vNULL, vNULL, vNULL, NULL);
+
+      struct predicate pred = true;
+      class ipa_call_summary *es = ipa_call_summaries->get (e);
+
+      if (es->predicate)
+       pred = *es->predicate;
+      sum->account_size_time (size, time, pred, pred, true);
+    }
+  for (e = node->indirect_calls; e; e = e->next_callee)
+    {
+      int size = 0;
+      sreal time = 0;
+
+      estimate_edge_size_and_time (e, &size, NULL, &time,
+                                  vNULL, vNULL, vNULL, NULL);
+      struct predicate pred = true;
+      class ipa_call_summary *es = ipa_call_summaries->get (e);
+
+      if (es->predicate)
+       pred = *es->predicate;
+      sum->account_size_time (size, time, pred, pred, true);
+    }
+}
+
+/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
+   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
+   describe context of the call site.  */
+
+static void
+estimate_calls_size_and_time (struct cgraph_node *node, int *size,
+                             int *min_size, sreal *time,
+                             ipa_hints *hints,
+                             clause_t possible_truths,
+                             vec<tree> known_vals,
+                             vec<ipa_polymorphic_call_context> known_contexts,
+                             vec<ipa_agg_value_set> known_aggs)
+{
+  class ipa_fn_summary *sum = ipa_fn_summaries->get (node);
+  bool use_table = true;
+
+  gcc_assert (node->callees || node->indirect_calls);
+
+  /* During early inlining we do not calculate info for very
+     large functions and thus there is no need for producing
+     summaries.  */
+  if (!ipa_node_params_sum)
+    use_table = false;
+  /* Do not calculate summaries for simple wrappers; it is a waste
+     of memory.  */
+  else if (node->callees && node->indirect_calls
+           && node->callees->inline_failed && !node->callees->next_callee)
+    use_table = false;
+  /* If there is an indirect edge that may be optimized, we need
+     to go the slow way.  */
+  else if ((known_vals.length ()
+           || known_contexts.length ()
+           || known_aggs.length ()) && hints)
+    {
+      class ipa_node_params *params_summary = IPA_NODE_REF (node);
+      unsigned int nargs = params_summary
+                          ? ipa_get_param_count (params_summary) : 0;
+
+      for (unsigned int i = 0; i < nargs && use_table; i++)
+       {
+         if (ipa_is_param_used_by_indirect_call (params_summary, i)
+             && ((known_vals.length () > i && known_vals[i])
+                 || (known_aggs.length () > i
+                     && known_aggs[i].items.length ())))
+           use_table = false;
+         else if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
+                  && (known_contexts.length () > i
+                      && !known_contexts[i].useless_p ()))
+           use_table = false;
+       }
+    }
+
+  /* Fast path is via the call size time table.  */
+  if (use_table)
+    {
+      /* Build summary if it is absent.  */
+      if (!sum->call_size_time_table)
+       {
+         predicate true_pred = true;
+         sum->account_size_time (0, 0, true_pred, true_pred, true);
+         summarize_calls_size_and_time (node, sum);
+       }
+
+      int old_size = *size;
+      sreal old_time = time ? *time : 0;
+
+      if (min_size)
+       *min_size += (*sum->call_size_time_table)[0].size;
+
+      unsigned int i;
+      size_time_entry *e;
+
+      /* Walk the table and account sizes and times.  */
+      for (i = 0; vec_safe_iterate (sum->call_size_time_table, i, &e);
+          i++)
+       if (e->exec_predicate.evaluate (possible_truths))
+         {
+           *size += e->size;
+           if (time)
+             *time += e->time;
+         }
+
+      /* Be careful and see if both methods agree.  */
+      if ((flag_checking || dump_file)
+         /* Do not try to sanity check when we know we lost some
+            precision.  */
+         && sum->call_size_time_table->length ()
+            < ipa_fn_summary::max_size_time_table_size)
+       {
+         estimate_calls_size_and_time_1 (node, &old_size, NULL, &old_time, NULL,
+                                         possible_truths, known_vals,
+                                         known_contexts, known_aggs);
+         gcc_assert (*size == old_size);
+         if (time && (*time - old_time > 1 || *time - old_time < -1)
+             && dump_file)
+           fprintf (dump_file, "Time mismatch in call summary %f!=%f",
+                    old_time.to_double (),
+                    time->to_double ());
+       }
+    }
+  /* Slow path by walking all edges.  */
+  else
+    estimate_calls_size_and_time_1 (node, size, min_size, time, hints,
+                                   possible_truths, known_vals, known_contexts,
+                                   known_aggs);
+}
+
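
estimate_calls_size_and_time above is now split into a fast path and a slow path: the fast path lazily builds the per-node call_size_time_table once and then just evaluates its predicates, while the original edge walk (renamed estimate_calls_size_and_time_1) is kept for contexts whose known argument values, contexts or aggregates could change individual calls.  A standalone sketch of that split, in plain C with invented types and a single aggregate in place of the predicate-keyed table:

#include <stdbool.h>
#include <stdio.h>

struct call_cost { int size; double time; };

struct node
{
  int n_edges;
  struct call_cost edges[8];	/* per-call costs */
  bool summarized;
  struct call_cost summary;	/* lazily built aggregate over all calls */
};

/* The "slow path": visit every outgoing call.  */
static struct call_cost
walk_edges (const struct node *n)
{
  struct call_cost c = { 0, 0 };
  for (int i = 0; i < n->n_edges; i++)
    {
      c.size += n->edges[i].size;
      c.time += n->edges[i].time;
    }
  return c;
}

static struct call_cost
calls_size_and_time (struct node *n, bool context_has_param_info)
{
  /* Contexts with known argument values may specialize or devirtualize
     individual calls, so the cached aggregate would be wrong for them.  */
  if (context_has_param_info)
    return walk_edges (n);

  if (!n->summarized)		/* build the summary on first use */
    {
      n->summary = walk_edges (n);
      n->summarized = true;
    }
  return n->summary;
}

int
main (void)
{
  struct node n = { 2, { { 4, 1.0 }, { 6, 2.5 } }, false, { 0, 0 } };
  struct call_cost c = calls_size_and_time (&n, false);
  printf ("size=%d time=%g\n", c.size, c.time);
  return 0;
}
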
 /* Default constructor for ipa call context.
-   Memory alloction of known_vals, known_contexts
+   Memory allocation of known_vals, known_contexts
    and known_aggs vectors is owned by the caller, but can
    be release by ipa_call_context::release.  
    
@@ -2983,7 +3261,7 @@ ipa_call_context::ipa_call_context (cgraph_node *node,
                                    vec<tree> known_vals,
                                    vec<ipa_polymorphic_call_context>
                                         known_contexts,
-                                   vec<ipa_agg_jump_function_p> known_aggs,
+                                   vec<ipa_agg_value_set> known_aggs,
                                    vec<inline_param_summary>
                                         inline_param_summary)
 : m_node (node), m_possible_truths (possible_truths),
@@ -3004,7 +3282,8 @@ ipa_call_context::duplicate_from (const ipa_call_context &ctx)
   m_possible_truths = ctx.m_possible_truths;
   m_nonspec_possible_truths = ctx.m_nonspec_possible_truths;
   class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
-  unsigned int nargs = ipa_get_param_count (params_summary);
+  unsigned int nargs = params_summary
+                      ? ipa_get_param_count (params_summary) : 0;
 
   m_inline_param_summary = vNULL;
   /* Copy the info only if there is at least one useful entry.  */
@@ -3056,9 +3335,9 @@ ipa_call_context::duplicate_from (const ipa_call_context &ctx)
 
       for (unsigned int i = 0; i < n; i++)
        if (ipa_is_param_used_by_indirect_call (params_summary, i)
-           && ctx.m_known_aggs[i])
+           && !ctx.m_known_aggs[i].is_empty ())
          {
-           m_known_aggs = ctx.m_known_aggs.copy ();
+           m_known_aggs = ipa_copy_agg_values (ctx.m_known_aggs);
            break;
          }
     }
@@ -3066,7 +3345,7 @@ ipa_call_context::duplicate_from (const ipa_call_context &ctx)
 
 /* Release memory used by known_vals/contexts/aggs vectors.
    If ALL is true release also inline_param_summary.
-   This happens when context was previously duplciated to be stored
+   This happens when context was previously duplicated to be stored
    into cache.  */
 
 void
@@ -3075,11 +3354,13 @@ ipa_call_context::release (bool all)
   /* See if context is initialized at first place.  */
   if (!m_node)
     return;
-  m_known_vals.release ();
-  m_known_contexts.release ();
-  m_known_aggs.release ();
+  ipa_release_agg_values (m_known_aggs, all);
   if (all)
-    m_inline_param_summary.release ();
+    {
+      m_known_vals.release ();
+      m_known_contexts.release ();
+      m_inline_param_summary.release ();
+    }
 }
 
 /* Return true if CTX describes the same call context as THIS.  */
@@ -3093,7 +3374,8 @@ ipa_call_context::equal_to (const ipa_call_context &ctx)
     return false;
 
   class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
-  unsigned int nargs = ipa_get_param_count (params_summary);
+  unsigned int nargs = params_summary
+                      ? ipa_get_param_count (params_summary) : 0;
 
   if (m_inline_param_summary.exists () || ctx.m_inline_param_summary.exists ())
     {
@@ -3177,19 +3459,22 @@ ipa_call_context::equal_to (const ipa_call_context &ctx)
        {
          if (!ipa_is_param_used_by_indirect_call (params_summary, i))
            continue;
-         if (i >= m_known_aggs.length () || !m_known_aggs[i])
+         if (i >= m_known_aggs.length () || m_known_aggs[i].is_empty ())
            {
-             if (i < ctx.m_known_aggs.length () && ctx.m_known_aggs[i])
+             if (i < ctx.m_known_aggs.length ()
+                 && !ctx.m_known_aggs[i].is_empty ())
                return false;
              continue;
            }
-         if (i >= ctx.m_known_aggs.length () || !ctx.m_known_aggs[i])
+         if (i >= ctx.m_known_aggs.length ()
+             || ctx.m_known_aggs[i].is_empty ())
            {
-             if (i < m_known_aggs.length () && m_known_aggs[i])
+             if (i < m_known_aggs.length ()
+                 && !m_known_aggs[i].is_empty ())
                return false;
              continue;
            }
-         if (m_known_aggs[i] != ctx.m_known_aggs[i])
+         if (!m_known_aggs[i].equal_to (ctx.m_known_aggs[i]))
            return false;
        }
     }
@@ -3197,7 +3482,7 @@ ipa_call_context::equal_to (const ipa_call_context &ctx)
 }
 
 /* Estimate size and time needed to execute call in the given context.
-   Additionally detemine hints determined by the context.  Finally compute
+   Additionally determine hints determined by the context.  Finally compute
    minimal size needed for the call that is independent on the call context and
    can be used for fast estimates.  Return the values in RET_SIZE,
    RET_MIN_SIZE, RET_TIME and RET_HINTS.  */
@@ -3236,10 +3521,11 @@ ipa_call_context::estimate_size_and_time (int *ret_size,
          }
     }
 
-  estimate_calls_size_and_time (m_node, &size, &min_size,
-                               ret_time ? &time : NULL,
-                               ret_hints ? &hints : NULL, m_possible_truths,
-                               m_known_vals, m_known_contexts, m_known_aggs);
+  if (m_node->callees || m_node->indirect_calls)
+    estimate_calls_size_and_time (m_node, &size, &min_size,
+                                 ret_time ? &time : NULL,
+                                 ret_hints ? &hints : NULL, m_possible_truths,
+                                 m_known_vals, m_known_contexts, m_known_aggs);
 
   sreal nonspecialized_time = time;
 
@@ -3282,7 +3568,10 @@ ipa_call_context::estimate_size_and_time (int *ret_size,
                                                m_inline_param_summary);
              gcc_checking_assert (prob >= 0);
              gcc_checking_assert (prob <= REG_BR_PROB_BASE);
-             time += e->time * prob / REG_BR_PROB_BASE;
+             if (prob == REG_BR_PROB_BASE)
+               time += e->time;
+             else
+               time += e->time * prob / REG_BR_PROB_BASE;
            }
          gcc_checking_assert (time >= 0);
         }
@@ -3297,7 +3586,7 @@ ipa_call_context::estimate_size_and_time (int *ret_size,
   gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
 
   /* Roundoff issues may make specialized time bigger than nonspecialized
-     time.  We do not really want that to happen because some heurstics
+     time.  We do not really want that to happen because some heuristics
      may get confused by seeing negative speedups.  */
   if (time > nonspecialized_time)
     time = nonspecialized_time;
@@ -3346,15 +3635,16 @@ estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
                                   vec<tree> known_vals,
                                   vec<ipa_polymorphic_call_context>
                                   known_contexts,
-                                  vec<ipa_agg_jump_function_p> known_aggs,
+                                  vec<ipa_agg_value_set> known_aggs,
                                   int *ret_size, sreal *ret_time,
                                   sreal *ret_nonspec_time,
                                   ipa_hints *hints)
 {
   clause_t clause, nonspec_clause;
 
-  evaluate_conditions_for_known_args (node, false, known_vals, known_aggs,
-                                     &clause, &nonspec_clause);
+  /* TODO: Also pass known value ranges.  */
+  evaluate_conditions_for_known_args (node, false, known_vals, vNULL,
+                                     known_aggs, &clause, &nonspec_clause);
   ipa_call_context ctx (node, clause, nonspec_clause,
                        known_vals, known_contexts,
                        known_aggs, vNULL);
@@ -3404,8 +3694,8 @@ inline_update_callee_summaries (struct cgraph_node *node, int depth)
 }
 
 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
-   When functoin A is inlined in B and A calls C with parameter that
-   changes with probability PROB1 and C is known to be passthroug
+   When function A is inlined in B and A calls C with parameter that
+   changes with probability PROB1 and C is known to be passthrough
    of argument if B that change with probability PROB2, the probability
    of change is now PROB1*PROB2.  */
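
The comment above describes how change probabilities compose across inlining: if A's argument to C changes with probability PROB1 and C merely passes through an argument of B that itself changes with probability PROB2, the combined probability is PROB1*PROB2.  These probabilities are fixed-point integers scaled by REG_BR_PROB_BASE (10000), so the product has to be rescaled; a tiny sketch of just that arithmetic, not GCC code:

#include <stdio.h>

#define PROB_BASE 10000		/* plays the role of REG_BR_PROB_BASE */

/* PROB1 and PROB2 are probabilities scaled by PROB_BASE; the probability
   that both independent "changes" happen is prob1 * prob2 / PROB_BASE.  */
static int
combine_change_prob (int prob1, int prob2)
{
  return (int) ((long long) prob1 * prob2 / PROB_BASE);
}

int
main (void)
{
  /* 50% combined with 30% gives 15%, i.e. 1500 on the PROB_BASE scale.  */
  printf ("%d\n", combine_change_prob (PROB_BASE / 2, 3 * PROB_BASE / 10));
  return 0;
}
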
 
@@ -3472,12 +3762,12 @@ remap_edge_summaries (struct cgraph_edge *inlined_edge,
   struct cgraph_edge *e, *next;
   for (e = node->callees; e; e = next)
     {
-      class ipa_call_summary *es = ipa_call_summaries->get (e);
       predicate p;
       next = e->next_callee;
 
       if (e->inline_failed)
        {
+          class ipa_call_summary *es = ipa_call_summaries->get (e);
          remap_edge_change_prob (inlined_edge, e);
 
          if (es->predicate)
@@ -3574,7 +3864,12 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
   info->fp_expressions |= callee_info->fp_expressions;
 
   if (callee_info->conds)
-    evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL, NULL);
+    {
+      auto_vec<tree, 32> known_vals;
+      auto_vec<ipa_agg_value_set, 32> known_aggs;
+      evaluate_properties_for_edge (edge, true, &clause, NULL,
+                                   &known_vals, NULL, &known_aggs);
+    }
   if (ipa_node_params_sum && callee_info->conds)
     {
       class ipa_edge_args *args = IPA_EDGE_REF (edge);
@@ -3634,7 +3929,8 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
          sreal add_time = ((sreal)e->time * freq);
          int prob = e->nonconst_predicate.probability (callee_info->conds,
                                                        clause, es->param);
-         add_time = add_time * prob / REG_BR_PROB_BASE;
+         if (prob != REG_BR_PROB_BASE)
+           add_time = add_time * prob / REG_BR_PROB_BASE;
          if (prob != REG_BR_PROB_BASE
              && dump_file && (dump_flags & TDF_DETAILS))
            {
@@ -3661,6 +3957,21 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
     info->estimated_stack_size = peak;
 
   inline_update_callee_summaries (edge->callee, es->loop_depth);
+  if (info->call_size_time_table)
+    {
+      int edge_size = 0;
+      sreal edge_time = 0;
+
+      estimate_edge_size_and_time (edge, &edge_size, NULL, &edge_time, vNULL,
+                                  vNULL, vNULL, 0);
+      /* Unaccount size and time of the optimized out call.  */
+      info->account_size_time (-edge_size, -edge_time,
+                              es->predicate ? *es->predicate : true,
+                              es->predicate ? *es->predicate : true,
+                              true);
+      /* Account new calls.  */
+      summarize_calls_size_and_time (edge->callee, info);
+    }
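
The block above keeps the cached call_size_time_table consistent across inlining instead of discarding it: the inlined edge's own size and time are accounted with a negative sign (the reason account_size_time now accepts negative time for the call table), and the calls exposed from the inlined callee are then summarized into the caller's table.  A small sketch of that incremental update, with invented names and a single aggregate instead of a predicate-keyed table:

#include <stdio.h>

struct cost { int size; double time; };

/* Remove an inlined call's contribution from a cached summary and add the
   costs of the calls it exposes, instead of rebuilding from scratch.  */
static void
merge_after_inlining (struct cost *summary, struct cost inlined_call,
                      const struct cost *callee_calls, int n_callee_calls)
{
  summary->size -= inlined_call.size;	/* unaccount the removed call */
  summary->time -= inlined_call.time;
  if (summary->time < 0)		/* tolerate roundoff, as above */
    summary->time = 0;
  for (int i = 0; i < n_callee_calls; i++)	/* account exposed calls */
    {
      summary->size += callee_calls[i].size;
      summary->time += callee_calls[i].time;
    }
}

int
main (void)
{
  struct cost summary = { 10, 4.0 };
  struct cost inlined = { 4, 1.5 };
  struct cost callee_calls[2] = { { 2, 0.5 }, { 3, 1.0 } };

  merge_after_inlining (&summary, inlined, callee_calls, 2);
  printf ("size=%d time=%g\n", summary.size, summary.time);	/* 11, 4 */
  return 0;
}
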
 
   /* Free summaries that are not maintained for inline clones/edges.  */
   ipa_call_summaries->remove (edge);
@@ -3669,10 +3980,11 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
 }
 
 /* For performance reasons ipa_merge_fn_summary_after_inlining is not updating
-   overall size and time.  Recompute it.  */
+   overall size and time.  Recompute it.
+   If RESET is true also recompute call_size_time_table.  */
 
 void
-ipa_update_overall_fn_summary (struct cgraph_node *node)
+ipa_update_overall_fn_summary (struct cgraph_node *node, bool reset)
 {
   class ipa_fn_summary *info = ipa_fn_summaries->get (node);
   class ipa_size_summary *size_info = ipa_size_summaries->get (node);
@@ -3687,10 +3999,13 @@ ipa_update_overall_fn_summary (struct cgraph_node *node)
       info->time += e->time;
     }
   info->min_size = (*info->size_time_table)[0].size;
-  estimate_calls_size_and_time (node, &size_info->size, &info->min_size,
-                               &info->time, NULL,
-                               ~(clause_t) (1 << predicate::false_condition),
-                               vNULL, vNULL, vNULL);
+  if (reset)
+    vec_free (info->call_size_time_table);
+  if (node->callees || node->indirect_calls)
+    estimate_calls_size_and_time (node, &size_info->size, &info->min_size,
+                                 &info->time, NULL,
+                                 ~(clause_t) (1 << predicate::false_condition),
+                                 vNULL, vNULL, vNULL);
   size_info->size = RDIV (size_info->size, ipa_fn_summary::size_scale);
   info->min_size = RDIV (info->min_size, ipa_fn_summary::size_scale);
 }
@@ -4048,24 +4363,24 @@ static void
 ipa_fn_summary_write (void)
 {
   struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
+  lto_symtab_encoder_iterator lsei;
   lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
   unsigned int count = 0;
-  int i;
 
-  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
+  for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
+       lsei_next_function_in_partition (&lsei))
     {
-      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
-      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
-      if (cnode && cnode->definition && !cnode->alias)
+      cgraph_node *cnode = lsei_cgraph_node (lsei);
+      if (cnode->definition && !cnode->alias)
        count++;
     }
   streamer_write_uhwi (ob, count);
 
-  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
+  for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
+       lsei_next_function_in_partition (&lsei))
     {
-      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
-      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
-      if (cnode && cnode->definition && !cnode->alias)
+      cgraph_node *cnode = lsei_cgraph_node (lsei);
+      if (cnode->definition && !cnode->alias)
        {
          class ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
          class ipa_size_summary *size_info = ipa_size_summaries->get (cnode);