]> git.ipfire.org Git - thirdparty/gcc.git/blobdiff - gcc/tree-inline.c
Correct a function pre/postcondition [PR102403].
[thirdparty/gcc.git] / gcc / tree-inline.c
index 2b8b9ee58c196c4658c7e052e94a09436154d314..5e50e8013e2af3b1ab6f9b45f6421a8933f23dc2 100644 (file)
@@ -1,5 +1,5 @@
 /* Tree inlining.
-   Copyright (C) 2001-2019 Free Software Foundation, Inc.
+   Copyright (C) 2001-2021 Free Software Foundation, Inc.
    Contributed by Alexandre Oliva <aoliva@redhat.com>
 
 This file is part of GCC.
@@ -53,7 +53,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree-ssa.h"
 #include "except.h"
 #include "debug.h"
-#include "params.h"
 #include "value-prof.h"
 #include "cfgloop.h"
 #include "builtins.h"
@@ -62,6 +61,10 @@ along with GCC; see the file COPYING3.  If not see
 #include "sreal.h"
 #include "tree-cfgcleanup.h"
 #include "tree-ssa-live.h"
+#include "alloc-pool.h"
+#include "symbol-summary.h"
+#include "symtab-thunks.h"
+#include "symtab-clones.h"
 
 /* I'm not real happy about this, but we need to handle gimple and
    non-gimple trees.  */
@@ -149,30 +152,6 @@ insert_decl_map (copy_body_data *id, tree key, tree value)
     id->decl_map->put (value, value);
 }
 
-/* Insert a tree->tree mapping for ID.  This is only used for
-   variables.  */
-
-static void
-insert_debug_decl_map (copy_body_data *id, tree key, tree value)
-{
-  if (!gimple_in_ssa_p (id->src_cfun))
-    return;
-
-  if (!opt_for_fn (id->dst_fn, flag_var_tracking_assignments))
-    return;
-
-  if (!target_for_debug_bind (key))
-    return;
-
-  gcc_assert (TREE_CODE (key) == PARM_DECL);
-  gcc_assert (VAR_P (value));
-
-  if (!id->debug_map)
-    id->debug_map = new hash_map<tree, tree>;
-
-  id->debug_map->put (key, value);
-}
-
 /* If nonzero, we're remapping the contents of inlined debug
    statements.  If negative, an error has occurred, such as a
    reference to a variable that isn't available in the inlined
@@ -557,8 +536,9 @@ remap_type_1 (tree type, copy_body_data *id)
          /* For array bounds where we have decided not to copy over the bounds
             variable which isn't used in OpenMP/OpenACC region, change them to
             an uninitialized VAR_DECL temporary.  */
-         if (TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) == error_mark_node
-             && id->adjust_array_error_bounds
+         if (id->adjust_array_error_bounds
+             && TYPE_DOMAIN (new_tree)
+             && TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) == error_mark_node
              && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
            {
              tree v = create_tmp_var (TREE_TYPE (TYPE_DOMAIN (new_tree)));
@@ -1136,7 +1116,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
          *tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
          TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
          TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
-         TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+         copy_warning (*tp, old);
          if (MR_DEPENDENCE_CLIQUE (old) != 0)
            {
              MR_DEPENDENCE_CLIQUE (*tp)
@@ -1395,7 +1375,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
          *tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
          TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
          TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
-         TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+         copy_warning (*tp, old);
          if (MR_DEPENDENCE_CLIQUE (old) != 0)
            {
              MR_DEPENDENCE_CLIQUE (*tp)
@@ -1473,6 +1453,28 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
 
          *walk_subtrees = 0;
        }
+      else if (TREE_CODE (*tp) == OMP_CLAUSE
+              && (OMP_CLAUSE_CODE (*tp) == OMP_CLAUSE_AFFINITY
+                  || OMP_CLAUSE_CODE (*tp) == OMP_CLAUSE_DEPEND))
+       {
+         tree t = OMP_CLAUSE_DECL (*tp);
+         if (t
+             && TREE_CODE (t) == TREE_LIST
+             && TREE_PURPOSE (t)
+             && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+           {
+             *walk_subtrees = 0;
+             OMP_CLAUSE_DECL (*tp) = copy_node (t);
+             t = OMP_CLAUSE_DECL (*tp);
+             TREE_PURPOSE (t) = copy_node (TREE_PURPOSE (t));
+             for (int i = 0; i <= 4; i++)
+               walk_tree (&TREE_VEC_ELT (TREE_PURPOSE (t), i),
+                          copy_tree_body_r, id, NULL);
+             if (TREE_VEC_ELT (TREE_PURPOSE (t), 5))
+               remap_block (&TREE_VEC_ELT (TREE_PURPOSE (t), 5), id);
+             walk_tree (&TREE_VALUE (t), copy_tree_body_r, id, NULL);
+           }
+       }
     }
 
   /* Keep iterating.  */
@@ -1524,6 +1526,11 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
          : !opt_for_fn (id->dst_fn, flag_var_tracking_assignments)))
     return NULL;
 
+  if (!is_gimple_debug (stmt)
+      && id->param_body_adjs
+      && id->param_body_adjs->m_dead_stmts.contains (stmt))
+    return NULL;
+
   /* Begin by recognizing trees that we'll completely rewrite for the
      inlining context.  Our output for these trees is completely
      different from our input (e.g. RETURN_EXPR is deleted and morphs
@@ -1542,9 +1549,12 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
         assignment to the equivalent of the original RESULT_DECL.
         If RETVAL is just the result decl, the result decl has
         already been set (e.g. a recent "foo (&result_decl, ...)");
-        just toss the entire GIMPLE_RETURN.  */
+        just toss the entire GIMPLE_RETURN.  Likewise for when the
+        call doesn't want the return value.  */
       if (retval
          && (TREE_CODE (retval) != RESULT_DECL
+             && (!id->call_stmt
+                 || gimple_call_lhs (id->call_stmt) != NULL_TREE)
              && (TREE_CODE (retval) != SSA_NAME
                  || ! SSA_NAME_VAR (retval)
                  || TREE_CODE (SSA_NAME_VAR (retval)) != RESULT_DECL)))
@@ -1648,6 +1658,18 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
          copy = gimple_build_omp_master (s1);
          break;
 
+       case GIMPLE_OMP_MASKED:
+         s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+         copy = gimple_build_omp_masked
+                  (s1, gimple_omp_masked_clauses (stmt));
+         break;
+
+       case GIMPLE_OMP_SCOPE:
+         s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+         copy = gimple_build_omp_scope
+                  (s1, gimple_omp_scope_clauses (stmt));
+         break;
+
        case GIMPLE_OMP_TASKGROUP:
          s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
          copy = gimple_build_omp_taskgroup
@@ -1785,10 +1807,15 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
 
       if (gimple_debug_bind_p (stmt))
        {
+         tree value;
+         if (id->param_body_adjs
+             && id->param_body_adjs->m_dead_stmts.contains (stmt))
+           value = NULL_TREE;
+         else
+           value = gimple_debug_bind_get_value (stmt);
          gdebug *copy
            = gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
-                                      gimple_debug_bind_get_value (stmt),
-                                      stmt);
+                                      value, stmt);
          if (id->reset_location)
            gimple_set_location (copy, input_location);
          id->debug_stmts.safe_push (copy);
@@ -1812,12 +1839,11 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
          /* If the inlined function has too many debug markers,
             don't copy them.  */
          if (id->src_cfun->debug_marker_count
-             > PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+             > param_max_debug_marker_count
+             || id->reset_location)
            return stmts;
 
          gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
-         if (id->reset_location)
-           gimple_set_location (copy, input_location);
          id->debug_stmts.safe_push (copy);
          gimple_seq_add_stmt (&stmts, copy);
          return stmts;
@@ -1918,7 +1944,7 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
   if (id->param_body_adjs)
     {
       gimple_seq extra_stmts = NULL;
-      id->param_body_adjs->modify_gimple_stmt (&copy, &extra_stmts);
+      id->param_body_adjs->modify_gimple_stmt (&copy, &extra_stmts, stmt);
       if (!gimple_seq_empty_p (extra_stmts))
        {
          memset (&wi, 0, sizeof (wi));
@@ -1953,6 +1979,26 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
       gimple_set_vuse (copy, NULL_TREE);
     }
 
+  if (cfun->can_throw_non_call_exceptions)
+    {
+      /* When inlining a function which does not have non-call exceptions
+        enabled into a function that has (which only happens with
+        always-inline) we have to fix up stmts that cannot throw.  */
+      if (gcond *cond = dyn_cast <gcond *> (copy))
+       if (gimple_could_trap_p (cond))
+         {
+           gassign *cmp
+             = gimple_build_assign (make_ssa_name (boolean_type_node),
+                                    gimple_cond_code (cond),
+                                    gimple_cond_lhs (cond),
+                                    gimple_cond_rhs (cond));
+           gimple_seq_add_stmt (&stmts, cmp);
+           gimple_cond_set_code (cond, NE_EXPR);
+           gimple_cond_set_lhs (cond, gimple_assign_lhs (cmp));
+           gimple_cond_set_rhs (cond, boolean_false_node);
+         }
+    }
+
   gimple_seq_add_stmt (&stmts, copy);
   return stmts;
 }
@@ -2067,27 +2113,29 @@ copy_bb (copy_body_data *id, basic_block bb,
              tree p;
              gcall *new_call;
              vec<tree> argarray;
-             size_t nargs = gimple_call_num_args (id->call_stmt);
-             size_t n;
+             size_t nargs_caller = gimple_call_num_args (id->call_stmt);
+             size_t nargs = nargs_caller;
 
              for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
                nargs--;
 
              /* Create the new array of arguments.  */
-             n = nargs + gimple_call_num_args (call_stmt);
+             size_t nargs_callee = gimple_call_num_args (call_stmt);
+             size_t n = nargs + nargs_callee;
              argarray.create (n);
-             argarray.safe_grow_cleared (n);
+             argarray.safe_grow_cleared (n, true);
 
              /* Copy all the arguments before '...'  */
-             memcpy (argarray.address (),
-                     gimple_call_arg_ptr (call_stmt, 0),
-                     gimple_call_num_args (call_stmt) * sizeof (tree));
+             if (nargs_callee)
+               memcpy (argarray.address (),
+                       gimple_call_arg_ptr (call_stmt, 0),
+                       nargs_callee * sizeof (tree));
 
              /* Append the arguments passed in '...'  */
-             memcpy (argarray.address () + gimple_call_num_args (call_stmt),
-                     gimple_call_arg_ptr (id->call_stmt, 0)
-                     + (gimple_call_num_args (id->call_stmt) - nargs),
-                     nargs * sizeof (tree));
+             if (nargs)
+               memcpy (argarray.address () + nargs_callee,
+                       gimple_call_arg_ptr (id->call_stmt, 0)
+                       + (nargs_caller - nargs), nargs * sizeof (tree));
 
              new_call = gimple_build_call_vec (gimple_call_fn (call_stmt),
                                                argarray);
@@ -2098,6 +2146,7 @@ copy_bb (copy_body_data *id, basic_block bb,
                 GF_CALL_VA_ARG_PACK.  */
              gimple_call_copy_flags (new_call, call_stmt);
              gimple_call_set_va_arg_pack (new_call, false);
+             gimple_call_set_fntype (new_call, gimple_call_fntype (call_stmt));
              /* location includes block.  */
              gimple_set_location (new_call, gimple_location (stmt));
              gimple_call_set_lhs (new_call, gimple_call_lhs (call_stmt));
@@ -2179,38 +2228,73 @@ copy_bb (copy_body_data *id, basic_block bb,
                  if (edge)
                    {
                      struct cgraph_edge *old_edge = edge;
-                     profile_count old_cnt = edge->count;
-                     edge = edge->clone (id->dst_node, call_stmt,
-                                         gimple_uid (stmt),
-                                         num, den,
-                                         true);
-
-                     /* Speculative calls consist of two edges - direct and
-                        indirect.  Duplicate the whole thing and distribute
-                        frequencies accordingly.  */
+
+                     /* A speculative call consists of multiple edges - an
+                        indirect edge and one or more direct edges.
+                        Duplicate the whole thing and distribute frequencies
+                        accordingly.  */
                      if (edge->speculative)
                        {
-                         struct cgraph_edge *direct, *indirect;
-                         struct ipa_ref *ref;
+                         int n = 0;
+                         profile_count direct_cnt
+                                = profile_count::zero ();
+
+                         /* First figure out the distribution of counts
+                            so we can re-scale BB profile accordingly.  */
+                         for (cgraph_edge *e = old_edge; e;
+                              e = e->next_speculative_call_target ())
+                           direct_cnt = direct_cnt + e->count;
+
+                         cgraph_edge *indirect
+                                = old_edge->speculative_call_indirect_edge ();
+                         profile_count indir_cnt = indirect->count;
 
-                         gcc_assert (!edge->indirect_unknown_callee);
-                         old_edge->speculative_call_info (direct, indirect, ref);
+                         /* Next iterate all direct edges, clone it and its
+                            corresponding reference and update profile.  */
+                         for (cgraph_edge *e = old_edge;
+                              e;
+                              e = e->next_speculative_call_target ())
+                           {
+                             profile_count cnt = e->count;
+
+                             id->dst_node->clone_reference
+                                (e->speculative_call_target_ref (), stmt);
+                             edge = e->clone (id->dst_node, call_stmt,
+                                              gimple_uid (stmt), num, den,
+                                              true);
+                             profile_probability prob
+                                = cnt.probability_in (direct_cnt
+                                                      + indir_cnt);
+                             edge->count
+                                = copy_basic_block->count.apply_probability
+                                        (prob);
+                             n++;
+                           }
+                         gcc_checking_assert
+                                (indirect->num_speculative_call_targets_p ()
+                                 == n);
 
-                         profile_count indir_cnt = indirect->count;
+                         /* Duplicate the indirect edge after all direct
+                            edges are cloned.  */
                          indirect = indirect->clone (id->dst_node, call_stmt,
                                                      gimple_uid (stmt),
                                                      num, den,
                                                      true);
 
                          profile_probability prob
-                            = indir_cnt.probability_in (old_cnt + indir_cnt);
+                            = indir_cnt.probability_in (direct_cnt
+                                                        + indir_cnt);
                          indirect->count
                             = copy_basic_block->count.apply_probability (prob);
-                         edge->count = copy_basic_block->count - indirect->count;
-                         id->dst_node->clone_reference (ref, stmt);
                        }
                      else
-                       edge->count = copy_basic_block->count;
+                       {
+                         edge = edge->clone (id->dst_node, call_stmt,
+                                             gimple_uid (stmt),
+                                             num, den,
+                                             true);
+                         edge->count = copy_basic_block->count;
+                       }
                    }
                  break;
 
@@ -2223,7 +2307,7 @@ copy_bb (copy_body_data *id, basic_block bb,
                case CB_CGE_MOVE:
                  edge = id->dst_node->get_edge (orig_stmt);
                  if (edge)
-                   edge->set_call_stmt (call_stmt);
+                   edge = cgraph_edge::set_call_stmt (edge, call_stmt);
                  break;
 
                default:
@@ -2261,7 +2345,7 @@ copy_bb (copy_body_data *id, basic_block bb,
                  if (dump_file)
                    {
                      fprintf (dump_file, "Created new direct edge to %s\n",
-                              dest->name ());
+                              dest->dump_name ());
                    }
                }
 
@@ -2613,7 +2697,9 @@ copy_phis_for_bb (basic_block bb, copy_body_data *id)
       phi = si.phi ();
       res = PHI_RESULT (phi);
       new_res = res;
-      if (!virtual_operand_p (res))
+      if (!virtual_operand_p (res)
+         && (!id->param_body_adjs
+             || !id->param_body_adjs->m_dead_stmts.contains (phi)))
        {
          walk_tree (&new_res, copy_tree_body_r, id, NULL);
          if (EDGE_COUNT (new_bb->preds) == 0)
@@ -2794,7 +2880,7 @@ maybe_move_debug_stmts_to_successors (copy_body_data *id, basic_block new_bb)
                  gimple_set_location (stmt, UNKNOWN_LOCATION);
                }
              gsi_remove (&si, false);
-             gsi_insert_before (&dsi, stmt, GSI_SAME_STMT);
+             gsi_insert_before (&dsi, stmt, GSI_NEW_STMT);
              continue;
            }
 
@@ -2820,7 +2906,7 @@ maybe_move_debug_stmts_to_successors (copy_body_data *id, basic_block new_bb)
            new_stmt = as_a <gdebug *> (gimple_copy (stmt));
          else
            gcc_unreachable ();
-         gsi_insert_before (&dsi, new_stmt, GSI_SAME_STMT);
+         gsi_insert_before (&dsi, new_stmt, GSI_NEW_STMT);
          id->debug_stmts.safe_push (new_stmt);
          gsi_prev (&ssi);
        }
@@ -2897,7 +2983,8 @@ redirect_all_calls (copy_body_data * id, basic_block bb)
          struct cgraph_edge *edge = id->dst_node->get_edge (stmt);
          if (edge)
            {
-             gimple *new_stmt = edge->redirect_call_stmt_to_callee ();
+             gimple *new_stmt
+               = cgraph_edge::redirect_call_stmt_to_callee (edge);
              /* If IPA-SRA transformation, run as part of edge redirection,
                 removed the LHS because it is unused, save it to
                 killed_new_ssa_names so that we can prune it from debug
@@ -3106,7 +3193,14 @@ copy_debug_stmt (gdebug *stmt, copy_body_data *id)
     }
 
   if (gimple_debug_nonbind_marker_p (stmt))
-    return;
+    {
+      if (id->call_stmt && !gimple_block (stmt))
+       {
+         gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+         gsi_remove (&gsi, true);
+       }
+      return;
+    }
 
   /* Remap all the operands in COPY.  */
   memset (&wi, 0, sizeof (wi));
@@ -3121,7 +3215,8 @@ copy_debug_stmt (gdebug *stmt, copy_body_data *id)
   else
     gcc_unreachable ();
 
-  if (TREE_CODE (t) == PARM_DECL && id->debug_map
+  if (TREE_CODE (t) == PARM_DECL
+      && id->debug_map
       && (n = id->debug_map->get (t)))
     {
       gcc_assert (VAR_P (*n));
@@ -3189,13 +3284,10 @@ copy_debug_stmt (gdebug *stmt, copy_body_data *id)
 static void
 copy_debug_stmts (copy_body_data *id)
 {
-  size_t i;
-  gdebug *stmt;
-
   if (!id->debug_stmts.exists ())
     return;
 
-  FOR_EACH_VEC_ELT (id->debug_stmts, i, stmt)
+  for (gdebug *stmt : id->debug_stmts)
     copy_debug_stmt (stmt, id);
 
   id->debug_stmts.release ();
@@ -3283,7 +3375,10 @@ insert_init_debug_bind (copy_body_data *id,
        base_stmt = gsi_stmt (gsi);
     }
 
-  note = gimple_build_debug_bind (tracked_var, unshare_expr (value), base_stmt);
+  note = gimple_build_debug_bind (tracked_var,
+                                 value == error_mark_node
+                                 ? NULL_TREE : unshare_expr (value),
+                                 base_stmt);
 
   if (bb)
     {
@@ -3315,7 +3410,7 @@ insert_init_stmt (copy_body_data *id, basic_block bb, gimple *init_stmt)
          && gimple_assign_rhs_class (init_stmt) == GIMPLE_UNARY_RHS)
        {
          tree rhs = build1 (gimple_assign_rhs_code (init_stmt),
-                            gimple_expr_type (init_stmt),
+                            TREE_TYPE (gimple_assign_lhs (init_stmt)),
                             gimple_assign_rhs1 (init_stmt));
          rhs = force_gimple_operand_gsi (&si, rhs, true, NULL_TREE, false,
                                          GSI_NEW_STMT);
@@ -3323,10 +3418,10 @@ insert_init_stmt (copy_body_data *id, basic_block bb, gimple *init_stmt)
          gimple_assign_set_rhs1 (init_stmt, rhs);
        }
       gsi_insert_after (&si, init_stmt, GSI_NEW_STMT);
-      gimple_regimplify_operands (init_stmt, &si);
-
       if (!is_gimple_debug (init_stmt))
        {
+         gimple_regimplify_operands (init_stmt, &si);
+
          tree def = gimple_assign_lhs (init_stmt);
          insert_init_debug_bind (id, bb, def, def, init_stmt);
        }
@@ -3349,7 +3444,9 @@ force_value_to_type (tree type, tree value)
      Still if we end up with truly mismatched types here, fall back
      to using a VIEW_CONVERT_EXPR or a literal zero to not leak invalid
      GIMPLE to the following passes.  */
-  if (!is_gimple_reg_type (TREE_TYPE (value))
+  if (TREE_CODE (value) == WITH_SIZE_EXPR)
+    return error_mark_node;
+  else if (!is_gimple_reg_type (TREE_TYPE (value))
           || TYPE_SIZE (type) == TYPE_SIZE (TREE_TYPE (value)))
     return fold_build1 (VIEW_CONVERT_EXPR, type, value);
   else
@@ -3365,15 +3462,9 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
 {
   gimple *init_stmt = NULL;
   tree var;
-  tree rhs = value;
   tree def = (gimple_in_ssa_p (cfun)
              ? ssa_default_def (id->src_cfun, p) : NULL);
 
-  if (value
-      && value != error_mark_node
-      && !useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value)))
-    rhs = force_value_to_type (TREE_TYPE (p), value);
-
   /* Make an equivalent VAR_DECL.  Note that we must NOT remap the type
      here since the type of this decl must be visible to the calling
      function.  */
@@ -3392,16 +3483,18 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
      value.  */
   if (TREE_READONLY (p)
       && !TREE_ADDRESSABLE (p)
-      && value && !TREE_SIDE_EFFECTS (value)
+      && value
+      && !TREE_SIDE_EFFECTS (value)
       && !def)
     {
-      /* We may produce non-gimple trees by adding NOPs or introduce
-        invalid sharing when operand is not really constant.
-        It is not big deal to prohibit constant propagation here as
-        we will constant propagate in DOM1 pass anyway.  */
-      if (is_gimple_min_invariant (value)
-         && useless_type_conversion_p (TREE_TYPE (p),
-                                                TREE_TYPE (value))
+      /* We may produce non-gimple trees by adding NOPs or introduce invalid
+        sharing when the value is not constant or DECL.  And we need to make
+        sure that it cannot be modified from another path in the callee.  */
+      if ((is_gimple_min_invariant (value)
+          || (DECL_P (value) && TREE_READONLY (value))
+          || (auto_var_in_fn_p (value, id->dst_fn)
+              && !TREE_ADDRESSABLE (value)))
+         && useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value))
          /* We have to be very careful about ADDR_EXPR.  Make sure
             the base variable isn't a local variable of the inlined
             function, e.g., when doing recursive inlining, direct or
@@ -3410,7 +3503,9 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
          && ! self_inlining_addr_expr (value, fn))
        {
          insert_decl_map (id, p, value);
-         insert_debug_decl_map (id, p, var);
+         if (!id->debug_map)
+           id->debug_map = new hash_map<tree, tree>;
+         id->debug_map->put (p, var);
          return insert_init_debug_bind (id, bb, var, value, NULL);
        }
     }
@@ -3420,17 +3515,17 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
      automatically replaced by the VAR_DECL.  */
   insert_decl_map (id, p, var);
 
-  /* Even if P was TREE_READONLY, the new VAR should not be.
-     In the original code, we would have constructed a
-     temporary, and then the function body would have never
-     changed the value of P.  However, now, we will be
-     constructing VAR directly.  The constructor body may
-     change its value multiple times as it is being
-     constructed.  Therefore, it must not be TREE_READONLY;
-     the back-end assumes that TREE_READONLY variable is
-     assigned to only once.  */
-  if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (p)))
-    TREE_READONLY (var) = 0;
+  /* Even if P was TREE_READONLY, the new VAR should not be.  In the original
+     code, we would have constructed a temporary, and then the function body
+     would have never changed the value of P.  However, now, we will be
+     constructing VAR directly.  Therefore, it must not be TREE_READONLY.  */
+  TREE_READONLY (var) = 0;
+
+  tree rhs = value;
+  if (value
+      && value != error_mark_node
+      && !useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value)))
+    rhs = force_value_to_type (TREE_TYPE (p), value);
 
   /* If there is no setup required and we are in SSA, take the easy route
      replacing all SSA names representing the function parameter by the
@@ -3593,7 +3688,9 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
      vs. the call expression.  */
   if (modify_dest)
     caller_type = TREE_TYPE (modify_dest);
-  else
+  else if (return_slot)
+    caller_type = TREE_TYPE (return_slot);
+  else /* No LHS on the call.  */
     caller_type = TREE_TYPE (TREE_TYPE (callee));
 
   /* We don't need to do anything for functions that don't return anything.  */
@@ -3629,11 +3726,13 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
          if (TREE_ADDRESSABLE (result))
            mark_addressable (var);
        }
-      if ((TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
-           || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
-         && !DECL_GIMPLE_REG_P (result)
+      if (DECL_NOT_GIMPLE_REG_P (result)
          && DECL_P (var))
-       DECL_GIMPLE_REG_P (var) = 0;
+       DECL_NOT_GIMPLE_REG_P (var) = 1;
+
+      if (!useless_type_conversion_p (callee_type, caller_type))
+       var = build1 (VIEW_CONVERT_EXPR, callee_type, var);
+
       use = NULL;
       goto done;
     }
@@ -3654,7 +3753,7 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
       /* ??? If we're assigning to a variable sized type, then we must
         reuse the destination variable, because we've no good way to
         create variable sized temporaries at this point.  */
-      else if (TREE_CODE (TYPE_SIZE_UNIT (caller_type)) != INTEGER_CST)
+      else if (!poly_int_tree_p (TYPE_SIZE_UNIT (caller_type)))
        use_it = true;
 
       /* If the callee cannot possibly modify MODIFY_DEST, then we can
@@ -3672,10 +3771,8 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
            use_it = false;
          else if (is_global_var (base_m))
            use_it = false;
-         else if ((TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
-                   || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
-                  && !DECL_GIMPLE_REG_P (result)
-                  && DECL_GIMPLE_REG_P (base_m))
+         else if (DECL_NOT_GIMPLE_REG_P (result)
+                  && !DECL_NOT_GIMPLE_REG_P (base_m))
            use_it = false;
          else if (!TREE_ADDRESSABLE (base_m))
            use_it = true;
@@ -3689,14 +3786,14 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
        }
     }
 
-  gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (callee_type)) == INTEGER_CST);
+  gcc_assert (poly_int_tree_p (TYPE_SIZE_UNIT (callee_type)));
 
   var = copy_result_decl_to_var (result, id);
   DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
 
   /* Do not have the rest of GCC warn about this variable as it should
      not be visible to the user.  */
-  TREE_NO_WARNING (var) = 1;
+  suppress_warning (var /* OPT_Wuninitialized? */);
 
   declare_inline_vars (id->block, var);
 
@@ -3715,11 +3812,8 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
             to using a MEM_REF to not leak invalid GIMPLE to the following
             passes.  */
          /* Prevent var from being written into SSA form.  */
-         if (TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE
-             || TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE)
-           DECL_GIMPLE_REG_P (var) = false;
-         else if (is_gimple_reg_type (TREE_TYPE (var)))
-           TREE_ADDRESSABLE (var) = true;
+         if (is_gimple_reg_type (TREE_TYPE (var)))
+           DECL_NOT_GIMPLE_REG_P (var) = true;
          use = fold_build2 (MEM_REF, caller_type,
                             build_fold_addr_expr (var),
                             build_int_cst (ptr_type_node, 0));
@@ -3949,6 +4043,19 @@ inline_forbidden_p (tree fndecl)
   wi.info = (void *) fndecl;
   wi.pset = &visited_nodes;
 
+  /* We cannot inline a function with a variable-sized parameter because we
+     cannot materialize a temporary of such a type in the caller if need be.
+     Note that the return case is not symmetrical because we can guarantee
+     that a temporary is not needed by means of CALL_EXPR_RETURN_SLOT_OPT.  */
+  for (tree parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+    if (!poly_int_tree_p (DECL_SIZE (parm)))
+      {
+       inline_forbidden_reason
+         = G_("function %q+F can never be inlined because "
+              "it has a VLA argument");
+       return true;
+      }
+
   FOR_EACH_BB_FN (bb, fun)
     {
       gimple *ret;
@@ -4001,7 +4108,7 @@ tree_inlinable_function_p (tree fn)
     return false;
 
   /* We only warn for functions declared `inline' by the user.  */
-  do_warning = (warn_inline
+  do_warning = (opt_for_fn (fn, warn_inline)
                && DECL_DECLARED_INLINE_P (fn)
                && !DECL_NO_INLINE_WARNING_P (fn)
                && !DECL_IN_SYSTEM_HEADER (fn));
@@ -4162,6 +4269,8 @@ estimate_operator_cost (enum tree_code code, eni_weights *weights,
 
     case REALIGN_LOAD_EXPR:
 
+    case WIDEN_PLUS_EXPR:
+    case WIDEN_MINUS_EXPR:
     case WIDEN_SUM_EXPR:
     case WIDEN_MULT_EXPR:
     case DOT_PROD_EXPR:
@@ -4170,6 +4279,10 @@ estimate_operator_cost (enum tree_code code, eni_weights *weights,
     case WIDEN_MULT_MINUS_EXPR:
     case WIDEN_LSHIFT_EXPR:
 
+    case VEC_WIDEN_PLUS_HI_EXPR:
+    case VEC_WIDEN_PLUS_LO_EXPR:
+    case VEC_WIDEN_MINUS_HI_EXPR:
+    case VEC_WIDEN_MINUS_LO_EXPR:
     case VEC_WIDEN_MULT_HI_EXPR:
     case VEC_WIDEN_MULT_LO_EXPR:
     case VEC_WIDEN_MULT_EVEN_EXPR:
@@ -4323,8 +4436,8 @@ estimate_num_insns (gimple *stmt, eni_weights *weights)
            /* Do not special case builtins where we see the body.
               This just confuses the inliner.  */
            struct cgraph_node *node;
-           if (!(node = cgraph_node::get (decl))
-               || node->definition)
+           if ((node = cgraph_node::get (decl))
+               && node->definition)
              ;
            /* For builtins that are likely expanded to nothing or
               inlined do not account operand costs.  */
@@ -4443,6 +4556,8 @@ estimate_num_insns (gimple *stmt, eni_weights *weights)
     case GIMPLE_OMP_TASK:
     case GIMPLE_OMP_CRITICAL:
     case GIMPLE_OMP_MASTER:
+    case GIMPLE_OMP_MASKED:
+    case GIMPLE_OMP_SCOPE:
     case GIMPLE_OMP_TASKGROUP:
     case GIMPLE_OMP_ORDERED:
     case GIMPLE_OMP_SCAN:
@@ -4618,7 +4733,8 @@ reset_debug_bindings (copy_body_data *id, gimple_stmt_iterator gsi)
 /* If STMT is a GIMPLE_CALL, replace it with its inline expansion.  */
 
 static bool
-expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
+expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id,
+                   bitmap to_purge)
 {
   tree use_retvar;
   tree fn;
@@ -4705,7 +4821,7 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
            inform (DECL_SOURCE_LOCATION (cfun->decl),
                    "called from this function");
        }
-      else if (warn_inline
+      else if (opt_for_fn (fn, warn_inline)
               && DECL_DECLARED_INLINE_P (fn)
               && !DECL_NO_INLINE_WARNING_P (fn)
               && !DECL_IN_SYSTEM_HEADER (fn)
@@ -4733,39 +4849,42 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
 
   /* If callee is thunk, all we need is to adjust the THIS pointer
      and redirect to function being thunked.  */
-  if (id->src_node->thunk.thunk_p)
+  if (id->src_node->thunk)
     {
       cgraph_edge *edge;
       tree virtual_offset = NULL;
       profile_count count = cg_edge->count;
       tree op;
       gimple_stmt_iterator iter = gsi_for_stmt (stmt);
+      thunk_info *info = thunk_info::get (id->src_node);
 
-      cg_edge->remove ();
+      cgraph_edge::remove (cg_edge);
       edge = id->src_node->callees->clone (id->dst_node, call_stmt,
                                           gimple_uid (stmt),
                                           profile_count::one (),
                                           profile_count::one (),
                                           true);
       edge->count = count;
-      if (id->src_node->thunk.virtual_offset_p)
-        virtual_offset = size_int (id->src_node->thunk.virtual_value);
+      if (info->virtual_offset_p)
+       virtual_offset = size_int (info->virtual_value);
       op = create_tmp_reg_fn (cfun, TREE_TYPE (gimple_call_arg (stmt, 0)),
                              NULL);
       gsi_insert_before (&iter, gimple_build_assign (op,
                                                    gimple_call_arg (stmt, 0)),
                         GSI_NEW_STMT);
-      gcc_assert (id->src_node->thunk.this_adjusting);
-      op = thunk_adjust (&iter, op, 1, id->src_node->thunk.fixed_offset,
-                        virtual_offset, id->src_node->thunk.indirect_offset);
+      gcc_assert (info->this_adjusting);
+      op = thunk_adjust (&iter, op, 1, info->fixed_offset,
+                        virtual_offset, info->indirect_offset);
 
       gimple_call_set_arg (stmt, 0, op);
       gimple_call_set_fndecl (stmt, edge->callee->decl);
       update_stmt (stmt);
       id->src_node->remove ();
-      expand_call_inline (bb, stmt, id);
+      successfully_inlined = expand_call_inline (bb, stmt, id, to_purge);
       maybe_remove_unused_call_args (cfun, stmt);
-      return true;
+      /* This used to return true even though we do fail to inline in
+        some cases.  See PR98525.  */
+      goto egress;
     }
   fn = cg_edge->callee->decl;
   cg_edge->callee->get_untransformed_body ();
@@ -4861,6 +4980,8 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
   if (src_properties != prop_mask)
     dst_cfun->curr_properties &= src_properties | ~prop_mask;
   dst_cfun->calls_eh_return |= id->src_cfun->calls_eh_return;
+  id->dst_node->calls_declare_variant_alt
+    |= id->src_node->calls_declare_variant_alt;
 
   gcc_assert (!id->src_cfun->after_inlining);
 
@@ -4931,7 +5052,7 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
         initialized.  We do not want to issue a warning about that
         uninitialized variable.  */
       if (DECL_P (modify_dest))
-       TREE_NO_WARNING (modify_dest) = 1;
+       suppress_warning (modify_dest, OPT_Wuninitialized);
 
       if (gimple_call_return_slot_opt_p (call_stmt))
        {
@@ -4959,38 +5080,6 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
   /* Add local vars in this inlined callee to caller.  */
   add_local_variables (id->src_cfun, cfun, id);
 
-  if (id->src_node->clone.performed_splits)
-    {
-      /* Any calls from the inlined function will be turned into calls from the
-        function we inline into.  We must preserve notes about how to split
-        parameters such calls should be redirected/updated.  */
-      unsigned len = vec_safe_length (id->src_node->clone.performed_splits);
-      for (unsigned i = 0; i < len; i++)
-       {
-         ipa_param_performed_split ps
-           = (*id->src_node->clone.performed_splits)[i];
-         ps.dummy_decl = remap_decl (ps.dummy_decl, id);
-         vec_safe_push (id->dst_node->clone.performed_splits, ps);
-       }
-
-      if (flag_checking)
-       {
-         len = vec_safe_length (id->dst_node->clone.performed_splits);
-         for (unsigned i = 0; i < len; i++)
-           {
-             ipa_param_performed_split *ps1
-               = &(*id->dst_node->clone.performed_splits)[i];
-             for (unsigned j = i + 1; j < len; j++)
-               {
-                 ipa_param_performed_split *ps2
-                   = &(*id->dst_node->clone.performed_splits)[j];
-                 gcc_assert (ps1->dummy_decl != ps2->dummy_decl
-                             || ps1->unit_offset != ps2->unit_offset);
-               }
-           }
-       }
-    }
-
   if (dump_enabled_p ())
     {
       char buf[128];
@@ -5020,8 +5109,13 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
     for (tree p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
       if (!TREE_THIS_VOLATILE (p))
        {
+         /* The value associated with P is a local temporary only if
+            there is no value associated with P in the debug map.  */
          tree *varp = id->decl_map->get (p);
-         if (varp && VAR_P (*varp) && !is_gimple_reg (*varp))
+         if (varp
+             && VAR_P (*varp)
+             && !is_gimple_reg (*varp)
+             && !(id->debug_map && id->debug_map->get (p)))
            {
              tree clobber = build_clobber (TREE_TYPE (*varp));
              gimple *clobber_stmt;
@@ -5151,10 +5245,7 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
     }
 
   if (purge_dead_abnormal_edges)
-    {
-      gimple_purge_dead_eh_edges (return_block);
-      gimple_purge_dead_abnormal_call_edges (return_block);
-    }
+    bitmap_set_bit (to_purge, return_block->index);
 
   /* If the value of the new expression is ignored, that's OK.  We
      don't warn about this for CALL_EXPRs, so we shouldn't warn about
@@ -5192,7 +5283,8 @@ expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
    in a MODIFY_EXPR.  */
 
 static bool
-gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
+gimple_expand_calls_inline (basic_block bb, copy_body_data *id,
+                           bitmap to_purge)
 {
   gimple_stmt_iterator gsi;
   bool inlined = false;
@@ -5204,7 +5296,7 @@ gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
 
       if (is_gimple_call (stmt)
          && !gimple_call_internal_p (stmt))
-       inlined |= expand_call_inline (bb, stmt, id);
+       inlined |= expand_call_inline (bb, stmt, id, to_purge);
     }
 
   return inlined;
@@ -5217,87 +5309,119 @@ gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
 static void
 fold_marked_statements (int first, hash_set<gimple *> *statements)
 {
-  for (; first < last_basic_block_for_fn (cfun); first++)
-    if (BASIC_BLOCK_FOR_FN (cfun, first))
-      {
-        gimple_stmt_iterator gsi;
+  auto_bitmap to_purge;
 
-       for (gsi = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
-            !gsi_end_p (gsi);
-            gsi_next (&gsi))
-         if (statements->contains (gsi_stmt (gsi)))
-           {
-             gimple *old_stmt = gsi_stmt (gsi);
-             tree old_decl = is_gimple_call (old_stmt) ? gimple_call_fndecl (old_stmt) : 0;
+  auto_vec<edge, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
+  auto_sbitmap visited (last_basic_block_for_fn (cfun));
+  bitmap_clear (visited);
 
-             if (old_decl && fndecl_built_in_p (old_decl))
-               {
-                 /* Folding builtins can create multiple instructions,
-                    we need to look at all of them.  */
-                 gimple_stmt_iterator i2 = gsi;
-                 gsi_prev (&i2);
-                 if (fold_stmt (&gsi))
-                   {
-                     gimple *new_stmt;
-                     /* If a builtin at the end of a bb folded into nothing,
-                        the following loop won't work.  */
-                     if (gsi_end_p (gsi))
-                       {
-                         cgraph_update_edges_for_call_stmt (old_stmt,
-                                                            old_decl, NULL);
-                         break;
-                       }
-                     if (gsi_end_p (i2))
-                       i2 = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
-                     else
+  stack.quick_push (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+  while (!stack.is_empty ())
+    {
+      /* Look at the edge on the top of the stack.  */
+      edge e = stack.pop ();
+      basic_block dest = e->dest;
+
+      if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
+         || bitmap_bit_p (visited, dest->index))
+       continue;
+
+      bitmap_set_bit (visited, dest->index);
+
+      if (dest->index >= first)
+       for (gimple_stmt_iterator gsi = gsi_start_bb (dest);
+            !gsi_end_p (gsi); gsi_next (&gsi))
+         {
+           if (!statements->contains (gsi_stmt (gsi)))
+             continue;
+
+           gimple *old_stmt = gsi_stmt (gsi);
+           tree old_decl = (is_gimple_call (old_stmt)
+                            ? gimple_call_fndecl (old_stmt) : 0);
+           if (old_decl && fndecl_built_in_p (old_decl))
+             {
+               /* Folding builtins can create multiple instructions,
+                  we need to look at all of them.  */
+               gimple_stmt_iterator i2 = gsi;
+               gsi_prev (&i2);
+               if (fold_stmt (&gsi))
+                 {
+                   gimple *new_stmt;
+                   /* If a builtin at the end of a bb folded into nothing,
+                      the following loop won't work.  */
+                   if (gsi_end_p (gsi))
+                     {
+                       cgraph_update_edges_for_call_stmt (old_stmt,
+                                                          old_decl, NULL);
+                       break;
+                     }
+                   if (gsi_end_p (i2))
+                     i2 = gsi_start_bb (dest);
+                   else
+                     gsi_next (&i2);
+                   while (1)
+                     {
+                       new_stmt = gsi_stmt (i2);
+                       update_stmt (new_stmt);
+                       cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+                                                          new_stmt);
+
+                       if (new_stmt == gsi_stmt (gsi))
+                         {
+                           /* It is okay to check only for the very last
+                              of these statements.  If it is a throwing
+                              statement nothing will change.  If it isn't
+                              this can remove EH edges.  If that weren't
+                              correct then because some intermediate stmts
+                              throw, but not the last one.  That would mean
+                              we'd have to split the block, which we can't
+                              here and we'd lose anyway.  And as builtins
+                              probably never throw, this all
+                              is moot anyway.  */
+                           if (maybe_clean_or_replace_eh_stmt (old_stmt,
+                                                               new_stmt))
+                             bitmap_set_bit (to_purge, dest->index);
+                           break;
+                         }
                        gsi_next (&i2);
-                     while (1)
-                       {
-                         new_stmt = gsi_stmt (i2);
-                         update_stmt (new_stmt);
-                         cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
-                                                            new_stmt);
+                     }
+                 }
+             }
+           else if (fold_stmt (&gsi))
+             {
+               /* Re-read the statement from GSI as fold_stmt() may
+                  have changed it.  */
+               gimple *new_stmt = gsi_stmt (gsi);
+               update_stmt (new_stmt);
+
+               if (is_gimple_call (old_stmt)
+                   || is_gimple_call (new_stmt))
+                 cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+                                                    new_stmt);
+
+               if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
+                 bitmap_set_bit (to_purge, dest->index);
+             }
+         }
 
-                         if (new_stmt == gsi_stmt (gsi))
-                           {
-                             /* It is okay to check only for the very last
-                                of these statements.  If it is a throwing
-                                statement nothing will change.  If it isn't
-                                this can remove EH edges.  If that weren't
-                                correct then because some intermediate stmts
-                                throw, but not the last one.  That would mean
-                                we'd have to split the block, which we can't
-                                here and we'd loose anyway.  And as builtins
-                                probably never throw, this all
-                                is mood anyway.  */
-                             if (maybe_clean_or_replace_eh_stmt (old_stmt,
-                                                                 new_stmt))
-                               gimple_purge_dead_eh_edges (
-                                 BASIC_BLOCK_FOR_FN (cfun, first));
-                             break;
-                           }
-                         gsi_next (&i2);
-                       }
-                   }
-               }
-             else if (fold_stmt (&gsi))
-               {
-                 /* Re-read the statement from GSI as fold_stmt() may
-                    have changed it.  */
-                 gimple *new_stmt = gsi_stmt (gsi);
-                 update_stmt (new_stmt);
-
-                 if (is_gimple_call (old_stmt)
-                     || is_gimple_call (new_stmt))
-                   cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
-                                                      new_stmt);
-
-                 if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
-                   gimple_purge_dead_eh_edges (BASIC_BLOCK_FOR_FN (cfun,
-                                                                   first));
-               }
+      if (EDGE_COUNT (dest->succs) > 0)
+       {
+         /* Avoid warnings emitted from folding statements that
+            became unreachable because of inlined function parameter
+            propagation.  */
+         e = find_taken_edge (dest, NULL_TREE);
+         if (e)
+           stack.quick_push (e);
+         else
+           {
+             edge_iterator ei;
+             FOR_EACH_EDGE (e, ei, dest->succs)
+               stack.safe_push (e);
            }
-      }
+       }
+    }
+
+  gimple_purge_all_dead_eh_edges (to_purge);
 }
 
 /* Expand calls to inline functions in the body of FN.  */
@@ -5343,8 +5467,9 @@ optimize_inline_calls (tree fn)
      will split id->current_basic_block, and the new blocks will
      follow it; we'll trudge through them, processing their CALL_EXPRs
      along the way.  */
+  auto_bitmap to_purge;
   FOR_EACH_BB_FN (bb, cfun)
-    inlined_p |= gimple_expand_calls_inline (bb, &id);
+    inlined_p |= gimple_expand_calls_inline (bb, &id, to_purge);
 
   pop_gimplify_context (NULL);
 
@@ -5359,21 +5484,40 @@ optimize_inline_calls (tree fn)
        gcc_assert (e->inline_failed);
     }
 
+  /* If we didn't inline into the function there is nothing to do.  */
+  if (!inlined_p)
+    {
+      delete id.statements_to_fold;
+      return 0;
+    }
+
   /* Fold queued statements.  */
   update_max_bb_count ();
   fold_marked_statements (last, id.statements_to_fold);
   delete id.statements_to_fold;
 
-  gcc_assert (!id.debug_stmts.exists ());
+  /* Finally purge EH and abnormal edges from the call stmts we inlined.
+     We need to do this after fold_marked_statements since that may walk
+     the SSA use-def chain.  */
+  unsigned i;
+  bitmap_iterator bi;
+  EXECUTE_IF_SET_IN_BITMAP (to_purge, 0, i, bi)
+    {
+      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+      if (bb)
+       {
+         gimple_purge_dead_eh_edges (bb);
+         gimple_purge_dead_abnormal_call_edges (bb);
+       }
+    }
 
-  /* If we didn't inline into the function there is nothing to do.  */
-  if (!inlined_p)
-    return 0;
+  gcc_assert (!id.debug_stmts.exists ());
 
   /* Renumber the lexical scoping (non-code) blocks consecutively.  */
   number_blocks (fn);
 
   delete_unreachable_blocks_update_callgraph (id.dst_node, false);
+  id.dst_node->calls_comdat_local = id.dst_node->check_calls_comdat_local_p ();
 
   if (flag_checking)
     id.dst_node->verify ();
@@ -5834,7 +5978,8 @@ copy_decl_to_var (tree decl, copy_body_data *id)
   TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl);
   TREE_READONLY (copy) = TREE_READONLY (decl);
   TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (decl);
-  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (decl);
+  DECL_NOT_GIMPLE_REG_P (copy) = DECL_NOT_GIMPLE_REG_P (decl);
+  DECL_BY_REFERENCE (copy) = DECL_BY_REFERENCE (decl);
 
   return copy_decl_for_dup_finish (id, decl, copy);
 }
@@ -5863,7 +6008,12 @@ copy_result_decl_to_var (tree decl, copy_body_data *id)
   if (!DECL_BY_REFERENCE (decl))
     {
       TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl);
-      DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (decl);
+      DECL_NOT_GIMPLE_REG_P (copy)
+       = (DECL_NOT_GIMPLE_REG_P (decl)
+          /* RESULT_DECLs are treated specially by needs_to_live_in_memory,
+             mirror that to the created VAR_DECL.  */
+          || (TREE_CODE (decl) == RESULT_DECL
+              && aggregate_value_p (decl, id->src_fn)));
     }
 
   return copy_decl_for_dup_finish (id, decl, copy);
@@ -5953,79 +6103,23 @@ tree_versionable_function_p (tree fndecl)
 static void
 update_clone_info (copy_body_data * id)
 {
-  vec<ipa_param_performed_split, va_gc> *cur_performed_splits
-    = id->dst_node->clone.performed_splits;
-  if (cur_performed_splits)
-    {
-      unsigned len = cur_performed_splits->length ();
-      for (unsigned i = 0; i < len; i++)
-       {
-         ipa_param_performed_split *ps = &(*cur_performed_splits)[i];
-         ps->dummy_decl = remap_decl (ps->dummy_decl, id);
-       }
-    }
-
-  struct cgraph_node *node;
-  if (!id->dst_node->clones)
+  struct cgraph_node *this_node = id->dst_node;
+  if (!this_node->clones)
     return;
-  for (node = id->dst_node->clones; node != id->dst_node;)
+  for (cgraph_node *node = this_node->clones; node != this_node;)
     {
       /* First update replace maps to match the new body.  */
-      if (node->clone.tree_map)
-        {
+      clone_info *info = clone_info::get (node);
+      if (info && info->tree_map)
+       {
          unsigned int i;
-          for (i = 0; i < vec_safe_length (node->clone.tree_map); i++)
+         for (i = 0; i < vec_safe_length (info->tree_map); i++)
            {
              struct ipa_replace_map *replace_info;
-             replace_info = (*node->clone.tree_map)[i];
+             replace_info = (*info->tree_map)[i];
              walk_tree (&replace_info->new_tree, copy_tree_body_r, id, NULL);
            }
        }
-      if (node->clone.performed_splits)
-       {
-         unsigned len = vec_safe_length (node->clone.performed_splits);
-         for (unsigned i = 0; i < len; i++)
-           {
-             ipa_param_performed_split *ps
-               = &(*node->clone.performed_splits)[i];
-             ps->dummy_decl = remap_decl (ps->dummy_decl, id);
-           }
-       }
-      if (unsigned len = vec_safe_length (cur_performed_splits))
-       {
-         /* We do not want to add current performed splits when we are saving
-            a copy of function body for later during inlining, that would just
-            duplicate all entries.  So let's have a look whether anything
-            referring to the first dummy_decl is present.  */
-         unsigned dst_len = vec_safe_length (node->clone.performed_splits);
-         ipa_param_performed_split *first = &(*cur_performed_splits)[0];
-         for (unsigned i = 0; i < dst_len; i++)
-           if ((*node->clone.performed_splits)[i].dummy_decl
-               == first->dummy_decl)
-             {
-               len = 0;
-               break;
-             }
-
-         for (unsigned i = 0; i < len; i++)
-           vec_safe_push (node->clone.performed_splits,
-                          (*cur_performed_splits)[i]);
-         if (flag_checking)
-           {
-             for (unsigned i = 0; i < dst_len; i++)
-               {
-                 ipa_param_performed_split *ps1
-                   = &(*node->clone.performed_splits)[i];
-                 for (unsigned j = i + 1; j < dst_len; j++)
-                   {
-                     ipa_param_performed_split *ps2
-                       = &(*node->clone.performed_splits)[j];
-                     gcc_assert (ps1->dummy_decl != ps2->dummy_decl
-                                 || ps1->unit_offset != ps2->unit_offset);
-                   }
-               }
-           }
-       }
 
       if (node->clones)
        node = node->clones;
@@ -6072,6 +6166,12 @@ tree_function_versioning (tree old_decl, tree new_decl,
   auto_vec<gimple *, 10> init_stmts;
   tree vars = NULL_TREE;
 
+  /* We can get called recursively from expand_call_inline via clone
+     materialization.  While expand_call_inline maintains input_location
+     we cannot tolerate it to leak into the materialized clone.  */
+  location_t saved_location = input_location;
+  input_location = UNKNOWN_LOCATION;
+
   gcc_assert (TREE_CODE (old_decl) == FUNCTION_DECL
              && TREE_CODE (new_decl) == FUNCTION_DECL);
   DECL_POSSIBLY_INLINED (old_decl) = 1;
@@ -6136,6 +6236,8 @@ tree_function_versioning (tree old_decl, tree new_decl,
   DECL_ARGUMENTS (new_decl) = DECL_ARGUMENTS (old_decl);
   initialize_cfun (new_decl, old_decl,
                   new_entry ? new_entry->count : old_entry_block->count);
+  new_version_node->calls_declare_variant_alt
+    = old_version_node->calls_declare_variant_alt;
   if (DECL_STRUCT_FUNCTION (new_decl)->gimple_df)
     DECL_STRUCT_FUNCTION (new_decl)->gimple_df->ipa_pta
       = id.src_cfun->gimple_df->ipa_pta;
@@ -6147,8 +6249,9 @@ tree_function_versioning (tree old_decl, tree new_decl,
       = copy_static_chain (p, &id);
 
   auto_vec<int, 16> new_param_indices;
+  clone_info *info = clone_info::get (old_version_node);
   ipa_param_adjustments *old_param_adjustments
-    = old_version_node->clone.param_adjustments;
+    = info ? info->param_adjustments : NULL;
   if (old_param_adjustments)
     old_param_adjustments->get_updated_indices (&new_param_indices);
 
@@ -6164,46 +6267,14 @@ tree_function_versioning (tree old_decl, tree new_decl,
          p = new_param_indices[p];
 
        tree parm;
-       tree req_type, new_type;
-
        for (parm = DECL_ARGUMENTS (old_decl); p;
             parm = DECL_CHAIN (parm))
          p--;
-       tree old_tree = parm;
-       req_type = TREE_TYPE (parm);
-       new_type = TREE_TYPE (replace_info->new_tree);
-       if (!useless_type_conversion_p (req_type, new_type))
-         {
-           if (fold_convertible_p (req_type, replace_info->new_tree))
-             replace_info->new_tree
-               = fold_build1 (NOP_EXPR, req_type, replace_info->new_tree);
-           else if (TYPE_SIZE (req_type) == TYPE_SIZE (new_type))
-             replace_info->new_tree
-               = fold_build1 (VIEW_CONVERT_EXPR, req_type,
-                              replace_info->new_tree);
-           else
-             {
-               if (dump_file)
-                 {
-                   fprintf (dump_file, "    const ");
-                   print_generic_expr (dump_file,
-                                       replace_info->new_tree);
-                   fprintf (dump_file,
-                            "  can't be converted to param ");
-                   print_generic_expr (dump_file, parm);
-                   fprintf (dump_file, "\n");
-                 }
-               old_tree = NULL;
-             }
-         }
-
-       if (old_tree)
-         {
-           init = setup_one_parameter (&id, old_tree, replace_info->new_tree,
-                                       id.src_fn, NULL, &vars);
-           if (init)
-             init_stmts.safe_push (init);
-         }
+       gcc_assert (parm);
+       init = setup_one_parameter (&id, parm, replace_info->new_tree,
+                                   id.src_fn, NULL, &vars);
+       if (init)
+         init_stmts.safe_push (init);
       }
 
   ipa_param_body_adjustments *param_body_adjs = NULL;
@@ -6236,6 +6307,8 @@ tree_function_versioning (tree old_decl, tree new_decl,
       tree resdecl_repl = copy_result_decl_to_var (DECL_RESULT (old_decl),
                                                   &id);
       declare_inline_vars (NULL, resdecl_repl);
+      if (DECL_BY_REFERENCE (DECL_RESULT (old_decl)))
+       resdecl_repl = build_fold_addr_expr (resdecl_repl);
       insert_decl_map (&id, DECL_RESULT (old_decl), resdecl_repl);
 
       DECL_RESULT (new_decl)
@@ -6402,6 +6475,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
 
   gcc_assert (!id.debug_stmts.exists ());
   pop_cfun ();
+  input_location = saved_location;
   return;
 }