/* Tree inlining.
- Copyright (C) 2001-2019 Free Software Foundation, Inc.
+ Copyright (C) 2001-2021 Free Software Foundation, Inc.
Contributed by Alexandre Oliva <aoliva@redhat.com>
This file is part of GCC.
#include "tree-ssa.h"
#include "except.h"
#include "debug.h"
-#include "params.h"
#include "value-prof.h"
#include "cfgloop.h"
#include "builtins.h"
#include "sreal.h"
#include "tree-cfgcleanup.h"
#include "tree-ssa-live.h"
+#include "alloc-pool.h"
+#include "symbol-summary.h"
+#include "symtab-thunks.h"
+#include "symtab-clones.h"
/* I'm not real happy about this, but we need to handle gimple and
non-gimple trees. */
static void declare_inline_vars (tree, tree);
static void remap_save_expr (tree *, hash_map<tree, tree> *, int *);
static void prepend_lexical_block (tree current_block, tree new_block);
-static tree copy_decl_to_var (tree, copy_body_data *);
static tree copy_result_decl_to_var (tree, copy_body_data *);
static tree copy_decl_maybe_to_var (tree, copy_body_data *);
static gimple_seq remap_gimple_stmt (gimple *, copy_body_data *);
id->decl_map->put (value, value);
}
-/* Insert a tree->tree mapping for ID. This is only used for
- variables. */
-
-static void
-insert_debug_decl_map (copy_body_data *id, tree key, tree value)
-{
- if (!gimple_in_ssa_p (id->src_cfun))
- return;
-
- if (!opt_for_fn (id->dst_fn, flag_var_tracking_assignments))
- return;
-
- if (!target_for_debug_bind (key))
- return;
-
- gcc_assert (TREE_CODE (key) == PARM_DECL);
- gcc_assert (VAR_P (value));
-
- if (!id->debug_map)
- id->debug_map = new hash_map<tree, tree>;
-
- id->debug_map->put (key, value);
-}
-
/* If nonzero, we're remapping the contents of inlined debug
statements. If negative, an error has occurred, such as a
reference to a variable that isn't available in the inlined
n = id->decl_map->get (name);
if (n)
- return unshare_expr (*n);
+ {
+      /* When we perform edge redirection as part of CFG copy, IPA-SRA can
+	 remove an unused LHS from a call statement.  Such an LHS can however
+	 still appear in debug statements, but its value is lost in this
+	 function and we do not want to map it.  */
+ if (id->killed_new_ssa_names
+ && id->killed_new_ssa_names->contains (*n))
+ {
+ gcc_assert (processing_debug_stmt);
+ processing_debug_stmt = -1;
+ return name;
+ }
+
+ return unshare_expr (*n);
+ }
if (processing_debug_stmt)
{
/* For array bounds where we have decided not to copy over the bounds
variable which isn't used in OpenMP/OpenACC region, change them to
an uninitialized VAR_DECL temporary. */
- if (TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) == error_mark_node
- && id->adjust_array_error_bounds
+ if (id->adjust_array_error_bounds
+ && TYPE_DOMAIN (new_tree)
+ && TYPE_MAX_VALUE (TYPE_DOMAIN (new_tree)) == error_mark_node
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
{
tree v = create_tmp_var (TREE_TYPE (TYPE_DOMAIN (new_tree)));
*tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
- TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+ copy_warning (*tp, old);
if (MR_DEPENDENCE_CLIQUE (old) != 0)
{
MR_DEPENDENCE_CLIQUE (*tp)
*tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
- TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+ copy_warning (*tp, old);
if (MR_DEPENDENCE_CLIQUE (old) != 0)
{
MR_DEPENDENCE_CLIQUE (*tp)
*walk_subtrees = 0;
}
+ else if (TREE_CODE (*tp) == OMP_CLAUSE
+ && (OMP_CLAUSE_CODE (*tp) == OMP_CLAUSE_AFFINITY
+ || OMP_CLAUSE_CODE (*tp) == OMP_CLAUSE_DEPEND))
+ {
+ tree t = OMP_CLAUSE_DECL (*tp);
+ if (t
+ && TREE_CODE (t) == TREE_LIST
+ && TREE_PURPOSE (t)
+ && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ {
+ *walk_subtrees = 0;
+ OMP_CLAUSE_DECL (*tp) = copy_node (t);
+ t = OMP_CLAUSE_DECL (*tp);
+ TREE_PURPOSE (t) = copy_node (TREE_PURPOSE (t));
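+	  /* Elements 0 to 4 of the TREE_VEC describe the iterator
+	     (variable, begin, end, step and original step) and element 5,
+	     if present, is the BLOCK with the iterator variables.  */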
+ for (int i = 0; i <= 4; i++)
+ walk_tree (&TREE_VEC_ELT (TREE_PURPOSE (t), i),
+ copy_tree_body_r, id, NULL);
+ if (TREE_VEC_ELT (TREE_PURPOSE (t), 5))
+ remap_block (&TREE_VEC_ELT (TREE_PURPOSE (t), 5), id);
+ walk_tree (&TREE_VALUE (t), copy_tree_body_r, id, NULL);
+ }
+ }
}
/* Keep iterating. */
: !opt_for_fn (id->dst_fn, flag_var_tracking_assignments)))
return NULL;
+ if (!is_gimple_debug (stmt)
+ && id->param_body_adjs
+ && id->param_body_adjs->m_dead_stmts.contains (stmt))
+ return NULL;
+
/* Begin by recognizing trees that we'll completely rewrite for the
inlining context. Our output for these trees is completely
different from our input (e.g. RETURN_EXPR is deleted and morphs
assignment to the equivalent of the original RESULT_DECL.
If RETVAL is just the result decl, the result decl has
already been set (e.g. a recent "foo (&result_decl, ...)");
- just toss the entire GIMPLE_RETURN. */
+ just toss the entire GIMPLE_RETURN. Likewise for when the
+ call doesn't want the return value. */
if (retval
&& (TREE_CODE (retval) != RESULT_DECL
+ && (!id->call_stmt
+ || gimple_call_lhs (id->call_stmt) != NULL_TREE)
&& (TREE_CODE (retval) != SSA_NAME
|| ! SSA_NAME_VAR (retval)
|| TREE_CODE (SSA_NAME_VAR (retval)) != RESULT_DECL)))
copy = gimple_build_omp_master (s1);
break;
+ case GIMPLE_OMP_MASKED:
+ s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+ copy = gimple_build_omp_masked
+ (s1, gimple_omp_masked_clauses (stmt));
+ break;
+
+ case GIMPLE_OMP_SCOPE:
+ s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+ copy = gimple_build_omp_scope
+ (s1, gimple_omp_scope_clauses (stmt));
+ break;
+
case GIMPLE_OMP_TASKGROUP:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_taskgroup
return NULL;
}
}
+
+  /* We do not allow CLOBBERs of handled components.  In case the
+     returned value is stored via such a handled component, remove
+     the clobber so the stmt verifier is happy.  */
+ if (gimple_clobber_p (stmt)
+ && TREE_CODE (gimple_assign_lhs (stmt)) == RESULT_DECL)
+ {
+ tree remapped = remap_decl (gimple_assign_lhs (stmt), id);
+ if (!DECL_P (remapped)
+ && TREE_CODE (remapped) != MEM_REF)
+ return NULL;
+ }
if (gimple_debug_bind_p (stmt))
{
+ tree value;
+ if (id->param_body_adjs
+ && id->param_body_adjs->m_dead_stmts.contains (stmt))
+ value = NULL_TREE;
+ else
+ value = gimple_debug_bind_get_value (stmt);
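+      /* Building the bind with a NULL value yields a debug bind without
+	 a value, which terminates any earlier binding of the variable.  */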
gdebug *copy
= gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
- gimple_debug_bind_get_value (stmt),
- stmt);
+ value, stmt);
if (id->reset_location)
gimple_set_location (copy, input_location);
id->debug_stmts.safe_push (copy);
/* If the inlined function has too many debug markers,
don't copy them. */
if (id->src_cfun->debug_marker_count
- > PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+ > param_max_debug_marker_count
+ || id->reset_location)
return stmts;
gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
- if (id->reset_location)
- gimple_set_location (copy, input_location);
id->debug_stmts.safe_push (copy);
gimple_seq_add_stmt (&stmts, copy);
return stmts;
gcc_assert (n);
gimple_set_block (copy, *n);
}
+ if (id->param_body_adjs)
+ {
+ gimple_seq extra_stmts = NULL;
+      id->param_body_adjs->modify_gimple_stmt (&copy, &extra_stmts, stmt);
+ if (!gimple_seq_empty_p (extra_stmts))
+ {
+ memset (&wi, 0, sizeof (wi));
+ wi.info = id;
+ for (gimple_stmt_iterator egsi = gsi_start (extra_stmts);
+ !gsi_end_p (egsi);
+ gsi_next (&egsi))
+ walk_gimple_op (gsi_stmt (egsi), remap_gimple_op_r, &wi);
+ gimple_seq_add_seq (&stmts, extra_stmts);
+ }
+ }
if (id->reset_location)
gimple_set_location (copy, input_location);
gimple_set_vuse (copy, NULL_TREE);
}
+ if (cfun->can_throw_non_call_exceptions)
+ {
+      /* When inlining a function which does not have non-call exceptions
+	 enabled into a function that has (which only happens with
+	 always-inline) we have to fix up stmts that are not allowed
+	 to throw.  */
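+      /* For example, a trapping FP comparison "if (x_1 < y_2)" is split
+	 below into "tem_3 = x_1 < y_2; if (tem_3 != 0)" so that the
+	 potentially trapping comparison sits in an assignment, which is
+	 allowed to throw.  */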
+ if (gcond *cond = dyn_cast <gcond *> (copy))
+ if (gimple_could_trap_p (cond))
+ {
+ gassign *cmp
+ = gimple_build_assign (make_ssa_name (boolean_type_node),
+ gimple_cond_code (cond),
+ gimple_cond_lhs (cond),
+ gimple_cond_rhs (cond));
+ gimple_seq_add_stmt (&stmts, cmp);
+ gimple_cond_set_code (cond, NE_EXPR);
+ gimple_cond_set_lhs (cond, gimple_assign_lhs (cmp));
+ gimple_cond_set_rhs (cond, boolean_false_node);
+ }
+ }
+
gimple_seq_add_stmt (&stmts, copy);
return stmts;
}
tree p;
gcall *new_call;
vec<tree> argarray;
- size_t nargs = gimple_call_num_args (id->call_stmt);
- size_t n;
+ size_t nargs_caller = gimple_call_num_args (id->call_stmt);
+ size_t nargs = nargs_caller;
for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
/* Create the new array of arguments. */
- n = nargs + gimple_call_num_args (call_stmt);
+ size_t nargs_callee = gimple_call_num_args (call_stmt);
+ size_t n = nargs + nargs_callee;
argarray.create (n);
- argarray.safe_grow_cleared (n);
+ argarray.safe_grow_cleared (n, true);
/* Copy all the arguments before '...' */
- memcpy (argarray.address (),
- gimple_call_arg_ptr (call_stmt, 0),
- gimple_call_num_args (call_stmt) * sizeof (tree));
+ if (nargs_callee)
+ memcpy (argarray.address (),
+ gimple_call_arg_ptr (call_stmt, 0),
+ nargs_callee * sizeof (tree));
/* Append the arguments passed in '...' */
- memcpy (argarray.address () + gimple_call_num_args (call_stmt),
- gimple_call_arg_ptr (id->call_stmt, 0)
- + (gimple_call_num_args (id->call_stmt) - nargs),
- nargs * sizeof (tree));
+ if (nargs)
+ memcpy (argarray.address () + nargs_callee,
+ gimple_call_arg_ptr (id->call_stmt, 0)
+ + (nargs_caller - nargs), nargs * sizeof (tree));
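+      /* For example, when inlining a variadic function called as
+	 "foo (x, 2, 3)" with one named parameter, an inner call
+	 "bar (a, __builtin_va_arg_pack ())" is rewritten here into
+	 "bar (a, 2, 3)".  */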
new_call = gimple_build_call_vec (gimple_call_fn (call_stmt),
argarray);
GF_CALL_VA_ARG_PACK. */
gimple_call_copy_flags (new_call, call_stmt);
gimple_call_set_va_arg_pack (new_call, false);
+ gimple_call_set_fntype (new_call, gimple_call_fntype (call_stmt));
/* location includes block. */
gimple_set_location (new_call, gimple_location (stmt));
gimple_call_set_lhs (new_call, gimple_call_lhs (call_stmt));
if (edge)
{
struct cgraph_edge *old_edge = edge;
- profile_count old_cnt = edge->count;
- edge = edge->clone (id->dst_node, call_stmt,
- gimple_uid (stmt),
- num, den,
- true);
-
- /* Speculative calls consist of two edges - direct and
- indirect. Duplicate the whole thing and distribute
- frequencies accordingly. */
+
+	      /* A speculative call consists of multiple edges - an
+		 indirect edge and one or more direct edges.  Duplicate
+		 the whole thing and distribute frequencies accordingly.  */
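+	      /* For instance, if the original direct targets had counts of
+		 60 and 30 and the indirect edge a count of 10, the cloned
+		 edges receive 60%, 30% and 10% of the copied block's
+		 count, respectively.  */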
if (edge->speculative)
{
- struct cgraph_edge *direct, *indirect;
- struct ipa_ref *ref;
+ int n = 0;
+ profile_count direct_cnt
+ = profile_count::zero ();
+
+ /* First figure out the distribution of counts
+ so we can re-scale BB profile accordingly. */
+ for (cgraph_edge *e = old_edge; e;
+ e = e->next_speculative_call_target ())
+ direct_cnt = direct_cnt + e->count;
+
+ cgraph_edge *indirect
+ = old_edge->speculative_call_indirect_edge ();
+ profile_count indir_cnt = indirect->count;
- gcc_assert (!edge->indirect_unknown_callee);
- old_edge->speculative_call_info (direct, indirect, ref);
+	      /* Next iterate over all direct edges, clone each one and its
+		 corresponding reference, and update the profile.  */
+ for (cgraph_edge *e = old_edge;
+ e;
+ e = e->next_speculative_call_target ())
+ {
+ profile_count cnt = e->count;
+
+ id->dst_node->clone_reference
+ (e->speculative_call_target_ref (), stmt);
+ edge = e->clone (id->dst_node, call_stmt,
+ gimple_uid (stmt), num, den,
+ true);
+ profile_probability prob
+ = cnt.probability_in (direct_cnt
+ + indir_cnt);
+ edge->count
+ = copy_basic_block->count.apply_probability
+ (prob);
+ n++;
+ }
+ gcc_checking_assert
+ (indirect->num_speculative_call_targets_p ()
+ == n);
- profile_count indir_cnt = indirect->count;
+	      /* Duplicate the indirect edge after all direct edges have
+		 been cloned.  */
indirect = indirect->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
num, den,
true);
profile_probability prob
- = indir_cnt.probability_in (old_cnt + indir_cnt);
+ = indir_cnt.probability_in (direct_cnt
+ + indir_cnt);
indirect->count
= copy_basic_block->count.apply_probability (prob);
- edge->count = copy_basic_block->count - indirect->count;
- id->dst_node->clone_reference (ref, stmt);
}
else
- edge->count = copy_basic_block->count;
+ {
+ edge = edge->clone (id->dst_node, call_stmt,
+ gimple_uid (stmt),
+ num, den,
+ true);
+ edge->count = copy_basic_block->count;
+ }
}
break;
case CB_CGE_MOVE:
edge = id->dst_node->get_edge (orig_stmt);
if (edge)
- edge->set_call_stmt (call_stmt);
+ edge = cgraph_edge::set_call_stmt (edge, call_stmt);
break;
default:
if (dump_file)
{
fprintf (dump_file, "Created new direct edge to %s\n",
- dest->name ());
+ dest->dump_name ());
}
}
phi = si.phi ();
res = PHI_RESULT (phi);
new_res = res;
- if (!virtual_operand_p (res))
+ if (!virtual_operand_p (res)
+ && (!id->param_body_adjs
+ || !id->param_body_adjs->m_dead_stmts.contains (phi)))
{
walk_tree (&new_res, copy_tree_body_r, id, NULL);
if (EDGE_COUNT (new_bb->preds) == 0)
gimple_set_location (stmt, UNKNOWN_LOCATION);
}
gsi_remove (&si, false);
- gsi_insert_before (&dsi, stmt, GSI_SAME_STMT);
+ gsi_insert_before (&dsi, stmt, GSI_NEW_STMT);
continue;
}
new_stmt = as_a <gdebug *> (gimple_copy (stmt));
else
gcc_unreachable ();
- gsi_insert_before (&dsi, new_stmt, GSI_SAME_STMT);
+ gsi_insert_before (&dsi, new_stmt, GSI_NEW_STMT);
id->debug_stmts.safe_push (new_stmt);
gsi_prev (&ssi);
}
static void
copy_loops (copy_body_data *id,
- struct loop *dest_parent, struct loop *src_parent)
+ class loop *dest_parent, class loop *src_parent)
{
- struct loop *src_loop = src_parent->inner;
+ class loop *src_loop = src_parent->inner;
while (src_loop)
{
if (!id->blocks_to_copy
|| bitmap_bit_p (id->blocks_to_copy, src_loop->header->index))
{
- struct loop *dest_loop = alloc_loop ();
+ class loop *dest_loop = alloc_loop ();
/* Assign the new loop its header and latch and associate
those with the new loop. */
gimple *stmt = gsi_stmt (si);
if (is_gimple_call (stmt))
{
+ tree old_lhs = gimple_call_lhs (stmt);
struct cgraph_edge *edge = id->dst_node->get_edge (stmt);
if (edge)
{
- edge->redirect_call_stmt_to_callee ();
+ gimple *new_stmt
+ = cgraph_edge::redirect_call_stmt_to_callee (edge);
+	      /* If the IPA-SRA transformation, run as part of edge
+		 redirection, removed the LHS because it is unused, save it
+		 to killed_new_ssa_names so that we can prune it from debug
+		 statements.  */
+ if (old_lhs
+ && TREE_CODE (old_lhs) == SSA_NAME
+ && !gimple_call_lhs (new_stmt))
+ {
+ if (!id->killed_new_ssa_names)
+ id->killed_new_ssa_names = new hash_set<tree> (16);
+ id->killed_new_ssa_names->add (old_lhs);
+ }
+
if (stmt == last && id->call_stmt && maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (bb);
}
}
if (gimple_debug_nonbind_marker_p (stmt))
- return;
+ {
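+      /* When inlining, markers that have lost their scope block cannot
+	 be placed sensibly in the caller; drop them.  */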
+ if (id->call_stmt && !gimple_block (stmt))
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gsi_remove (&gsi, true);
+ }
+ return;
+ }
/* Remap all the operands in COPY. */
memset (&wi, 0, sizeof (wi));
else
gcc_unreachable ();
- if (TREE_CODE (t) == PARM_DECL && id->debug_map
+ if (TREE_CODE (t) == PARM_DECL
+ && id->debug_map
&& (n = id->debug_map->get (t)))
{
gcc_assert (VAR_P (*n));
static void
copy_debug_stmts (copy_body_data *id)
{
- size_t i;
- gdebug *stmt;
-
if (!id->debug_stmts.exists ())
return;
- FOR_EACH_VEC_ELT (id->debug_stmts, i, stmt)
+ for (gdebug *stmt : id->debug_stmts)
copy_debug_stmt (stmt, id);
id->debug_stmts.release ();
body = copy_cfg_body (id, entry_block_map, exit_block_map,
new_entry);
copy_debug_stmts (id);
+ delete id->killed_new_ssa_names;
+ id->killed_new_ssa_names = NULL;
return body;
}
base_stmt = gsi_stmt (gsi);
}
- note = gimple_build_debug_bind (tracked_var, unshare_expr (value), base_stmt);
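+  /* VALUE is error_mark_node when the caller could not represent the
+     value (see force_value_to_type); emit a debug bind without a value
+     then.  */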
+ note = gimple_build_debug_bind (tracked_var,
+ value == error_mark_node
+ ? NULL_TREE : unshare_expr (value),
+ base_stmt);
if (bb)
{
&& gimple_assign_rhs_class (init_stmt) == GIMPLE_UNARY_RHS)
{
tree rhs = build1 (gimple_assign_rhs_code (init_stmt),
- gimple_expr_type (init_stmt),
+ TREE_TYPE (gimple_assign_lhs (init_stmt)),
gimple_assign_rhs1 (init_stmt));
rhs = force_gimple_operand_gsi (&si, rhs, true, NULL_TREE, false,
GSI_NEW_STMT);
gimple_assign_set_rhs1 (init_stmt, rhs);
}
gsi_insert_after (&si, init_stmt, GSI_NEW_STMT);
- gimple_regimplify_operands (init_stmt, &si);
-
if (!is_gimple_debug (init_stmt))
{
+ gimple_regimplify_operands (init_stmt, &si);
+
tree def = gimple_assign_lhs (init_stmt);
insert_init_debug_bind (id, bb, def, def, init_stmt);
}
}
}
+/* Deal with mismatched formal/actual parameters, in a rather brute-force way
+   if need be (which should only be necessary for invalid programs).  Attempt
+   to convert VALUE to TYPE and return the result if that is possible; return
+   a zero constant of the given type if it fails, or error_mark_node for a
+   WITH_SIZE_EXPR that cannot be converted at all.  */
+
+tree
+force_value_to_type (tree type, tree value)
+{
+ /* If we can match up types by promotion/demotion do so. */
+ if (fold_convertible_p (type, value))
+ return fold_convert (type, value);
+
+ /* ??? For valid programs we should not end up here.
+ Still if we end up with truly mismatched types here, fall back
+ to using a VIEW_CONVERT_EXPR or a literal zero to not leak invalid
+ GIMPLE to the following passes. */
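+  /* A WITH_SIZE_EXPR carries a variable-sized value that cannot be
+     view-converted; let the caller know by returning error_mark_node.  */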
+ if (TREE_CODE (value) == WITH_SIZE_EXPR)
+ return error_mark_node;
+ else if (!is_gimple_reg_type (TREE_TYPE (value))
+ || TYPE_SIZE (type) == TYPE_SIZE (TREE_TYPE (value)))
+ return fold_build1 (VIEW_CONVERT_EXPR, type, value);
+ else
+ return build_zero_cst (type);
+}
+
/* Initialize parameter P with VALUE. If needed, produce init statement
at the end of BB. When BB is NULL, we return init statement to be
output later. */
{
gimple *init_stmt = NULL;
tree var;
- tree rhs = value;
tree def = (gimple_in_ssa_p (cfun)
? ssa_default_def (id->src_cfun, p) : NULL);
- if (value
- && value != error_mark_node
- && !useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value)))
- {
- /* If we can match up types by promotion/demotion do so. */
- if (fold_convertible_p (TREE_TYPE (p), value))
- rhs = fold_convert (TREE_TYPE (p), value);
- else
- {
- /* ??? For valid programs we should not end up here.
- Still if we end up with truly mismatched types here, fall back
- to using a VIEW_CONVERT_EXPR or a literal zero to not leak invalid
- GIMPLE to the following passes. */
- if (!is_gimple_reg_type (TREE_TYPE (value))
- || TYPE_SIZE (TREE_TYPE (p)) == TYPE_SIZE (TREE_TYPE (value)))
- rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (p), value);
- else
- rhs = build_zero_cst (TREE_TYPE (p));
- }
- }
-
/* Make an equivalent VAR_DECL. Note that we must NOT remap the type
here since the type of this decl must be visible to the calling
function. */
value. */
if (TREE_READONLY (p)
&& !TREE_ADDRESSABLE (p)
- && value && !TREE_SIDE_EFFECTS (value)
+ && value
+ && !TREE_SIDE_EFFECTS (value)
&& !def)
{
- /* We may produce non-gimple trees by adding NOPs or introduce
- invalid sharing when operand is not really constant.
- It is not big deal to prohibit constant propagation here as
- we will constant propagate in DOM1 pass anyway. */
- if (is_gimple_min_invariant (value)
- && useless_type_conversion_p (TREE_TYPE (p),
- TREE_TYPE (value))
+      /* We may produce non-gimple trees by adding NOPs or introduce invalid
+	 sharing when the value is not a constant or a DECL.  And we need to
+	 make sure that it cannot be modified from another path in the
+	 callee.  */
+ if ((is_gimple_min_invariant (value)
+ || (DECL_P (value) && TREE_READONLY (value))
+ || (auto_var_in_fn_p (value, id->dst_fn)
+ && !TREE_ADDRESSABLE (value)))
+ && useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value))
/* We have to be very careful about ADDR_EXPR. Make sure
the base variable isn't a local variable of the inlined
function, e.g., when doing recursive inlining, direct or
&& ! self_inlining_addr_expr (value, fn))
{
insert_decl_map (id, p, value);
- insert_debug_decl_map (id, p, var);
+ if (!id->debug_map)
+ id->debug_map = new hash_map<tree, tree>;
+ id->debug_map->put (p, var);
return insert_init_debug_bind (id, bb, var, value, NULL);
}
}
automatically replaced by the VAR_DECL. */
insert_decl_map (id, p, var);
- /* Even if P was TREE_READONLY, the new VAR should not be.
- In the original code, we would have constructed a
- temporary, and then the function body would have never
- changed the value of P. However, now, we will be
- constructing VAR directly. The constructor body may
- change its value multiple times as it is being
- constructed. Therefore, it must not be TREE_READONLY;
- the back-end assumes that TREE_READONLY variable is
- assigned to only once. */
- if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (p)))
- TREE_READONLY (var) = 0;
+ /* Even if P was TREE_READONLY, the new VAR should not be. In the original
+ code, we would have constructed a temporary, and then the function body
+ would have never changed the value of P. However, now, we will be
+ constructing VAR directly. Therefore, it must not be TREE_READONLY. */
+ TREE_READONLY (var) = 0;
+
+ tree rhs = value;
+ if (value
+ && value != error_mark_node
+ && !useless_type_conversion_p (TREE_TYPE (p), TREE_TYPE (value)))
+ rhs = force_value_to_type (TREE_TYPE (p), value);
/* If there is no setup required and we are in SSA, take the easy route
replacing all SSA names representing the function parameter by the
vs. the call expression. */
if (modify_dest)
caller_type = TREE_TYPE (modify_dest);
- else
+ else if (return_slot)
+ caller_type = TREE_TYPE (return_slot);
+ else /* No LHS on the call. */
caller_type = TREE_TYPE (TREE_TYPE (callee));
/* We don't need to do anything for functions that don't return anything. */
taken by alias analysis. */
gcc_assert (TREE_CODE (return_slot) != SSA_NAME);
var = return_slot_addr;
+ mark_addressable (return_slot);
}
else
{
if (TREE_ADDRESSABLE (result))
mark_addressable (var);
}
- if ((TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
- || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
- && !DECL_GIMPLE_REG_P (result)
+ if (DECL_NOT_GIMPLE_REG_P (result)
&& DECL_P (var))
- DECL_GIMPLE_REG_P (var) = 0;
+ DECL_NOT_GIMPLE_REG_P (var) = 1;
+
+ if (!useless_type_conversion_p (callee_type, caller_type))
+ var = build1 (VIEW_CONVERT_EXPR, callee_type, var);
+
use = NULL;
goto done;
}
/* ??? If we're assigning to a variable sized type, then we must
reuse the destination variable, because we've no good way to
create variable sized temporaries at this point. */
- else if (TREE_CODE (TYPE_SIZE_UNIT (caller_type)) != INTEGER_CST)
+ else if (!poly_int_tree_p (TYPE_SIZE_UNIT (caller_type)))
use_it = true;
/* If the callee cannot possibly modify MODIFY_DEST, then we can
use_it = false;
else if (is_global_var (base_m))
use_it = false;
- else if ((TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
- || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
- && !DECL_GIMPLE_REG_P (result)
- && DECL_GIMPLE_REG_P (base_m))
+ else if (DECL_NOT_GIMPLE_REG_P (result)
+ && !DECL_NOT_GIMPLE_REG_P (base_m))
use_it = false;
else if (!TREE_ADDRESSABLE (base_m))
use_it = true;
}
}
- gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (callee_type)) == INTEGER_CST);
+ gcc_assert (poly_int_tree_p (TYPE_SIZE_UNIT (callee_type)));
var = copy_result_decl_to_var (result, id);
DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
/* Do not have the rest of GCC warn about this variable as it should
not be visible to the user. */
- TREE_NO_WARNING (var) = 1;
+ suppress_warning (var /* OPT_Wuninitialized? */);
declare_inline_vars (id->block, var);
to using a MEM_REF to not leak invalid GIMPLE to the following
passes. */
/* Prevent var from being written into SSA form. */
- if (TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE
- || TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE)
- DECL_GIMPLE_REG_P (var) = false;
- else if (is_gimple_reg_type (TREE_TYPE (var)))
- TREE_ADDRESSABLE (var) = true;
+ if (is_gimple_reg_type (TREE_TYPE (var)))
+ DECL_NOT_GIMPLE_REG_P (var) = true;
use = fold_build2 (MEM_REF, caller_type,
build_fold_addr_expr (var),
build_int_cst (ptr_type_node, 0));
wi.info = (void *) fndecl;
wi.pset = &visited_nodes;
+ /* We cannot inline a function with a variable-sized parameter because we
+ cannot materialize a temporary of such a type in the caller if need be.
+ Note that the return case is not symmetrical because we can guarantee
+ that a temporary is not needed by means of CALL_EXPR_RETURN_SLOT_OPT. */
+ for (tree parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+ if (!poly_int_tree_p (DECL_SIZE (parm)))
+ {
+ inline_forbidden_reason
+ = G_("function %q+F can never be inlined because "
+ "it has a VLA argument");
+ return true;
+ }
+
FOR_EACH_BB_FN (bb, fun)
{
gimple *ret;
return false;
/* We only warn for functions declared `inline' by the user. */
- do_warning = (warn_inline
+ do_warning = (opt_for_fn (fn, warn_inline)
&& DECL_DECLARED_INLINE_P (fn)
&& !DECL_NO_INLINE_WARNING_P (fn)
&& !DECL_IN_SYSTEM_HEADER (fn));
case REALIGN_LOAD_EXPR:
+ case WIDEN_PLUS_EXPR:
+ case WIDEN_MINUS_EXPR:
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case DOT_PROD_EXPR:
case WIDEN_MULT_MINUS_EXPR:
case WIDEN_LSHIFT_EXPR:
+ case VEC_WIDEN_PLUS_HI_EXPR:
+ case VEC_WIDEN_PLUS_LO_EXPR:
+ case VEC_WIDEN_MINUS_HI_EXPR:
+ case VEC_WIDEN_MINUS_LO_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_WIDEN_MULT_EVEN_EXPR:
/* Do not special case builtins where we see the body.
   This just confuses the inliner. */
struct cgraph_node *node;
- if (!(node = cgraph_node::get (decl))
- || node->definition)
+ if ((node = cgraph_node::get (decl))
+ && node->definition)
;
/* For builtins that are likely expanded to nothing or
   inlined do not account for operand costs. */
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
/* If STMT is a GIMPLE_CALL, replace it with its inline expansion. */
static bool
-expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id)
+expand_call_inline (basic_block bb, gimple *stmt, copy_body_data *id,
+ bitmap to_purge)
{
tree use_retvar;
tree fn;
be to be able to keep both bodies and use extern inline body
for inlining, but we can't do that because frontends overwrite
the body. */
- && !cg_edge->callee->local.redefined_extern_inline
+ && !cg_edge->callee->redefined_extern_inline
/* During early inline pass, report only when optimization is
not turned on. */
&& (symtab->global_info_ready
inform (DECL_SOURCE_LOCATION (cfun->decl),
"called from this function");
}
- else if (warn_inline
+ else if (opt_for_fn (fn, warn_inline)
&& DECL_DECLARED_INLINE_P (fn)
&& !DECL_NO_INLINE_WARNING_P (fn)
&& !DECL_IN_SYSTEM_HEADER (fn)
/* If callee is thunk, all we need is to adjust the THIS pointer
and redirect to function being thunked. */
- if (id->src_node->thunk.thunk_p)
+ if (id->src_node->thunk)
{
cgraph_edge *edge;
tree virtual_offset = NULL;
profile_count count = cg_edge->count;
tree op;
gimple_stmt_iterator iter = gsi_for_stmt (stmt);
+ thunk_info *info = thunk_info::get (id->src_node);
- cg_edge->remove ();
+ cgraph_edge::remove (cg_edge);
edge = id->src_node->callees->clone (id->dst_node, call_stmt,
gimple_uid (stmt),
profile_count::one (),
profile_count::one (),
true);
edge->count = count;
- if (id->src_node->thunk.virtual_offset_p)
- virtual_offset = size_int (id->src_node->thunk.virtual_value);
+ if (info->virtual_offset_p)
+ virtual_offset = size_int (info->virtual_value);
op = create_tmp_reg_fn (cfun, TREE_TYPE (gimple_call_arg (stmt, 0)),
NULL);
gsi_insert_before (&iter, gimple_build_assign (op,
gimple_call_arg (stmt, 0)),
GSI_NEW_STMT);
- gcc_assert (id->src_node->thunk.this_adjusting);
- op = thunk_adjust (&iter, op, 1, id->src_node->thunk.fixed_offset,
- virtual_offset, id->src_node->thunk.indirect_offset);
+ gcc_assert (info->this_adjusting);
+ op = thunk_adjust (&iter, op, 1, info->fixed_offset,
+ virtual_offset, info->indirect_offset);
gimple_call_set_arg (stmt, 0, op);
gimple_call_set_fndecl (stmt, edge->callee->decl);
update_stmt (stmt);
id->src_node->remove ();
- expand_call_inline (bb, stmt, id);
+ successfully_inlined = expand_call_inline (bb, stmt, id, to_purge);
maybe_remove_unused_call_args (cfun, stmt);
- return true;
+ /* This used to return true even though we do fail to inline in
+ some cases. See PR98525. */
+ goto egress;
}
fn = cg_edge->callee->decl;
cg_edge->callee->get_untransformed_body ();
if (src_properties != prop_mask)
dst_cfun->curr_properties &= src_properties | ~prop_mask;
dst_cfun->calls_eh_return |= id->src_cfun->calls_eh_return;
+ id->dst_node->calls_declare_variant_alt
+ |= id->src_node->calls_declare_variant_alt;
gcc_assert (!id->src_cfun->after_inlining);
initialized. We do not want to issue a warning about that
uninitialized variable. */
if (DECL_P (modify_dest))
- TREE_NO_WARNING (modify_dest) = 1;
+ suppress_warning (modify_dest, OPT_Wuninitialized);
if (gimple_call_return_slot_opt_p (call_stmt))
{
we may get confused if the compiler sees that the inlined new
function returns a pointer which was just deleted. See bug
33407. */
- if (DECL_IS_OPERATOR_NEW (fn))
+ if (DECL_IS_OPERATOR_NEW_P (fn))
{
return_slot = NULL;
modify_dest = NULL;
for (tree p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
if (!TREE_THIS_VOLATILE (p))
{
+ /* The value associated with P is a local temporary only if
+ there is no value associated with P in the debug map. */
tree *varp = id->decl_map->get (p);
- if (varp && VAR_P (*varp) && !is_gimple_reg (*varp))
+ if (varp
+ && VAR_P (*varp)
+ && !is_gimple_reg (*varp)
+ && !(id->debug_map && id->debug_map->get (p)))
{
- tree clobber = build_constructor (TREE_TYPE (*varp), NULL);
+ tree clobber = build_clobber (TREE_TYPE (*varp));
gimple *clobber_stmt;
- TREE_THIS_VOLATILE (clobber) = 1;
clobber_stmt = gimple_build_assign (*varp, clobber);
gimple_set_location (clobber_stmt, gimple_location (stmt));
gsi_insert_before (&stmt_gsi, clobber_stmt, GSI_SAME_STMT);
&& !is_gimple_reg (id->retvar)
&& !stmt_ends_bb_p (stmt))
{
- tree clobber = build_constructor (TREE_TYPE (id->retvar), NULL);
+ tree clobber = build_clobber (TREE_TYPE (id->retvar));
gimple *clobber_stmt;
- TREE_THIS_VOLATILE (clobber) = 1;
clobber_stmt = gimple_build_assign (id->retvar, clobber);
gimple_set_location (clobber_stmt, gimple_location (old_stmt));
gsi_insert_after (&stmt_gsi, clobber_stmt, GSI_SAME_STMT);
&& !TREE_THIS_VOLATILE (id->retvar)
&& !is_gimple_reg (id->retvar))
{
- tree clobber = build_constructor (TREE_TYPE (id->retvar), NULL);
+ tree clobber = build_clobber (TREE_TYPE (id->retvar));
gimple *clobber_stmt;
- TREE_THIS_VOLATILE (clobber) = 1;
clobber_stmt = gimple_build_assign (id->retvar, clobber);
gimple_set_location (clobber_stmt, gimple_location (stmt));
gsi_replace (&stmt_gsi, clobber_stmt, false);
}
if (purge_dead_abnormal_edges)
- {
- gimple_purge_dead_eh_edges (return_block);
- gimple_purge_dead_abnormal_call_edges (return_block);
- }
+ bitmap_set_bit (to_purge, return_block->index);
/* If the value of the new expression is ignored, that's OK. We
don't warn about this for CALL_EXPRs, so we shouldn't warn about
in a MODIFY_EXPR. */
static bool
-gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
+gimple_expand_calls_inline (basic_block bb, copy_body_data *id,
+ bitmap to_purge)
{
gimple_stmt_iterator gsi;
bool inlined = false;
if (is_gimple_call (stmt)
&& !gimple_call_internal_p (stmt))
- inlined |= expand_call_inline (bb, stmt, id);
+ inlined |= expand_call_inline (bb, stmt, id, to_purge);
}
return inlined;
static void
fold_marked_statements (int first, hash_set<gimple *> *statements)
{
- for (; first < last_basic_block_for_fn (cfun); first++)
- if (BASIC_BLOCK_FOR_FN (cfun, first))
- {
- gimple_stmt_iterator gsi;
+ auto_bitmap to_purge;
- for (gsi = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
- !gsi_end_p (gsi);
- gsi_next (&gsi))
- if (statements->contains (gsi_stmt (gsi)))
- {
- gimple *old_stmt = gsi_stmt (gsi);
- tree old_decl = is_gimple_call (old_stmt) ? gimple_call_fndecl (old_stmt) : 0;
+ auto_vec<edge, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
+ bitmap_clear (visited);
- if (old_decl && fndecl_built_in_p (old_decl))
- {
- /* Folding builtins can create multiple instructions,
- we need to look at all of them. */
- gimple_stmt_iterator i2 = gsi;
- gsi_prev (&i2);
- if (fold_stmt (&gsi))
- {
- gimple *new_stmt;
- /* If a builtin at the end of a bb folded into nothing,
- the following loop won't work. */
- if (gsi_end_p (gsi))
- {
- cgraph_update_edges_for_call_stmt (old_stmt,
- old_decl, NULL);
- break;
- }
- if (gsi_end_p (i2))
- i2 = gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, first));
- else
+ stack.quick_push (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ while (!stack.is_empty ())
+ {
+ /* Look at the edge on the top of the stack. */
+ edge e = stack.pop ();
+ basic_block dest = e->dest;
+
+ if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bitmap_bit_p (visited, dest->index))
+ continue;
+
+ bitmap_set_bit (visited, dest->index);
+
+ if (dest->index >= first)
+ for (gimple_stmt_iterator gsi = gsi_start_bb (dest);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ if (!statements->contains (gsi_stmt (gsi)))
+ continue;
+
+ gimple *old_stmt = gsi_stmt (gsi);
+ tree old_decl = (is_gimple_call (old_stmt)
+ ? gimple_call_fndecl (old_stmt) : 0);
+ if (old_decl && fndecl_built_in_p (old_decl))
+ {
+	      /* Folding builtins can create multiple instructions;
+		 we need to look at all of them.  */
+ gimple_stmt_iterator i2 = gsi;
+ gsi_prev (&i2);
+ if (fold_stmt (&gsi))
+ {
+ gimple *new_stmt;
+ /* If a builtin at the end of a bb folded into nothing,
+ the following loop won't work. */
+ if (gsi_end_p (gsi))
+ {
+ cgraph_update_edges_for_call_stmt (old_stmt,
+ old_decl, NULL);
+ break;
+ }
+ if (gsi_end_p (i2))
+ i2 = gsi_start_bb (dest);
+ else
+ gsi_next (&i2);
+ while (1)
+ {
+ new_stmt = gsi_stmt (i2);
+ update_stmt (new_stmt);
+ cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+ new_stmt);
+
+ if (new_stmt == gsi_stmt (gsi))
+ {
+		      /* It is okay to check only for the very last
+			 of these statements.  If it is a throwing
+			 statement nothing will change.  If it isn't,
+			 this can remove EH edges.  The only problematic
+			 case would be intermediate stmts that throw while
+			 the last one does not; handling that would mean
+			 splitting the block, which we can't do here and
+			 we'd lose anyway.  And as builtins probably never
+			 throw, this is all moot anyway.  */
+ if (maybe_clean_or_replace_eh_stmt (old_stmt,
+ new_stmt))
+ bitmap_set_bit (to_purge, dest->index);
+ break;
+ }
gsi_next (&i2);
- while (1)
- {
- new_stmt = gsi_stmt (i2);
- update_stmt (new_stmt);
- cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
- new_stmt);
+ }
+ }
+ }
+ else if (fold_stmt (&gsi))
+ {
+ /* Re-read the statement from GSI as fold_stmt() may
+ have changed it. */
+ gimple *new_stmt = gsi_stmt (gsi);
+ update_stmt (new_stmt);
+
+ if (is_gimple_call (old_stmt)
+ || is_gimple_call (new_stmt))
+ cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+ new_stmt);
+
+ if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
+ bitmap_set_bit (to_purge, dest->index);
+ }
+ }
- if (new_stmt == gsi_stmt (gsi))
- {
- /* It is okay to check only for the very last
- of these statements. If it is a throwing
- statement nothing will change. If it isn't
- this can remove EH edges. If that weren't
- correct then because some intermediate stmts
- throw, but not the last one. That would mean
- we'd have to split the block, which we can't
- here and we'd loose anyway. And as builtins
- probably never throw, this all
- is mood anyway. */
- if (maybe_clean_or_replace_eh_stmt (old_stmt,
- new_stmt))
- gimple_purge_dead_eh_edges (
- BASIC_BLOCK_FOR_FN (cfun, first));
- break;
- }
- gsi_next (&i2);
- }
- }
- }
- else if (fold_stmt (&gsi))
- {
- /* Re-read the statement from GSI as fold_stmt() may
- have changed it. */
- gimple *new_stmt = gsi_stmt (gsi);
- update_stmt (new_stmt);
-
- if (is_gimple_call (old_stmt)
- || is_gimple_call (new_stmt))
- cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
- new_stmt);
-
- if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
- gimple_purge_dead_eh_edges (BASIC_BLOCK_FOR_FN (cfun,
- first));
- }
+ if (EDGE_COUNT (dest->succs) > 0)
+ {
+ /* Avoid warnings emitted from folding statements that
+ became unreachable because of inlined function parameter
+ propagation. */
+ e = find_taken_edge (dest, NULL_TREE);
+ if (e)
+ stack.quick_push (e);
+ else
+ {
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, dest->succs)
+ stack.safe_push (e);
}
- }
+ }
+ }
+
+ gimple_purge_all_dead_eh_edges (to_purge);
}
/* Expand calls to inline functions in the body of FN. */
will split id->current_basic_block, and the new blocks will
follow it; we'll trudge through them, processing their CALL_EXPRs
along the way. */
+ auto_bitmap to_purge;
FOR_EACH_BB_FN (bb, cfun)
- inlined_p |= gimple_expand_calls_inline (bb, &id);
+ inlined_p |= gimple_expand_calls_inline (bb, &id, to_purge);
pop_gimplify_context (NULL);
gcc_assert (e->inline_failed);
}
+ /* If we didn't inline into the function there is nothing to do. */
+ if (!inlined_p)
+ {
+ delete id.statements_to_fold;
+ return 0;
+ }
+
/* Fold queued statements. */
update_max_bb_count ();
fold_marked_statements (last, id.statements_to_fold);
delete id.statements_to_fold;
- gcc_assert (!id.debug_stmts.exists ());
+ /* Finally purge EH and abnormal edges from the call stmts we inlined.
+ We need to do this after fold_marked_statements since that may walk
+ the SSA use-def chain. */
+ unsigned i;
+ bitmap_iterator bi;
+ EXECUTE_IF_SET_IN_BITMAP (to_purge, 0, i, bi)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+ if (bb)
+ {
+ gimple_purge_dead_eh_edges (bb);
+ gimple_purge_dead_abnormal_call_edges (bb);
+ }
+ }
- /* If we didn't inline into the function there is nothing to do. */
- if (!inlined_p)
- return 0;
+ gcc_assert (!id.debug_stmts.exists ());
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (fn);
delete_unreachable_blocks_update_callgraph (id.dst_node, false);
+ id.dst_node->calls_comdat_local = id.dst_node->check_calls_comdat_local_p ();
if (flag_checking)
id.dst_node->verify ();
return copy;
}
-static tree
+/* Create a new VAR_DECL that is identical in all respects to DECL, except
+   that it is always a VAR_DECL whereas DECL can be a VAR_DECL, a PARM_DECL
+   or a RESULT_DECL.  The original DECL must come from ID->src_fn and the
+   copy will be part of ID->dst_fn.  */
+
+tree
copy_decl_to_var (tree decl, copy_body_data *id)
{
tree copy, type;
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl);
TREE_READONLY (copy) = TREE_READONLY (decl);
TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (decl);
- DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (decl);
+ DECL_NOT_GIMPLE_REG_P (copy) = DECL_NOT_GIMPLE_REG_P (decl);
+ DECL_BY_REFERENCE (copy) = DECL_BY_REFERENCE (decl);
return copy_decl_for_dup_finish (id, decl, copy);
}
if (!DECL_BY_REFERENCE (decl))
{
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl);
- DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (decl);
+ DECL_NOT_GIMPLE_REG_P (copy)
+ = (DECL_NOT_GIMPLE_REG_P (decl)
+ /* RESULT_DECLs are treated special by needs_to_live_in_memory,
+ mirror that to the created VAR_DECL. */
+ || (TREE_CODE (decl) == RESULT_DECL
+ && aggregate_value_p (decl, id->src_fn)));
}
return copy_decl_for_dup_finish (id, decl, copy);
return copy_decl_no_change (decl, id);
}
-/* Return a copy of the function's argument tree. */
+/* Return a copy of the function's argument tree without any modifications. */
+
static tree
-copy_arguments_for_versioning (tree orig_parm, copy_body_data * id,
- bitmap args_to_skip, tree *vars)
+copy_arguments_nochange (tree orig_parm, copy_body_data * id)
{
tree arg, *parg;
tree new_parm = NULL;
- int i = 0;
parg = &new_parm;
-
- for (arg = orig_parm; arg; arg = DECL_CHAIN (arg), i++)
- if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
- {
- tree new_tree = remap_decl (arg, id);
- if (TREE_CODE (new_tree) != PARM_DECL)
- new_tree = id->copy_decl (arg, id);
- lang_hooks.dup_lang_specific_decl (new_tree);
- *parg = new_tree;
- parg = &DECL_CHAIN (new_tree);
- }
- else if (!id->decl_map->get (arg))
- {
- /* Make an equivalent VAR_DECL. If the argument was used
- as temporary variable later in function, the uses will be
- replaced by local variable. */
- tree var = copy_decl_to_var (arg, id);
- insert_decl_map (id, arg, var);
- /* Declare this new variable. */
- DECL_CHAIN (var) = *vars;
- *vars = var;
- }
+ for (arg = orig_parm; arg; arg = DECL_CHAIN (arg))
+ {
+ tree new_tree = remap_decl (arg, id);
+ if (TREE_CODE (new_tree) != PARM_DECL)
+ new_tree = id->copy_decl (arg, id);
+ lang_hooks.dup_lang_specific_decl (new_tree);
+ *parg = new_tree;
+ parg = &DECL_CHAIN (new_tree);
+ }
return new_parm;
}
static void
update_clone_info (copy_body_data * id)
{
- struct cgraph_node *node;
- if (!id->dst_node->clones)
+ struct cgraph_node *this_node = id->dst_node;
+ if (!this_node->clones)
return;
- for (node = id->dst_node->clones; node != id->dst_node;)
+ for (cgraph_node *node = this_node->clones; node != this_node;)
{
/* First update replace maps to match the new body. */
- if (node->clone.tree_map)
- {
+ clone_info *info = clone_info::get (node);
+ if (info && info->tree_map)
+ {
unsigned int i;
- for (i = 0; i < vec_safe_length (node->clone.tree_map); i++)
+ for (i = 0; i < vec_safe_length (info->tree_map); i++)
{
struct ipa_replace_map *replace_info;
- replace_info = (*node->clone.tree_map)[i];
- walk_tree (&replace_info->old_tree, copy_tree_body_r, id, NULL);
+ replace_info = (*info->tree_map)[i];
walk_tree (&replace_info->new_tree, copy_tree_body_r, id, NULL);
}
}
+
if (node->clones)
node = node->clones;
else if (node->next_sibling_clone)
tree with another tree while duplicating the function's
body, TREE_MAP represents the mapping between these
trees. If UPDATE_CLONES is set, the call_stmt fields
- of edges of clones of the function will be updated.
+ of edges of clones of the function will be updated.
- If non-NULL ARGS_TO_SKIP determine function parameters to remove
- from new version.
- If SKIP_RETURN is true, the new version will return void.
- If non-NULL BLOCK_TO_COPY determine what basic blocks to copy.
+   If non-NULL PARAM_ADJUSTMENTS determines how the function prototype (i.e.
+   the function parameters and return value) should be modified.
+   If non-NULL BLOCKS_TO_COPY determines which basic blocks to copy.
If non-NULL NEW_ENTRY determines the new entry BB of the clone.
*/
void
tree_function_versioning (tree old_decl, tree new_decl,
vec<ipa_replace_map *, va_gc> *tree_map,
- bool update_clones, bitmap args_to_skip,
- bool skip_return, bitmap blocks_to_copy,
+ ipa_param_adjustments *param_adjustments,
+ bool update_clones, bitmap blocks_to_copy,
basic_block new_entry)
{
struct cgraph_node *old_version_node;
basic_block old_entry_block, bb;
auto_vec<gimple *, 10> init_stmts;
tree vars = NULL_TREE;
- bitmap debug_args_to_skip = args_to_skip;
+
+  /* We can get called recursively from expand_call_inline via clone
+     materialization.  While expand_call_inline maintains input_location
+     we cannot let it leak into the materialized clone.  */
+ location_t saved_location = input_location;
+ input_location = UNKNOWN_LOCATION;
gcc_assert (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) == FUNCTION_DECL);
DECL_ARGUMENTS (new_decl) = DECL_ARGUMENTS (old_decl);
initialize_cfun (new_decl, old_decl,
new_entry ? new_entry->count : old_entry_block->count);
+ new_version_node->calls_declare_variant_alt
+ = old_version_node->calls_declare_variant_alt;
if (DECL_STRUCT_FUNCTION (new_decl)->gimple_df)
DECL_STRUCT_FUNCTION (new_decl)->gimple_df->ipa_pta
= id.src_cfun->gimple_df->ipa_pta;
DECL_STRUCT_FUNCTION (new_decl)->static_chain_decl
= copy_static_chain (p, &id);
+ auto_vec<int, 16> new_param_indices;
+ clone_info *info = clone_info::get (old_version_node);
+ ipa_param_adjustments *old_param_adjustments
+ = info ? info->param_adjustments : NULL;
+ if (old_param_adjustments)
+ old_param_adjustments->get_updated_indices (&new_param_indices);
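+  /* new_param_indices maps an original parameter index to its position
+     after the previous adjustments; e.g. if an earlier adjustment removed
+     the first parameter, original index 1 now maps to 0.  */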
+
/* If there's a tree_map, prepare for substitution. */
if (tree_map)
for (i = 0; i < tree_map->length (); i++)
{
gimple *init;
replace_info = (*tree_map)[i];
- if (replace_info->replace_p)
- {
- int parm_num = -1;
- if (!replace_info->old_tree)
- {
- int p = replace_info->parm_num;
- tree parm;
- tree req_type, new_type;
-
- for (parm = DECL_ARGUMENTS (old_decl); p;
- parm = DECL_CHAIN (parm))
- p--;
- replace_info->old_tree = parm;
- parm_num = replace_info->parm_num;
- req_type = TREE_TYPE (parm);
- new_type = TREE_TYPE (replace_info->new_tree);
- if (!useless_type_conversion_p (req_type, new_type))
- {
- if (fold_convertible_p (req_type, replace_info->new_tree))
- replace_info->new_tree
- = fold_build1 (NOP_EXPR, req_type,
- replace_info->new_tree);
- else if (TYPE_SIZE (req_type) == TYPE_SIZE (new_type))
- replace_info->new_tree
- = fold_build1 (VIEW_CONVERT_EXPR, req_type,
- replace_info->new_tree);
- else
- {
- if (dump_file)
- {
- fprintf (dump_file, " const ");
- print_generic_expr (dump_file,
- replace_info->new_tree);
- fprintf (dump_file,
- " can't be converted to param ");
- print_generic_expr (dump_file, parm);
- fprintf (dump_file, "\n");
- }
- replace_info->old_tree = NULL;
- }
- }
- }
- else
- gcc_assert (TREE_CODE (replace_info->old_tree) == PARM_DECL);
- if (replace_info->old_tree)
- {
- init = setup_one_parameter (&id, replace_info->old_tree,
- replace_info->new_tree, id.src_fn,
- NULL,
- &vars);
- if (init)
- init_stmts.safe_push (init);
- if (MAY_HAVE_DEBUG_BIND_STMTS && args_to_skip)
- {
- if (parm_num == -1)
- {
- tree parm;
- int p;
- for (parm = DECL_ARGUMENTS (old_decl), p = 0; parm;
- parm = DECL_CHAIN (parm), p++)
- if (parm == replace_info->old_tree)
- {
- parm_num = p;
- break;
- }
- }
- if (parm_num != -1)
- {
- if (debug_args_to_skip == args_to_skip)
- {
- debug_args_to_skip = BITMAP_ALLOC (NULL);
- bitmap_copy (debug_args_to_skip, args_to_skip);
- }
- bitmap_clear_bit (debug_args_to_skip, parm_num);
- }
- }
- }
- }
+
+ int p = replace_info->parm_num;
+ if (old_param_adjustments)
+ p = new_param_indices[p];
+
+ tree parm;
+ for (parm = DECL_ARGUMENTS (old_decl); p;
+ parm = DECL_CHAIN (parm))
+ p--;
+ gcc_assert (parm);
+ init = setup_one_parameter (&id, parm, replace_info->new_tree,
+ id.src_fn, NULL, &vars);
+ if (init)
+ init_stmts.safe_push (init);
}
- /* Copy the function's arguments. */
- if (DECL_ARGUMENTS (old_decl) != NULL_TREE)
+
+ ipa_param_body_adjustments *param_body_adjs = NULL;
+ if (param_adjustments)
+ {
+ param_body_adjs = new ipa_param_body_adjustments (param_adjustments,
+ new_decl, old_decl,
+ &id, &vars, tree_map);
+ id.param_body_adjs = param_body_adjs;
+ DECL_ARGUMENTS (new_decl) = param_body_adjs->get_new_param_chain ();
+ }
+ else if (DECL_ARGUMENTS (old_decl) != NULL_TREE)
DECL_ARGUMENTS (new_decl)
- = copy_arguments_for_versioning (DECL_ARGUMENTS (old_decl), &id,
- args_to_skip, &vars);
+ = copy_arguments_nochange (DECL_ARGUMENTS (old_decl), &id);
DECL_INITIAL (new_decl) = remap_blocks (DECL_INITIAL (id.src_fn), &id);
BLOCK_SUPERCONTEXT (DECL_INITIAL (new_decl)) = new_decl;
if (DECL_RESULT (old_decl) == NULL_TREE)
;
- else if (skip_return && !VOID_TYPE_P (TREE_TYPE (DECL_RESULT (old_decl))))
+ else if (param_adjustments && param_adjustments->m_skip_return
+ && !VOID_TYPE_P (TREE_TYPE (DECL_RESULT (old_decl))))
{
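+      /* Stores to the original RESULT_DECL in the body are remapped to a
+	 dummy local variable below, since the new version returns void.  */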
+ tree resdecl_repl = copy_result_decl_to_var (DECL_RESULT (old_decl),
+ &id);
+ declare_inline_vars (NULL, resdecl_repl);
+ if (DECL_BY_REFERENCE (DECL_RESULT (old_decl)))
+ resdecl_repl = build_fold_addr_expr (resdecl_repl);
+ insert_decl_map (&id, DECL_RESULT (old_decl), resdecl_repl);
+
DECL_RESULT (new_decl)
= build_decl (DECL_SOURCE_LOCATION (DECL_RESULT (old_decl)),
RESULT_DECL, NULL_TREE, void_type_node);
DECL_CONTEXT (DECL_RESULT (new_decl)) = new_decl;
+ DECL_IS_MALLOC (new_decl) = false;
cfun->returns_struct = 0;
cfun->returns_pcc_struct = 0;
}
}
}
- if (debug_args_to_skip && MAY_HAVE_DEBUG_BIND_STMTS)
+ if (param_body_adjs && MAY_HAVE_DEBUG_BIND_STMTS)
{
- tree parm;
vec<tree, va_gc> **debug_args = NULL;
unsigned int len = 0;
- for (parm = DECL_ARGUMENTS (old_decl), i = 0;
- parm; parm = DECL_CHAIN (parm), i++)
- if (bitmap_bit_p (debug_args_to_skip, i) && is_gimple_reg (parm))
- {
- tree ddecl;
+ unsigned reset_len = param_body_adjs->m_reset_debug_decls.length ();
- if (debug_args == NULL)
- {
- debug_args = decl_debug_args_insert (new_decl);
- len = vec_safe_length (*debug_args);
- }
- ddecl = make_node (DEBUG_EXPR_DECL);
- DECL_ARTIFICIAL (ddecl) = 1;
- TREE_TYPE (ddecl) = TREE_TYPE (parm);
- SET_DECL_MODE (ddecl, DECL_MODE (parm));
- vec_safe_push (*debug_args, DECL_ORIGIN (parm));
- vec_safe_push (*debug_args, ddecl);
- }
+ for (i = 0; i < reset_len; i++)
+ {
+ tree parm = param_body_adjs->m_reset_debug_decls[i];
+ gcc_assert (is_gimple_reg (parm));
+ tree ddecl;
+
+ if (debug_args == NULL)
+ {
+ debug_args = decl_debug_args_insert (new_decl);
+ len = vec_safe_length (*debug_args);
+ }
+ ddecl = make_node (DEBUG_EXPR_DECL);
+ DECL_ARTIFICIAL (ddecl) = 1;
+ TREE_TYPE (ddecl) = TREE_TYPE (parm);
+ SET_DECL_MODE (ddecl, DECL_MODE (parm));
+ vec_safe_push (*debug_args, DECL_ORIGIN (parm));
+ vec_safe_push (*debug_args, ddecl);
+ }
if (debug_args != NULL)
{
/* On the callee side, add
if (var == NULL_TREE)
break;
vexpr = make_node (DEBUG_EXPR_DECL);
- parm = (**debug_args)[i];
+ tree parm = (**debug_args)[i];
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (parm);
SET_DECL_MODE (vexpr, DECL_MODE (parm));
while (i > len);
}
}
-
- if (debug_args_to_skip && debug_args_to_skip != args_to_skip)
- BITMAP_FREE (debug_args_to_skip);
+ delete param_body_adjs;
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
gcc_assert (!id.debug_stmts.exists ());
pop_cfun ();
+ input_location = saved_location;
return;
}