/* Miscellaneous SSA utility functions.
- Copyright (C) 2001-2013 Free Software Foundation, Inc.
+ Copyright (C) 2001-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
#include "tree.h"
-#include "flags.h"
-#include "tm_p.h"
-#include "target.h"
-#include "ggc.h"
-#include "langhooks.h"
-#include "basic-block.h"
-#include "function.h"
-#include "gimple-pretty-print.h"
-#include "bitmap.h"
-#include "pointer-set.h"
-#include "tree-flow.h"
#include "gimple.h"
-#include "tree-inline.h"
-#include "hashtab.h"
+#include "cfghooks.h"
#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
+#include "fold-const.h"
+#include "stor-layout.h"
+#include "gimple-fold.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "gimple-walk.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-into-ssa.h"
+#include "tree-ssa.h"
#include "cfgloop.h"
+#include "cfgexpand.h"
+#include "tree-cfg.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "asan.h"
/* Pointer map of variable mappings, keyed by edge. */
-static struct pointer_map_t *edge_var_maps;
+static hash_map<edge, auto_vec<edge_var_map> > *edge_var_maps;
/* Add a mapping with PHI RESULT and PHI DEF associated with edge E. */
void
-redirect_edge_var_map_add (edge e, tree result, tree def, source_location locus)
+redirect_edge_var_map_add (edge e, tree result, tree def, location_t locus)
{
- void **slot;
- edge_var_map_vector *head;
edge_var_map new_node;
if (edge_var_maps == NULL)
- edge_var_maps = pointer_map_create ();
+ edge_var_maps = new hash_map<edge, auto_vec<edge_var_map> >;
- slot = pointer_map_insert (edge_var_maps, e);
- head = (edge_var_map_vector *) *slot;
- if (!head)
- vec_safe_reserve (head, 5);
+ auto_vec<edge_var_map> &slot = edge_var_maps->get_or_insert (e);
new_node.def = def;
new_node.result = result;
new_node.locus = locus;
- vec_safe_push (head, new_node);
- *slot = head;
+ slot.safe_push (new_node);
}
void
redirect_edge_var_map_clear (edge e)
{
- void **slot;
- edge_var_map_vector *head;
-
if (!edge_var_maps)
return;
- slot = pointer_map_contains (edge_var_maps, e);
+ auto_vec<edge_var_map> *head = edge_var_maps->get (e);
- if (slot)
- {
- head = (edge_var_map_vector *) *slot;
- vec_free (head);
- *slot = NULL;
- }
+ if (head)
+ head->release ();
}
/* Duplicate the redirected var mappings in OLDE in NEWE.
- Since we can't remove a mapping, let's just duplicate it. This assumes a
- pointer_map can have multiple edges mapping to the same var_map (many to
- one mapping), since we don't remove the previous mappings. */
+ This assumes a hash_map can have multiple edges mapping to the same
+ var_map (many to one mapping), since we don't remove the previous mappings.
+ */
void
redirect_edge_var_map_dup (edge newe, edge olde)
{
- void **new_slot, **old_slot;
- edge_var_map_vector *head;
-
if (!edge_var_maps)
return;
- new_slot = pointer_map_insert (edge_var_maps, newe);
- old_slot = pointer_map_contains (edge_var_maps, olde);
- if (!old_slot)
+ auto_vec<edge_var_map> *new_head = &edge_var_maps->get_or_insert (newe);
+ auto_vec<edge_var_map> *old_head = edge_var_maps->get (olde);
+ if (!old_head)
return;
- head = (edge_var_map_vector *) *old_slot;
- edge_var_map_vector *new_head = NULL;
- if (head)
- new_head = vec_safe_copy (head);
- else
- vec_safe_reserve (new_head, 5);
- *new_slot = new_head;
+ new_head->safe_splice (*old_head);
}
/* Return the variable mappings for a given edge. If there is none, return
NULL. */
-edge_var_map_vector *
+vec<edge_var_map> *
redirect_edge_var_map_vector (edge e)
{
- void **slot;
-
/* Hey, what kind of idiot would... you'd be surprised. */
if (!edge_var_maps)
return NULL;
- slot = pointer_map_contains (edge_var_maps, e);
+ auto_vec<edge_var_map> *slot = edge_var_maps->get (e);
if (!slot)
return NULL;
- return (edge_var_map_vector *) *slot;
-}
-
-/* Used by redirect_edge_var_map_destroy to free all memory. */
-
-static bool
-free_var_map_entry (const void *key ATTRIBUTE_UNUSED,
- void **value,
- void *data ATTRIBUTE_UNUSED)
-{
- edge_var_map_vector *head = (edge_var_map_vector *) *value;
- vec_free (head);
- return true;
+ return slot;
}
/* Clear the edge variable mappings. */
void
-redirect_edge_var_map_destroy (void)
+redirect_edge_var_map_empty (void)
{
if (edge_var_maps)
- {
- pointer_map_traverse (edge_var_maps, free_var_map_entry, NULL);
- pointer_map_destroy (edge_var_maps);
- edge_var_maps = NULL;
- }
+ edge_var_maps->empty ();
}
edge
ssa_redirect_edge (edge e, basic_block dest)
{
- gimple_stmt_iterator gsi;
- gimple phi;
+ gphi_iterator gsi;
+ gphi *phi;
redirect_edge_var_map_clear (e);
- /* Remove the appropriate PHI arguments in E's destination block. */
- for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- tree def;
- source_location locus ;
+ /* Remove the appropriate PHI arguments in E's destination block.
+ If we are redirecting a copied edge the destination has not
+ got PHI argument space reserved nor an interesting argument. */
+ if (! (e->dest->flags & BB_DUPLICATED))
+ for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree def;
+ location_t locus;
- phi = gsi_stmt (gsi);
- def = gimple_phi_arg_def (phi, e->dest_idx);
- locus = gimple_phi_arg_location (phi, e->dest_idx);
+ phi = gsi.phi ();
+ def = gimple_phi_arg_def (phi, e->dest_idx);
+ locus = gimple_phi_arg_location (phi, e->dest_idx);
- if (def == NULL_TREE)
- continue;
+ if (def == NULL_TREE)
+ continue;
- redirect_edge_var_map_add (e, gimple_phi_result (phi), def, locus);
- }
+ redirect_edge_var_map_add (e, gimple_phi_result (phi), def, locus);
+ }
e = redirect_edge_succ_nodup (e, dest);
void
flush_pending_stmts (edge e)
{
- gimple phi;
- edge_var_map_vector *v;
+ gphi *phi;
edge_var_map *vm;
int i;
- gimple_stmt_iterator gsi;
+ gphi_iterator gsi;
- v = redirect_edge_var_map_vector (e);
+ vec<edge_var_map> *v = redirect_edge_var_map_vector (e);
if (!v)
return;
{
tree def;
- phi = gsi_stmt (gsi);
+ phi = gsi.phi ();
def = redirect_edge_var_map_def (vm);
add_phi_arg (phi, def, e, redirect_edge_var_map_location (vm));
}
redirect_edge_var_map_clear (e);
}
+/* Replace the LHS of STMT, an assignment, either a GIMPLE_ASSIGN or a
+   GIMPLE_CALL, with NLHS, in preparation for modifying the RHS to an
+   expression with a different value.
+
+   This will update any annotations (say debug bind stmts) referring
+   to the original LHS, so that they use the RHS instead.  This is
+   done even if NLHS and LHS are the same, for it is understood that
+   the RHS will be modified afterwards, and NLHS will not be assigned
+   an equivalent value.
+
+   Adjusting any non-annotation uses of the LHS, if needed, is a
+   responsibility of the caller.
+
+   The effect of this call should be pretty much the same as that of
+   inserting a copy of STMT before STMT, and then removing the
+   original stmt, at which time gsi_remove() would have updated
+   annotations, but using this function saves all the inserting,
+   copying and removing.  */
+
+void
+gimple_replace_ssa_lhs (gimple *stmt, tree nlhs)
+{
+  if (MAY_HAVE_DEBUG_BIND_STMTS)
+    {
+      tree lhs = gimple_get_lhs (stmt);
+
+      /* Debug temps can only stand in for an SSA name defined here.  */
+      gcc_assert (SSA_NAME_DEF_STMT (lhs) == stmt);
+
+      insert_debug_temp_for_var_def (NULL, lhs);
+    }
+
+  gimple_set_lhs (stmt, nlhs);
+}
+
+
/* Given a tree for an expression for which we might want to emit
locations or values in debug information (generally a variable, but
we might deal with other kinds of trees in the future), return the
tree
target_for_debug_bind (tree var)
{
- if (!MAY_HAVE_DEBUG_STMTS)
+ if (!MAY_HAVE_DEBUG_BIND_STMTS)
return NULL_TREE;
if (TREE_CODE (var) == SSA_NAME)
return NULL_TREE;
}
- if ((TREE_CODE (var) != VAR_DECL
- || VAR_DECL_IS_VIRTUAL_OPERAND (var))
+ if ((!VAR_P (var) || VAR_DECL_IS_VIRTUAL_OPERAND (var))
&& TREE_CODE (var) != PARM_DECL)
return NULL_TREE;
{
imm_use_iterator imm_iter;
use_operand_p use_p;
- gimple stmt;
- gimple def_stmt = NULL;
+ gimple *stmt;
+ gimple *def_stmt = NULL;
int usecount = 0;
tree value = NULL;
- if (!MAY_HAVE_DEBUG_STMTS)
+ if (!MAY_HAVE_DEBUG_BIND_STMTS)
return;
/* If this name has already been registered for replacement, do nothing
we'll have to drop debug information. */
if (gimple_code (def_stmt) == GIMPLE_PHI)
{
- value = degenerate_phi_result (def_stmt);
+ value = degenerate_phi_result (as_a <gphi *> (def_stmt));
if (value && walk_tree (&value, find_released_ssa_name, NULL, NULL))
value = NULL;
/* error_mark_node is what fixup_noreturn_call changes PHI arguments
else if (value == error_mark_node)
value = NULL;
}
+ else if (gimple_clobber_p (def_stmt))
+ /* We can end up here when rewriting a decl into SSA and coming
+ along a clobber for the original decl. Turn that into
+ # DEBUG decl => NULL */
+ value = NULL;
else if (is_gimple_assign (def_stmt))
{
bool no_value = false;
;
else
{
- gimple def_temp;
+ gdebug *def_temp;
tree vexpr = make_node (DEBUG_EXPR_DECL);
def_temp = gimple_build_debug_bind (vexpr,
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (value);
if (DECL_P (value))
- DECL_MODE (vexpr) = DECL_MODE (value);
+ SET_DECL_MODE (vexpr, DECL_MODE (value));
else
- DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (value));
+ SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (value)));
if (gsi)
gsi_insert_before (gsi, def_temp, GSI_SAME_STMT);
void
insert_debug_temps_for_defs (gimple_stmt_iterator *gsi)
{
- gimple stmt;
+ gimple *stmt;
ssa_op_iter op_iter;
def_operand_p def_p;
- if (!MAY_HAVE_DEBUG_STMTS)
+ if (!MAY_HAVE_DEBUG_BIND_STMTS)
return;
stmt = gsi_stmt (*gsi);
/* Reset all debug stmts that use SSA_NAME(s) defined in STMT. */
void
-reset_debug_uses (gimple stmt)
+reset_debug_uses (gimple *stmt)
{
ssa_op_iter op_iter;
def_operand_p def_p;
imm_use_iterator imm_iter;
- gimple use_stmt;
+ gimple *use_stmt;
- if (!MAY_HAVE_DEBUG_STMTS)
+ if (!MAY_HAVE_DEBUG_BIND_STMTS)
return;
FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
/* Performing a topological sort is probably overkill, this will
most likely run in slightly superlinear time, rather than the
- pathological quadratic worst case. */
+ pathological quadratic worst case.
+ But iterate from max SSA name version to min one because
+ that mimics allocation order during code generation behavior best.
+ Use an array for this which we compact on-the-fly with a NULL
+ marker moving towards the end of the vector. */
+ auto_vec<tree, 16> names;
+ names.reserve (bitmap_count_bits (toremove) + 1);
+ names.quick_push (NULL_TREE);
+ EXECUTE_IF_SET_IN_BITMAP (toremove, 0, j, bi)
+ names.quick_push (ssa_name (j));
+
+ bitmap_tree_view (toremove);
while (!bitmap_empty_p (toremove))
- EXECUTE_IF_SET_IN_BITMAP (toremove, 0, j, bi)
- {
- bool remove_now = true;
- tree var = ssa_name (j);
- gimple stmt;
- imm_use_iterator uit;
+ {
+ j = names.length () - 1;
+ for (unsigned i = names.length () - 1; names[i];)
+ {
+ bool remove_now = true;
+ tree var = names[i];
+ gimple *stmt;
+ imm_use_iterator uit;
- FOR_EACH_IMM_USE_STMT (stmt, uit, var)
- {
- ssa_op_iter dit;
- def_operand_p def_p;
+ FOR_EACH_IMM_USE_STMT (stmt, uit, var)
+ {
+ ssa_op_iter dit;
+ def_operand_p def_p;
- /* We can't propagate PHI nodes into debug stmts. */
- if (gimple_code (stmt) == GIMPLE_PHI
- || is_gimple_debug (stmt))
- continue;
+ /* We can't propagate PHI nodes into debug stmts. */
+ if (gimple_code (stmt) == GIMPLE_PHI
+ || is_gimple_debug (stmt))
+ continue;
- /* If we find another definition to remove that uses
- the one we're looking at, defer the removal of this
- one, so that it can be propagated into debug stmts
- after the other is. */
- FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, dit, SSA_OP_DEF)
- {
- tree odef = DEF_FROM_PTR (def_p);
+ /* If we find another definition to remove that uses
+ the one we're looking at, defer the removal of this
+ one, so that it can be propagated into debug stmts
+ after the other is. */
+ FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, dit, SSA_OP_DEF)
+ {
+ tree odef = DEF_FROM_PTR (def_p);
- if (bitmap_bit_p (toremove, SSA_NAME_VERSION (odef)))
- {
- remove_now = false;
- break;
- }
- }
+ if (bitmap_bit_p (toremove, SSA_NAME_VERSION (odef)))
+ {
+ remove_now = false;
+ break;
+ }
+ }
- if (!remove_now)
- BREAK_FROM_IMM_USE_STMT (uit);
- }
+ if (!remove_now)
+ BREAK_FROM_IMM_USE_STMT (uit);
+ }
- if (remove_now)
- {
- gimple def = SSA_NAME_DEF_STMT (var);
- gimple_stmt_iterator gsi = gsi_for_stmt (def);
+ if (remove_now)
+ {
+ gimple *def = SSA_NAME_DEF_STMT (var);
+ gimple_stmt_iterator gsi = gsi_for_stmt (def);
- if (gimple_code (def) == GIMPLE_PHI)
- remove_phi_node (&gsi, true);
- else
- {
- gsi_remove (&gsi, true);
- release_defs (def);
- }
+ if (gimple_code (def) == GIMPLE_PHI)
+ remove_phi_node (&gsi, true);
+ else
+ {
+ gsi_remove (&gsi, true);
+ release_defs (def);
+ }
+ bitmap_clear_bit (toremove, SSA_NAME_VERSION (var));
+ }
+ else
+ --i;
+ if (--j != i)
+ names[i] = names[j];
+ }
+ }
+ bitmap_list_view (toremove);
+}
- bitmap_clear_bit (toremove, j);
- }
- }
+/* Disable warnings about missing quoting in GCC diagnostics for
+ the verification errors. Their format strings don't follow GCC
+ diagnostic conventions and the calls are ultimately followed by
+ one to internal_error. */
+#if __GNUC__ >= 10
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wformat-diag"
+#endif
+
+/* Verify virtual SSA form in BB and, recursively, in its successors.
+   CURRENT_VDEF is the virtual definition live on entry to BB; VISITED
+   marks blocks already checked.  Returns true on verification failure
+   (diagnostics are printed to stderr; the caller is expected to follow
+   up with internal_error).  */
+
+bool
+verify_vssa (basic_block bb, tree current_vdef, sbitmap visited)
+{
+  bool err = false;
+
+  if (bitmap_bit_p (visited, bb->index))
+    return false;
+
+  bitmap_set_bit (visited, bb->index);
+
+  /* Pick up the single virtual PHI def.  */
+  gphi *phi = NULL;
+  for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+       gsi_next (&si))
+    {
+      tree res = gimple_phi_result (si.phi ());
+      if (virtual_operand_p (res))
+	{
+	  if (phi)
+	    {
+	      error ("multiple virtual PHI nodes in BB %d", bb->index);
+	      print_gimple_stmt (stderr, phi, 0);
+	      print_gimple_stmt (stderr, si.phi (), 0);
+	      err = true;
+	    }
+	  else
+	    phi = si.phi ();
+	}
+    }
+  if (phi)
+    {
+      current_vdef = gimple_phi_result (phi);
+      if (TREE_CODE (current_vdef) != SSA_NAME)
+	{
+	  error ("virtual definition is not an SSA name");
+	  print_gimple_stmt (stderr, phi, 0);
+	  err = true;
+	}
+    }
+
+  /* Verify stmts.  */
+  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+       gsi_next (&gsi))
+    {
+      gimple *stmt = gsi_stmt (gsi);
+      tree vuse = gimple_vuse (stmt);
+      if (vuse)
+	{
+	  if (vuse != current_vdef)
+	    {
+	      error ("stmt with wrong VUSE");
+	      print_gimple_stmt (stderr, stmt, 0, TDF_VOPS);
+	      fprintf (stderr, "expected ");
+	      print_generic_expr (stderr, current_vdef);
+	      fprintf (stderr, "\n");
+	      err = true;
+	    }
+	  tree vdef = gimple_vdef (stmt);
+	  if (vdef)
+	    {
+	      current_vdef = vdef;
+	      if (TREE_CODE (current_vdef) != SSA_NAME)
+		{
+		  error ("virtual definition is not an SSA name");
+		  /* Print the offending stmt, not PHI, which may be
+		     NULL here and is not the stmt that failed.  */
+		  print_gimple_stmt (stderr, stmt, 0);
+		  err = true;
+		}
+	    }
+	}
+    }
+
+  /* Verify destination PHI uses and recurse.  */
+  edge_iterator ei;
+  edge e;
+  FOR_EACH_EDGE (e, ei, bb->succs)
+    {
+      gphi *phi = get_virtual_phi (e->dest);
+      if (phi
+	  && PHI_ARG_DEF_FROM_EDGE (phi, e) != current_vdef)
+	{
+	  error ("PHI node with wrong VUSE on edge from BB %d",
+		 e->src->index);
+	  print_gimple_stmt (stderr, phi, 0, TDF_VOPS);
+	  fprintf (stderr, "expected ");
+	  print_generic_expr (stderr, current_vdef);
+	  fprintf (stderr, "\n");
+	  err = true;
+	}
+
+      /* Recurse.  */
+      err |= verify_vssa (e->dest, current_vdef, visited);
+    }
+
+  return err;
+}
/* Return true if SSA_NAME is malformed and mark it visited.
static bool
verify_def (basic_block bb, basic_block *definition_block, tree ssa_name,
- gimple stmt, bool is_virtual)
+ gimple *stmt, bool is_virtual)
{
if (verify_ssa_name (ssa_name, is_virtual))
goto err;
err:
fprintf (stderr, "while verifying SSA_NAME ");
- print_generic_expr (stderr, ssa_name, 0);
+ print_generic_expr (stderr, ssa_name);
fprintf (stderr, " in statement\n");
print_gimple_stmt (stderr, stmt, 4, TDF_VOPS);
static bool
verify_use (basic_block bb, basic_block def_bb, use_operand_p use_p,
- gimple stmt, bool check_abnormal, bitmap names_defined_in_bb)
+ gimple *stmt, bool check_abnormal, bitmap names_defined_in_bb)
{
bool err = false;
tree ssa_name = USE_FROM_PTR (use_p);
definition of SSA_NAME. */
static bool
-verify_phi_args (gimple phi, basic_block bb, basic_block *definition_block)
+verify_phi_args (gphi *phi, basic_block bb, basic_block *definition_block)
{
edge e;
bool err = false;
tree base = TREE_OPERAND (op, 0);
while (handled_component_p (base))
base = TREE_OPERAND (base, 0);
- if ((TREE_CODE (base) == VAR_DECL
+ if ((VAR_P (base)
|| TREE_CODE (base) == PARM_DECL
|| TREE_CODE (base) == RESULT_DECL)
&& !TREE_ADDRESSABLE (base))
TODO: verify the variable annotations. */
DEBUG_FUNCTION void
-verify_ssa (bool check_modified_stmt)
+verify_ssa (bool check_modified_stmt, bool check_ssa_operands)
{
- size_t i;
basic_block bb;
basic_block *definition_block = XCNEWVEC (basic_block, num_ssa_names);
ssa_op_iter iter;
tree op;
enum dom_state orig_dom_state = dom_info_state (CDI_DOMINATORS);
- bitmap names_defined_in_bb = BITMAP_ALLOC (NULL);
+ auto_bitmap names_defined_in_bb;
gcc_assert (!need_ssa_update_p (cfun));
timevar_push (TV_TREE_SSA_VERIFY);
- /* Keep track of SSA names present in the IL. */
- for (i = 1; i < num_ssa_names; i++)
{
- tree name = ssa_name (i);
- if (name)
+ /* Keep track of SSA names present in the IL. */
+ size_t i;
+ tree name;
+ hash_map <void *, tree> ssa_info;
+
+ FOR_EACH_SSA_NAME (i, name, cfun)
{
- gimple stmt;
+ gimple *stmt;
TREE_VISITED (name) = 0;
verify_ssa_name (name, virtual_operand_p (name));
if (!gimple_nop_p (stmt))
{
basic_block bb = gimple_bb (stmt);
- verify_def (bb, definition_block,
- name, stmt, virtual_operand_p (name));
+ if (verify_def (bb, definition_block,
+ name, stmt, virtual_operand_p (name)))
+ goto err;
+ }
+ void *info = NULL;
+ if (POINTER_TYPE_P (TREE_TYPE (name)))
+ info = SSA_NAME_PTR_INFO (name);
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (name)))
+ info = SSA_NAME_RANGE_INFO (name);
+ if (info)
+ {
+ bool existed;
+ tree &val = ssa_info.get_or_insert (info, &existed);
+ if (existed)
+ {
+ error ("shared SSA name info");
+ print_generic_expr (stderr, val);
+ fprintf (stderr, " and ");
+ print_generic_expr (stderr, name);
+ fprintf (stderr, "\n");
+ goto err;
+ }
+ else
+ val = name;
}
}
}
/* Now verify all the uses and make sure they agree with the definitions
found in the previous pass. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
- gimple phi;
edge_iterator ei;
- gimple_stmt_iterator gsi;
/* Make sure that all edges have a clear 'aux' field. */
FOR_EACH_EDGE (e, ei, bb->preds)
}
/* Verify the arguments for every PHI node in the block. */
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- phi = gsi_stmt (gsi);
+ gphi *phi = gsi.phi ();
if (verify_phi_args (phi, bb, definition_block))
goto err;
}
/* Now verify all the uses and vuses in every statement of the block. */
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
use_operand_p use_p;
if (check_modified_stmt && gimple_modified_p (stmt))
goto err;
}
- if (verify_ssa_operands (stmt))
+ if (check_ssa_operands && verify_ssa_operands (cfun, stmt))
{
print_gimple_stmt (stderr, stmt, 0, TDF_VOPS);
goto err;
free (definition_block);
+ if (gimple_vop (cfun)
+ && ssa_default_def (cfun, gimple_vop (cfun)))
+ {
+ auto_sbitmap visited (last_basic_block_for_fn (cfun) + 1);
+ bitmap_clear (visited);
+ if (verify_vssa (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ ssa_default_def (cfun, gimple_vop (cfun)), visited))
+ goto err;
+ }
+
/* Restore the dominance information to its prior known state, so
that we do not perturb the compiler's subsequent behavior. */
if (orig_dom_state == DOM_NONE)
else
set_dom_info_availability (CDI_DOMINATORS, orig_dom_state);
- BITMAP_FREE (names_defined_in_bb);
timevar_pop (TV_TREE_SSA_VERIFY);
return;
internal_error ("verify_ssa failed");
}
-/* Return true if the DECL_UID in both trees are equal. */
-
-static int
-uid_ssaname_map_eq (const void *va, const void *vb)
-{
- const_tree a = (const_tree) va;
- const_tree b = (const_tree) vb;
- return (a->ssa_name.var->decl_minimal.uid == b->ssa_name.var->decl_minimal.uid);
-}
-
-/* Hash a tree in a uid_decl_map. */
-
-static unsigned int
-uid_ssaname_map_hash (const void *item)
-{
- return ((const_tree)item)->ssa_name.var->decl_minimal.uid;
-}
-
+#if __GNUC__ >= 10
+# pragma GCC diagnostic pop
+#endif
/* Initialize global DFA and SSA structures. */
void
init_tree_ssa (struct function *fn)
{
- fn->gimple_df = ggc_alloc_cleared_gimple_df ();
- fn->gimple_df->default_defs = htab_create_ggc (20, uid_ssaname_map_hash,
- uid_ssaname_map_eq, NULL);
+ fn->gimple_df = ggc_cleared_alloc<gimple_df> ();
+ fn->gimple_df->default_defs = hash_table<ssa_name_hasher>::create_ggc (20);
pt_solution_reset (&fn->gimple_df->escaped);
init_ssanames (fn, 0);
}
-/* Do the actions required to initialize internal data structures used
- in tree-ssa optimization passes. */
-
-static unsigned int
-execute_init_datastructures (void)
-{
- /* Allocate hash tables, arrays and other structures. */
- init_tree_ssa (cfun);
- return 0;
-}
-
-namespace {
-
-const pass_data pass_data_init_datastructures =
-{
- GIMPLE_PASS, /* type */
- "*init_datastructures", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- true, /* has_execute */
- TV_NONE, /* tv_id */
- PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
-};
-
-class pass_init_datastructures : public gimple_opt_pass
-{
-public:
- pass_init_datastructures(gcc::context *ctxt)
- : gimple_opt_pass(pass_data_init_datastructures, ctxt)
- {}
-
- /* opt_pass methods: */
- unsigned int execute () { return execute_init_datastructures (); }
-
-}; // class pass_init_datastructures
-
-} // anon namespace
-
-gimple_opt_pass *
-make_pass_init_datastructures (gcc::context *ctxt)
-{
- return new pass_init_datastructures (ctxt);
-}
-
/* Deallocate memory associated with SSA data structures for FNDECL. */
void
-delete_tree_ssa (void)
+delete_tree_ssa (struct function *fn)
{
- fini_ssanames ();
+ fini_ssanames (fn);
/* We no longer maintain the SSA operand cache at this point. */
- if (ssa_operands_active (cfun))
- fini_ssa_operands ();
-
- htab_delete (cfun->gimple_df->default_defs);
- cfun->gimple_df->default_defs = NULL;
- pt_solution_reset (&cfun->gimple_df->escaped);
- if (cfun->gimple_df->decls_to_pointers != NULL)
- pointer_map_destroy (cfun->gimple_df->decls_to_pointers);
- cfun->gimple_df->decls_to_pointers = NULL;
- cfun->gimple_df->modified_noreturn_calls = NULL;
- cfun->gimple_df = NULL;
-
- /* We no longer need the edge variable maps. */
- redirect_edge_var_map_destroy ();
-}
-
-/* Return true if the conversion from INNER_TYPE to OUTER_TYPE is a
- useless type conversion, otherwise return false.
-
- This function implicitly defines the middle-end type system. With
- the notion of 'a < b' meaning that useless_type_conversion_p (a, b)
- holds and 'a > b' meaning that useless_type_conversion_p (b, a) holds,
- the following invariants shall be fulfilled:
-
- 1) useless_type_conversion_p is transitive.
- If a < b and b < c then a < c.
-
- 2) useless_type_conversion_p is not symmetric.
- From a < b does not follow a > b.
-
- 3) Types define the available set of operations applicable to values.
- A type conversion is useless if the operations for the target type
- is a subset of the operations for the source type. For example
- casts to void* are useless, casts from void* are not (void* can't
- be dereferenced or offsetted, but copied, hence its set of operations
- is a strict subset of that of all other data pointer types). Casts
- to const T* are useless (can't be written to), casts from const T*
- to T* are not. */
-
-bool
-useless_type_conversion_p (tree outer_type, tree inner_type)
-{
- /* Do the following before stripping toplevel qualifiers. */
- if (POINTER_TYPE_P (inner_type)
- && POINTER_TYPE_P (outer_type))
- {
- /* Do not lose casts between pointers to different address spaces. */
- if (TYPE_ADDR_SPACE (TREE_TYPE (outer_type))
- != TYPE_ADDR_SPACE (TREE_TYPE (inner_type)))
- return false;
- }
-
- /* From now on qualifiers on value types do not matter. */
- inner_type = TYPE_MAIN_VARIANT (inner_type);
- outer_type = TYPE_MAIN_VARIANT (outer_type);
-
- if (inner_type == outer_type)
- return true;
-
- /* If we know the canonical types, compare them. */
- if (TYPE_CANONICAL (inner_type)
- && TYPE_CANONICAL (inner_type) == TYPE_CANONICAL (outer_type))
- return true;
-
- /* Changes in machine mode are never useless conversions unless we
- deal with aggregate types in which case we defer to later checks. */
- if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type)
- && !AGGREGATE_TYPE_P (inner_type))
- return false;
+ if (ssa_operands_active (fn))
+ fini_ssa_operands (fn);
- /* If both the inner and outer types are integral types, then the
- conversion is not necessary if they have the same mode and
- signedness and precision, and both or neither are boolean. */
- if (INTEGRAL_TYPE_P (inner_type)
- && INTEGRAL_TYPE_P (outer_type))
- {
- /* Preserve changes in signedness or precision. */
- if (TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type)
- || TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type))
- return false;
-
- /* Preserve conversions to/from BOOLEAN_TYPE if types are not
- of precision one. */
- if (((TREE_CODE (inner_type) == BOOLEAN_TYPE)
- != (TREE_CODE (outer_type) == BOOLEAN_TYPE))
- && TYPE_PRECISION (outer_type) != 1)
- return false;
-
- /* We don't need to preserve changes in the types minimum or
- maximum value in general as these do not generate code
- unless the types precisions are different. */
- return true;
- }
-
- /* Scalar floating point types with the same mode are compatible. */
- else if (SCALAR_FLOAT_TYPE_P (inner_type)
- && SCALAR_FLOAT_TYPE_P (outer_type))
- return true;
-
- /* Fixed point types with the same mode are compatible. */
- else if (FIXED_POINT_TYPE_P (inner_type)
- && FIXED_POINT_TYPE_P (outer_type))
- return true;
-
- /* We need to take special care recursing to pointed-to types. */
- else if (POINTER_TYPE_P (inner_type)
- && POINTER_TYPE_P (outer_type))
- {
- /* Do not lose casts to function pointer types. */
- if ((TREE_CODE (TREE_TYPE (outer_type)) == FUNCTION_TYPE
- || TREE_CODE (TREE_TYPE (outer_type)) == METHOD_TYPE)
- && !(TREE_CODE (TREE_TYPE (inner_type)) == FUNCTION_TYPE
- || TREE_CODE (TREE_TYPE (inner_type)) == METHOD_TYPE))
- return false;
-
- /* We do not care for const qualification of the pointed-to types
- as const qualification has no semantic value to the middle-end. */
-
- /* Otherwise pointers/references are equivalent. */
- return true;
- }
-
- /* Recurse for complex types. */
- else if (TREE_CODE (inner_type) == COMPLEX_TYPE
- && TREE_CODE (outer_type) == COMPLEX_TYPE)
- return useless_type_conversion_p (TREE_TYPE (outer_type),
- TREE_TYPE (inner_type));
-
- /* Recurse for vector types with the same number of subparts. */
- else if (TREE_CODE (inner_type) == VECTOR_TYPE
- && TREE_CODE (outer_type) == VECTOR_TYPE
- && TYPE_PRECISION (inner_type) == TYPE_PRECISION (outer_type))
- return useless_type_conversion_p (TREE_TYPE (outer_type),
- TREE_TYPE (inner_type));
-
- else if (TREE_CODE (inner_type) == ARRAY_TYPE
- && TREE_CODE (outer_type) == ARRAY_TYPE)
- {
- /* Preserve string attributes. */
- if (TYPE_STRING_FLAG (inner_type) != TYPE_STRING_FLAG (outer_type))
- return false;
-
- /* Conversions from array types with unknown extent to
- array types with known extent are not useless. */
- if (!TYPE_DOMAIN (inner_type)
- && TYPE_DOMAIN (outer_type))
- return false;
-
- /* Nor are conversions from array types with non-constant size to
- array types with constant size or to different size. */
- if (TYPE_SIZE (outer_type)
- && TREE_CODE (TYPE_SIZE (outer_type)) == INTEGER_CST
- && (!TYPE_SIZE (inner_type)
- || TREE_CODE (TYPE_SIZE (inner_type)) != INTEGER_CST
- || !tree_int_cst_equal (TYPE_SIZE (outer_type),
- TYPE_SIZE (inner_type))))
- return false;
-
- /* Check conversions between arrays with partially known extents.
- If the array min/max values are constant they have to match.
- Otherwise allow conversions to unknown and variable extents.
- In particular this declares conversions that may change the
- mode to BLKmode as useless. */
- if (TYPE_DOMAIN (inner_type)
- && TYPE_DOMAIN (outer_type)
- && TYPE_DOMAIN (inner_type) != TYPE_DOMAIN (outer_type))
- {
- tree inner_min = TYPE_MIN_VALUE (TYPE_DOMAIN (inner_type));
- tree outer_min = TYPE_MIN_VALUE (TYPE_DOMAIN (outer_type));
- tree inner_max = TYPE_MAX_VALUE (TYPE_DOMAIN (inner_type));
- tree outer_max = TYPE_MAX_VALUE (TYPE_DOMAIN (outer_type));
-
- /* After gimplification a variable min/max value carries no
- additional information compared to a NULL value. All that
- matters has been lowered to be part of the IL. */
- if (inner_min && TREE_CODE (inner_min) != INTEGER_CST)
- inner_min = NULL_TREE;
- if (outer_min && TREE_CODE (outer_min) != INTEGER_CST)
- outer_min = NULL_TREE;
- if (inner_max && TREE_CODE (inner_max) != INTEGER_CST)
- inner_max = NULL_TREE;
- if (outer_max && TREE_CODE (outer_max) != INTEGER_CST)
- outer_max = NULL_TREE;
-
- /* Conversions NULL / variable <- cst are useless, but not
- the other way around. */
- if (outer_min
- && (!inner_min
- || !tree_int_cst_equal (inner_min, outer_min)))
- return false;
- if (outer_max
- && (!inner_max
- || !tree_int_cst_equal (inner_max, outer_max)))
- return false;
- }
-
- /* Recurse on the element check. */
- return useless_type_conversion_p (TREE_TYPE (outer_type),
- TREE_TYPE (inner_type));
- }
-
- else if ((TREE_CODE (inner_type) == FUNCTION_TYPE
- || TREE_CODE (inner_type) == METHOD_TYPE)
- && TREE_CODE (inner_type) == TREE_CODE (outer_type))
- {
- tree outer_parm, inner_parm;
-
- /* If the return types are not compatible bail out. */
- if (!useless_type_conversion_p (TREE_TYPE (outer_type),
- TREE_TYPE (inner_type)))
- return false;
-
- /* Method types should belong to a compatible base class. */
- if (TREE_CODE (inner_type) == METHOD_TYPE
- && !useless_type_conversion_p (TYPE_METHOD_BASETYPE (outer_type),
- TYPE_METHOD_BASETYPE (inner_type)))
- return false;
-
- /* A conversion to an unprototyped argument list is ok. */
- if (!prototype_p (outer_type))
- return true;
-
- /* If the unqualified argument types are compatible the conversion
- is useless. */
- if (TYPE_ARG_TYPES (outer_type) == TYPE_ARG_TYPES (inner_type))
- return true;
-
- for (outer_parm = TYPE_ARG_TYPES (outer_type),
- inner_parm = TYPE_ARG_TYPES (inner_type);
- outer_parm && inner_parm;
- outer_parm = TREE_CHAIN (outer_parm),
- inner_parm = TREE_CHAIN (inner_parm))
- if (!useless_type_conversion_p
- (TYPE_MAIN_VARIANT (TREE_VALUE (outer_parm)),
- TYPE_MAIN_VARIANT (TREE_VALUE (inner_parm))))
- return false;
-
- /* If there is a mismatch in the number of arguments the functions
- are not compatible. */
- if (outer_parm || inner_parm)
- return false;
-
- /* Defer to the target if necessary. */
- if (TYPE_ATTRIBUTES (inner_type) || TYPE_ATTRIBUTES (outer_type))
- return comp_type_attributes (outer_type, inner_type) != 0;
-
- return true;
- }
-
- /* For aggregates we rely on TYPE_CANONICAL exclusively and require
- explicit conversions for types involving to be structurally
- compared types. */
- else if (AGGREGATE_TYPE_P (inner_type)
- && TREE_CODE (inner_type) == TREE_CODE (outer_type))
- return false;
-
- return false;
-}
-
-/* Return true if a conversion from either type of TYPE1 and TYPE2
- to the other is not required. Otherwise return false. */
+ fn->gimple_df->default_defs->empty ();
+ fn->gimple_df->default_defs = NULL;
+ pt_solution_reset (&fn->gimple_df->escaped);
+ if (fn->gimple_df->decls_to_pointers != NULL)
+ delete fn->gimple_df->decls_to_pointers;
+ fn->gimple_df->decls_to_pointers = NULL;
+ fn->gimple_df = NULL;
-bool
-types_compatible_p (tree type1, tree type2)
-{
- return (type1 == type2
- || (useless_type_conversion_p (type1, type2)
- && useless_type_conversion_p (type2, type1)));
+ /* We no longer need the edge variable maps. */
+ redirect_edge_var_map_empty ();
}
/* Return true if EXPR is a useless type conversion, otherwise return
return exp;
}
+/* Return true if T, an SSA_NAME, has an implicit default defined value.  */
-/* Internal helper for walk_use_def_chains. VAR, FN and DATA are as
- described in walk_use_def_chains.
-
- VISITED is a pointer set used to mark visited SSA_NAMEs to avoid
- infinite loops. We used to have a bitmap for this to just mark
- SSA versions we had visited. But non-sparse bitmaps are way too
- expensive, while sparse bitmaps may cause quadratic behavior.
-
- IS_DFS is true if the caller wants to perform a depth-first search
- when visiting PHI nodes. A DFS will visit each PHI argument and
- call FN after each one. Otherwise, all the arguments are
- visited first and then FN is called with each of the visited
- arguments in a separate pass. */
-
-static bool
-walk_use_def_chains_1 (tree var, walk_use_def_chains_fn fn, void *data,
- struct pointer_set_t *visited, bool is_dfs)
+bool
+ssa_defined_default_def_p (tree t)
{
- gimple def_stmt;
+ tree var = SSA_NAME_VAR (t);
- if (pointer_set_insert (visited, var))
- return false;
-
- def_stmt = SSA_NAME_DEF_STMT (var);
-
- if (gimple_code (def_stmt) != GIMPLE_PHI)
- {
- /* If we reached the end of the use-def chain, call FN. */
- return fn (var, def_stmt, data);
- }
- else
- {
- size_t i;
-
- /* When doing a breadth-first search, call FN before following the
- use-def links for each argument. */
- if (!is_dfs)
- for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
- if (fn (gimple_phi_arg_def (def_stmt, i), def_stmt, data))
- return true;
-
- /* Follow use-def links out of each PHI argument. */
- for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
- {
- tree arg = gimple_phi_arg_def (def_stmt, i);
-
- /* ARG may be NULL for newly introduced PHI nodes. */
- if (arg
- && TREE_CODE (arg) == SSA_NAME
- && walk_use_def_chains_1 (arg, fn, data, visited, is_dfs))
- return true;
- }
-
- /* When doing a depth-first search, call FN after following the
- use-def links for each argument. */
- if (is_dfs)
- for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
- if (fn (gimple_phi_arg_def (def_stmt, i), def_stmt, data))
- return true;
- }
+ if (!var)
+ ;
+ /* Parameters get their initial value from the function entry. */
+ else if (TREE_CODE (var) == PARM_DECL)
+ return true;
+ /* When returning by reference the return address is actually a hidden
+ parameter. */
+ else if (TREE_CODE (var) == RESULT_DECL && DECL_BY_REFERENCE (var))
+ return true;
+ /* Hard register variables get their initial value from the ether. */
+ else if (VAR_P (var) && DECL_HARD_REGISTER (var))
+ return true;
return false;
}
+/* Return true if T, an SSA_NAME, has an undefined value. PARTIAL is what
+ should be returned if the value is only partially undefined. */
-/* Walk use-def chains starting at the SSA variable VAR. Call
- function FN at each reaching definition found. FN takes three
- arguments: VAR, its defining statement (DEF_STMT) and a generic
- pointer to whatever state information that FN may want to maintain
- (DATA). FN is able to stop the walk by returning true, otherwise
- in order to continue the walk, FN should return false.
-
- Note, that if DEF_STMT is a PHI node, the semantics are slightly
- different. The first argument to FN is no longer the original
- variable VAR, but the PHI argument currently being examined. If FN
- wants to get at VAR, it should call PHI_RESULT (PHI).
-
- If IS_DFS is true, this function will:
-
- 1- walk the use-def chains for all the PHI arguments, and,
- 2- call (*FN) (ARG, PHI, DATA) on all the PHI arguments.
-
- If IS_DFS is false, the two steps above are done in reverse order
- (i.e., a breadth-first search). */
-
-void
-walk_use_def_chains (tree var, walk_use_def_chains_fn fn, void *data,
- bool is_dfs)
+bool
+ssa_undefined_value_p (tree t, bool partial)
{
- gimple def_stmt;
+ gimple *def_stmt;
- gcc_assert (TREE_CODE (var) == SSA_NAME);
+ if (ssa_defined_default_def_p (t))
+ return false;
- def_stmt = SSA_NAME_DEF_STMT (var);
+ /* The value is undefined iff its definition statement is empty. */
+ def_stmt = SSA_NAME_DEF_STMT (t);
+ if (gimple_nop_p (def_stmt))
+ return true;
- /* We only need to recurse if the reaching definition comes from a PHI
- node. */
- if (gimple_code (def_stmt) != GIMPLE_PHI)
- (*fn) (var, def_stmt, data);
- else
+ /* Check whether a COMPLEX_EXPR result is only partially defined,
+ i.e. whether either of its operands is undefined. */
+ if (partial && is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == COMPLEX_EXPR)
{
- struct pointer_set_t *visited = pointer_set_create ();
- walk_use_def_chains_1 (var, fn, data, visited, is_dfs);
- pointer_set_destroy (visited);
- }
-}
-
-\f
-/* Emit warnings for uninitialized variables. This is done in two passes.
-
- The first pass notices real uses of SSA names with undefined values.
- Such uses are unconditionally uninitialized, and we can be certain that
- such a use is a mistake. This pass is run before most optimizations,
- so that we catch as many as we can.
+ tree rhs1, rhs2;
- The second pass follows PHI nodes to find uses that are potentially
- uninitialized. In this case we can't necessarily prove that the use
- is really uninitialized. This pass is run after most optimizations,
- so that we thread as many jumps and possible, and delete as much dead
- code as possible, in order to reduce false positives. We also look
- again for plain uninitialized variables, since optimization may have
- changed conditionally uninitialized to unconditionally uninitialized. */
-
-/* Emit a warning for EXPR based on variable VAR at the point in the
- program T, an SSA_NAME, is used being uninitialized. The exact
- warning text is in MSGID and LOCUS may contain a location or be null.
- WC is the warning code. */
-
-void
-warn_uninit (enum opt_code wc, tree t,
- tree expr, tree var, const char *gmsgid, void *data)
-{
- gimple context = (gimple) data;
- location_t location, cfun_loc;
- expanded_location xloc, floc;
-
- if (!ssa_undefined_value_p (t))
- return;
-
- /* TREE_NO_WARNING either means we already warned, or the front end
- wishes to suppress the warning. */
- if ((context
- && (gimple_no_warning_p (context)
- || (gimple_assign_single_p (context)
- && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
- || TREE_NO_WARNING (expr))
- return;
-
- location = (context != NULL && gimple_has_location (context))
- ? gimple_location (context)
- : DECL_SOURCE_LOCATION (var);
- location = linemap_resolve_location (line_table, location,
- LRK_SPELLING_LOCATION,
- NULL);
- cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
- xloc = expand_location (location);
- floc = expand_location (cfun_loc);
- if (warning_at (location, wc, gmsgid, expr))
- {
- TREE_NO_WARNING (expr) = 1;
-
- if (location == DECL_SOURCE_LOCATION (var))
- return;
- if (xloc.file != floc.file
- || linemap_location_before_p (line_table,
- location, cfun_loc)
- || linemap_location_before_p (line_table,
- cfun->function_end_locus,
- location))
- inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
+ rhs1 = gimple_assign_rhs1 (def_stmt);
+ rhs2 = gimple_assign_rhs2 (def_stmt);
+ return (TREE_CODE (rhs1) == SSA_NAME && ssa_undefined_value_p (rhs1))
+ || (TREE_CODE (rhs2) == SSA_NAME && ssa_undefined_value_p (rhs2));
}
+ return false;
}
-unsigned int
-warn_uninitialized_vars (bool warn_possibly_uninitialized)
-{
- gimple_stmt_iterator gsi;
- basic_block bb;
- FOR_EACH_BB (bb)
- {
- bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
- single_succ (ENTRY_BLOCK_PTR), bb);
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
- use_operand_p use_p;
- ssa_op_iter op_iter;
- tree use;
-
- if (is_gimple_debug (stmt))
- continue;
-
- /* We only do data flow with SSA_NAMEs, so that's all we
- can warn about. */
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
- {
- use = USE_FROM_PTR (use_p);
- if (always_executed)
- warn_uninit (OPT_Wuninitialized, use,
- SSA_NAME_VAR (use), SSA_NAME_VAR (use),
- "%qD is used uninitialized in this function",
- stmt);
- else if (warn_possibly_uninitialized)
- warn_uninit (OPT_Wmaybe_uninitialized, use,
- SSA_NAME_VAR (use), SSA_NAME_VAR (use),
- "%qD may be used uninitialized in this function",
- stmt);
- }
-
- /* For memory the only cheap thing we can do is see if we
- have a use of the default def of the virtual operand.
- ??? Note that at -O0 we do not have virtual operands.
- ??? Not so cheap would be to use the alias oracle via
- walk_aliased_vdefs, if we don't find any aliasing vdef
- warn as is-used-uninitialized, if we don't find an aliasing
- vdef that kills our use (stmt_kills_ref_p), warn as
- may-be-used-uninitialized. But this walk is quadratic and
- so must be limited which means we would miss warning
- opportunities. */
- use = gimple_vuse (stmt);
- if (use
- && gimple_assign_single_p (stmt)
- && !gimple_vdef (stmt)
- && SSA_NAME_IS_DEFAULT_DEF (use))
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- tree base = get_base_address (rhs);
-
- /* Do not warn if it can be initialized outside this function. */
- if (TREE_CODE (base) != VAR_DECL
- || DECL_HARD_REGISTER (base)
- || is_global_var (base))
- continue;
+/* Return TRUE iff STMT, a gimple statement, references an undefined
+ SSA name. */
- if (always_executed)
- warn_uninit (OPT_Wuninitialized, use,
- gimple_assign_rhs1 (stmt), base,
- "%qE is used uninitialized in this function",
- stmt);
- else if (warn_possibly_uninitialized)
- warn_uninit (OPT_Wmaybe_uninitialized, use,
- gimple_assign_rhs1 (stmt), base,
- "%qE may be used uninitialized in this function",
- stmt);
- }
- }
- }
-
- return 0;
-}
-
-static unsigned int
-execute_early_warn_uninitialized (void)
+bool
+gimple_uses_undefined_value_p (gimple *stmt)
{
- /* Currently, this pass runs always but
- execute_late_warn_uninitialized only runs with optimization. With
- optimization we want to warn about possible uninitialized as late
- as possible, thus don't do it here. However, without
- optimization we need to warn here about "may be uninitialized".
- */
- calculate_dominance_info (CDI_POST_DOMINATORS);
-
- warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
-
- /* Post-dominator information can not be reliably updated. Free it
- after the use. */
+ ssa_op_iter iter;
+ tree op;
- free_dominance_info (CDI_POST_DOMINATORS);
- return 0;
-}
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
+ if (ssa_undefined_value_p (op))
+ return true;
-static bool
-gate_warn_uninitialized (void)
-{
- return warn_uninitialized != 0;
+ return false;
}
-namespace {
-
-const pass_data pass_data_early_warn_uninitialized =
-{
- GIMPLE_PASS, /* type */
- "*early_warn_uninitialized", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
- TV_TREE_UNINIT, /* tv_id */
- PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
-};
-
-class pass_early_warn_uninitialized : public gimple_opt_pass
-{
-public:
- pass_early_warn_uninitialized(gcc::context *ctxt)
- : gimple_opt_pass(pass_data_early_warn_uninitialized, ctxt)
- {}
-
- /* opt_pass methods: */
- bool gate () { return gate_warn_uninitialized (); }
- unsigned int execute () { return execute_early_warn_uninitialized (); }
-
-}; // class pass_early_warn_uninitialized
-
-} // anon namespace
-
-gimple_opt_pass *
-make_pass_early_warn_uninitialized (gcc::context *ctxt)
-{
- return new pass_early_warn_uninitialized (ctxt);
-}
/* If necessary, rewrite the base of the reference tree *TP from
&& (sym = TREE_OPERAND (TREE_OPERAND (*tp, 0), 0))
&& DECL_P (sym)
&& !TREE_ADDRESSABLE (sym)
- && bitmap_bit_p (suitable_for_renaming, DECL_UID (sym)))
+ && bitmap_bit_p (suitable_for_renaming, DECL_UID (sym))
+ && is_gimple_reg_type (TREE_TYPE (*tp))
+ && ! VOID_TYPE_P (TREE_TYPE (*tp)))
{
if (TREE_CODE (TREE_TYPE (sym)) == VECTOR_TYPE
&& useless_type_conversion_p (TREE_TYPE (*tp),
? REALPART_EXPR : IMAGPART_EXPR,
TREE_TYPE (*tp), sym);
}
- else if (integer_zerop (TREE_OPERAND (*tp, 1)))
+ else if (integer_zerop (TREE_OPERAND (*tp, 1))
+ && DECL_SIZE (sym) == TYPE_SIZE (TREE_TYPE (*tp)))
{
if (!useless_type_conversion_p (TREE_TYPE (*tp),
TREE_TYPE (sym)))
else
*tp = sym;
}
+ else if (DECL_SIZE (sym)
+ && TREE_CODE (DECL_SIZE (sym)) == INTEGER_CST
+ && (known_subrange_p
+ (mem_ref_offset (*tp),
+ wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (*tp))),
+ 0, wi::to_offset (DECL_SIZE_UNIT (sym))))
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (*tp))
+ || (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp)))
+ == TYPE_PRECISION (TREE_TYPE (*tp))))
+ && wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp))),
+ BITS_PER_UNIT) == 0)
+ {
+ *tp = build3 (BIT_FIELD_REF, TREE_TYPE (*tp), sym,
+ TYPE_SIZE (TREE_TYPE (*tp)),
+ wide_int_to_tree (bitsizetype,
+ mem_ref_offset (*tp)
+ << LOG2_BITS_PER_UNIT));
+ }
}
}
static tree
non_rewritable_mem_ref_base (tree ref)
{
- tree base = ref;
+ tree base;
/* A plain decl does not need it set. */
if (DECL_P (ref))
return NULL_TREE;
- while (handled_component_p (base))
- base = TREE_OPERAND (base, 0);
+ if (! (base = CONST_CAST_TREE (strip_invariant_refs (ref))))
+ {
+ base = get_base_address (ref);
+ if (DECL_P (base))
+ return base;
+ return NULL_TREE;
+ }
/* But watch out for MEM_REFs we cannot lower to a
VIEW_CONVERT_EXPR or a BIT_FIELD_REF. */
&& TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
{
tree decl = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
+ if (! DECL_P (decl))
+ return NULL_TREE;
+ if (! is_gimple_reg_type (TREE_TYPE (base))
+ || VOID_TYPE_P (TREE_TYPE (base))
+ || TREE_THIS_VOLATILE (decl) != TREE_THIS_VOLATILE (base))
+ return decl;
if ((TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE)
&& useless_type_conversion_p (TREE_TYPE (base),
TREE_TYPE (TREE_TYPE (decl)))
- && mem_ref_offset (base).fits_uhwi ()
- && tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
- .ugt (mem_ref_offset (base))
+ && known_ge (mem_ref_offset (base), 0)
+ && known_gt (wi::to_poly_offset (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
+ mem_ref_offset (base))
&& multiple_of_p (sizetype, TREE_OPERAND (base, 1),
TYPE_SIZE_UNIT (TREE_TYPE (base))))
return NULL_TREE;
- if (DECL_P (decl)
- && (!integer_zerop (TREE_OPERAND (base, 1))
- || (DECL_SIZE (decl)
- != TYPE_SIZE (TREE_TYPE (base)))
- || TREE_THIS_VOLATILE (decl) != TREE_THIS_VOLATILE (base)))
- return decl;
+ /* For same sizes and zero offset we can use a VIEW_CONVERT_EXPR. */
+ if (integer_zerop (TREE_OPERAND (base, 1))
+ && DECL_SIZE (decl) == TYPE_SIZE (TREE_TYPE (base)))
+ return NULL_TREE;
+ /* For integral typed extracts we can use a BIT_FIELD_REF. */
+ if (DECL_SIZE (decl)
+ && TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST
+ && (known_subrange_p
+ (mem_ref_offset (base),
+ wi::to_poly_offset (TYPE_SIZE_UNIT (TREE_TYPE (base))),
+ 0, wi::to_poly_offset (DECL_SIZE_UNIT (decl))))
+ /* ??? We can't handle bitfield precision extracts without
+ either using an alternate type for the BIT_FIELD_REF and
+ then doing a conversion or possibly adjusting the offset
+ according to endianness. */
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (base))
+ || (wi::to_offset (TYPE_SIZE (TREE_TYPE (base)))
+ == TYPE_PRECISION (TREE_TYPE (base))))
+ && wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (base))),
+ BITS_PER_UNIT) == 0)
+ return NULL_TREE;
+ return decl;
}
return NULL_TREE;
if (DECL_P (lhs))
return false;
- /* A decl that is wrapped inside a MEM-REF that covers
- it full is also rewritable.
- ??? The following could be relaxed allowing component
+ /* We can re-write REALPART_EXPR and IMAGPART_EXPR sets in
+ a reasonably efficient manner... */
+ if ((TREE_CODE (lhs) == REALPART_EXPR
+ || TREE_CODE (lhs) == IMAGPART_EXPR)
+ && DECL_P (TREE_OPERAND (lhs, 0)))
+ return false;
+
+ /* ??? The following could be relaxed allowing component
references that do not change the access size. */
if (TREE_CODE (lhs) == MEM_REF
- && TREE_CODE (TREE_OPERAND (lhs, 0)) == ADDR_EXPR
- && integer_zerop (TREE_OPERAND (lhs, 1)))
+ && TREE_CODE (TREE_OPERAND (lhs, 0)) == ADDR_EXPR)
{
tree decl = TREE_OPERAND (TREE_OPERAND (lhs, 0), 0);
- if (DECL_P (decl)
+
+ /* A decl that is wrapped inside a MEM-REF that covers
+ it in full is also rewritable. */
+ if (integer_zerop (TREE_OPERAND (lhs, 1))
+ && DECL_P (decl)
&& DECL_SIZE (decl) == TYPE_SIZE (TREE_TYPE (lhs))
+ /* If the dynamic type of the decl has larger precision than
+ the decl itself we can't use the decls type for SSA rewriting. */
+ && ((! INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ || compare_tree_int (DECL_SIZE (decl),
+ TYPE_PRECISION (TREE_TYPE (decl))) == 0)
+ || (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && (TYPE_PRECISION (TREE_TYPE (decl))
+ >= TYPE_PRECISION (TREE_TYPE (lhs)))))
+ /* Make sure we are not re-writing non-float copying into float
+ copying as that can incur normalization. */
+ && (! FLOAT_TYPE_P (TREE_TYPE (decl))
+ || types_compatible_p (TREE_TYPE (lhs), TREE_TYPE (decl)))
&& (TREE_THIS_VOLATILE (decl) == TREE_THIS_VOLATILE (lhs)))
return false;
+
+ /* A vector-insert using a MEM_REF or ARRAY_REF is rewritable
+ using a BIT_INSERT_EXPR. */
+ if (DECL_P (decl)
+ && VECTOR_TYPE_P (TREE_TYPE (decl))
+ && TYPE_MODE (TREE_TYPE (decl)) != BLKmode
+ && known_ge (mem_ref_offset (lhs), 0)
+ && known_gt (wi::to_poly_offset (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
+ mem_ref_offset (lhs))
+ && multiple_of_p (sizetype, TREE_OPERAND (lhs, 1),
+ TYPE_SIZE_UNIT (TREE_TYPE (lhs))))
+ {
+ poly_uint64 lhs_bits, nelts;
+ if (poly_int_tree_p (TYPE_SIZE (TREE_TYPE (lhs)), &lhs_bits)
+ && multiple_p (lhs_bits,
+ tree_to_uhwi
+ (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl)))),
+ &nelts))
+ {
+ if (known_eq (nelts, 1u))
+ return false;
+ /* For sub-vector inserts the insert vector mode has to be
+ supported. */
+ tree vtype = build_vector_type (TREE_TYPE (TREE_TYPE (decl)),
+ nelts);
+ if (TYPE_MODE (vtype) != BLKmode)
+ return false;
+ }
+ }
}
+ /* A vector-insert using a BIT_FIELD_REF is rewritable using
+ BIT_INSERT_EXPR. */
+ if (TREE_CODE (lhs) == BIT_FIELD_REF
+ && DECL_P (TREE_OPERAND (lhs, 0))
+ && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (lhs, 0)))
+ && TYPE_MODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) != BLKmode
+ && operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (lhs)),
+ TYPE_SIZE_UNIT
+ (TREE_TYPE (TREE_TYPE (TREE_OPERAND (lhs, 0)))), 0)
+ && (tree_to_uhwi (TREE_OPERAND (lhs, 2))
+ % tree_to_uhwi (TYPE_SIZE (TREE_TYPE (lhs)))) == 0)
+ return false;
+
return true;
}
|| !bitmap_bit_p (not_reg_needs, DECL_UID (var))))
{
TREE_ADDRESSABLE (var) = 0;
+ /* If we cleared TREE_ADDRESSABLE make sure DECL_GIMPLE_REG_P
+ is unset if we cannot rewrite the var into SSA. */
+ if ((TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE
+ || TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE)
+ && bitmap_bit_p (not_reg_needs, DECL_UID (var)))
+ DECL_GIMPLE_REG_P (var) = 0;
if (is_gimple_reg (var))
bitmap_set_bit (suitable_for_renaming, DECL_UID (var));
if (dump_file)
{
fprintf (dump_file, "No longer having address taken: ");
- print_generic_expr (dump_file, var, 0);
+ print_generic_expr (dump_file, var);
fprintf (dump_file, "\n");
}
}
&& (TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE)
&& !TREE_THIS_VOLATILE (var)
- && (TREE_CODE (var) != VAR_DECL || !DECL_HARD_REGISTER (var)))
+ && (!VAR_P (var) || !DECL_HARD_REGISTER (var)))
{
DECL_GIMPLE_REG_P (var) = 1;
bitmap_set_bit (suitable_for_renaming, DECL_UID (var));
if (dump_file)
{
fprintf (dump_file, "Now a gimple register: ");
- print_generic_expr (dump_file, var, 0);
+ print_generic_expr (dump_file, var);
fprintf (dump_file, "\n");
}
}
}
+/* Return true when STMT is an internal ASAN_MARK call whose second
+   argument is the address of a local variable that would be a GIMPLE
+   register if that address were not taken.  */
+
+static bool
+is_asan_mark_p (gimple *stmt)
+{
+ if (!gimple_call_internal_p (stmt, IFN_ASAN_MARK))
+ return false;
+
+ tree addr = get_base_address (gimple_call_arg (stmt, 1));
+ if (TREE_CODE (addr) == ADDR_EXPR
+ && VAR_P (TREE_OPERAND (addr, 0)))
+ {
+ tree var = TREE_OPERAND (addr, 0);
+ /* Variables explicitly attributed for use-after-scope checking
+ must keep their ASAN_MARK instrumentation.  */
+ if (lookup_attribute (ASAN_USE_AFTER_SCOPE_ATTRIBUTE,
+ DECL_ATTRIBUTES (var)))
+ return false;
+
+ /* Temporarily clear TREE_ADDRESSABLE so is_gimple_reg answers
+ whether VAR would be a register absent the address-taking.  */
+ unsigned addressable = TREE_ADDRESSABLE (var);
+ TREE_ADDRESSABLE (var) = 0;
+ bool r = is_gimple_reg (var);
+ TREE_ADDRESSABLE (var) = addressable;
+ return r;
+ }
+
+ return false;
+}
+
/* Compute TREE_ADDRESSABLE and DECL_GIMPLE_REG_P for local variables. */
void
execute_update_addresses_taken (void)
{
- gimple_stmt_iterator gsi;
basic_block bb;
- bitmap addresses_taken = BITMAP_ALLOC (NULL);
- bitmap not_reg_needs = BITMAP_ALLOC (NULL);
- bitmap suitable_for_renaming = BITMAP_ALLOC (NULL);
+ auto_bitmap addresses_taken;
+ auto_bitmap not_reg_needs;
+ auto_bitmap suitable_for_renaming;
tree var;
unsigned i;
/* Collect into ADDRESSES_TAKEN all variables whose address is taken within
the function body. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
enum gimple_code code = gimple_code (stmt);
tree decl;
- /* Note all addresses taken by the stmt. */
- gimple_ior_addresses_taken (addresses_taken, stmt);
+ if (code == GIMPLE_CALL)
+ {
+ if (optimize_atomic_compare_exchange_p (stmt))
+ {
+ /* For __atomic_compare_exchange_N if the second argument
+ is &var, don't mark var addressable;
+ if it becomes non-addressable, we'll rewrite it into
+ ATOMIC_COMPARE_EXCHANGE call. */
+ tree arg = gimple_call_arg (stmt, 1);
+ gimple_call_set_arg (stmt, 1, null_pointer_node);
+ gimple_ior_addresses_taken (addresses_taken, stmt);
+ gimple_call_set_arg (stmt, 1, arg);
+ }
+ else if (is_asan_mark_p (stmt)
+ || gimple_call_internal_p (stmt, IFN_GOMP_SIMT_ENTER))
+ ;
+ else
+ gimple_ior_addresses_taken (addresses_taken, stmt);
+ }
+ else
+ /* Note all addresses taken by the stmt. */
+ gimple_ior_addresses_taken (addresses_taken, stmt);
/* If we have a call or an assignment, see if the lhs contains
a local decl that requires not to be a gimple register. */
tree lhs = gimple_get_lhs (stmt);
if (lhs
&& TREE_CODE (lhs) != SSA_NAME
- && non_rewritable_lvalue_p (lhs))
+ && ((code == GIMPLE_CALL && ! DECL_P (lhs))
+ || non_rewritable_lvalue_p (lhs)))
{
decl = get_base_address (lhs);
if (DECL_P (decl))
else if (code == GIMPLE_ASM)
{
- for (i = 0; i < gimple_asm_noutputs (stmt); ++i)
+ gasm *asm_stmt = as_a <gasm *> (stmt);
+ for (i = 0; i < gimple_asm_noutputs (asm_stmt); ++i)
{
- tree link = gimple_asm_output_op (stmt, i);
+ tree link = gimple_asm_output_op (asm_stmt, i);
tree lhs = TREE_VALUE (link);
if (TREE_CODE (lhs) != SSA_NAME)
{
bitmap_set_bit (not_reg_needs, DECL_UID (decl));
}
}
- for (i = 0; i < gimple_asm_ninputs (stmt); ++i)
+ for (i = 0; i < gimple_asm_ninputs (asm_stmt); ++i)
{
- tree link = gimple_asm_input_op (stmt, i);
+ tree link = gimple_asm_input_op (asm_stmt, i);
if ((decl = non_rewritable_mem_ref_base (TREE_VALUE (link))))
bitmap_set_bit (not_reg_needs, DECL_UID (decl));
}
}
}
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
size_t i;
- gimple phi = gsi_stmt (gsi);
+ gphi *phi = gsi.phi ();
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
variables and operands need to be rewritten to expose bare symbols. */
if (!bitmap_empty_p (suitable_for_renaming))
{
- FOR_EACH_BB (bb)
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ FOR_EACH_BB_FN (bb, cfun)
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
/* Re-write TARGET_MEM_REFs of symbols we want to
rewrite into SSA form. */
tree rhs, *rhsp = gimple_assign_rhs1_ptr (stmt);
tree sym;
+ /* Rewrite LHS IMAG/REALPART_EXPR similar to
+ gimplify_modify_expr_complex_part. */
+ if ((TREE_CODE (lhs) == IMAGPART_EXPR
+ || TREE_CODE (lhs) == REALPART_EXPR)
+ && DECL_P (TREE_OPERAND (lhs, 0))
+ && bitmap_bit_p (suitable_for_renaming,
+ DECL_UID (TREE_OPERAND (lhs, 0))))
+ {
+ tree other = make_ssa_name (TREE_TYPE (lhs));
+ tree lrhs = build1 (TREE_CODE (lhs) == IMAGPART_EXPR
+ ? REALPART_EXPR : IMAGPART_EXPR,
+ TREE_TYPE (other),
+ TREE_OPERAND (lhs, 0));
+ gimple *load = gimple_build_assign (other, lrhs);
+ location_t loc = gimple_location (stmt);
+ gimple_set_location (load, loc);
+ gimple_set_vuse (load, gimple_vuse (stmt));
+ gsi_insert_before (&gsi, load, GSI_SAME_STMT);
+ gimple_assign_set_lhs (stmt, TREE_OPERAND (lhs, 0));
+ gimple_assign_set_rhs_with_ops
+ (&gsi, COMPLEX_EXPR,
+ TREE_CODE (lhs) == IMAGPART_EXPR
+ ? other : gimple_assign_rhs1 (stmt),
+ TREE_CODE (lhs) == IMAGPART_EXPR
+ ? gimple_assign_rhs1 (stmt) : other, NULL_TREE);
+ stmt = gsi_stmt (gsi);
+ unlink_stmt_vdef (stmt);
+ update_stmt (stmt);
+ continue;
+ }
+
+ /* Rewrite a vector insert via a BIT_FIELD_REF on the LHS
+ into a BIT_INSERT_EXPR. */
+ if (TREE_CODE (lhs) == BIT_FIELD_REF
+ && DECL_P (TREE_OPERAND (lhs, 0))
+ && bitmap_bit_p (suitable_for_renaming,
+ DECL_UID (TREE_OPERAND (lhs, 0)))
+ && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (lhs, 0)))
+ && TYPE_MODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) != BLKmode
+ && operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (lhs)),
+ TYPE_SIZE_UNIT (TREE_TYPE
+ (TREE_TYPE (TREE_OPERAND (lhs, 0)))),
+ 0)
+ && (tree_to_uhwi (TREE_OPERAND (lhs, 2))
+ % tree_to_uhwi (TYPE_SIZE (TREE_TYPE (lhs))) == 0))
+ {
+ tree var = TREE_OPERAND (lhs, 0);
+ tree val = gimple_assign_rhs1 (stmt);
+ if (! types_compatible_p (TREE_TYPE (TREE_TYPE (var)),
+ TREE_TYPE (val)))
+ {
+ tree tem = make_ssa_name (TREE_TYPE (TREE_TYPE (var)));
+ gimple *pun
+ = gimple_build_assign (tem,
+ build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (tem), val));
+ gsi_insert_before (&gsi, pun, GSI_SAME_STMT);
+ val = tem;
+ }
+ tree bitpos = TREE_OPERAND (lhs, 2);
+ gimple_assign_set_lhs (stmt, var);
+ gimple_assign_set_rhs_with_ops
+ (&gsi, BIT_INSERT_EXPR, var, val, bitpos);
+ stmt = gsi_stmt (gsi);
+ unlink_stmt_vdef (stmt);
+ update_stmt (stmt);
+ continue;
+ }
+
+ /* Rewrite a vector insert using a MEM_REF on the LHS
+ into a BIT_INSERT_EXPR. */
+ if (TREE_CODE (lhs) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (lhs, 0)) == ADDR_EXPR
+ && (sym = TREE_OPERAND (TREE_OPERAND (lhs, 0), 0))
+ && DECL_P (sym)
+ && bitmap_bit_p (suitable_for_renaming, DECL_UID (sym))
+ && VECTOR_TYPE_P (TREE_TYPE (sym))
+ && TYPE_MODE (TREE_TYPE (sym)) != BLKmode
+ /* If it is a full replacement we can do better below. */
+ && maybe_ne (wi::to_poly_offset
+ (TYPE_SIZE_UNIT (TREE_TYPE (lhs))),
+ wi::to_poly_offset
+ (TYPE_SIZE_UNIT (TREE_TYPE (sym))))
+ && known_ge (mem_ref_offset (lhs), 0)
+ && known_gt (wi::to_poly_offset
+ (TYPE_SIZE_UNIT (TREE_TYPE (sym))),
+ mem_ref_offset (lhs))
+ && multiple_of_p (sizetype,
+ TREE_OPERAND (lhs, 1),
+ TYPE_SIZE_UNIT (TREE_TYPE (lhs))))
+ {
+ tree val = gimple_assign_rhs1 (stmt);
+ if (! types_compatible_p (TREE_TYPE (val),
+ TREE_TYPE (TREE_TYPE (sym))))
+ {
+ poly_uint64 lhs_bits, nelts;
+ tree temtype = TREE_TYPE (TREE_TYPE (sym));
+ if (poly_int_tree_p (TYPE_SIZE (TREE_TYPE (lhs)),
+ &lhs_bits)
+ && multiple_p (lhs_bits,
+ tree_to_uhwi
+ (TYPE_SIZE (TREE_TYPE
+ (TREE_TYPE (sym)))),
+ &nelts)
+ && maybe_ne (nelts, 1u))
+ temtype = build_vector_type (temtype, nelts);
+ tree tem = make_ssa_name (temtype);
+ gimple *pun
+ = gimple_build_assign (tem,
+ build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (tem), val));
+ gsi_insert_before (&gsi, pun, GSI_SAME_STMT);
+ val = tem;
+ }
+ tree bitpos
+ = wide_int_to_tree (bitsizetype,
+ mem_ref_offset (lhs) * BITS_PER_UNIT);
+ gimple_assign_set_lhs (stmt, sym);
+ gimple_assign_set_rhs_with_ops
+ (&gsi, BIT_INSERT_EXPR, sym, val, bitpos);
+ stmt = gsi_stmt (gsi);
+ unlink_stmt_vdef (stmt);
+ update_stmt (stmt);
+ continue;
+ }
+
/* We shouldn't have any fancy wrapping of
component-refs on the LHS, but look through
VIEW_CONVERT_EXPRs as that is easy. */
if (gimple_assign_lhs (stmt) != lhs
&& !useless_type_conversion_p (TREE_TYPE (lhs),
TREE_TYPE (rhs)))
- rhs = fold_build1 (VIEW_CONVERT_EXPR,
- TREE_TYPE (lhs), rhs);
-
- if (gimple_assign_lhs (stmt) != lhs)
- gimple_assign_set_lhs (stmt, lhs);
-
- /* For var ={v} {CLOBBER}; where var lost
- TREE_ADDRESSABLE just remove the stmt. */
- if (DECL_P (lhs)
- && TREE_CLOBBER_P (rhs)
- && bitmap_bit_p (suitable_for_renaming, DECL_UID (lhs)))
{
- unlink_stmt_vdef (stmt);
- gsi_remove (&gsi, true);
- release_defs (stmt);
- continue;
+ if (gimple_clobber_p (stmt))
+ {
+ rhs = build_constructor (TREE_TYPE (lhs), NULL);
+ TREE_THIS_VOLATILE (rhs) = 1;
+ }
+ else
+ rhs = fold_build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (lhs), rhs);
}
+ if (gimple_assign_lhs (stmt) != lhs)
+ gimple_assign_set_lhs (stmt, lhs);
if (gimple_assign_rhs1 (stmt) != rhs)
{
else if (gimple_code (stmt) == GIMPLE_CALL)
{
unsigned i;
+ if (optimize_atomic_compare_exchange_p (stmt))
+ {
+ tree expected = gimple_call_arg (stmt, 1);
+ if (bitmap_bit_p (suitable_for_renaming,
+ DECL_UID (TREE_OPERAND (expected, 0))))
+ {
+ fold_builtin_atomic_compare_exchange (&gsi);
+ continue;
+ }
+ }
+ else if (is_asan_mark_p (stmt))
+ {
+ tree var = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
+ if (bitmap_bit_p (suitable_for_renaming, DECL_UID (var)))
+ {
+ unlink_stmt_vdef (stmt);
+ if (asan_mark_p (stmt, ASAN_MARK_POISON))
+ {
+ gcall *call
+ = gimple_build_call_internal (IFN_ASAN_POISON, 0);
+ gimple_call_set_lhs (call, var);
+ gsi_replace (&gsi, call, GSI_SAME_STMT);
+ }
+ else
+ {
+ /* In ASAN_MARK (UNPOISON, &b, ...) the variable
+ is uninitialized. Avoid dependencies on
+ the previous, out-of-scope value. */
+ tree clobber = build_clobber (TREE_TYPE (var));
+ gimple *g = gimple_build_assign (var, clobber);
+ gsi_replace (&gsi, g, GSI_SAME_STMT);
+ }
+ continue;
+ }
+ }
+ else if (gimple_call_internal_p (stmt, IFN_GOMP_SIMT_ENTER))
+ for (i = 1; i < gimple_call_num_args (stmt); i++)
+ {
+ tree *argp = gimple_call_arg_ptr (stmt, i);
+ if (*argp == null_pointer_node)
+ continue;
+ gcc_assert (TREE_CODE (*argp) == ADDR_EXPR
+ && VAR_P (TREE_OPERAND (*argp, 0)));
+ tree var = TREE_OPERAND (*argp, 0);
+ if (bitmap_bit_p (suitable_for_renaming, DECL_UID (var)))
+ *argp = null_pointer_node;
+ }
for (i = 0; i < gimple_call_num_args (stmt); ++i)
{
tree *argp = gimple_call_arg_ptr (stmt, i);
else if (gimple_code (stmt) == GIMPLE_ASM)
{
+ gasm *asm_stmt = as_a <gasm *> (stmt);
unsigned i;
- for (i = 0; i < gimple_asm_noutputs (stmt); ++i)
+ for (i = 0; i < gimple_asm_noutputs (asm_stmt); ++i)
{
- tree link = gimple_asm_output_op (stmt, i);
+ tree link = gimple_asm_output_op (asm_stmt, i);
maybe_rewrite_mem_ref_base (&TREE_VALUE (link),
suitable_for_renaming);
}
- for (i = 0; i < gimple_asm_ninputs (stmt); ++i)
+ for (i = 0; i < gimple_asm_ninputs (asm_stmt); ++i)
{
- tree link = gimple_asm_input_op (stmt, i);
+ tree link = gimple_asm_input_op (asm_stmt, i);
maybe_rewrite_mem_ref_base (&TREE_VALUE (link),
suitable_for_renaming);
}
update_ssa (TODO_update_ssa);
}
- BITMAP_FREE (not_reg_needs);
- BITMAP_FREE (addresses_taken);
- BITMAP_FREE (suitable_for_renaming);
timevar_pop (TV_ADDRESS_TAKEN);
}
GIMPLE_PASS, /* type */
"addressables", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- false, /* has_execute */
TV_ADDRESS_TAKEN, /* tv_id */
PROP_ssa, /* properties_required */
0, /* properties_provided */
class pass_update_address_taken : public gimple_opt_pass
{
public:
- pass_update_address_taken(gcc::context *ctxt)
- : gimple_opt_pass(pass_data_update_address_taken, ctxt)
+ pass_update_address_taken (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_update_address_taken, ctxt)
{}
/* opt_pass methods: */