/* Passes for transactional memory support.
- Copyright (C) 2008-2015 Free Software Foundation, Inc.
+ Copyright (C) 2008-2021 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "alias.h"
-#include "symtab.h"
-#include "options.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
#include "tree.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "cgraph.h"
+#include "gimple-pretty-print.h"
+#include "diagnostic-core.h"
#include "fold-const.h"
-#include "predict.h"
-#include "tm.h"
-#include "hard-reg-set.h"
-#include "function.h"
-#include "dominance.h"
-#include "cfg.h"
-#include "basic-block.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
#include "tree-eh.h"
-#include "gimple-expr.h"
-#include "gimple.h"
#include "calls.h"
-#include "rtl.h"
-#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
-#include "gimple-ssa.h"
-#include "plugin-api.h"
-#include "ipa-ref.h"
-#include "cgraph.h"
#include "tree-cfg.h"
-#include "stringpool.h"
-#include "tree-ssanames.h"
#include "tree-into-ssa.h"
-#include "tree-pass.h"
#include "tree-inline.h"
-#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
-#include "params.h"
-#include "target.h"
#include "langhooks.h"
-#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
-
+#include "stringpool.h"
+#include "attribs.h"
+#include "alloc-pool.h"
+#include "symbol-summary.h"
+#include "symtab-thunks.h"
#define A_RUNINSTRUMENTEDCODE 0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
{
case FUNCTION_DECL:
return TYPE_ATTRIBUTES (TREE_TYPE (x));
- break;
default:
if (TYPE_P (x))
if (TREE_CODE (x) == ADDR_EXPR)
x = TREE_OPERAND (x, 0);
if (TREE_CODE (x) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
+ && fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE))
return true;
return false;
/* Return true if CALL is const, or tm_pure. */
static bool
-is_tm_pure_call (gimple call)
+is_tm_pure_call (gimple *call)
{
- tree fn = gimple_call_fn (call);
-
- if (TREE_CODE (fn) == ADDR_EXPR)
- {
- fn = TREE_OPERAND (fn, 0);
- gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
- }
- else
- fn = TREE_TYPE (fn);
-
- return is_tm_pure (fn);
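+  /* ECF_TM_PURE is set for calls to functions marked transaction_pure,
+     so checking the call's ECF flags suffices here.  */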
+ return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;
}
/* Return true if X has been marked TM_CALLABLE. */
transaction. */
bool
-is_tm_ending (gimple stmt)
+is_tm_ending (gimple *stmt)
{
tree fndecl;
/* Return true if STMT is a TM load. */
static bool
-is_tm_load (gimple stmt)
+is_tm_load (gimple *stmt)
{
tree fndecl;
return false;
fndecl = gimple_call_fndecl (stmt);
- return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ return (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
after-write, after-read, etc optimized variants. */
static bool
-is_tm_simple_load (gimple stmt)
+is_tm_simple_load (gimple *stmt)
{
tree fndecl;
return false;
fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_LOAD_1
/* Return true if STMT is a TM store. */
static bool
-is_tm_store (gimple stmt)
+is_tm_store (gimple *stmt)
{
tree fndecl;
return false;
fndecl = gimple_call_fndecl (stmt);
- return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ return (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
after-write, after-read, etc optimized variants. */
static bool
-is_tm_simple_store (gimple stmt)
+is_tm_simple_store (gimple *stmt)
{
tree fndecl;
return false;
fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_STORE_1
static bool
is_tm_abort (tree fndecl)
{
- return (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
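+  /* The function-code overload of fndecl_built_in_p also checks for
+     the BUILT_IN_NORMAL class.  */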
+ return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT));
}
/* Build a GENERIC tree for a user abort. This is called by front ends
| (is_outer ? AR_OUTERABORT : 0)));
}
\f
-/* Map for aribtrary function replacement under TM, as created
+/* Map for arbitrary function replacement under TM, as created
by the tm_wrap attribute. */
struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
unsigned int block_flags : 8;
unsigned int func_flags : 8;
unsigned int saw_volatile : 1;
- gimple stmt;
+ gimple *stmt;
};
-/* Return true if T is a volatile variable of some kind. */
+/* Return true if T is a volatile lvalue of some kind. */
static bool
-volatile_var_p (tree t)
+volatile_lvalue_p (tree t)
{
- return (SSA_VAR_P (t)
+ return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
&& TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
/* Tree callback function for diagnose_tm pass. */
static tree
-diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
- void *data)
+diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
- if (volatile_var_p (*tp)
- && d->block_flags & DIAG_TM_SAFE
- && !d->saw_volatile)
+ if (TYPE_P (*tp))
+ *walk_subtrees = false;
+ else if (volatile_lvalue_p (*tp)
+ && !d->saw_volatile)
{
d->saw_volatile = 1;
- error_at (gimple_location (d->stmt),
- "invalid volatile use of %qD inside transaction",
- *tp);
+ if (d->block_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (d->stmt),
+ "invalid use of volatile lvalue inside transaction");
+ else if (d->func_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (d->stmt),
+ "invalid use of volatile lvalue inside %<transaction_safe%> "
+ "function");
}
return NULL_TREE;
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
/* Save stmt for use in leaf analysis. */
"atomic transaction", fn);
else
{
- if (!DECL_P (fn) || DECL_NAME (fn))
+ if ((!DECL_P (fn) || DECL_NAME (fn))
+ && TREE_CODE (fn) != SSA_NAME)
error_at (gimple_location (stmt),
"unsafe function call %qE within "
"atomic transaction", fn);
"%<transaction_safe%> function", fn);
else
{
- if (!DECL_P (fn) || DECL_NAME (fn))
+ if ((!DECL_P (fn) || DECL_NAME (fn))
+ && TREE_CODE (fn) != SSA_NAME)
error_at (gimple_location (stmt),
"unsafe function call %qE within "
"%<transaction_safe%> function", fn);
Either that or get the language spec to resurrect __tm_waiver. */
if (d->block_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
- "asm not allowed in atomic transaction");
+ "%<asm%> not allowed in atomic transaction");
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
- "asm not allowed in %<transaction_safe%> function");
+ "%<asm%> not allowed in %<transaction_safe%> function");
break;
case GIMPLE_TRANSACTION:
/* One individual log entry. We may have multiple statements for the
same location if neither dominates the other (on different
execution paths). */
-typedef struct tm_log_entry
+struct tm_log_entry
{
/* Address to save. */
tree addr;
/* Entry block for the transaction this address occurs in. */
basic_block entry_block;
/* Dominating statements the store occurs in. */
- vec<gimple> stmts;
+ vec<gimple *> stmts;
/* Initially, while we are building the log, we place a nonzero
value here to mean that this address *will* be saved with a
save/restore sequence. Later, when generating the save sequence
we place the SSA temp generated here. */
tree save_var;
-} *tm_log_entry_t;
+};
/* Log entry hashtable helpers. */
mem_max
};
-typedef struct tm_new_mem_map
+struct tm_new_mem_map
{
/* SSA_NAME being dereferenced. */
tree val;
enum thread_memory_type local_new_memory;
-} tm_new_mem_map_t;
+};
/* Hashtable helpers. */
-struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map_t>
+struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
- static inline hashval_t hash (const tm_new_mem_map_t *);
- static inline bool equal (const tm_new_mem_map_t *, const tm_new_mem_map_t *);
+ static inline hashval_t hash (const tm_new_mem_map *);
+ static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};
inline hashval_t
-tm_mem_map_hasher::hash (const tm_new_mem_map_t *v)
+tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
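  /* Hash on the pointer value itself; the low bits are presumably
     zero from alignment, so shift them out.  */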
return (intptr_t)v->val >> 4;
}
inline bool
-tm_mem_map_hasher::equal (const tm_new_mem_map_t *v, const tm_new_mem_map_t *c)
+tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
return v->val == c->val;
}
If known, ENTRY_BLOCK is the entry block for the region, otherwise
NULL. */
static void
-tm_log_add (basic_block entry_block, tree addr, gimple stmt)
+tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
tm_log_entry **slot;
struct tm_log_entry l, *lp;
&& TYPE_SIZE_UNIT (type) != NULL
&& tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
&& ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
- < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
+ < param_tm_max_aggregate_size)
/* We must be able to copy this type normally. I.e., no
special constructors and the like. */
&& !TREE_ADDRESSABLE (type))
else
{
size_t i;
- gimple oldstmt;
+ gimple *oldstmt;
lp = *slot;
ADDR is the address to save.
STMT is the statement before which to place it. */
static void
-tm_log_emit_stmt (tree addr, gimple stmt)
+tm_log_emit_stmt (tree addr, gimple *stmt)
{
tree type = TREE_TYPE (addr);
- tree size = TYPE_SIZE_UNIT (type);
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
- gimple log;
+ gimple *log;
enum built_in_function code = BUILT_IN_TM_LOG;
if (type == float_type_node)
code = BUILT_IN_TM_LOG_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOG_LDOUBLE;
- else if (tree_fits_uhwi_p (size))
+ else if (TYPE_SIZE (type) != NULL
+ && tree_fits_uhwi_p (TYPE_SIZE (type)))
{
- unsigned int n = tree_to_uhwi (size);
- switch (n)
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
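+  /* TYPE_SIZE is a bit size, hence the 8/16/32/64 scalar and
+     64/128/256 vector cases below.  */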
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_LOG_1;
- break;
- case 2:
- code = BUILT_IN_TM_LOG_2;
- break;
- case 4:
- code = BUILT_IN_TM_LOG_4;
- break;
- case 8:
- code = BUILT_IN_TM_LOG_8;
- break;
- default:
- code = BUILT_IN_TM_LOG;
- if (TREE_CODE (type) == VECTOR_TYPE)
+ switch (type_size)
{
- if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
- code = BUILT_IN_TM_LOG_M64;
- else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
- code = BUILT_IN_TM_LOG_M128;
- else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
- code = BUILT_IN_TM_LOG_M256;
+ case 64:
+ code = BUILT_IN_TM_LOG_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_LOG_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_LOG_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+ case 8:
+ code = BUILT_IN_TM_LOG_1;
+ break;
+ case 16:
+ code = BUILT_IN_TM_LOG_2;
+ break;
+ case 32:
+ code = BUILT_IN_TM_LOG_4;
+ break;
+ case 64:
+ code = BUILT_IN_TM_LOG_8;
+ break;
}
- break;
}
}
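+  /* Fall back to the generic log builtin when no sized variant is
+     available.  */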
+ if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
+ code = BUILT_IN_TM_LOG;
+ tree decl = builtin_decl_explicit (code);
+
addr = gimplify_addr (&gsi, addr);
if (code == BUILT_IN_TM_LOG)
- log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
+ log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
else
- log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
+ log = gimple_build_call (decl, 1, addr);
gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
{
size_t i;
- gimple stmt;
+ gimple *stmt;
if (dump_file)
{
fprintf (dump_file, "TM thread private mem logging: ");
- print_generic_expr (dump_file, lp->addr, 0);
+ print_generic_expr (dump_file, lp->addr);
fprintf (dump_file, "\n");
}
{
size_t i;
gimple_stmt_iterator gsi = gsi_last_bb (bb);
- gimple stmt;
+ gimple *stmt;
struct tm_log_entry l, *lp;
for (i = 0; i < tm_log_save_addresses.length (); ++i)
int i;
struct tm_log_entry l, *lp;
gimple_stmt_iterator gsi;
- gimple stmt;
+ gimple *stmt;
for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
{
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
- gimple stmt = NULL;
+ gimple *stmt = NULL;
enum tree_code code;
- tm_new_mem_map_t **slot;
- tm_new_mem_map_t elt, *elt_p;
+ tm_new_mem_map **slot;
+ tm_new_mem_map elt, *elt_p;
tree val = x;
enum thread_memory_type retval = mem_transaction_local;
/* Optimistically assume the memory is transaction local during
processing. This catches recursion into this variable. */
- *slot = elt_p = XNEW (tm_new_mem_map_t);
+ *slot = elt_p = XNEW (tm_new_mem_map);
elt_p->val = val;
elt_p->local_new_memory = mem_transaction_local;
private memory instrumentation. If no TPM instrumentation is
desired, STMT should be null. */
static bool
-requires_barrier (basic_block entry_block, tree x, gimple stmt)
+requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
tree orig = x;
while (handled_component_p (x))
x = TREE_OPERAND (TMR_BASE (x), 0);
if (TREE_CODE (x) == PARM_DECL)
return false;
- gcc_assert (TREE_CODE (x) == VAR_DECL);
+ gcc_assert (VAR_P (x));
/* FALLTHRU */
case PARM_DECL:
static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
*state |= GTMA_HAVE_LOAD;
static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
tree fn;
if (is_tm_pure_call (stmt))
*state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
+/* Iterate through the statements in the sequence, moving labels
+ (and thus edges) of transactions from "label_norm" to "label_uninst". */
+
+static tree
+make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
+ struct walk_stmt_info *)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+
+ if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
+ {
+ *handled_ops_p = true;
+ txn->label_uninst = txn->label_norm;
+ txn->label_norm = NULL;
+ }
+ else
+ *handled_ops_p = !gimple_has_substatements (stmt);
+
+ return NULL_TREE;
+}
+
/* Lower a GIMPLE_TRANSACTION statement. */
static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
- gimple g;
+ gimple *g;
gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
unsigned int *outer_state = (unsigned int *) wi->info;
unsigned int this_state = 0;
g = gimple_build_try (gimple_transaction_body (stmt),
gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
- gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
- gimple_transaction_set_body (stmt, NULL);
+ /* For a (potentially) outer transaction, create two paths. */
+ gimple_seq uninst = NULL;
+ if (outer_state == NULL)
+ {
+ uninst = copy_gimple_seq_and_replace_locals (g);
+ /* In the uninstrumented copy, reset inner transactions to have only
+ an uninstrumented code path. */
+ memset (&this_wi, 0, sizeof (this_wi));
+ walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
+ }
+
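+  /* label1 marks the start of the instrumented code path.  */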
+ tree label1 = create_artificial_label (UNKNOWN_LOCATION);
+ gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
+ gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
+ gimple_transaction_set_label_norm (stmt, label1);
/* If the transaction calls abort or if this is an outer transaction,
add an "over" label afterwards. */
- if ((this_state & (GTMA_HAVE_ABORT))
+ tree label3 = NULL;
+ if ((this_state & GTMA_HAVE_ABORT)
+ || outer_state == NULL
|| (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
{
- tree label = create_artificial_label (UNKNOWN_LOCATION);
- gimple_transaction_set_label (stmt, label);
- gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
+ label3 = create_artificial_label (UNKNOWN_LOCATION);
+ gimple_transaction_set_label_over (stmt, label3);
}
+ if (uninst != NULL)
+ {
+ gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);
+
+ tree label2 = create_artificial_label (UNKNOWN_LOCATION);
+ gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
+ gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
+ gimple_transaction_set_label_uninst (stmt, label2);
+ }
+
+ if (label3 != NULL)
+ gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);
+
+ gimple_transaction_set_body (stmt, NULL);
+
/* Record the set of operations found for use later. */
this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
gimple_transaction_set_subcode (stmt, this_state);
struct walk_stmt_info *wi)
{
unsigned int *state = (unsigned int *) wi->info;
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
*handled_ops_p = true;
switch (gimple_code (stmt))
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info * wi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
if (gimple_code (stmt) == GIMPLE_TRANSACTION)
{
After TM_MARK, this gets replaced by a call to
BUILT_IN_TM_START.
Hence this will be either a gtransaction * or a gcall *.
- gimple transaction_stmt;
+ gimple *transaction_stmt;
/* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
BUILT_IN_TM_START, this field is true if the transaction is an
bitmap irr_blocks;
};
-typedef struct tm_region *tm_region_p;
-
/* True if there are pending edge statements to be committed for the
current function being scanned in the tmmark pass. */
bool pending_edge_inserts_p;
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
gimple_stmt_iterator gsi;
- gimple g;
+ gimple *g;
if (!region
|| (!region->irr_blocks && !region->exit_blocks))
if (gimple_code (g) == GIMPLE_CALL)
{
tree fn = gimple_call_fndecl (g);
- if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
+ if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL))
{
if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
|| DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
static void
tm_region_init (struct tm_region *region)
{
- gimple g;
+ gimple *g;
edge_iterator ei;
edge e;
basic_block bb;
auto_vec<basic_block> queue;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
- auto_vec<tm_region_p> bb_regions;
-
- all_tm_regions = region;
- bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ auto_vec<tm_region *> bb_regions;
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks () from another pass that may already be
using bb->aux.  */
- bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
+ all_tm_regions = region;
+ bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
queue.safe_push (bb);
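+  /* Mark the entry block visited up front so an edge back to it does
+     not queue it a second time.  */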
+ bitmap_set_bit (visited_blocks, bb->index);
bb_regions[bb->index] = region;
+
do
{
bb = queue.pop ();
static gcall *
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
- enum built_in_function code = END_BUILTINS;
- tree t, type = TREE_TYPE (rhs), decl;
+ tree t, type = TREE_TYPE (rhs);
gcall *gcall;
+ built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_LOAD_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_LOAD_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_LOAD_LDOUBLE;
- else if (TYPE_SIZE_UNIT (type) != NULL
- && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+ else
{
- switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
+ if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
+ return NULL;
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
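+      /* As in tm_log_emit_stmt, the sizes below are bit sizes.  */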
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_LOAD_1;
- break;
- case 2:
- code = BUILT_IN_TM_LOAD_2;
- break;
- case 4:
- code = BUILT_IN_TM_LOAD_4;
- break;
- case 8:
- code = BUILT_IN_TM_LOAD_8;
- break;
+ switch (type_size)
+ {
+ case 64:
+ code = BUILT_IN_TM_LOAD_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_LOAD_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_LOAD_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+ case 8:
+ code = BUILT_IN_TM_LOAD_1;
+ break;
+ case 16:
+ code = BUILT_IN_TM_LOAD_2;
+ break;
+ case 32:
+ code = BUILT_IN_TM_LOAD_4;
+ break;
+ case 64:
+ code = BUILT_IN_TM_LOAD_8;
+ break;
+ default:
+ return NULL;
+ }
}
}
- if (code == END_BUILTINS)
- {
- decl = targetm.vectorize.builtin_tm_load (type);
- if (!decl)
- return NULL;
- }
- else
- decl = builtin_decl_explicit (code);
+ tree decl = builtin_decl_explicit (code);
+ gcc_assert (decl);
t = gimplify_addr (gsi, rhs);
gcall = gimple_build_call (decl, 1, t);
}
else
{
- gimple g;
+ gimple *g;
tree temp;
temp = create_tmp_reg (t);
static gcall *
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
- enum built_in_function code = END_BUILTINS;
tree t, fn, type = TREE_TYPE (rhs), simple_type;
gcall *gcall;
+ built_in_function code;
if (type == float_type_node)
code = BUILT_IN_TM_STORE_FLOAT;
else if (type == double_type_node)
code = BUILT_IN_TM_STORE_DOUBLE;
else if (type == long_double_type_node)
code = BUILT_IN_TM_STORE_LDOUBLE;
- else if (TYPE_SIZE_UNIT (type) != NULL
- && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+ else
{
- switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
+ if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
+ return NULL;
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+
+ if (TREE_CODE (type) == VECTOR_TYPE)
{
- case 1:
- code = BUILT_IN_TM_STORE_1;
- break;
- case 2:
- code = BUILT_IN_TM_STORE_2;
- break;
- case 4:
- code = BUILT_IN_TM_STORE_4;
- break;
- case 8:
- code = BUILT_IN_TM_STORE_8;
- break;
+ switch (type_size)
+ {
+ case 64:
+ code = BUILT_IN_TM_STORE_M64;
+ break;
+ case 128:
+ code = BUILT_IN_TM_STORE_M128;
+ break;
+ case 256:
+ code = BUILT_IN_TM_STORE_M256;
+ break;
+ default:
+ goto unhandled_vec;
+ }
+ if (!builtin_decl_explicit_p (code))
+ goto unhandled_vec;
+ }
+ else
+ {
+ unhandled_vec:
+ switch (type_size)
+ {
+ case 8:
+ code = BUILT_IN_TM_STORE_1;
+ break;
+ case 16:
+ code = BUILT_IN_TM_STORE_2;
+ break;
+ case 32:
+ code = BUILT_IN_TM_STORE_4;
+ break;
+ case 64:
+ code = BUILT_IN_TM_STORE_8;
+ break;
+ default:
+ return NULL;
+ }
}
}
- if (code == END_BUILTINS)
- {
- fn = targetm.vectorize.builtin_tm_store (type);
- if (!fn)
- return NULL;
- }
- else
- fn = builtin_decl_explicit (code);
+ fn = builtin_decl_explicit (code);
+ gcc_assert (fn);
simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
}
else if (!useless_type_conversion_p (simple_type, type))
{
- gimple g;
+ gimple *g;
tree temp;
temp = create_tmp_reg (simple_type);
static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gimple *stmt = gsi_stmt (*gsi);
location_t loc = gimple_location (stmt);
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
bool store_p = requires_barrier (region->entry_block, lhs, NULL);
bool load_p = requires_barrier (region->entry_block, rhs, NULL);
- gimple gcall = NULL;
+ gimple *gcall = NULL;
if (!load_p && !store_p)
{
return;
}
+ if (load_p)
+ transaction_subcode_ior (region, GTMA_HAVE_LOAD);
+ if (store_p)
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+
// Remove original load/store statement.
gsi_remove (gsi, true);
+ // Attempt to use a simple load/store helper function.
if (load_p && !store_p)
- {
- transaction_subcode_ior (region, GTMA_HAVE_LOAD);
- gcall = build_tm_load (loc, lhs, rhs, gsi);
- }
+ gcall = build_tm_load (loc, lhs, rhs, gsi);
else if (store_p && !load_p)
- {
- transaction_subcode_ior (region, GTMA_HAVE_STORE);
- gcall = build_tm_store (loc, lhs, rhs, gsi);
- }
+ gcall = build_tm_store (loc, lhs, rhs, gsi);
+
+ // If gcall has not been set, then we do not have a simple helper
+ // function available for the type. This may be true of larger
+ // structures, vectors, and non-standard float types.
if (!gcall)
{
- tree lhs_addr, rhs_addr, tmp;
-
- if (load_p)
- transaction_subcode_ior (region, GTMA_HAVE_LOAD);
- if (store_p)
- transaction_subcode_ior (region, GTMA_HAVE_STORE);
+ tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
- /* ??? Figure out if there's any possible overlap between the LHS
- and the RHS and if not, use MEMCPY. */
+ // If this is a type that we couldn't handle above, but it's
+ // in a register, we must spill it to memory for the copy.
+ if (is_gimple_reg (lhs))
+ {
+ ltmp = create_tmp_var (TREE_TYPE (lhs));
+ lhs_addr = build_fold_addr_expr (ltmp);
+ }
+ else
+ lhs_addr = gimplify_addr (gsi, lhs);
+ if (is_gimple_reg (rhs))
+ {
+ tree rtmp = create_tmp_var (TREE_TYPE (rhs));
+ TREE_ADDRESSABLE (rtmp) = 1;
+ rhs_addr = build_fold_addr_expr (rtmp);
+ gcall = gimple_build_assign (rtmp, rhs);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
+ else
+ rhs_addr = gimplify_addr (gsi, rhs);
- if (load_p && is_gimple_reg (lhs))
+ // Choose the appropriate memory transfer function.
+ if (load_p && store_p)
+ {
+ // ??? Figure out if there's any possible overlap between
+ // the LHS and the RHS and if not, use MEMCPY.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
+ }
+ else if (load_p)
{
- tmp = create_tmp_var (TREE_TYPE (lhs));
- lhs_addr = build_fold_addr_expr (tmp);
+ // Note that the store is non-transactional and cannot overlap.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
}
else
{
- tmp = NULL_TREE;
- lhs_addr = gimplify_addr (gsi, lhs);
+ // Note that the load is non-transactional and cannot overlap.
+ copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
}
- rhs_addr = gimplify_addr (gsi, rhs);
- gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
- 3, lhs_addr, rhs_addr,
+
+ gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
- if (tmp)
+ if (ltmp)
{
- gcall = gimple_build_assign (lhs, tmp);
+ gcall = gimple_build_assign (lhs, ltmp);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
}
- /* Now that we have the load/store in its instrumented form, add
- thread private addresses to the log if applicable. */
+ // Now that we have the load/store in its instrumented form, add
+ // thread private addresses to the log if applicable.
if (!store_p)
requires_barrier (region->entry_block, lhs, gcall);
-
- // The calls to build_tm_{store,load} above inserted the instrumented
- // call into the stream.
- // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}
gimple_call_set_fndecl (stmt, repl);
update_stmt (stmt);
node = cgraph_node::create (repl);
- node->local.tm_may_enter_irr = false;
+ node->tm_may_enter_irr = false;
return expand_call_tm (region, gsi);
}
gcc_unreachable ();
}
- if (node->local.tm_may_enter_irr)
+ if (node->tm_may_enter_irr)
transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
if (is_tm_abort (fn_decl))
gassign *assign_stmt;
/* Remember if the call was going to throw. */
- if (stmt_can_throw_internal (stmt))
+ if (stmt_can_throw_internal (cfun, stmt))
{
edge_iterator ei;
edge e;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
- vec<tm_region_p> *bb2reg;
+ vec<tm_region *> *bb2reg;
bool include_uninstrumented_p;
};
collect_bb2reg (struct tm_region *region, void *data)
{
struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
- vec<tm_region_p> *bb2reg = stuff->bb2reg;
+ vec<tm_region *> *bb2reg = stuff->bb2reg;
vec<basic_block> queue;
unsigned int i;
basic_block bb;
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.
-static vec<tm_region_p>
+static vec<tm_region *>
get_bb_regions_instrumented (bool traverse_clones,
bool include_uninstrumented_p)
{
unsigned n = last_basic_block_for_fn (cfun);
struct bb2reg_stuff stuff;
- vec<tm_region_p> ret;
+ vec<tm_region *> ret;
ret.create (n);
- ret.safe_grow_cleared (n);
+ ret.safe_grow_cleared (n, true);
stuff.bb2reg = &ret;
stuff.include_uninstrumented_p = include_uninstrumented_p;
expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
- gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
+ gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
redirect_edge_pred (fallthru_edge, join_bb);
- join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
join_bb->count = test_bb->count = transaction_bb->count;
- ei->probability = PROB_ALWAYS;
- et->probability = PROB_LIKELY;
- ef->probability = PROB_UNLIKELY;
- et->count = apply_probability (test_bb->count, et->probability);
- ef->count = apply_probability (test_bb->count, ef->probability);
+ ei->probability = profile_probability::always ();
+ et->probability = profile_probability::likely ();
+ ef->probability = profile_probability::unlikely ();
- code_bb->count = et->count;
- code_bb->frequency = EDGE_FREQUENCY (et);
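+  // et->count () derives the edge count from test_bb's count and the
+  // edge probability, so no separate count bookkeeping is needed.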
+ code_bb->count = et->count ();
transaction_bb = join_bb;
}
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
- gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
+ gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
- test_bb->frequency = transaction_bb->frequency;
test_bb->count = transaction_bb->count;
- ei->probability = PROB_ALWAYS;
+ ei->probability = profile_probability::always ();
// Not an abort edge.  If both are live, choose one at random as
// we'll be fixing that up below.
redirect_edge_pred (fallthru_edge, test_bb);
fallthru_edge->flags = EDGE_FALSE_VALUE;
- fallthru_edge->probability = PROB_VERY_LIKELY;
- fallthru_edge->count
- = apply_probability (test_bb->count, fallthru_edge->probability);
+ fallthru_edge->probability = profile_probability::very_likely ();
// Abort/over edge.
redirect_edge_pred (abort_edge, test_bb);
abort_edge->flags = EDGE_TRUE_VALUE;
- abort_edge->probability = PROB_VERY_UNLIKELY;
- abort_edge->count
- = apply_probability (test_bb->count, abort_edge->probability);
+ abort_edge->probability = profile_probability::unlikely ();
transaction_bb = test_bb;
}
tree t1 = create_tmp_reg (tm_state_type);
tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
- gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
+ gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
// out of the fallthru edge.
edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
e->probability = fallthru_edge->probability;
- test_bb->count = e->count = fallthru_edge->count;
- test_bb->frequency = EDGE_FREQUENCY (e);
+ test_bb->count = fallthru_edge->count ();
// Now update the edges to the inst/uninst implementations.
// For now assume that the paths are equally likely. When using HTM,
// use the uninst path when falling back to serial mode.
redirect_edge_pred (inst_edge, test_bb);
inst_edge->flags = EDGE_FALSE_VALUE;
- inst_edge->probability = REG_BR_PROB_BASE / 2;
- inst_edge->count
- = apply_probability (test_bb->count, inst_edge->probability);
+ inst_edge->probability = profile_probability::even ();
redirect_edge_pred (uninst_edge, test_bb);
uninst_edge->flags = EDGE_TRUE_VALUE;
- uninst_edge->probability = REG_BR_PROB_BASE / 2;
- uninst_edge->count
- = apply_probability (test_bb->count, uninst_edge->probability);
+ uninst_edge->probability = profile_probability::even ();
}
// If we have no previous special cases, and we have PHIs at the beginning
tm_log_init ();
- vec<tm_region_p> bb_regions
+ vec<tm_region *> bb_regions
= get_bb_regions_instrumented (/*traverse_clones=*/true,
/*include_uninstrumented_p=*/false);
struct tm_region *r;
as necessary. Adjust *PNEXT as needed for the split block. */
static inline void
-split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
+split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
{
basic_block bb = gimple_bb (stmt);
edge e = split_block (bb, stmt);
*pnext = gsi_start_bb (e->dest);
}
- make_edge (bb, dest_bb, EDGE_ABNORMAL);
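+  // The abnormal edge models the transaction restart path and is
+  // guessed never to be taken.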
+ edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL);
+ if (e)
+ e->probability = profile_probability::guessed_never ();
// Record the need for the edge for the benefit of the rtl passes.
if (cfun->gimple_df->tm_restart == NULL)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
gcall *call_stmt;
next_gsi = gsi;
|| (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
continue;
- if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
- == BUILT_IN_TM_ABORT)
+ if (gimple_call_builtin_p (call_stmt, BUILT_IN_TM_ABORT))
{
// If we have a ``__transaction_cancel [[outer]]'', there is only
// one abnormal edge: to the transaction marked OUTER.
unsigned int
pass_tm_edges::execute (function *fun)
{
- vec<tm_region_p> bb_regions
+ vec<tm_region *> bb_regions
= get_bb_regions_instrumented (/*traverse_clones=*/false,
/*include_uninstrumented_p=*/true);
struct tm_region *r;
must be rebuilt completely. Otherwise we'll crash trying to update
the SSA web in the TODO section following this pass. */
free_dominance_info (CDI_DOMINATORS);
+  /* We've also wrecked loops badly by inserting abnormal edges.  */
+ loops_state_set (LOOPS_NEED_FIXUP);
bitmap_obstack_release (&tm_obstack);
all_tm_regions = NULL;
\f
/* A unique TM memory operation. */
-typedef struct tm_memop
+struct tm_memop
{
/* Unique ID that all memory operations to the same location have. */
unsigned int value_id;
/* Address of load/store. */
tree addr;
-} *tm_memop_t;
+};
/* TM memory operation hashtable helpers. */
it accesses. */
static unsigned int
-tm_memopt_value_number (gimple stmt, enum insert_option op)
+tm_memopt_value_number (gimple *stmt, enum insert_option op)
{
struct tm_memop tmpmem, *mem;
tm_memop **slot;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
bitmap bits;
unsigned int loc;
fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
is_tm_load (stmt) ? "LOAD" : "STORE", loc,
gimple_bb (stmt)->index);
- print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
+ print_generic_expr (dump_file, gimple_call_arg (stmt, 0));
fprintf (dump_file, "\n");
}
}
gcc_assert (mem->value_id == i);
fprintf (dump_file, "%s", comma);
comma = ", ";
- print_generic_expr (dump_file, mem->addr, 0);
+ print_generic_expr (dump_file, mem->addr);
}
fprintf (dump_file, "]\n");
}
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks in the region. */
+ gcc_assert (!blocks.is_empty ());
qlen = blocks.length () - 1;
- qin = qout = worklist =
- XNEWVEC (basic_block, qlen);
+ qin = qout = worklist = XNEWVEC (basic_block, qlen);
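  /* The entry block is never placed on the queue (its AVAIL_OUT does
     not change), so length () - 1 slots suffice.  */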
/* Put every block in the region on the worklist. */
for (i = 0; blocks.iterate (i, &bb); ++i)
/* Inform about a load/store optimization. */
static void
-dump_tm_memopt_transform (gimple stmt)
+dump_tm_memopt_transform (gimple *stmt)
{
if (dump_file)
{
fprintf (dump_file, "TM memopt: transforming: ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
+ print_gimple_stmt (dump_file, stmt, 0);
fprintf (dump_file, "\n");
}
}
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
bitmap read_avail = READ_AVAIL_IN (bb);
bitmap store_avail = STORE_AVAIL_IN (bb);
bitmap store_antic = STORE_ANTIC_OUT (bb);
}
}
-/* Duplicate the basic blocks in QUEUE for use in the uninstrumented
- code path. QUEUE are the basic blocks inside the transaction
- represented in REGION.
-
- Later in split_code_paths() we will add the conditional to choose
- between the two alternatives. */
-
-static void
-ipa_uninstrument_transaction (struct tm_region *region,
- vec<basic_block> queue)
-{
- gimple transaction = region->transaction_stmt;
- basic_block transaction_bb = gimple_bb (transaction);
- int n = queue.length ();
- basic_block *new_bbs = XNEWVEC (basic_block, n);
-
- copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
- true);
- edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
- add_phi_args_after_copy (new_bbs, n, e);
-
- // Now we will have a GIMPLE_ATOMIC with 3 possible edges out of it.
- // a) EDGE_FALLTHRU into the transaction
- // b) EDGE_TM_ABORT out of the transaction
- // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
-
- free (new_bbs);
-}
-
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
Queue all callees within block BB. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
cgraph_node_queue *callees_p)
{
- struct tm_region *r;
-
d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
d->all_tm_regions = all_tm_regions;
- for (r = all_tm_regions; r; r = r->next)
+ for (tm_region *r = all_tm_regions; r; r = r->next)
{
vec<basic_block> bbs;
basic_block bb;
unsigned i;
bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
- d->transaction_blocks_normal, false);
-
- // Generate the uninstrumented code path for this transaction.
- ipa_uninstrument_transaction (r, bbs);
+ d->transaction_blocks_normal, false, false);
FOR_EACH_VEC_ELT (bbs, i, bb)
ipa_tm_scan_calls_block (callees_p, bb, false);
bbs.release ();
}
-
- // ??? copy_bbs should maintain cgraph edges for the blocks as it is
- // copying them, rather than forcing us to do this externally.
- cgraph_edge::rebuild_edges ();
-
- // ??? In ipa_uninstrument_transaction we don't try to update dominators
- // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
- // Instead, just release dominators here so update_ssa recomputes them.
- free_dominance_info (CDI_DOMINATORS);
-
- // When building the uninstrumented code path, copy_bbs will have invoked
- // create_new_def_for starting an "ssa update context". There is only one
- // instance of this context, so resolve ssa updates before moving on to
- // the next function.
- update_ssa (TODO_update_ssa);
}
/* Scan all calls in NODE as if this is the transactional clone,
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
- if (volatile_var_p (lhs) || volatile_var_p (rhs))
+ if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
return true;
}
break;
case GIMPLE_CALL:
{
tree lhs = gimple_call_lhs (stmt);
- if (lhs && volatile_var_p (lhs))
+ if (lhs && volatile_lvalue_p (lhs))
return true;
if (is_tm_pure_call (stmt))
is to wrap it in a __tm_waiver block. This is not
yet implemented, so we can't check for it. */
if (is_tm_safe (current_function_decl))
- {
- tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
- SET_EXPR_LOCATION (t, gimple_location (stmt));
- error ("%Kasm not allowed in %<transaction_safe%> function", t);
- }
+ error_at (gimple_location (stmt),
+ "%<asm%> not allowed in %<transaction_safe%> function");
return true;
default:
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
/* We may have previously marked this function as tm_may_enter_irr;
see pass_diagnose_tm_blocks. */
- if (node->local.tm_may_enter_irr)
+ if (node->tm_may_enter_irr)
return true;
/* Recurse on the main body for aliases. In general, this will
result in one of the bits above being set so that we will not
have to recurse next time. */
if (node->alias)
- return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
+ return ipa_tm_mayenterirr_function
+ (cgraph_node::get (thunk_info::get (node)->alias));
/* What remains is unmarked local functions without items that force
the function to go irrevocable. */
for (e = node->callees; e ; e = e->next_callee)
if (!is_tm_callable (e->callee->decl)
- && e->callee->local.tm_may_enter_irr)
+ && e->callee->tm_may_enter_irr)
error_at (gimple_location (e->call_stmt),
"unsafe function call %qD within "
"%<transaction_safe%> function", e->callee->decl);
for (i = 0; bbs.iterate (i, &bb); ++i)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
tree fndecl;
if (gimple_code (stmt) == GIMPLE_ASM)
{
error_at (gimple_location (stmt),
- "asm not allowed in atomic transaction");
+ "%<asm%> not allowed in atomic transaction");
continue;
}
if (is_tm_callable (fndecl))
continue;
- if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
+ if (cgraph_node::local_info_node (fndecl)->tm_may_enter_irr)
error_at (gimple_location (stmt),
"unsafe function call %qD within "
"atomic transaction", fndecl);
if (dc == NULL)
{
- char length[8];
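+      /* Large enough to print any unsigned int in decimal.  */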
+ char length[12];
do_unencoded:
sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
- new_node->local.local = false;
+ new_node->local = false;
new_node->externally_visible = old_node->externally_visible;
new_node->lowered = true;
new_node->tm_clone = 1;
if (!old_node->implicit_section)
- new_node->set_section (old_node->get_section ());
+ new_node->set_section (*old_node);
get_cg_data (&old_node, true)->clone = new_node;
if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
}
tree_function_versioning (old_decl, new_decl,
- NULL, false, NULL,
- false, NULL, NULL);
+ NULL, NULL, false, NULL, NULL);
}
record_tm_clone_pair (old_decl, new_decl);
node->create_edge (cgraph_node::get_create
(builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
- g, 0,
- compute_call_stmt_bb_frequency (node->decl,
- gimple_bb (g)));
+ g, gimple_bb (g)->count);
}
/* Construct a call to TM_GETTMCLONE and insert it before GSI. */
gsi_insert_before (gsi, g, GSI_SAME_STMT);
- node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
- compute_call_stmt_bb_frequency (node->decl,
- gimple_bb (g)));
+  node->create_edge (cgraph_node::get_create (gettm_fn), g,
+                     gimple_bb (g)->count);
/* Cast return value from tm_gettmclone* into appropriate function
pointer. */
CALLER. Also note that find_tm_replacement_function also
contains mappings into the TM runtime, e.g. memcpy. These
we know won't go irrevocable. */
- new_node->local.tm_may_enter_irr = 1;
+ new_node->tm_may_enter_irr = 1;
}
else
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
if (!is_gimple_call (stmt))
continue;
enum availability a;
unsigned int i;
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+ cgraph_node::checking_verify_cgraph_nodes ();
bitmap_obstack_initialize (&tm_obstack);
initialize_original_copy_tables ();
No need to do this if the function's address can't be taken. */
if (is_tm_pure (node->decl))
{
- if (!node->local.local)
+ if (!node->local)
record_tm_clone_pair (node->decl, node->decl);
continue;
}
we need not scan the callees now, as the base will do. */
if (node->alias)
{
- node = cgraph_node::get (node->thunk.alias);
+ node = cgraph_node::get (thunk_info::get (node)->alias);
d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
continue;
node = irr_worklist[i];
d = get_cg_data (&node, true);
d->in_worklist = false;
- node->local.tm_may_enter_irr = true;
+ node->tm_may_enter_irr = true;
/* Propagate back to normal callers. */
for (e = node->callers; e ; e = e->next_caller)
{
caller = e->caller;
if (!is_tm_safe_or_pure (caller->decl)
- && !caller->local.tm_may_enter_irr)
+ && !caller->tm_may_enter_irr)
{
d = get_cg_data (&caller, true);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
FOR_EACH_ALIAS (node, ref)
{
caller = dyn_cast<cgraph_node *> (ref->referring);
- if (!caller->local.tm_may_enter_irr)
+ if (!caller->tm_may_enter_irr)
{
/* ?? Do not traverse aliases here. */
d = get_cg_data (&caller, false);
FOR_EACH_FUNCTION (node)
node->aux = NULL;
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+ cgraph_node::checking_verify_cgraph_nodes ();
return 0;
}