};
// State used by the pass for a given basic block.
-struct ldp_bb_info
+struct pair_fusion_bb_info
{
using def_hash = nofree_ptr_hash<def_info>;
using expr_key_t = pair_hash<tree_operand_hash, int_hash<int, -1, -2>>;
static const size_t obstack_alignment = sizeof (void *);
- ldp_bb_info (bb_info *bb, pair_fusion *d)
+ pair_fusion_bb_info (bb_info *bb, pair_fusion *d)
: m_bb (bb), m_pass (d), m_emitted_tombstone (false)
{
obstack_specify_allocation (&m_obstack, OBSTACK_CHUNK_SIZE,
obstack_alignment, obstack_chunk_alloc,
obstack_chunk_free);
}
- ~ldp_bb_info ()
+ ~pair_fusion_bb_info ()
{
obstack_free (&m_obstack, nullptr);
}
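// Aside: a minimal, self-contained sketch of the RAII obstack idiom
// used above.  The struct name is hypothetical;
// obstack_specify_allocation/obstack_free are the real obstack APIs,
// and obstack_chunk_alloc/obstack_chunk_free are the chunk hooks the
// pass passes in above (GCC maps these to xmalloc/free).
struct example_obstack_owner
{
  obstack m_obstack;

  example_obstack_owner ()
  {
    // A chunk size of 0 selects the default chunk size.
    obstack_specify_allocation (&m_obstack, 0, sizeof (void *),
                                obstack_chunk_alloc, obstack_chunk_free);
  }

  ~example_obstack_owner ()
  {
    // Passing nullptr frees every object on the obstack at once, so
    // no per-node bookkeeping is needed.
    obstack_free (&m_obstack, nullptr);
  }
};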
splay_tree_node<access_record *> *
-ldp_bb_info::node_alloc (access_record *access)
+pair_fusion_bb_info::node_alloc (access_record *access)
{
using T = splay_tree_node<access_record *>;
void *addr = obstack_alloc (&m_obstack, sizeof (T));
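// Hedged guess at the elided tail: obstack_alloc only reserves raw,
// suitably aligned storage (hence the obstack_alignment constant
// above), so the node is presumably finished with a placement new,
// e.g.
//
//   return new (addr) T (access);
//
// with <new> providing the placement form.  No destructor call is
// needed later, since the whole obstack is released in one go by the
// destructor above.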
// RTX_AUTOINC addresses. The interface is like strip_offset except we take a
// MEM so that we know the mode of the access.
static rtx
-ldp_strip_offset (rtx mem, poly_int64 *offset)
+pair_mem_strip_offset (rtx mem, poly_int64 *offset)
{
rtx addr = XEXP (mem, 0);
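// For the common non-autoinc case, the stripping reduces to peeling a
// constant displacement off a (plus base (const_int N)) address; the
// MEM itself matters for the autoinc forms, since pre/post-increment
// adjust the base by the access size, GET_MODE_SIZE (GET_MODE (mem)).
// A hedged sketch of the simple case only (helper name hypothetical;
// poly_int_rtx_p is the real rtl.h predicate):
static rtx
example_strip_plus_offset (rtx addr, poly_int64 *offset)
{
  *offset = 0;
  if (GET_CODE (addr) == PLUS
      && poly_int_rtx_p (XEXP (addr, 1), offset))
    return XEXP (addr, 0);
  return addr;
}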
// MEM_EXPR base (i.e. a tree decl) relative to which we can track the access.
// LFS is used as part of the key to the hash table, see track_access.
bool
-ldp_bb_info::track_via_mem_expr (insn_info *insn, rtx mem, lfs_fields lfs)
+pair_fusion_bb_info::track_via_mem_expr (insn_info *insn, rtx mem,
+                                         lfs_fields lfs)
{
if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem))
return false;
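// LFS packs (load_p, FP/SIMD-ness, access size) into an int so it can
// form the int_hash half of expr_key_t above.  A hedged sketch of one
// plausible encoding (field names and layout assumed, not shown in
// this excerpt); note that int_hash<int, -1, -2> reserves -1 and -2
// as the empty and deleted sentinels, so valid encodings must avoid
// them:
static int
example_encode_lfs (lfs_fields fields)
{
  int size_log2 = exact_log2 (fields.size);
  // For 4/8/16-byte accesses this yields small non-negative values.
  return ((int) fields.load_p << 3)
         | ((int) fields.fpsimd_p << 2)
         | (size_log2 - 2);
}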
// this basic block. LOAD_P is true if the access is a load, and MEM
// is the mem rtx that occurs in INSN.
void
-ldp_bb_info::track_access (insn_info *insn, bool load_p, rtx mem)
+pair_fusion_bb_info::track_access (insn_info *insn, bool load_p, rtx mem)
{
// We can't combine volatile MEMs, so punt on these.
if (MEM_VOLATILE_P (mem))
poly_int64 mem_off;
rtx addr = XEXP (mem, 0);
const bool autoinc_p = GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC;
- rtx base = ldp_strip_offset (mem, &mem_off);
+ rtx base = pair_mem_strip_offset (mem, &mem_off);
if (!REG_P (base))
return;
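// The early exits above combine into a predicate along these lines
// (hypothetical helper, for exposition only):
static bool
example_trackable_access_p (rtx mem, poly_int64 *off, rtx *base_out)
{
  // Volatile accesses must not be combined or reordered.
  if (MEM_VOLATILE_P (mem))
    return false;

  // Pairing needs a register base to group and sort accesses by.
  rtx base = pair_mem_strip_offset (mem, off);
  if (!REG_P (base))
    return false;

  *base_out = base;
  return true;
}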
// Class that implements a state machine for building the changes needed to form
// a store pair instruction. This allows us to easily build the changes in
// program order, as required by rtl-ssa.
-struct stp_change_builder
+struct store_change_builder
{
enum class state
{
bool done () const { return m_state == state::DONE; }
- stp_change_builder (insn_info *insns[2],
-                     insn_info *repurpose,
-                     insn_info *dest)
+ store_change_builder (insn_info *insns[2],
+                       insn_info *repurpose,
+                       insn_info *dest)
: m_state (state::FIRST), m_insns { insns[0], insns[1] },
m_repurpose (repurpose), m_dest (dest), m_use (nullptr) {}
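// A hedged sketch of the iteration contract this builder exposes
// (state names illustrative; the enum body is elided above): starting
// from an initial state, each advance () moves to the next change in
// program order until done () reports the final state, e.g.
//
//   enum class state { FIRST, INSERT, FIXUP_USE, LAST, DONE };
//
// Building strictly in program order is what lets rtl-ssa validate
// and commit the whole group of changes as one attempt.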
const bool autoinc_p = GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC;
poly_int64 offset;
- rtx this_base = ldp_strip_offset (mem, &offset);
+ rtx this_base = pair_mem_strip_offset (mem, &offset);
gcc_assert (REG_P (this_base));
if (base_reg)
gcc_assert (rtx_equal_p (base_reg, this_base));
// We just emitted a tombstone with uid UID, track it in a bitmap for
// this BB so we can easily identify it later when cleaning up tombstones.
void
-ldp_bb_info::track_tombstone (int uid)
+pair_fusion_bb_info::track_tombstone (int uid)
{
if (!m_emitted_tombstone)
{
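// Hedged reconstruction of the likely body, following the comment
// above: lazily create the bitmap on the first tombstone, then record
// the UID.  bitmap_obstack_initialize, bitmap_initialize and
// bitmap_set_bit are the real GCC bitmap APIs; the member names are
// assumed, e.g.
//
//   bitmap_obstack_initialize (&m_bitmap_obstack);
//   bitmap_initialize (&m_tombstone_bitmap, &m_bitmap_obstack);
//   m_emitted_tombstone = true;
//   ...
//   // bitmap_set_bit returns true iff the bit changed, so a
//   // duplicate UID would indicate a bug.
//   if (!bitmap_set_bit (&m_tombstone_bitmap, uid))
//     gcc_unreachable ();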
gcc_checking_assert (GET_RTX_CLASS (GET_CODE (XEXP (mem, 0)))
== RTX_AUTOINC);
- base = ldp_strip_offset (mem, &offset);
+ base = pair_mem_strip_offset (mem, &offset);
gcc_checking_assert (REG_P (base) && REGNO (base) == base_regno);
}
fixup_debug_use (attempt, use, def, base, offset);
// BASE gives the chosen base candidate for the pair and MOVE_RANGE is
// a singleton range which says where to place the pair.
bool
-ldp_bb_info::fuse_pair (bool load_p,
-                        unsigned access_size,
-                        int writeback,
-                        insn_info *i1, insn_info *i2,
-                        base_cand &base,
-                        const insn_range_info &move_range)
+pair_fusion_bb_info::fuse_pair (bool load_p,
+                                unsigned access_size,
+                                int writeback,
+                                insn_info *i1, insn_info *i2,
+                                base_cand &base,
+                                const insn_range_info &move_range)
{
auto attempt = crtl->ssa->new_change_attempt ();
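// new_change_attempt returns an obstack watermark that scopes the
// whole attempt: temporary access arrays built against it (see
// merge_access_arrays and insert_access below) live until the changes
// are committed or abandoned.  A hedged outline of the rtl-ssa idiom
// (simplified; the surrounding pass does considerably more checking):
//
//   auto attempt = crtl->ssa->new_change_attempt ();
//   insn_change change (insn);
//   change.new_defs = ...;           // built relative to ATTEMPT
//   change.move_range = move_range;  // where the insn may be placed
//   insn_change *changes[] = { &change };
//   crtl->ssa->change_insns (changes);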
}
else
{
- using Action = stp_change_builder::action;
+ using Action = store_change_builder::action;
insn_info *store_to_change = try_repurpose_store (first, second,
move_range);
- stp_change_builder builder (insns, store_to_change, pair_dst);
+ store_change_builder builder (insns, store_to_change, pair_dst);
insn_change *change;
set_info *new_set = nullptr;
for (; !builder.done (); builder.advance ())
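// Hedged sketch of the shape of this loop's body (the accessor and
// action names are assumed, not shown in this excerpt): each step
// yields one change to apply in program order, e.g. turning a dead
// store into a tombstone, modifying an existing insn into the pair,
// inserting a new insn, or fixing up a use.
//
//   auto action = builder.get_change ();  // assumed accessor
//   switch (action.type)
//     {
//     case Action::TOMBSTONE: /* ... */ break;
//     case Action::CHANGE:    /* ... */ break;
//     case Action::INSERT:    /* ... */ break;
//     case Action::FIXUP_USE: /* ... */ break;
//     }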
auto d2 = drop_memory_access (input_defs[1]);
change->new_defs = merge_access_arrays (attempt, d1, d2);
gcc_assert (change->new_defs.is_valid ());
- def_info *stp_def = memory_access (change->insn ()->defs ());
+ def_info *store_def = memory_access (change->insn ()->defs ());
change->new_defs = insert_access (attempt,
- stp_def,
+ store_def,
change->new_defs);
gcc_assert (change->new_defs.is_valid ());
change->move_range = move_range;
{
const bool is_lower = (i == reversed);
poly_int64 poly_off;
- rtx base = ldp_strip_offset (cand_mems[i], &poly_off);
+ rtx base = pair_mem_strip_offset (cand_mems[i], &poly_off);
if (GET_RTX_CLASS (GET_CODE (XEXP (cand_mems[i], 0))) == RTX_AUTOINC)
writeback |= (1 << i);
continue;
// Punt on accesses relative to eliminable regs. See the comment in
- // ldp_bb_info::track_access for a detailed explanation of this.
+ // pair_fusion_bb_info::track_access for a detailed explanation of this.
if (!reload_completed
&& (REGNO (base) == FRAME_POINTER_REGNUM
|| REGNO (base) == ARG_POINTER_REGNUM))
// ACCESS_SIZE gives the (common) size of a single access, LOAD_P is true
// if the accesses are both loads, otherwise they are both stores.
bool
-ldp_bb_info::try_fuse_pair (bool load_p, unsigned access_size,
-                            insn_info *i1, insn_info *i2)
+pair_fusion_bb_info::try_fuse_pair (bool load_p, unsigned access_size,
+                                    insn_info *i1, insn_info *i2)
{
if (dump_file)
fprintf (dump_file, "analyzing pair (load=%d): (%d,%d)\n",
// we can't re-order them anyway, so provided earlier passes have cleaned up
// redundant loads, we shouldn't miss opportunities by doing this.
void
-ldp_bb_info::merge_pairs (insn_list_t &left_list,
-                          insn_list_t &right_list,
-                          bool load_p,
-                          unsigned access_size)
+pair_fusion_bb_info::merge_pairs (insn_list_t &left_list,
+                                  insn_list_t &right_list,
+                                  bool load_p,
+                                  unsigned access_size)
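// Conceptually this is a single linear merge over two lists already
// in program order.  A hedged sketch of the shape (the real loop also
// erases fused insns and advances its iterators more carefully;
// insn_info::compare_with orders insns by program point):
//
//   auto il = left_list.begin ();
//   auto ir = right_list.begin ();
//   while (il != left_list.end () && ir != right_list.end ())
//     if (try_fuse_pair (load_p, access_size, *il, *ir))
//       {
//         il = left_list.erase (il);
//         ir = right_list.erase (ir);
//       }
//     else if ((*il)->compare_with (*ir) < 0)
//       ++il;
//     else
//       ++ir;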
// of accesses. If we find two sets of adjacent accesses, call
// merge_pairs.
void
-ldp_bb_info::transform_for_base (int encoded_lfs,
-                                 access_group &group)
+pair_fusion_bb_info::transform_for_base (int encoded_lfs,
+                                         access_group &group)
{
const auto lfs = decode_lfs (encoded_lfs);
// and remove all the tombstone insns, being sure to reparent any uses
// of mem to previous defs when we do this.
void
-ldp_bb_info::cleanup_tombstones ()
+pair_fusion_bb_info::cleanup_tombstones ()
{
// No need to do anything if we didn't emit a tombstone insn for this BB.
if (!m_emitted_tombstone)
template<typename Map>
void
-ldp_bb_info::traverse_base_map (Map &map)
+pair_fusion_bb_info::traverse_base_map (Map &map)
{
for (auto kv : map)
{
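// Hedged guess at the elided loop body: both maps are keyed by a
// (base, encoded LFS) pair, so each entry hands its encoded LFS and
// its access_group to transform_for_base, e.g.
//
//   const auto &key = kv.first;
//   transform_for_base (key.second, kv.second);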
}
void
-ldp_bb_info::transform ()
+pair_fusion_bb_info::transform ()
{
traverse_base_map (expr_map);
traverse_base_map (def_map);
const bool track_loads = track_loads_p ();
const bool track_stores = track_stores_p ();
- ldp_bb_info bb_state (bb, this);
+ pair_fusion_bb_info bb_state (bb, this);
for (auto insn : bb->nondebug_insns ())
{
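// Hedged sketch of how the per-BB scan typically continues: look for
// a simple single-SET load or store, track it, and after the loop run
// the transformation and tombstone cleanup.  The exact pattern checks
// are assumed, not shown in this excerpt.
//
//   rtx_insn *rti = insn->rtl ();
//   if (!rti || !INSN_P (rti))
//     continue;
//   rtx pat = PATTERN (rti);
//   if (GET_CODE (pat) == SET)
//     {
//       if (track_loads && MEM_P (SET_SRC (pat)))
//         bb_state.track_access (insn, true, SET_SRC (pat));
//       else if (track_stores && MEM_P (SET_DEST (pat)))
//         bb_state.track_access (insn, false, SET_DEST (pat));
//     }
//
//   ... and once all insns are tracked:
//
//   bb_state.transform ();
//   bb_state.cleanup_tombstones ();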