/* Copy propagation on hard registers for the GNU compiler.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
- 2010 Free Software Foundation, Inc.
+ Copyright (C) 2000-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
#include "rtl.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
-#include "addresses.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "reload.h"
-#include "function.h"
+#include "emit-rtl.h"
#include "recog.h"
-#include "flags.h"
#include "diagnostic-core.h"
-#include "obstack.h"
+#include "addresses.h"
#include "tree-pass.h"
-#include "df.h"
+#include "rtl-iter.h"
+#include "cfgrtl.h"
+#include "target.h"
+#include "function-abi.h"
/* The following code does forward propagation of hard register copies.
The object is to eliminate as many dependencies as possible, so that
struct queued_debug_insn_change
{
struct queued_debug_insn_change *next;
- rtx insn;
+ rtx_insn *insn;
rtx *loc;
rtx new_rtx;
};
struct value_data_entry
{
- enum machine_mode mode;
+ machine_mode mode;
unsigned int oldest_regno;
unsigned int next_regno;
struct queued_debug_insn_change *debug_insn_changes;
unsigned int n_debug_insn_changes;
};
-static alloc_pool debug_insn_changes_pool;
+static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
+ ("debug insn changes pool");
+
+static bool skip_debug_insn_p;
static void kill_value_one_regno (unsigned, struct value_data *);
static void kill_value_regno (unsigned, unsigned, struct value_data *);
-static void kill_value (rtx, struct value_data *);
-static void set_value_regno (unsigned, enum machine_mode, struct value_data *);
+static void kill_value (const_rtx, struct value_data *);
+static void set_value_regno (unsigned, machine_mode, struct value_data *);
static void init_value_data (struct value_data *);
static void kill_clobbered_value (rtx, const_rtx, void *);
static void kill_set_value (rtx, const_rtx, void *);
-static int kill_autoinc_value (rtx *, void *);
static void copy_value (rtx, rtx, struct value_data *);
-static bool mode_change_ok (enum machine_mode, enum machine_mode,
+static bool mode_change_ok (machine_mode, machine_mode,
unsigned int);
-static rtx maybe_mode_change (enum machine_mode, enum machine_mode,
- enum machine_mode, unsigned int, unsigned int);
+static rtx maybe_mode_change (machine_mode, machine_mode,
+ machine_mode, unsigned int, unsigned int);
static rtx find_oldest_value_reg (enum reg_class, rtx, struct value_data *);
-static bool replace_oldest_value_reg (rtx *, enum reg_class, rtx,
+static bool replace_oldest_value_reg (rtx *, enum reg_class, rtx_insn *,
struct value_data *);
static bool replace_oldest_value_addr (rtx *, enum reg_class,
- enum machine_mode, addr_space_t, rtx,
- struct value_data *);
-static bool replace_oldest_value_mem (rtx, rtx, struct value_data *);
+ machine_mode, addr_space_t,
+ rtx_insn *, struct value_data *);
+static bool replace_oldest_value_mem (rtx, rtx_insn *, struct value_data *);
static bool copyprop_hardreg_forward_1 (basic_block, struct value_data *);
extern void debug_value_data (struct value_data *);
-#ifdef ENABLE_CHECKING
static void validate_value_data (struct value_data *);
-#endif
/* Free all queued updates for DEBUG_INSNs that change some reg to
register REGNO. */
{
next = cur->next;
--vd->n_debug_insn_changes;
- pool_free (debug_insn_changes_pool, cur);
+ queued_debug_insn_change_pool.remove (cur);
}
vd->e[regno].debug_insn_changes = NULL;
}
if (vd->e[regno].debug_insn_changes)
free_debug_insn_changes (vd, regno);
-#ifdef ENABLE_CHECKING
- validate_value_data (vd);
-#endif
+ if (flag_checking)
+ validate_value_data (vd);
}
/* Kill the value in register REGNO for NREGS, and any other registers
unsigned int i, n;
if (vd->e[j].mode == VOIDmode)
continue;
- n = hard_regno_nregs[j][vd->e[j].mode];
+ n = hard_regno_nregs (j, vd->e[j].mode);
if (j + n > regno)
for (i = 0; i < n; ++i)
kill_value_one_regno (j + i, vd);
so that we mind the mode the register is in. */
static void
-kill_value (rtx x, struct value_data *vd)
+kill_value (const_rtx x, struct value_data *vd)
{
- rtx orig_rtx = x;
-
if (GET_CODE (x) == SUBREG)
{
- x = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
- GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
- if (x == NULL_RTX)
- x = SUBREG_REG (orig_rtx);
+ rtx tmp = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
+ GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
+ x = tmp ? tmp : SUBREG_REG (x);
}
if (REG_P (x))
- {
- unsigned int regno = REGNO (x);
- unsigned int n = hard_regno_nregs[regno][GET_MODE (x)];
-
- kill_value_regno (regno, n, vd);
- }
+ kill_value_regno (REGNO (x), REG_NREGS (x), vd);
}
/* Remember that REGNO is valid in MODE. */
static void
-set_value_regno (unsigned int regno, enum machine_mode mode,
+set_value_regno (unsigned int regno, machine_mode mode,
struct value_data *vd)
{
unsigned int nregs;
vd->e[regno].mode = mode;
- nregs = hard_regno_nregs[regno][mode];
+ nregs = hard_regno_nregs (regno, mode);
if (nregs > vd->max_value_regs)
vd->max_value_regs = nregs;
}
kill_clobbered_value (rtx x, const_rtx set, void *data)
{
struct value_data *const vd = (struct value_data *) data;
+
if (GET_CODE (set) == CLOBBER)
kill_value (x, vd);
}
struct kill_set_value_data *ksvd = (struct kill_set_value_data *) data;
if (rtx_equal_p (x, ksvd->ignore_set_reg))
return;
+
if (GET_CODE (set) != CLOBBER)
{
kill_value (x, ksvd->vd);
}
}
-/* Called through for_each_rtx. Kill any register used as the base of an
- auto-increment expression, and install that register as the root of its
- own value list. */
+/* Kill any register used in X as the base of an auto-increment expression,
+ and install that register as the root of its own value list. */
-static int
-kill_autoinc_value (rtx *px, void *data)
+static void
+kill_autoinc_value (rtx_insn *insn, struct value_data *vd)
{
- rtx x = *px;
- struct value_data *const vd = (struct value_data *) data;
-
- if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
{
- x = XEXP (x, 0);
- kill_value (x, vd);
- set_value_regno (REGNO (x), GET_MODE (x), vd);
- return -1;
+ const_rtx x = *iter;
+ if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
+ {
+ x = XEXP (x, 0);
+ kill_value (x, vd);
+ set_value_regno (REGNO (x), GET_MODE (x), vd);
+ iter.skip_subrtxes ();
+ }
}
-
- return 0;
}
/* Assert that SRC has been copied to DEST. Adjust the data structures
return;
/* If SRC and DEST overlap, don't record anything. */
- dn = hard_regno_nregs[dr][GET_MODE (dest)];
- sn = hard_regno_nregs[sr][GET_MODE (dest)];
+ dn = REG_NREGS (dest);
+ sn = REG_NREGS (src);
if ((dr > sr && dr < sr + sn)
|| (sr > dr && sr < dr + dn))
return;
we must not do the same for the high part.
Note we can still get low parts for the same mode combination through
a two-step copy involving differently sized hard regs.
- Assume hard regs fr* are 32 bits bits each, while r* are 64 bits each:
+ Assume hard regs fr* are 32 bits each, while r* are 64 bits each:
(set (reg:DI r0) (reg:DI fr0))
(set (reg:SI fr2) (reg:SI r0))
loads the low part of (reg:DI fr0) - i.e. fr1 - into fr2, while:
We can't properly represent the latter case in our tables, so don't
record anything then. */
- else if (sn < (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode]
- && (GET_MODE_SIZE (vd->e[sr].mode) > UNITS_PER_WORD
- ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
+ else if (sn < hard_regno_nregs (sr, vd->e[sr].mode)
+ && maybe_ne (subreg_lowpart_offset (GET_MODE (dest),
+ vd->e[sr].mode), 0U))
return;
/* If SRC had been assigned a mode narrower than the copy, we can't
link DEST into the chain, because not all of the pieces of the
copy came from oldest_regno. */
- else if (sn > (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode])
+ else if (sn > hard_regno_nregs (sr, vd->e[sr].mode))
return;
/* Link DR at the end of the value chain used by SR. */
continue;
vd->e[i].next_regno = dr;
-#ifdef ENABLE_CHECKING
- validate_value_data (vd);
-#endif
+ if (flag_checking)
+ validate_value_data (vd);
}
/* Return true if a mode change from ORIG to NEW is allowed for REGNO. */
static bool
-mode_change_ok (enum machine_mode orig_mode, enum machine_mode new_mode,
+mode_change_ok (machine_mode orig_mode, machine_mode new_mode,
unsigned int regno ATTRIBUTE_UNUSED)
{
- if (GET_MODE_SIZE (orig_mode) < GET_MODE_SIZE (new_mode))
+ if (partial_subreg_p (orig_mode, new_mode))
return false;
-#ifdef CANNOT_CHANGE_MODE_CLASS
- return !REG_CANNOT_CHANGE_MODE_P (regno, orig_mode, new_mode);
-#endif
-
- return true;
+ return REG_CAN_CHANGE_MODE_P (regno, orig_mode, new_mode);
}
/* Register REGNO was originally set in ORIG_MODE. It - or a copy of it -
Return a NEW_MODE rtx for REGNO if that's OK, otherwise return NULL_RTX. */
static rtx
-maybe_mode_change (enum machine_mode orig_mode, enum machine_mode copy_mode,
- enum machine_mode new_mode, unsigned int regno,
+maybe_mode_change (machine_mode orig_mode, machine_mode copy_mode,
+ machine_mode new_mode, unsigned int regno,
unsigned int copy_regno ATTRIBUTE_UNUSED)
{
- if (GET_MODE_SIZE (copy_mode) < GET_MODE_SIZE (orig_mode)
- && GET_MODE_SIZE (copy_mode) < GET_MODE_SIZE (new_mode))
+ if (partial_subreg_p (copy_mode, orig_mode)
+ && partial_subreg_p (copy_mode, new_mode))
+ return NULL_RTX;
+
+ /* Avoid creating multiple copies of the stack pointer. Some ports
+ assume there is one and only one stack pointer.
+
+ It's unclear if we need to do the same for other special registers. */
+ if (regno == STACK_POINTER_REGNUM)
return NULL_RTX;
if (orig_mode == new_mode)
- return gen_rtx_raw_REG (new_mode, regno);
+ return gen_raw_REG (new_mode, regno);
else if (mode_change_ok (orig_mode, new_mode, regno))
{
- int copy_nregs = hard_regno_nregs[copy_regno][copy_mode];
- int use_nregs = hard_regno_nregs[copy_regno][new_mode];
- int copy_offset
- = GET_MODE_SIZE (copy_mode) / copy_nregs * (copy_nregs - use_nregs);
- int offset
- = GET_MODE_SIZE (orig_mode) - GET_MODE_SIZE (new_mode) - copy_offset;
- int byteoffset = offset % UNITS_PER_WORD;
- int wordoffset = offset - byteoffset;
-
- offset = ((WORDS_BIG_ENDIAN ? wordoffset : 0)
- + (BYTES_BIG_ENDIAN ? byteoffset : 0));
+ int copy_nregs = hard_regno_nregs (copy_regno, copy_mode);
+ int use_nregs = hard_regno_nregs (copy_regno, new_mode);
+ poly_uint64 bytes_per_reg;
+ if (!can_div_trunc_p (GET_MODE_SIZE (copy_mode),
+ copy_nregs, &bytes_per_reg))
+ return NULL_RTX;
+ poly_uint64 copy_offset = bytes_per_reg * (copy_nregs - use_nregs);
+ poly_uint64 offset
+ = subreg_size_lowpart_offset (GET_MODE_SIZE (new_mode) + copy_offset,
+ GET_MODE_SIZE (orig_mode));
regno += subreg_regno_offset (regno, orig_mode, offset, new_mode);
- if (HARD_REGNO_MODE_OK (regno, new_mode))
- return gen_rtx_raw_REG (new_mode, regno);
+ if (targetm.hard_regno_mode_ok (regno, new_mode))
+ return gen_raw_REG (new_mode, regno);
}
return NULL_RTX;
}
find_oldest_value_reg (enum reg_class cl, rtx reg, struct value_data *vd)
{
unsigned int regno = REGNO (reg);
- enum machine_mode mode = GET_MODE (reg);
+ machine_mode mode = GET_MODE (reg);
unsigned int i;
+ gcc_assert (regno < FIRST_PSEUDO_REGISTER);
+
/* If we are accessing REG in some mode other that what we set it in,
make sure that the replacement is valid. In particular, consider
(set (reg:DI r11) (...))
(set (reg:SI r10) (...))
(set (...) (reg:DI r9))
Replacing r9 with r11 is invalid. */
- if (mode != vd->e[regno].mode)
- {
- if (hard_regno_nregs[regno][mode]
- > hard_regno_nregs[regno][vd->e[regno].mode])
- return NULL_RTX;
- }
+ if (mode != vd->e[regno].mode
+ && REG_NREGS (reg) > hard_regno_nregs (regno, vd->e[regno].mode))
+ return NULL_RTX;
for (i = vd->e[regno].oldest_regno; i != regno; i = vd->e[i].next_regno)
{
- enum machine_mode oldmode = vd->e[i].mode;
+ machine_mode oldmode = vd->e[i].mode;
rtx new_rtx;
if (!in_hard_reg_set_p (reg_class_contents[cl], mode, i))
in register class CL. Return true if successfully replaced. */
static bool
-replace_oldest_value_reg (rtx *loc, enum reg_class cl, rtx insn,
+replace_oldest_value_reg (rtx *loc, enum reg_class cl, rtx_insn *insn,
struct value_data *vd)
{
rtx new_rtx = find_oldest_value_reg (cl, *loc, vd);
- if (new_rtx)
+ if (new_rtx && (!DEBUG_INSN_P (insn) || !skip_debug_insn_p))
{
if (DEBUG_INSN_P (insn))
{
fprintf (dump_file, "debug_insn %u: queued replacing reg %u with %u\n",
INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));
- change = (struct queued_debug_insn_change *)
- pool_alloc (debug_insn_changes_pool);
+ change = queued_debug_insn_change_pool.allocate ();
change->next = vd->e[REGNO (new_rtx)].debug_insn_changes;
change->insn = insn;
change->loc = loc;
static bool
replace_oldest_value_addr (rtx *loc, enum reg_class cl,
- enum machine_mode mode, addr_space_t as,
- rtx insn, struct value_data *vd)
+ machine_mode mode, addr_space_t as,
+ rtx_insn *insn, struct value_data *vd)
{
rtx x = *loc;
RTX_CODE code = GET_CODE (x);
/* Similar to replace_oldest_value_reg, but X contains a memory. */
static bool
-replace_oldest_value_mem (rtx x, rtx insn, struct value_data *vd)
+replace_oldest_value_mem (rtx x, rtx_insn *insn, struct value_data *vd)
{
enum reg_class cl;
apply_debug_insn_changes (struct value_data *vd, unsigned int regno)
{
struct queued_debug_insn_change *change;
- rtx last_insn = vd->e[regno].debug_insn_changes->insn;
+ rtx_insn *last_insn = vd->e[regno].debug_insn_changes->insn;
for (change = vd->e[regno].debug_insn_changes;
change;
apply_change_group ();
}
-/* Called via for_each_rtx, for all used registers in a real
- insn apply DEBUG_INSN changes that change registers to the
- used register. */
+/* Called via note_uses, for all used registers in a real insn
+ apply DEBUG_INSN changes that change registers to the used
+ registers. */
-static int
-cprop_find_used_regs_1 (rtx *loc, void *data)
+static void
+cprop_find_used_regs (rtx *loc, void *data)
{
- if (REG_P (*loc))
+ struct value_data *const vd = (struct value_data *) data;
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, *loc, NONCONST)
{
- struct value_data *vd = (struct value_data *) data;
- if (vd->e[REGNO (*loc)].debug_insn_changes)
+ const_rtx x = *iter;
+ if (REG_P (x))
{
- apply_debug_insn_changes (vd, REGNO (*loc));
- free_debug_insn_changes (vd, REGNO (*loc));
+ unsigned int regno = REGNO (x);
+ if (vd->e[regno].debug_insn_changes)
+ {
+ apply_debug_insn_changes (vd, regno);
+ free_debug_insn_changes (vd, regno);
+ }
}
}
- return 0;
}
-/* Called via note_uses, for all used registers in a real insn
- apply DEBUG_INSN changes that change registers to the used
- registers. */
+/* Apply clobbers of INSN in PATTERN and C_I_F_U to value_data VD. */
static void
-cprop_find_used_regs (rtx *loc, void *vd)
+kill_clobbered_values (rtx_insn *insn, struct value_data *vd)
{
- for_each_rtx (loc, cprop_find_used_regs_1, vd);
+ note_stores (insn, kill_clobbered_value, vd);
}
/* Perform the forward copy propagation on basic block BB. */
copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
{
bool anything_changed = false;
- rtx insn;
+ rtx_insn *insn, *next;
- for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); ; insn = next)
{
- int n_ops, i, alt, predicated;
+ int n_ops, i, predicated;
bool is_asm, any_replacements;
rtx set;
- bool replaced[MAX_RECOG_OPERANDS];
+ rtx link;
bool changed = false;
struct kill_set_value_data ksvd;
+ next = NEXT_INSN (insn);
if (!NONDEBUG_INSN_P (insn))
{
- if (DEBUG_INSN_P (insn))
+ if (DEBUG_BIND_INSN_P (insn))
{
rtx loc = INSN_VAR_LOCATION_LOC (insn);
if (!VAR_LOC_UNKNOWN_P (loc))
}
set = single_set (insn);
- extract_insn (insn);
- if (! constrain_operands (1))
- fatal_insn_not_found (insn);
- preprocess_constraints ();
- alt = which_alternative;
+
+ /* Detect noop sets and remove them before processing side effects. */
+ if (set && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set)))
+ {
+ unsigned int regno = REGNO (SET_SRC (set));
+ rtx r1 = find_oldest_value_reg (REGNO_REG_CLASS (regno),
+ SET_DEST (set), vd);
+ rtx r2 = find_oldest_value_reg (REGNO_REG_CLASS (regno),
+ SET_SRC (set), vd);
+ if (rtx_equal_p (r1 ? r1 : SET_DEST (set), r2 ? r2 : SET_SRC (set)))
+ {
+ bool last = insn == BB_END (bb);
+ delete_insn (insn);
+ if (last)
+ break;
+ continue;
+ }
+ }
+
+ /* Detect obviously dead sets (via REG_UNUSED notes) and remove them. */
+ if (set
+ && !RTX_FRAME_RELATED_P (insn)
+ && !may_trap_p (set)
+ && find_reg_note (insn, REG_UNUSED, SET_DEST (set))
+ && !side_effects_p (SET_SRC (set))
+ && !side_effects_p (SET_DEST (set)))
+ {
+ bool last = insn == BB_END (bb);
+ delete_insn (insn);
+ if (last)
+ break;
+ continue;
+ }
+
+
+ extract_constrain_insn (insn);
+ preprocess_constraints (insn);
+ const operand_alternative *op_alt = which_op_alt ();
n_ops = recog_data.n_operands;
is_asm = asm_noperands (PATTERN (insn)) >= 0;
- /* Simplify the code below by rewriting things to reflect
- matching constraints. Also promote OP_OUT to OP_INOUT
+ /* Simplify the code below by promoting OP_OUT to OP_INOUT
in predicated instructions. */
predicated = GET_CODE (PATTERN (insn)) == COND_EXEC;
for (i = 0; i < n_ops; ++i)
{
- int matches = recog_op_alt[i][alt].matches;
- if (matches >= 0)
- recog_op_alt[i][alt].cl = recog_op_alt[matches][alt].cl;
- if (matches >= 0 || recog_op_alt[i][alt].matched >= 0
+ int matches = op_alt[i].matches;
+ if (matches >= 0 || op_alt[i].matched >= 0
|| (predicated && recog_data.operand_type[i] == OP_OUT))
recog_data.operand_type[i] = OP_INOUT;
}
/* For each earlyclobber operand, zap the value data. */
for (i = 0; i < n_ops; i++)
- if (recog_op_alt[i][alt].earlyclobber)
+ if (op_alt[i].earlyclobber)
kill_value (recog_data.operand[i], vd);
/* Within asms, a clobber cannot overlap inputs or outputs.
I wouldn't think this were true for regular insns, but
scan_rtx treats them like that... */
- note_stores (PATTERN (insn), kill_clobbered_value, vd);
+ kill_clobbered_values (insn, vd);
/* Kill all auto-incremented values. */
/* ??? REG_INC is useless, since stack pushes aren't done that way. */
- for_each_rtx (&PATTERN (insn), kill_autoinc_value, vd);
+ kill_autoinc_value (insn, vd);
/* Kill all early-clobbered operands. */
for (i = 0; i < n_ops; i++)
- if (recog_op_alt[i][alt].earlyclobber)
+ if (op_alt[i].earlyclobber)
kill_value (recog_data.operand[i], vd);
+      /* If we have dead sets in the insn, then we need to treat them
+	 as we would clobbers.  */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) == REG_UNUSED)
+ {
+ kill_value (XEXP (link, 0), vd);
+ /* Furthermore, if the insn looked like a single-set,
+ but the dead store kills the source value of that
+	     set, then we can no longer use the plain move
+ special case below. */
+ if (set
+ && reg_overlap_mentioned_p (XEXP (link, 0), SET_SRC (set)))
+ set = NULL;
+ }
+
+ /* We need to keep CFI info correct, and the same on all paths,
+ so we cannot normally replace the registers REG_CFA_REGISTER
+ refers to. Bail. */
+ if (REG_NOTE_KIND (link) == REG_CFA_REGISTER)
+ goto did_replacement;
+ }
+
/* Special-case plain move instructions, since we may well
be able to do the move from a different register class. */
if (set && REG_P (SET_SRC (set)))
{
rtx src = SET_SRC (set);
unsigned int regno = REGNO (src);
- enum machine_mode mode = GET_MODE (src);
+ machine_mode mode = GET_MODE (src);
unsigned int i;
rtx new_rtx;
set it in, make sure that the replacement is valid. */
if (mode != vd->e[regno].mode)
{
- if (hard_regno_nregs[regno][mode]
- > hard_regno_nregs[regno][vd->e[regno].mode])
+ if (REG_NREGS (src)
+ > hard_regno_nregs (regno, vd->e[regno].mode))
goto no_move_special_case;
/* And likewise, if we are narrowing on big endian the transformation
is also invalid. */
- if (hard_regno_nregs[regno][mode]
- < hard_regno_nregs[regno][vd->e[regno].mode]
- && (GET_MODE_SIZE (vd->e[regno].mode) > UNITS_PER_WORD
- ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
+ if (REG_NREGS (src) < hard_regno_nregs (regno, vd->e[regno].mode)
+ && maybe_ne (subreg_lowpart_offset (mode,
+ vd->e[regno].mode), 0U))
goto no_move_special_case;
}
register in the same class. */
if (REG_P (SET_DEST (set)))
{
- new_rtx = find_oldest_value_reg (REGNO_REG_CLASS (regno), src, vd);
+ new_rtx = find_oldest_value_reg (REGNO_REG_CLASS (regno),
+ src, vd);
+
if (new_rtx && validate_change (insn, &SET_SRC (set), new_rtx, 0))
{
if (dump_file)
}
/* We need to re-extract as validate_change clobbers
recog_data. */
- extract_insn (insn);
- if (! constrain_operands (1))
- fatal_insn_not_found (insn);
- preprocess_constraints ();
+ extract_constrain_insn (insn);
+ preprocess_constraints (insn);
}
/* Otherwise, try all valid registers and see if its valid. */
}
/* We need to re-extract as validate_change clobbers
recog_data. */
- extract_insn (insn);
- if (! constrain_operands (1))
- fatal_insn_not_found (insn);
- preprocess_constraints ();
+ extract_constrain_insn (insn);
+ preprocess_constraints (insn);
}
}
}
eldest live copy that's in an appropriate register class. */
for (i = 0; i < n_ops; i++)
{
- replaced[i] = false;
+ bool replaced = false;
/* Don't scan match_operand here, since we've no reg class
information to pass down. Any operands that we could
if (recog_data.operand_type[i] == OP_IN)
{
- if (recog_op_alt[i][alt].is_address)
- replaced[i]
+ if (op_alt[i].is_address)
+ replaced
= replace_oldest_value_addr (recog_data.operand_loc[i],
- recog_op_alt[i][alt].cl,
+ alternative_class (op_alt, i),
VOIDmode, ADDR_SPACE_GENERIC,
insn, vd);
else if (REG_P (recog_data.operand[i]))
- replaced[i]
+ replaced
= replace_oldest_value_reg (recog_data.operand_loc[i],
- recog_op_alt[i][alt].cl,
+ alternative_class (op_alt, i),
insn, vd);
else if (MEM_P (recog_data.operand[i]))
- replaced[i] = replace_oldest_value_mem (recog_data.operand[i],
- insn, vd);
+ replaced = replace_oldest_value_mem (recog_data.operand[i],
+ insn, vd);
}
else if (MEM_P (recog_data.operand[i]))
- replaced[i] = replace_oldest_value_mem (recog_data.operand[i],
- insn, vd);
+ replaced = replace_oldest_value_mem (recog_data.operand[i],
+ insn, vd);
/* If we performed any replacement, update match_dups. */
- if (replaced[i])
+ if (replaced)
{
int j;
rtx new_rtx;
{
if (! apply_change_group ())
{
- for (i = 0; i < n_ops; i++)
- if (replaced[i])
- {
- rtx old = *recog_data.operand_loc[i];
- recog_data.operand[i] = old;
- }
-
if (dump_file)
fprintf (dump_file,
"insn %u: reg replacements not verified\n",
DEBUG_INSNs can be applied. */
if (vd->n_debug_insn_changes)
note_uses (&PATTERN (insn), cprop_find_used_regs, vd);
+ df_insn_rescan (insn);
}
ksvd.vd = vd;
unsigned int set_nregs = 0;
unsigned int regno;
rtx exp;
- hard_reg_set_iterator hrsi;
for (exp = CALL_INSN_FUNCTION_USAGE (insn); exp; exp = XEXP (exp, 1))
{
copy_value (dest, SET_SRC (x), vd);
ksvd.ignore_set_reg = dest;
set_regno = REGNO (dest);
- set_nregs
- = hard_regno_nregs[set_regno][GET_MODE (dest)];
+ set_nregs = REG_NREGS (dest);
break;
}
}
- EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, regno, hrsi)
- if (regno < set_regno || regno >= set_regno + set_nregs)
+ function_abi callee_abi = insn_callee_abi (insn);
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (vd->e[regno].mode != VOIDmode
+ && callee_abi.clobbers_reg_p (vd->e[regno].mode, regno)
+ && (regno < set_regno || regno >= set_regno + set_nregs))
kill_value_regno (regno, 1, vd);
- }
-
- /* Notice stores. */
- note_stores (PATTERN (insn), kill_set_value, &ksvd);
-
- /* Notice copies. */
- if (set && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set)))
- copy_value (SET_DEST (set), SET_SRC (set), vd);
-
- if (insn == BB_END (bb))
- break;
- }
-
- return anything_changed;
-}
-
-/* Main entry point for the forward copy propagation optimization. */
-
-static unsigned int
-copyprop_hardreg_forward (void)
-{
- struct value_data *all_vd;
- basic_block bb;
- sbitmap visited;
- bool analyze_called = false;
- all_vd = XNEWVEC (struct value_data, last_basic_block);
-
- visited = sbitmap_alloc (last_basic_block);
- bitmap_clear (visited);
-
- if (MAY_HAVE_DEBUG_INSNS)
- debug_insn_changes_pool
- = create_alloc_pool ("debug insn changes pool",
- sizeof (struct queued_debug_insn_change), 256);
+ /* If SET was seen in CALL_INSN_FUNCTION_USAGE, and SET_SRC
+ of the SET isn't clobbered by CALLEE_ABI, but instead among
+ CLOBBERs on the CALL_INSN, we could wrongly assume the
+ value in it is still live. */
+ if (ksvd.ignore_set_reg)
+ kill_clobbered_values (insn, vd);
+ }
- FOR_EACH_BB (bb)
- {
- bitmap_set_bit (visited, bb->index);
-
- /* If a block has a single predecessor, that we've already
- processed, begin with the value data that was live at
- the end of the predecessor block. */
- /* ??? Ought to use more intelligent queuing of blocks. */
- if (single_pred_p (bb)
- && bitmap_bit_p (visited, single_pred (bb)->index)
- && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
+ bool copy_p = (set
+ && REG_P (SET_DEST (set))
+ && REG_P (SET_SRC (set)));
+ bool noop_p = (copy_p
+ && rtx_equal_p (SET_DEST (set), SET_SRC (set)));
+
+ /* If a noop move is using narrower mode than we have recorded,
+ we need to either remove the noop move, or kill_set_value. */
+ if (noop_p
+ && partial_subreg_p (GET_MODE (SET_DEST (set)),
+ vd->e[REGNO (SET_DEST (set))].mode))
{
- all_vd[bb->index] = all_vd[single_pred (bb)->index];
- if (all_vd[bb->index].n_debug_insn_changes)
+ if (noop_move_p (insn))
{
- unsigned int regno;
-
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- {
- if (all_vd[bb->index].e[regno].debug_insn_changes)
- {
- all_vd[bb->index].e[regno].debug_insn_changes = NULL;
- if (--all_vd[bb->index].n_debug_insn_changes == 0)
- break;
- }
- }
+ bool last = insn == BB_END (bb);
+ delete_insn (insn);
+ if (last)
+ break;
}
+ else
+ noop_p = false;
}
- else
- init_value_data (all_vd + bb->index);
- copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
- }
-
- if (MAY_HAVE_DEBUG_INSNS)
- {
- FOR_EACH_BB (bb)
- if (bitmap_bit_p (visited, bb->index)
- && all_vd[bb->index].n_debug_insn_changes)
- {
- unsigned int regno;
- bitmap live;
+ if (!noop_p)
+ {
+ /* Notice stores. */
+ note_stores (insn, kill_set_value, &ksvd);
- if (!analyze_called)
- {
- df_analyze ();
- analyze_called = true;
- }
- live = df_get_live_out (bb);
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (all_vd[bb->index].e[regno].debug_insn_changes)
- {
- if (REGNO_REG_SET_P (live, regno))
- apply_debug_insn_changes (all_vd + bb->index, regno);
- if (all_vd[bb->index].n_debug_insn_changes == 0)
- break;
- }
- }
+ /* Notice copies. */
+ if (copy_p)
+ {
+ df_insn_rescan (insn);
+ copy_value (SET_DEST (set), SET_SRC (set), vd);
+ }
+ }
- free_alloc_pool (debug_insn_changes_pool);
+ if (insn == BB_END (bb))
+ break;
}
- sbitmap_free (visited);
- free (all_vd);
- return 0;
+ return anything_changed;
}
/* Dump the value chain data to stderr. */
vd->e[i].next_regno);
}
-#ifdef ENABLE_CHECKING
+/* Do copyprop_hardreg_forward_1 for a single basic block BB.
+   DEBUG_INSNs are skipped since we do not want to involve the DF-related
+   machinery, the way it is handled in function pass_cprop_hardreg::execute.
+
+   NOTE: Currently it is only used for shrink-wrap.  Maybe extend it
+   to handle DEBUG_INSNs for other uses.  */
+
+void
+copyprop_hardreg_forward_bb_without_debug_insn (basic_block bb)
+{
+ struct value_data *vd;
+ vd = XNEWVEC (struct value_data, 1);
+ init_value_data (vd);
+
+ skip_debug_insn_p = true;
+ copyprop_hardreg_forward_1 (bb, vd);
+ free (vd);
+ skip_debug_insn_p = false;
+}
+
static void
validate_value_data (struct value_data *vd)
{
if (vd->e[i].mode == VOIDmode)
{
if (vd->e[i].next_regno != INVALID_REGNUM)
- internal_error ("validate_value_data: [%u] Bad next_regno for empty chain (%u)",
- i, vd->e[i].next_regno);
+ internal_error ("%qs: [%u] bad %<next_regno%> for empty chain (%u)",
+ __func__, i, vd->e[i].next_regno);
continue;
}
j = vd->e[j].next_regno)
{
if (TEST_HARD_REG_BIT (set, j))
- internal_error ("validate_value_data: Loop in regno chain (%u)",
- j);
+ internal_error ("%qs: loop in %<next_regno%> chain (%u)",
+ __func__, j);
if (vd->e[j].oldest_regno != i)
- internal_error ("validate_value_data: [%u] Bad oldest_regno (%u)",
- j, vd->e[j].oldest_regno);
+ internal_error ("%qs: [%u] bad %<oldest_regno%> (%u)",
+ __func__, j, vd->e[j].oldest_regno);
SET_HARD_REG_BIT (set, j);
}
&& (vd->e[i].mode != VOIDmode
|| vd->e[i].oldest_regno != i
|| vd->e[i].next_regno != INVALID_REGNUM))
- internal_error ("validate_value_data: [%u] Non-empty reg in chain (%s %u %i)",
- i, GET_MODE_NAME (vd->e[i].mode), vd->e[i].oldest_regno,
+ internal_error ("%qs: [%u] non-empty register in chain (%s %u %i)",
+ __func__, i,
+ GET_MODE_NAME (vd->e[i].mode), vd->e[i].oldest_regno,
vd->e[i].next_regno);
}
-#endif
+
\f
+namespace {
+
+const pass_data pass_data_cprop_hardreg =
+{
+ RTL_PASS, /* type */
+ "cprop_hardreg", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_CPROP_REGISTERS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish, /* todo_flags_finish */
+};
+
+class pass_cprop_hardreg : public rtl_opt_pass
+{
+public:
+ pass_cprop_hardreg (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_cprop_hardreg, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return (optimize > 0 && (flag_cprop_registers));
+ }
+
+ virtual unsigned int execute (function *);
+
+}; // class pass_cprop_hardreg
+
static bool
-gate_handle_cprop (void)
+cprop_hardreg_bb (basic_block bb, struct value_data *all_vd, sbitmap visited)
+{
+ bitmap_set_bit (visited, bb->index);
+
+ /* If a block has a single predecessor, that we've already
+ processed, begin with the value data that was live at
+ the end of the predecessor block. */
+ /* ??? Ought to use more intelligent queuing of blocks. */
+ if (single_pred_p (bb)
+ && bitmap_bit_p (visited, single_pred (bb)->index)
+ && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
+ {
+ all_vd[bb->index] = all_vd[single_pred (bb)->index];
+ if (all_vd[bb->index].n_debug_insn_changes)
+ {
+ unsigned int regno;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (all_vd[bb->index].e[regno].debug_insn_changes)
+ {
+ struct queued_debug_insn_change *cur;
+ for (cur = all_vd[bb->index].e[regno].debug_insn_changes;
+ cur; cur = cur->next)
+ --all_vd[bb->index].n_debug_insn_changes;
+ all_vd[bb->index].e[regno].debug_insn_changes = NULL;
+ if (all_vd[bb->index].n_debug_insn_changes == 0)
+ break;
+ }
+ }
+ }
+ }
+ else
+ init_value_data (all_vd + bb->index);
+
+ return copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
+}
+
+static void
+cprop_hardreg_debug (function *fun, struct value_data *all_vd)
{
- return (optimize > 0 && (flag_cprop_registers));
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, fun)
+ if (all_vd[bb->index].n_debug_insn_changes)
+ {
+ unsigned int regno;
+ bitmap live;
+
+ live = df_get_live_out (bb);
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (all_vd[bb->index].e[regno].debug_insn_changes)
+ {
+ if (REGNO_REG_SET_P (live, regno))
+ apply_debug_insn_changes (all_vd + bb->index, regno);
+
+ struct queued_debug_insn_change *cur;
+ for (cur = all_vd[bb->index].e[regno].debug_insn_changes;
+ cur; cur = cur->next)
+ --all_vd[bb->index].n_debug_insn_changes;
+ all_vd[bb->index].e[regno].debug_insn_changes = NULL;
+ if (all_vd[bb->index].n_debug_insn_changes == 0)
+ break;
+ }
+ }
+
+ queued_debug_insn_change_pool.release ();
+}
+
+unsigned int
+pass_cprop_hardreg::execute (function *fun)
+{
+ struct value_data *all_vd;
+ basic_block bb;
+
+ all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (fun));
+
+ auto_sbitmap visited (last_basic_block_for_fn (fun));
+ bitmap_clear (visited);
+
+ auto_vec<int> worklist;
+ bool any_debug_changes = false;
+
+ /* We need accurate notes. Earlier passes such as if-conversion may
+ leave notes in an inconsistent state. */
+ df_note_add_problem ();
+ df_analyze ();
+
+ /* It is tempting to set DF_LR_RUN_DCE, but DCE may choose to delete
+ an insn and this pass would not have visibility into the removal.
+ This pass would then potentially use the source of that
+ INSN for propagation purposes, generating invalid code.
+
+ So we just ask for updated notes and handle trivial deletions
+ within this pass where we can update this passes internal
+ data structures appropriately. */
+ df_set_flags (DF_DEFER_INSN_RESCAN);
+
+ FOR_EACH_BB_FN (bb, fun)
+ {
+ if (cprop_hardreg_bb (bb, all_vd, visited))
+ worklist.safe_push (bb->index);
+ if (all_vd[bb->index].n_debug_insn_changes)
+ any_debug_changes = true;
+ }
+
+ /* We must call df_analyze here unconditionally to ensure that the
+ REG_UNUSED and REG_DEAD notes are consistent with and without -g. */
+ df_analyze ();
+
+ if (MAY_HAVE_DEBUG_BIND_INSNS && any_debug_changes)
+ cprop_hardreg_debug (fun, all_vd);
+
+ /* Second pass if we've changed anything, only for the bbs where we have
+ changed anything though. */
+ if (!worklist.is_empty ())
+ {
+ unsigned int i;
+ int index;
+
+ any_debug_changes = false;
+ bitmap_clear (visited);
+ FOR_EACH_VEC_ELT (worklist, i, index)
+ {
+ bb = BASIC_BLOCK_FOR_FN (fun, index);
+ cprop_hardreg_bb (bb, all_vd, visited);
+ if (all_vd[bb->index].n_debug_insn_changes)
+ any_debug_changes = true;
+ }
+
+ df_analyze ();
+ if (MAY_HAVE_DEBUG_BIND_INSNS && any_debug_changes)
+ cprop_hardreg_debug (fun, all_vd);
+ }
+
+ free (all_vd);
+ return 0;
}
+} // anon namespace
-struct rtl_opt_pass pass_cprop_hardreg =
+rtl_opt_pass *
+make_pass_cprop_hardreg (gcc::context *ctxt)
{
- {
- RTL_PASS,
- "cprop_hardreg", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_handle_cprop, /* gate */
- copyprop_hardreg_forward, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_CPROP_REGISTERS, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_df_finish
- | TODO_verify_rtl_sharing /* todo_flags_finish */
- }
-};
+ return new pass_cprop_hardreg (ctxt);
+}