/* Common subexpression elimination for GNU compiler.
- Copyright (C) 1987-2017 Free Software Foundation, Inc.
+ Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "cfgcleanup.h"
#include "alias.h"
#include "toplev.h"
-#include "params.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
+#include "regs.h"
+#include "function-abi.h"
/* The basic idea of common subexpression elimination is to go
through the code, keeping a record of expressions that would
static void merge_equiv_classes (struct table_elt *, struct table_elt *);
static void invalidate (rtx, machine_mode);
static void remove_invalid_refs (unsigned int);
-static void remove_invalid_subreg_refs (unsigned int, unsigned int,
+static void remove_invalid_subreg_refs (unsigned int, poly_uint64,
machine_mode);
static void rehash_using_reg (rtx);
static void invalidate_memory (void);
-static void invalidate_for_call (void);
static rtx use_related_value (rtx, struct table_elt *);
static inline unsigned canon_hash (rtx, machine_mode);
static void cse_prescan_path (struct cse_basic_block_data *);
static void invalidate_from_clobbers (rtx_insn *);
static void invalidate_from_sets_and_clobbers (rtx_insn *);
-static rtx cse_process_notes (rtx, rtx, bool *);
static void cse_extended_basic_block (struct cse_basic_block_data *);
extern void dump_class (struct table_elt*);
static void get_cse_reg_info_1 (unsigned int regno);
}
return false;
}
-\f
+
+/* Remove from the hash table, or mark as invalid, all expressions whose
+ values could be altered by storing in register X. */
+
+static void
+invalidate_reg (rtx x)
+{
+ gcc_assert (GET_CODE (x) == REG);
+
+ /* If X is a register, dependencies on its contents are recorded
+ through the qty number mechanism. Just change the qty number of
+ the register, mark it as invalid for expressions that refer to it,
+ and remove it itself. */
+ unsigned int regno = REGNO (x);
+ unsigned int hash = HASH (x, GET_MODE (x));
+
+ /* Remove REGNO from any quantity list it might be on and indicate
+ that its value might have changed. If it is a pseudo, remove its
+ entry from the hash table.
+
+ For a hard register, we do the first two actions above for any
+ additional hard registers corresponding to X. Then, if any of these
+ registers are in the table, we must remove any REG entries that
+ overlap these registers. */
+
+ delete_reg_equiv (regno);
+ REG_TICK (regno)++;
+ /* -1 appears to mark "no subreg access recorded" for REGNO
+ (NOTE(review): confirm against cse.c's SUBREG_TICKED comment). */
+ SUBREG_TICKED (regno) = -1;
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ remove_pseudo_from_table (x, hash);
+ else
+ {
+ /* Becomes nonzero if any hard register in REGNO..END_REGNO(X)-1
+ currently has an entry in the hash table; accumulated over the
+ loop below. */
+ HOST_WIDE_INT in_table = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
+ unsigned int endregno = END_REGNO (x);
+ unsigned int rn;
+ struct table_elt *p, *next;
+
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
+
+ /* Apply the same bookkeeping to every additional hard register
+ occupied by X. */
+ for (rn = regno + 1; rn < endregno; rn++)
+ {
+ in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn);
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, rn);
+ delete_reg_equiv (rn);
+ REG_TICK (rn)++;
+ SUBREG_TICKED (rn) = -1;
+ }
+
+ /* At least one affected hard register is in the table: scan every
+ hash chain and drop each hard-REG entry whose register range
+ overlaps [REGNO, ENDREGNO). */
+ if (in_table)
+ for (hash = 0; hash < HASH_SIZE; hash++)
+ for (p = table[hash]; p; p = next)
+ {
+ next = p->next_same_hash;
+
+ if (!REG_P (p->exp) || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+ continue;
+
+ unsigned int tregno = REGNO (p->exp);
+ unsigned int tendregno = END_REGNO (p->exp);
+ if (tendregno > regno && tregno < endregno)
+ remove_from_table (p, hash);
+ }
+ }
+}
+
/* Remove from the hash table, or mark as invalid, all expressions whose
values could be altered by storing in X. X is a register, a subreg, or
a memory reference with nonvarying address (because, when a memory
switch (GET_CODE (x))
{
case REG:
- {
- /* If X is a register, dependencies on its contents are recorded
- through the qty number mechanism. Just change the qty number of
- the register, mark it as invalid for expressions that refer to it,
- and remove it itself. */
- unsigned int regno = REGNO (x);
- unsigned int hash = HASH (x, GET_MODE (x));
-
- /* Remove REGNO from any quantity list it might be on and indicate
- that its value might have changed. If it is a pseudo, remove its
- entry from the hash table.
-
- For a hard register, we do the first two actions above for any
- additional hard registers corresponding to X. Then, if any of these
- registers are in the table, we must remove any REG entries that
- overlap these registers. */
-
- delete_reg_equiv (regno);
- REG_TICK (regno)++;
- SUBREG_TICKED (regno) = -1;
-
- if (regno >= FIRST_PSEUDO_REGISTER)
- remove_pseudo_from_table (x, hash);
- else
- {
- HOST_WIDE_INT in_table
- = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
- unsigned int endregno = END_REGNO (x);
- unsigned int tregno, tendregno, rn;
- struct table_elt *p, *next;
-
- CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
-
- for (rn = regno + 1; rn < endregno; rn++)
- {
- in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn);
- CLEAR_HARD_REG_BIT (hard_regs_in_table, rn);
- delete_reg_equiv (rn);
- REG_TICK (rn)++;
- SUBREG_TICKED (rn) = -1;
- }
-
- if (in_table)
- for (hash = 0; hash < HASH_SIZE; hash++)
- for (p = table[hash]; p; p = next)
- {
- next = p->next_same_hash;
-
- if (!REG_P (p->exp)
- || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
- continue;
-
- tregno = REGNO (p->exp);
- tendregno = END_REGNO (p->exp);
- if (tendregno > regno && tregno < endregno)
- remove_from_table (p, hash);
- }
- }
- }
+ invalidate_reg (x);
return;
case SUBREG:
/* Likewise for a subreg with subreg_reg REGNO, subreg_byte OFFSET,
and mode MODE. */
static void
-remove_invalid_subreg_refs (unsigned int regno, unsigned int offset,
+remove_invalid_subreg_refs (unsigned int regno, poly_uint64 offset,
machine_mode mode)
{
unsigned int i;
struct table_elt *p, *next;
- unsigned int end = offset + (GET_MODE_SIZE (mode) - 1);
for (i = 0; i < HASH_SIZE; i++)
for (p = table[i]; p; p = next)
&& (GET_CODE (exp) != SUBREG
|| !REG_P (SUBREG_REG (exp))
|| REGNO (SUBREG_REG (exp)) != regno
- || (((SUBREG_BYTE (exp)
- + (GET_MODE_SIZE (GET_MODE (exp)) - 1)) >= offset)
- && SUBREG_BYTE (exp) <= end))
+ || ranges_maybe_overlap_p (SUBREG_BYTE (exp),
+ GET_MODE_SIZE (GET_MODE (exp)),
+ offset, GET_MODE_SIZE (mode)))
&& refers_to_regno_p (regno, p->exp))
remove_from_table (p, i);
}
}
\f
/* Remove from the hash table any expression that is a call-clobbered
- register. Also update their TICK values. */
+ register in INSN. Also update their TICK values. */
static void
-invalidate_for_call (void)
+invalidate_for_call (rtx_insn *insn)
{
- unsigned int regno, endregno;
- unsigned int i;
+ unsigned int regno;
unsigned hash;
struct table_elt *p, *next;
int in_table = 0;
hard_reg_set_iterator hrsi;
- /* Go through all the hard registers. For each that is clobbered in
- a CALL_INSN, remove the register from quantity chains and update
+ /* Go through all the hard registers. For each that might be clobbered
+ in call insn INSN, remove the register from quantity chains and update
reg_tick if defined. Also see if any of these registers is currently
- in the table. */
- EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, regno, hrsi)
+ in the table.
+
+ ??? We could be more precise for partially-clobbered registers,
+ and only invalidate values that actually occupy the clobbered part
+ of the registers. It doesn't seem worth the effort though, since
+ we shouldn't see this situation much before RA. Whatever choice
+ we make here has to be consistent with the table walk below,
+ so any change to this test will require a change there too. */
+ HARD_REG_SET callee_clobbers
+ = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
+ EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
{
delete_reg_equiv (regno);
if (REG_TICK (regno) >= 0)
|| REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
continue;
- regno = REGNO (p->exp);
- endregno = END_REGNO (p->exp);
-
- for (i = regno; i < endregno; i++)
- if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
- {
- remove_from_table (p, hash);
- break;
- }
+ /* This must use the same test as above rather than the
+ more accurate clobbers_reg_p. */
+ if (overlaps_hard_reg_set_p (callee_clobbers, GET_MODE (p->exp),
+ REGNO (p->exp)))
+ remove_from_table (p, hash);
}
}
\f
{
hash += (((unsigned int) SUBREG << 7)
+ REGNO (SUBREG_REG (x))
- + (SUBREG_BYTE (x) / UNITS_PER_WORD));
+ + (constant_lower_bound (SUBREG_BYTE (x))
+ / UNITS_PER_WORD));
return hash;
}
break;
hash += CONST_WIDE_INT_ELT (x, i);
return hash;
+ case CONST_POLY_INT:
+ {
+ inchash::hash h;
+ h.add_int (hash);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
+ return h.end ();
+ }
+
case CONST_DOUBLE:
/* This is like the general case, except that it only counts
the integers representing the constant. */
int units;
rtx elt;
- units = CONST_VECTOR_NUNITS (x);
+ units = const_vector_encoded_nelts (x);
for (i = 0; i < units; ++i)
{
- elt = CONST_VECTOR_ELT (x, i);
+ elt = CONST_VECTOR_ENCODED_ELT (x, i);
hash += hash_rtx_cb (elt, GET_MODE (elt),
do_not_record_p, hash_arg_in_memory_p,
have_reg_qty, cb);
hash += (unsigned int) XINT (x, i);
break;
+ case 'p':
+ hash += constant_lower_bound (SUBREG_BYTE (x));
+ break;
+
case '0': case 't':
/* Unused. */
break;
return 0;
break;
+ case 'p':
+ if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
+ return 0;
+ break;
+
case '0':
case 't':
break;
int i;
rtx new_rtx = 0;
int changed = 0;
+ poly_int64 xval;
/* Operands of X. */
/* Workaround -Wmaybe-uninitialized false positive during
case MINUS:
/* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
If so, produce (PLUS Z C2-C). */
- if (const_arg1 != 0 && CONST_INT_P (const_arg1))
+ if (const_arg1 != 0 && poly_int_rtx_p (const_arg1, &xval))
{
rtx y = lookup_as_function (XEXP (x, 0), PLUS);
- if (y && CONST_INT_P (XEXP (y, 1)))
- return fold_rtx (plus_constant (mode, copy_rtx (y),
- -INTVAL (const_arg1)),
+ if (y && poly_int_rtx_p (XEXP (y, 1)))
+ return fold_rtx (plus_constant (mode, copy_rtx (y), -xval),
NULL);
}
enum rtx_code associate_code;
if (is_shift
- && (INTVAL (const_arg1) >= GET_MODE_PRECISION (mode)
+ && (INTVAL (const_arg1) >= GET_MODE_UNIT_PRECISION (mode)
|| INTVAL (const_arg1) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- canon_const_arg1 = GEN_INT (INTVAL (const_arg1)
- & (GET_MODE_BITSIZE (mode)
- - 1));
+ canon_const_arg1 = gen_int_shift_amount
+ (mode, (INTVAL (const_arg1)
+ & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
else
break;
}
break;
if (is_shift
- && (INTVAL (inner_const) >= GET_MODE_PRECISION (mode)
+ && (INTVAL (inner_const) >= GET_MODE_UNIT_PRECISION (mode)
|| INTVAL (inner_const) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- inner_const = GEN_INT (INTVAL (inner_const)
- & (GET_MODE_BITSIZE (mode) - 1));
+ inner_const = gen_int_shift_amount
+ (mode, (INTVAL (inner_const)
+ & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
else
break;
}
if (is_shift
&& CONST_INT_P (new_const)
- && INTVAL (new_const) >= GET_MODE_PRECISION (mode))
+ && INTVAL (new_const) >= GET_MODE_UNIT_PRECISION (mode))
{
/* As an exception, we can turn an ASHIFTRT of this
form into a shift of the number of bits - 1. */
if (code == ASHIFTRT)
- new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
+ new_const = gen_int_shift_amount
+ (mode, GET_MODE_UNIT_BITSIZE (mode) - 1);
else if (!side_effects_p (XEXP (y, 0)))
return CONST0_RTX (mode);
else
/* See if we previously assigned a constant value to this SUBREG. */
if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0
|| (new_rtx = lookup_as_function (x, CONST_WIDE_INT)) != 0
+ || (NUM_POLY_INT_COEFFS > 1
+ && (new_rtx = lookup_as_function (x, CONST_POLY_INT)) != 0)
|| (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0
|| (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0)
return new_rtx;
/* If we didn't and if doing so makes sense, see if we previously
assigned a constant value to the enclosing word mode SUBREG. */
- if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode)
- && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode))
+ if (known_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD)
+ && known_lt (UNITS_PER_WORD, GET_MODE_SIZE (imode)))
{
- int byte = SUBREG_BYTE (x) - subreg_lowpart_offset (mode, word_mode);
- if (byte >= 0 && (byte % UNITS_PER_WORD) == 0)
+ poly_int64 byte = (SUBREG_BYTE (x)
+ - subreg_lowpart_offset (mode, word_mode));
+ if (known_ge (byte, 0) && multiple_p (byte, UNITS_PER_WORD))
{
rtx y = gen_rtx_SUBREG (word_mode, SUBREG_REG (x), byte);
new_rtx = lookup_as_function (y, CONST_INT);
if we test MODE instead, we can get an infinite recursion
alternating between two modes each wider than MODE. */
- if (code == NE && GET_CODE (op0) == SUBREG
- && subreg_lowpart_p (op0)
- && (GET_MODE_SIZE (GET_MODE (op0))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ if (code == NE
+ && partial_subreg_p (op0)
+ && subreg_lowpart_p (op0))
{
machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
rtx tem = record_jump_cond_subreg (inner_mode, op1);
reversed_nonequality);
}
- if (code == NE && GET_CODE (op1) == SUBREG
- && subreg_lowpart_p (op1)
- && (GET_MODE_SIZE (GET_MODE (op1))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+ if (code == NE
+ && partial_subreg_p (op1)
+ && subreg_lowpart_p (op1))
{
machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
rtx tem = record_jump_cond_subreg (inner_mode, op0);
&& (reg_mentioned_p (dest, XEXP (note, 0))
|| rtx_equal_p (src, XEXP (note, 0))))
remove_note (insn, note);
+
+ /* If INSN has a REG_ARGS_SIZE note, move it to PREV. */
+ note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
+ if (note != 0)
+ {
+ remove_note (insn, note);
+ gcc_assert (!find_reg_note (prev, REG_ARGS_SIZE, NULL_RTX));
+ set_unique_reg_note (prev, REG_ARGS_SIZE, XEXP (note, 0));
+ }
}
}
}
&& CONST_INT_P (XEXP (SET_DEST (sets[0].rtl), 2)))
{
rtx dest_reg = XEXP (SET_DEST (sets[0].rtl), 0);
+ /* This is the mode of XEXP (tem, 0) as well. */
+ scalar_int_mode dest_mode
+ = as_a <scalar_int_mode> (GET_MODE (dest_reg));
rtx width = XEXP (SET_DEST (sets[0].rtl), 1);
rtx pos = XEXP (SET_DEST (sets[0].rtl), 2);
HOST_WIDE_INT val = INTVAL (XEXP (tem, 0));
HOST_WIDE_INT mask;
unsigned int shift;
if (BITS_BIG_ENDIAN)
- shift = GET_MODE_PRECISION (GET_MODE (dest_reg))
- - INTVAL (pos) - INTVAL (width);
+ shift = (GET_MODE_PRECISION (dest_mode)
+ - INTVAL (pos) - INTVAL (width));
else
shift = INTVAL (pos);
if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
for (i = 0; i < n_sets; i++)
{
bool repeat = false;
- bool mem_noop_insn = false;
+ bool noop_insn = false;
rtx src, dest;
rtx src_folded;
struct table_elt *elt = 0, *p;
/* Compute SRC's hash code, and also notice if it
should not be recorded at all. In that case,
- prevent any further processing of this assignment. */
- do_not_record = 0;
+ prevent any further processing of this assignment.
+
+ We set DO_NOT_RECORD if the destination has a REG_UNUSED note.
+ This avoids getting the source register into the tables, where it
+ may be invalidated later (via REG_QTY), then trigger an ICE upon
+ re-insertion.
+
+ This is only a problem in multi-set insns. If it were a single
+ set the dead copy would have been removed. If the RHS were anything
+ but a simple REG, then we won't call insert_regs and thus there's
+ no potential for triggering the ICE. */
+ do_not_record = (REG_P (dest)
+ && REG_P (src)
+ && find_reg_note (insn, REG_UNUSED, dest));
hash_arg_in_memory = 0;
sets[i].src = src;
value. */
if (flag_expensive_optimizations && ! src_related
+ && is_a <scalar_int_mode> (mode, &int_mode)
&& GET_CODE (src) == AND && CONST_INT_P (XEXP (src, 1))
- && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ && GET_MODE_SIZE (int_mode) < UNITS_PER_WORD)
{
- machine_mode tmode;
+ opt_scalar_int_mode tmode_iter;
rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));
- FOR_EACH_WIDER_MODE (tmode, mode)
+ FOR_EACH_WIDER_MODE (tmode_iter, int_mode)
{
+ scalar_int_mode tmode = tmode_iter.require ();
if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
break;
if (REG_P (larger_elt->exp))
{
src_related
- = gen_lowpart (mode, larger_elt->exp);
+ = gen_lowpart (int_mode, larger_elt->exp);
break;
}
rtx_code extend_op;
if (flag_expensive_optimizations && src_related == 0
&& MEM_P (src) && ! do_not_record
- && (extend_op = load_extend_op (mode)) != UNKNOWN)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && (extend_op = load_extend_op (int_mode)) != UNKNOWN)
{
struct rtx_def memory_extend_buf;
rtx memory_extend_rtx = &memory_extend_buf;
- machine_mode tmode;
/* Set what we are trying to extend and the operation it might
have been extended with. */
PUT_CODE (memory_extend_rtx, extend_op);
XEXP (memory_extend_rtx, 0) = src;
- FOR_EACH_WIDER_MODE (tmode, mode)
+ opt_scalar_int_mode tmode_iter;
+ FOR_EACH_WIDER_MODE (tmode_iter, int_mode)
{
struct table_elt *larger_elt;
+ scalar_int_mode tmode = tmode_iter.require ();
if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
break;
larger_elt; larger_elt = larger_elt->next_same_value)
if (REG_P (larger_elt->exp))
{
- src_related = gen_lowpart (mode, larger_elt->exp);
+ src_related = gen_lowpart (int_mode, larger_elt->exp);
break;
}
&& ! (src != 0
&& GET_CODE (src) == SUBREG
&& GET_MODE (src) == GET_MODE (p->exp)
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
+ && partial_subreg_p (GET_MODE (SUBREG_REG (src)),
+ GET_MODE (SUBREG_REG (p->exp)))))
continue;
if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
to prefer it. Copy it to src_related. The code below will
then give it a negative cost. */
if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
- src_related = dest;
+ src_related = p->exp;
}
/* Find the cheapest valid equivalent, trying all the available
&& ! (src != 0
&& GET_CODE (src) == SUBREG
&& GET_MODE (src) == GET_MODE (elt->exp)
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
+ && partial_subreg_p (GET_MODE (SUBREG_REG (src)),
+ GET_MODE (SUBREG_REG (elt->exp)))))
{
elt = elt->next_same_value;
continue;
src_elt_cost = MAX_COST;
}
- /* Avoid creation of overlapping memory moves. */
- if (MEM_P (trial) && MEM_P (dest) && !rtx_equal_p (trial, dest))
- {
- rtx src, dest;
-
- /* BLKmode moves are not handled by cse anyway. */
- if (GET_MODE (trial) == BLKmode)
- break;
-
- src = canon_rtx (trial);
- dest = canon_rtx (SET_DEST (sets[i].rtl));
-
- if (!MEM_P (src) || !MEM_P (dest)
- || !nonoverlapping_memrefs_p (src, dest, false))
- break;
- }
-
/* Try to optimize
(set (reg:M N) (const_int A))
(set (reg:M2 O) (const_int B))
&& CONST_INT_P (XEXP (SET_DEST (sets[i].rtl), 1))
&& CONST_INT_P (XEXP (SET_DEST (sets[i].rtl), 2))
&& REG_P (XEXP (SET_DEST (sets[i].rtl), 0))
- && (GET_MODE_PRECISION (GET_MODE (SET_DEST (sets[i].rtl)))
- >= INTVAL (XEXP (SET_DEST (sets[i].rtl), 1)))
+ && (known_ge
+ (GET_MODE_PRECISION (GET_MODE (SET_DEST (sets[i].rtl))),
+ INTVAL (XEXP (SET_DEST (sets[i].rtl), 1))))
&& ((unsigned) INTVAL (XEXP (SET_DEST (sets[i].rtl), 1))
+ (unsigned) INTVAL (XEXP (SET_DEST (sets[i].rtl), 2))
<= HOST_BITS_PER_WIDE_INT))
HOST_WIDE_INT val = INTVAL (dest_cst);
HOST_WIDE_INT mask;
unsigned int shift;
+ /* This is the mode of DEST_CST as well. */
+ scalar_int_mode dest_mode
+ = as_a <scalar_int_mode> (GET_MODE (dest_reg));
if (BITS_BIG_ENDIAN)
- shift = GET_MODE_PRECISION (GET_MODE (dest_reg))
+ shift = GET_MODE_PRECISION (dest_mode)
- INTVAL (pos) - INTVAL (width);
else
shift = INTVAL (pos);
mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
val &= ~(mask << shift);
val |= (INTVAL (trial) & mask) << shift;
- val = trunc_int_for_mode (val, GET_MODE (dest_reg));
+ val = trunc_int_for_mode (val, dest_mode);
validate_unshare_change (insn, &SET_DEST (sets[i].rtl),
dest_reg, 1);
validate_unshare_change (insn, &SET_SRC (sets[i].rtl),
}
/* Similarly, lots of targets don't allow no-op
- (set (mem x) (mem x)) moves. */
+ (set (mem x) (mem x)) moves. Even (set (reg x) (reg x))
+ might be impossible for certain registers (like CC registers). */
else if (n_sets == 1
- && MEM_P (trial)
- && MEM_P (dest)
+ && !CALL_P (insn)
+ && (MEM_P (trial) || REG_P (trial))
&& rtx_equal_p (trial, dest)
&& !side_effects_p (dest)
&& (cfun->can_delete_dead_exceptions
- || insn_nothrow_p (insn)))
+ || insn_nothrow_p (insn))
+	     /* We can only remove the later store if the earlier one aliases
+		at least all accesses the later one does. */
+ && (!MEM_P (trial)
+ || ((MEM_ALIAS_SET (dest) == MEM_ALIAS_SET (trial)
+ || alias_set_subset_of (MEM_ALIAS_SET (dest),
+ MEM_ALIAS_SET (trial)))
+ && (!MEM_EXPR (trial)
+ || refs_same_for_tbaa_p (MEM_EXPR (trial),
+ MEM_EXPR (dest))))))
{
SET_SRC (sets[i].rtl) = trial;
- mem_noop_insn = true;
+ noop_insn = true;
break;
}
/* Do nothing for this case. */
;
+ /* Do not replace anything with a MEM, except the replacement
+ is a no-op. This allows this loop to terminate. */
+ else if (MEM_P (trial) && !rtx_equal_p (trial, SET_SRC(sets[i].rtl)))
+ /* Do nothing for this case. */
+ ;
+
/* Look for a substitution that makes a valid insn. */
else if (validate_unshare_change (insn, &SET_SRC (sets[i].rtl),
trial, 0))
sets[i].rtl = 0;
}
- /* Similarly for no-op MEM moves. */
- else if (mem_noop_insn)
+ /* Similarly for no-op moves. */
+ else if (noop_insn)
{
if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
cse_cfg_altered = true;
if (GET_CODE (XEXP (tem, 0)) == USE
&& MEM_P (XEXP (XEXP (tem, 0), 0)))
invalidate (XEXP (XEXP (tem, 0), 0), VOIDmode);
- invalidate_for_call ();
+ invalidate_for_call (insn);
}
/* Now invalidate everything set by this instruction.
already entered SRC and DEST of the SET in the table. */
if (GET_CODE (dest) == SUBREG
- && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
- / UNITS_PER_WORD)
- == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
- && (GET_MODE_SIZE (GET_MODE (dest))
- >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
+ && (known_equal_after_align_down
+ (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1,
+ GET_MODE_SIZE (GET_MODE (dest)) - 1,
+ UNITS_PER_WORD))
+ && !partial_subreg_p (dest)
&& sets[i].src_elt != 0)
{
machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
rtx new_src = 0;
unsigned src_hash;
struct table_elt *src_elt;
- int byte = 0;
/* Ignore invalid entries. */
if (!REG_P (elt->exp)
new_src = elt->exp;
else
{
- /* Calculate big endian correction for the SUBREG_BYTE.
- We have already checked that M1 (GET_MODE (dest))
- is not narrower than M2 (new_mode). */
- if (BYTES_BIG_ENDIAN)
- byte = (GET_MODE_SIZE (GET_MODE (dest))
- - GET_MODE_SIZE (new_mode));
-
+ poly_uint64 byte
+ = subreg_lowpart_offset (new_mode, GET_MODE (dest));
new_src = simplify_gen_subreg (new_mode, elt->exp,
GET_MODE (dest), byte);
}
if (CALL_P (insn))
{
for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
- if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
- invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
+ {
+ rtx temx = XEXP (tem, 0);
+ if (GET_CODE (temx) == CLOBBER)
+ invalidate (SET_DEST (temx), VOIDmode);
+ }
}
/* Ensure we invalidate the destination register of a CALL insn.
}
}
\f
-/* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
- and replace any registers in them with either an equivalent constant
- or the canonical form of the register. If we are inside an address,
- only do this if the address remains valid.
+static rtx cse_process_note (rtx);
- OBJECT is 0 except when within a MEM in which case it is the MEM.
+/* A simplify_replace_fn_rtx callback for cse_process_note. Process X,
+ part of the REG_NOTES of an insn. Replace any registers with either
+ an equivalent constant or the canonical form of the register.
+ Only replace addresses if the containing MEM remains valid.
- Return the replacement for X. */
+ Return the replacement for X, or null if it should be simplified
+ recursively. */
static rtx
-cse_process_notes_1 (rtx x, rtx object, bool *changed)
+cse_process_note_1 (rtx x, const_rtx, void *)
{
- enum rtx_code code = GET_CODE (x);
- const char *fmt = GET_RTX_FORMAT (code);
- int i;
-
- switch (code)
+ if (MEM_P (x))
{
- case CONST:
- case SYMBOL_REF:
- case LABEL_REF:
- CASE_CONST_ANY:
- case PC:
- case CC0:
- case LO_SUM:
- return x;
-
- case MEM:
- validate_change (x, &XEXP (x, 0),
- cse_process_notes (XEXP (x, 0), x, changed), 0);
- return x;
-
- case EXPR_LIST:
- if (REG_NOTE_KIND (x) == REG_EQUAL)
- XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX, changed);
- /* Fall through. */
-
- case INSN_LIST:
- case INT_LIST:
- if (XEXP (x, 1))
- XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX, changed);
+ validate_change (x, &XEXP (x, 0), cse_process_note (XEXP (x, 0)), false);
return x;
+ }
- case SIGN_EXTEND:
- case ZERO_EXTEND:
- case SUBREG:
- {
- rtx new_rtx = cse_process_notes (XEXP (x, 0), object, changed);
- /* We don't substitute VOIDmode constants into these rtx,
- since they would impede folding. */
- if (GET_MODE (new_rtx) != VOIDmode)
- validate_change (object, &XEXP (x, 0), new_rtx, 0);
- return x;
- }
-
- case UNSIGNED_FLOAT:
- {
- rtx new_rtx = cse_process_notes (XEXP (x, 0), object, changed);
- /* We don't substitute negative VOIDmode constants into these rtx,
- since they would impede folding. */
- if (GET_MODE (new_rtx) != VOIDmode
- || (CONST_INT_P (new_rtx) && INTVAL (new_rtx) >= 0)
- || (CONST_DOUBLE_P (new_rtx) && CONST_DOUBLE_HIGH (new_rtx) >= 0))
- validate_change (object, &XEXP (x, 0), new_rtx, 0);
- return x;
- }
-
- case REG:
- i = REG_QTY (REGNO (x));
+ if (REG_P (x))
+ {
+ int i = REG_QTY (REGNO (x));
/* Return a constant or a constant register. */
if (REGNO_QTY_VALID_P (REGNO (x)))
/* Otherwise, canonicalize this register. */
return canon_reg (x, NULL);
-
- default:
- break;
}
- for (i = 0; i < GET_RTX_LENGTH (code); i++)
- if (fmt[i] == 'e')
- validate_change (object, &XEXP (x, i),
- cse_process_notes (XEXP (x, i), object, changed), 0);
-
- return x;
+ return NULL_RTX;
}
+/* Process X, part of the REG_NOTES of an insn. Replace any registers in it
+ with either an equivalent constant or the canonical form of the register.
+ Only replace addresses if the containing MEM remains valid. */
+
static rtx
-cse_process_notes (rtx x, rtx object, bool *changed)
+cse_process_note (rtx x)
{
- rtx new_rtx = cse_process_notes_1 (x, object, changed);
- if (new_rtx != x)
- *changed = true;
- return new_rtx;
+ return simplify_replace_fn_rtx (x, NULL_RTX, cse_process_note_1, NULL);
}
\f
if (follow_jumps)
{
bb = data->path[path_size - 1].bb;
- while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH))
+ while (bb && path_size < param_max_cse_path_length)
{
if (single_succ_p (bb))
e = single_succ_edge (bb);
fprintf (f, ";; Following path with %d sets: ", nsets);
for (path_entry = 0; path_entry < data->path_size; path_entry++)
fprintf (f, "%d ", (data->path[path_entry].bb)->index);
- fputc ('\n', dump_file);
+ fputc ('\n', f);
fflush (f);
}
FIXME: This is a real kludge and needs to be done some other
way. */
if (NONDEBUG_INSN_P (insn)
- && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+ && num_insns++ > param_max_cse_insns)
{
flush_hash_table ();
num_insns = 0;
{
/* Process notes first so we have all notes in canonical forms
when looking for duplicate operations. */
- if (REG_NOTES (insn))
- {
- bool changed = false;
- REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn),
- NULL_RTX, &changed);
- if (changed)
- df_notes_rescan (insn);
- }
+ bool changed = false;
+ for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_EQUAL)
+ {
+ rtx newval = cse_process_note (XEXP (note, 0));
+ if (newval != XEXP (note, 0))
+ {
+ XEXP (note, 0) = newval;
+ changed = true;
+ }
+ }
+ if (changed)
+ df_notes_rescan (insn);
cse_insn (insn);
init_cse_reg_info (nregs);
ebb_data.path = XNEWVEC (struct branch_path,
- PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+ param_max_cse_path_length);
cse_cfg_altered = false;
cse_jumps_altered = false;
{
rtx_insn *next;
+ if (DEBUG_MARKER_INSN_P (insn))
+ return true;
+
for (next = NEXT_INSN (insn); next; next = NEXT_INSN (next))
if (NOTE_P (next))
continue;
else if (!DEBUG_INSN_P (next))
return true;
+ /* If we find an inspection point, such as a debug begin stmt,
+ we want to keep the earlier debug insn. */
+ else if (DEBUG_MARKER_INSN_P (next))
+ return true;
else if (INSN_VAR_LOCATION_DECL (insn) == INSN_VAR_LOCATION_DECL (next))
return false;
timevar_push (TV_DELETE_TRIVIALLY_DEAD);
/* First count the number of times each register is used. */
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
counts = XCNEWVEC (int, nreg * 3);
for (insn = insns; insn; insn = NEXT_INSN (insn))
- if (DEBUG_INSN_P (insn))
+ if (DEBUG_BIND_INSN_P (insn))
count_reg_usage (INSN_VAR_LOCATION_LOC (insn), counts + nreg,
NULL_RTX, 1);
else if (INSN_P (insn))
{
count_reg_usage (insn, counts, NULL_RTX, 1);
- note_stores (PATTERN (insn), count_stores, counts + nreg * 2);
+ note_stores (insn, count_stores, counts + nreg * 2);
}
/* If there can be debug insns, COUNTS are 3 consecutive arrays.
First one counts how many times each pseudo is used outside
if (! live_insn && dbg_cnt (delete_trivial_dead))
{
if (DEBUG_INSN_P (insn))
- count_reg_usage (INSN_VAR_LOCATION_LOC (insn), counts + nreg,
- NULL_RTX, -1);
+ {
+ if (DEBUG_BIND_INSN_P (insn))
+ count_reg_usage (INSN_VAR_LOCATION_LOC (insn), counts + nreg,
+ NULL_RTX, -1);
+ }
else
{
rtx set;
- if (MAY_HAVE_DEBUG_INSNS
+ if (MAY_HAVE_DEBUG_BIND_INSNS
&& (set = single_set (insn)) != NULL_RTX
&& is_dead_reg (SET_DEST (set), counts)
/* Used at least once in some DEBUG_INSN. */
}
}
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
- if (DEBUG_INSN_P (insn))
+ if (DEBUG_BIND_INSN_P (insn))
{
/* If this debug insn references a dead register that wasn't replaced
with an DEBUG_EXPR, reset the DEBUG_INSN. */
cse_cfg_altered |= cleanup_cfg (CLEANUP_CFG_CHANGED);
timevar_pop (TV_JUMP);
}
- else if (tem == 1)
+ else if (tem == 1 || cse_cfg_altered)
cse_cfg_altered |= cleanup_cfg (0);
cse_not_expected = 1;
cse_cfg_altered |= cleanup_cfg (CLEANUP_CFG_CHANGED);
timevar_pop (TV_JUMP);
}
- else if (tem == 1)
+ else if (tem == 1 || cse_cfg_altered)
cse_cfg_altered |= cleanup_cfg (0);
flag_cse_follow_jumps = save_cfj;