/* Emit RTL for the GCC expander.
- Copyright (C) 1987-2017 Free Software Foundation, Inc.
+ Copyright (C) 1987-2021 Free Software Foundation, Inc.
This file is part of GCC.
#include "tree-eh.h"
#include "explow.h"
#include "expr.h"
-#include "params.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "stor-layout.h"
#include "opts.h"
#include "predict.h"
+#include "rtx-vector-builder.h"
+#include "gimple.h"
+#include "gimple-ssa.h"
+#include "gimplify.h"
struct target_rtl default_target_rtl;
#if SWITCHABLE_TARGET
rtx pc_rtx;
rtx ret_rtx;
rtx simple_return_rtx;
-rtx cc0_rtx;
/* Marker used for denoting an INSN, which should never be accessed (i.e.,
this pointer should normally never be dereferenced), but is required to be
static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;
+struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
+{
+ typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;
+
+ static hashval_t hash (rtx x);
+ static bool equal (rtx x, const compare_type &y);
+};
+
+static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;
+
/* A hash table storing register attribute structures. */
struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
{
#endif
static rtx lookup_const_double (rtx);
static rtx lookup_const_fixed (rtx);
-static reg_attrs *get_reg_attrs (tree, int);
static rtx gen_const_vector (machine_mode, int);
static void copy_rtx_if_shared_1 (rtx *orig);
}
#endif
+/* Returns a hash code for CONST_POLY_INT X. */
+
+hashval_t
+const_poly_int_hasher::hash (rtx x)
+{
+ inchash::hash h;
+ h.add_int (GET_MODE (x));
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
+ return h.end ();
+}
+
+/* Returns nonzero if CONST_POLY_INT X is an rtx representation of Y. */
+
+bool
+const_poly_int_hasher::equal (rtx x, const compare_type &y)
+{
+ if (GET_MODE (x) != y.first)
+ return false;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
+ return false;
+ return true;
+}
+
/* Returns a hash code for X (which is really a CONST_DOUBLE). */
hashval_t
const_double_hasher::hash (rtx x)
/* Return true if the given memory attributes are equal. */
bool
-mem_attrs_eq_p (const struct mem_attrs *p, const struct mem_attrs *q)
+mem_attrs_eq_p (const class mem_attrs *p, const class mem_attrs *q)
{
if (p == q)
return true;
return false;
return (p->alias == q->alias
&& p->offset_known_p == q->offset_known_p
- && (!p->offset_known_p || p->offset == q->offset)
+ && (!p->offset_known_p || known_eq (p->offset, q->offset))
&& p->size_known_p == q->size_known_p
- && (!p->size_known_p || p->size == q->size)
+ && (!p->size_known_p || known_eq (p->size, q->size))
&& p->align == q->align
&& p->addrspace == q->addrspace
&& (p->expr == q->expr
{
const reg_attrs *const p = x;
- return ((p->offset * 1000) ^ (intptr_t) p->decl);
+ inchash::hash h;
+ h.add_ptr (p->decl);
+ h.add_poly_hwi (p->offset);
+ return h.end ();
}
/* Returns nonzero if the value represented by X is the same as that given by
const reg_attrs *const p = x;
const reg_attrs *const q = y;
- return (p->decl == q->decl && p->offset == q->offset);
+ return (p->decl == q->decl && known_eq (p->offset, q->offset));
}
/* Allocate a new reg_attrs structure and insert it into the hash table if
one identical to it is not already in the table. We are doing this for
MEM of mode MODE. */
static reg_attrs *
-get_reg_attrs (tree decl, int offset)
+get_reg_attrs (tree decl, poly_int64 offset)
{
reg_attrs attrs;
/* If everything is the default, we can just return zero. */
- if (decl == 0 && offset == 0)
+ if (decl == 0 && known_eq (offset, 0))
return 0;
attrs.decl = decl;
set_regno_raw (x, regno, nregs);
}
+/* Initialize a fresh REG rtx with mode MODE and register REGNO. */
+
+rtx
+init_raw_REG (rtx x, machine_mode mode, unsigned int regno)
+{
+ set_mode_and_regno (x, mode, regno);
+ REG_ATTRS (x) = NULL;
+ ORIGINAL_REGNO (x) = regno;
+ return x;
+}
+
/* Generate a new REG rtx. Make sure ORIGINAL_REGNO is set properly, and
don't attempt to share with the various global pieces of rtl (such as
frame_pointer_rtx). */
gen_raw_REG (machine_mode mode, unsigned int regno)
{
rtx x = rtx_alloc (REG MEM_STAT_INFO);
- set_mode_and_regno (x, mode, regno);
- REG_ATTRS (x) = NULL;
- ORIGINAL_REGNO (x) = regno;
+ init_raw_REG (x, mode, regno);
return x;
}
}
rtx
-gen_int_mode (HOST_WIDE_INT c, machine_mode mode)
+gen_int_mode (poly_int64 c, machine_mode mode)
{
- return GEN_INT (trunc_int_for_mode (c, mode));
+ c = trunc_int_for_mode (c, mode);
+ if (c.is_constant ())
+ return GEN_INT (c.coeffs[0]);
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+ return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
}
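/* Illustrative examples, not part of this change: gen_int_mode
   canonicalizes the constant for MODE before wrapping it, so:

     gen_int_mode (256, QImode)   -> const0_rtx   (256 truncates to 0)
     gen_int_mode (0xff, QImode)  -> constm1_rtx  (0xff sign-extends to -1)

   whereas a raw GEN_INT (0xff) would produce a CONST_INT that is not
   canonical for QImode.  */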
/* CONST_DOUBLEs might be created from pairs of integers, or from
a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
(if TARGET_SUPPORTS_WIDE_INT). */
-rtx
-immed_wide_int_const (const wide_int_ref &v, machine_mode mode)
+static rtx
+immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
{
unsigned int len = v.get_len ();
/* Not scalar_int_mode because we also allow pointer bound modes. */
}
#endif
+/* Return an rtx representation of C in mode MODE. */
+
+rtx
+immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
+{
+ if (c.is_constant ())
+ return immed_wide_int_const_1 (c.coeffs[0], mode);
+
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+
+ /* Allow truncation but not extension since we do not know if the
+ number is signed or unsigned. */
+ gcc_assert (prec <= c.coeffs[0].get_precision ());
+ poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);
+
+ /* See whether we already have an rtx for this constant. */
+ inchash::hash h;
+ h.add_int (mode);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (newc.coeffs[i]);
+ const_poly_int_hasher::compare_type typed_value (mode, newc);
+ rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
+ h.end (), INSERT);
+ rtx x = *slot;
+ if (x)
+ return x;
+
+ /* Create a new rtx. There's a choice to be made here between installing
+ the actual mode of the rtx or leaving it as VOIDmode (for consistency
+ with CONST_INT). In practice the handling of the codes is different
+ enough that we get no benefit from using VOIDmode, and various places
+ assume that VOIDmode implies CONST_INT. Using the real mode seems like
+ the right long-term direction anyway. */
+ typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
+ size_t extra_size = twi::extra_size (prec);
+ x = rtx_alloc_v (CONST_POLY_INT,
+ sizeof (struct const_poly_int_def) + extra_size);
+ PUT_MODE (x, mode);
+ CONST_POLY_INT_COEFFS (x).set_precision (prec);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];
+
+ *slot = x;
+ return x;
+}
+
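/* Worked example, illustrative and not part of this change: with
   NUM_POLY_INT_COEFFS == 2, a poly_int with coefficients {4, 4}
   represents the runtime value 4 + 4 * x, where x is the target's
   unknown scaling factor (for example the number of extra 128-bit
   blocks in an SVE vector).  Such a value is not a compile-time
   constant, so it cannot become a CONST_INT; immed_wide_int_const
   instead hash-conses a CONST_POLY_INT that stores both coefficients,
   truncated to the precision of MODE.  */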
rtx
gen_rtx_REG (machine_mode mode, unsigned int regno)
{
bool
validate_subreg (machine_mode omode, machine_mode imode,
- const_rtx reg, unsigned int offset)
+ const_rtx reg, poly_uint64 offset)
{
- unsigned int isize = GET_MODE_SIZE (imode);
- unsigned int osize = GET_MODE_SIZE (omode);
+ poly_uint64 isize = GET_MODE_SIZE (imode);
+ poly_uint64 osize = GET_MODE_SIZE (omode);
+
+ /* The sizes must be ordered, so that we know whether the subreg
+ is partial, paradoxical or complete. */
+ if (!ordered_p (isize, osize))
+ return false;
/* All subregs must be aligned. */
- if (offset % osize != 0)
+ if (!multiple_p (offset, osize))
return false;
/* The subreg offset cannot be outside the inner object. */
- if (offset >= isize)
+ if (maybe_ge (offset, isize))
return false;
- unsigned int regsize = REGMODE_NATURAL_SIZE (imode);
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);
/* ??? This should not be here. Temporarily continue to allow word_mode
subregs of anything. The most common offender is (subreg:SI (reg:DF)).
;
/* ??? Similarly, e.g. with (subreg:DF (reg:TI)). Though store_bit_field
is the culprit here, and not the backends. */
- else if (osize >= regsize && isize >= osize)
+ else if (known_ge (osize, regsize) && known_ge (isize, osize))
;
/* Allow component subregs of complex and vector. Though given the below
extraction rules, it's not always clear what that means. */
&& GET_MODE_INNER (imode) == omode)
;
/* ??? x86 sse code makes heavy use of *paradoxical* vector subregs,
- i.e. (subreg:V4SF (reg:SF) 0). This surely isn't the cleanest way to
- represent this. It's questionable if this ought to be represented at
- all -- why can't this all be hidden in post-reload splitters that make
- arbitrarily mode changes to the registers themselves. */
- else if (VECTOR_MODE_P (omode) && GET_MODE_INNER (omode) == imode)
+ i.e. (subreg:V4SF (reg:SF) 0) or (subreg:V4SF (reg:V2SF) 0). This
+ surely isn't the cleanest way to represent this. It's questionable
+ if this ought to be represented at all -- why can't this all be hidden
+ in post-reload splitters that make arbitrary mode changes to the
+ registers themselves. */
+ else if (VECTOR_MODE_P (omode)
+ && GET_MODE_INNER (omode) == GET_MODE_INNER (imode))
;
/* Subregs involving floating point modes are not allowed to
change size. Therefore (subreg:DI (reg:DF) 0) is fine, but
(subreg:SI (reg:DF) 0) isn't. */
else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
{
- if (! (isize == osize
+ if (! (known_eq (isize, osize)
/* LRA can use subreg to store a floating point value in
an integer mode. Although the floating point and the
integer modes need the same number of hard registers,
}
/* Paradoxical subregs must have offset zero. */
- if (osize > isize)
- return offset == 0;
+ if (maybe_gt (osize, isize))
+ return known_eq (offset, 0U);
/* This is a normal subreg. Verify that the offset is representable. */
return subreg_offset_representable_p (regno, imode, offset, omode);
}
+ /* The outer size must be ordered wrt the register size, otherwise
+ we wouldn't know at compile time how many registers the outer
+ mode occupies. */
+ if (!ordered_p (osize, regsize))
+ return false;
+
/* For pseudo registers, we want most of the same checks. Namely:
Assume that the pseudo register will be allocated to hard registers
Given that we've already checked the mode and offset alignment,
we only have to check subblock subregs here. */
- if (osize < regsize
+ if (maybe_lt (osize, regsize)
&& ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
{
- unsigned int block_size = MIN (isize, regsize);
- unsigned int offset_within_block = offset % block_size;
- if (BYTES_BIG_ENDIAN
- ? offset_within_block != block_size - osize
- : offset_within_block != 0)
+ /* It is invalid for the target to pick a register size for a mode
+ that isn't ordered wrt the size of that mode. */
+ poly_uint64 block_size = ordered_min (isize, regsize);
+ unsigned int start_reg;
+ poly_uint64 offset_within_reg;
+ if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
+ || (BYTES_BIG_ENDIAN
+ ? maybe_ne (offset_within_reg, block_size - osize)
+ : maybe_ne (offset_within_reg, 0U)))
return false;
}
return true;
}
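/* Some illustrative cases, not part of this change, assuming 4-byte
   SImode, 8-byte DImode and a little-endian target:

     (subreg:SI (reg:DI) 0)  -- ok: the offset is a multiple of the
                                4-byte outer size and lies within the
                                8-byte inner object.
     (subreg:SI (reg:DI) 2)  -- rejected: 2 is not a multiple of the
                                outer size, so the subreg is unaligned.
     (subreg:DI (reg:SI) 4)  -- rejected: a paradoxical subreg (outer
                                larger than inner) must have offset 0.  */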
rtx
-gen_rtx_SUBREG (machine_mode mode, rtx reg, int offset)
+gen_rtx_SUBREG (machine_mode mode, rtx reg, poly_uint64 offset)
{
gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset));
return gen_rtx_raw_SUBREG (mode, reg, offset);
paradoxical lowpart, in which case the offset will be negative
on big-endian targets. */
-int
+poly_int64
byte_lowpart_offset (machine_mode outer_mode,
machine_mode inner_mode)
{
from address X. For paradoxical big-endian subregs this is a
negative value, otherwise it's the same as OFFSET. */
-int
+poly_int64
subreg_memory_offset (machine_mode outer_mode, machine_mode inner_mode,
- unsigned int offset)
+ poly_uint64 offset)
{
if (paradoxical_subreg_p (outer_mode, inner_mode))
{
- gcc_assert (offset == 0);
+ gcc_assert (known_eq (offset, 0U));
return -subreg_lowpart_offset (inner_mode, outer_mode);
}
return offset;
if SUBREG_REG (X) were stored in memory. The only significant thing
about the current SUBREG_REG is its mode. */
-int
+poly_int64
subreg_memory_offset (const_rtx x)
{
return subreg_memory_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
to the REG_OFFSET. */
static void
-update_reg_offset (rtx new_rtx, rtx reg, int offset)
+update_reg_offset (rtx new_rtx, rtx reg, poly_int64 offset)
{
REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg),
- REG_OFFSET (reg) + offset);
+ REG_OFFSET (reg) + offset);
}
/* Generate a register with same attributes as REG, but with OFFSET
rtx
gen_rtx_REG_offset (rtx reg, machine_mode mode, unsigned int regno,
- int offset)
+ poly_int64 offset)
{
rtx new_rtx = gen_rtx_REG (mode, regno);
void
set_reg_attrs_from_value (rtx reg, rtx x)
{
- int offset;
+ poly_int64 offset;
bool can_be_reg_pointer = true;
/* Don't call mark_reg_pointer for incompatible pointer sign
rtx
gen_lowpart_common (machine_mode mode, rtx x)
{
- int msize = GET_MODE_SIZE (mode);
- int xsize;
+ poly_uint64 msize = GET_MODE_SIZE (mode);
machine_mode innermode;
/* Unfortunately, this routine doesn't take a parameter for the mode of X,
so we have to make one up. Yuk. */
innermode = GET_MODE (x);
if (CONST_INT_P (x)
- && msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
+ && known_le (msize * BITS_PER_UNIT,
+ (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT))
innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
else if (innermode == VOIDmode)
innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();
- xsize = GET_MODE_SIZE (innermode);
-
gcc_assert (innermode != VOIDmode && innermode != BLKmode);
if (innermode == mode)
return x;
+ /* The size of the outer and inner modes must be ordered. */
+ poly_uint64 xsize = GET_MODE_SIZE (innermode);
+ if (!ordered_p (msize, xsize))
+ return 0;
+
if (SCALAR_FLOAT_MODE_P (mode))
{
/* Don't allow paradoxical FLOAT_MODE subregs. */
- if (msize > xsize)
+ if (maybe_gt (msize, xsize))
return 0;
}
else
{
/* MODE must occupy no more of the underlying registers than X. */
- unsigned int regsize = REGMODE_NATURAL_SIZE (innermode);
- unsigned int mregs = CEIL (msize, regsize);
- unsigned int xregs = CEIL (xsize, regsize);
- if (mregs > xregs)
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode);
+ unsigned int mregs, xregs;
+ if (!can_div_away_from_zero_p (msize, regsize, &mregs)
+ || !can_div_away_from_zero_p (xsize, regsize, &xregs)
+ || mregs > xregs)
return 0;
}
return gen_rtx_fmt_e (GET_CODE (x), int_mode, XEXP (x, 0));
}
else if (GET_CODE (x) == SUBREG || REG_P (x)
- || GET_CODE (x) == CONCAT || const_vec_p (x)
- || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x))
+ || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
+ || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
+ || CONST_POLY_INT_P (x))
return lowpart_subreg (mode, x, innermode);
/* Otherwise, we can't do this. */
rtx
gen_highpart (machine_mode mode, rtx x)
{
- unsigned int msize = GET_MODE_SIZE (mode);
+ poly_uint64 msize = GET_MODE_SIZE (mode);
rtx result;
/* This case loses if X is a subreg. To catch bugs early,
complain if an invalid MODE is used even in other cases. */
- gcc_assert (msize <= UNITS_PER_WORD
- || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x)));
+ gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
+ || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));
- result = simplify_gen_subreg (mode, x, GET_MODE (x),
- subreg_highpart_offset (mode, GET_MODE (x)));
- gcc_assert (result);
-
- /* simplify_gen_subreg is not guaranteed to return a valid operand for
- the target if we have a MEM. gen_highpart must return a valid operand,
- emitting code if necessary to do so. */
- if (MEM_P (result))
+ /* gen_lowpart_common handles a lot of special cases due to needing to handle
+ paradoxical subregs; it only calls simplify_gen_subreg when certain that
+ it will produce something meaningful. The only case we need to handle
+ specially here is MEM. */
+ if (MEM_P (x))
{
- result = validize_mem (result);
- gcc_assert (result);
+ poly_int64 offset = subreg_highpart_offset (mode, GET_MODE (x));
+ return adjust_address (x, mode, offset);
}
+ result = simplify_gen_subreg (mode, x, GET_MODE (x),
+ subreg_highpart_offset (mode, GET_MODE (x)));
+ /* Since we handle MEM directly above, we should never get a MEM back
+ from simplify_gen_subreg. */
+ gcc_assert (result && !MEM_P (result));
+
return result;
}
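/* For instance, illustrative only: on a little-endian target with
   8-byte DImode and 4-byte SImode, gen_highpart (SImode, x) for a
   DImode register X yields (subreg:SI (reg:DI) 4), since
   subreg_highpart_offset selects the most significant half; for a
   DImode MEM it instead returns the MEM adjusted by that 4-byte
   offset.  */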
/* Return the SUBREG_BYTE for a lowpart subreg whose outer mode has
OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */
-unsigned int
-subreg_size_lowpart_offset (unsigned int outer_bytes, unsigned int inner_bytes)
+poly_uint64
+subreg_size_lowpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
{
- if (outer_bytes > inner_bytes)
+ gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
+ if (maybe_gt (outer_bytes, inner_bytes))
/* Paradoxical subregs always have a SUBREG_BYTE of 0. */
return 0;
/* Return the SUBREG_BYTE for a highpart subreg whose outer mode has
OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */
-unsigned int
-subreg_size_highpart_offset (unsigned int outer_bytes,
- unsigned int inner_bytes)
+poly_uint64
+subreg_size_highpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
{
- gcc_assert (inner_bytes >= outer_bytes);
+ gcc_assert (known_ge (inner_bytes, outer_bytes));
if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
return 0;
else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
return 0;
- return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)))
- == SUBREG_BYTE (x));
+ return known_eq (subreg_lowpart_offset (GET_MODE (x),
+ GET_MODE (SUBREG_REG (x))),
+ SUBREG_BYTE (x));
}
\f
/* Return subword OFFSET of operand OP.
*/
rtx
-operand_subword (rtx op, unsigned int offset, int validate_address, machine_mode mode)
+operand_subword (rtx op, poly_uint64 offset, int validate_address,
+ machine_mode mode)
{
if (mode == VOIDmode)
mode = GET_MODE (op);
/* If OP is narrower than a word, fail. */
if (mode != BLKmode
- && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
+ && maybe_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
return 0;
/* If we want a word outside OP, return zero. */
if (mode != BLKmode
- && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
+ && maybe_gt ((offset + 1) * UNITS_PER_WORD, GET_MODE_SIZE (mode)))
return const0_rtx;
/* Form a new MEM at the requested address. */
MODE is the mode of OP, in case it is CONST_INT. */
rtx
-operand_subword_force (rtx op, unsigned int offset, machine_mode mode)
+operand_subword_force (rtx op, poly_uint64 offset, machine_mode mode)
{
rtx result = operand_subword (op, offset, 1, mode);
if (mode != BLKmode && mode != VOIDmode)
{
- /* If this is a register which can not be accessed by words, copy it
+ /* If this is a register which cannot be accessed by words, copy it
to a pseudo register. */
if (REG_P (op))
op = copy_to_reg (op);
return result;
}
\f
+mem_attrs::mem_attrs ()
+ : expr (NULL_TREE),
+ offset (0),
+ size (0),
+ alias (0),
+ align (0),
+ addrspace (ADDR_SPACE_GENERIC),
+ offset_known_p (false),
+ size_known_p (false)
+{}
+
/* Returns 1 if the two MEM_EXPRs can be considered equal
and 0 otherwise. */
get_mem_align_offset (rtx mem, unsigned int align)
{
tree expr;
- unsigned HOST_WIDE_INT offset;
+ poly_uint64 offset;
/* This function can't use
if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem)
tree byte_offset = component_ref_field_offset (expr);
tree bit_offset = DECL_FIELD_BIT_OFFSET (field);
+ poly_uint64 suboffset;
if (!byte_offset
- || !tree_fits_uhwi_p (byte_offset)
+ || !poly_int_tree_p (byte_offset, &suboffset)
|| !tree_fits_uhwi_p (bit_offset))
return -1;
- offset += tree_to_uhwi (byte_offset);
+ offset += suboffset;
offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT;
if (inner == NULL_TREE)
else
return -1;
- return offset & ((align / BITS_PER_UNIT) - 1);
+ HOST_WIDE_INT misalign;
+ if (!known_misalignment (offset, align / BITS_PER_UNIT, &misalign))
+ return -1;
+ return misalign;
}
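/* Worked example, not part of this change: for a known byte offset of
   6 within the object and ALIGN == 32 bits (4 bytes), the old
   computation 6 & (4 - 1) and the new known_misalignment (6, 4, &m)
   both yield 2.  The poly_int version additionally returns -1 when the
   offset is a runtime-variable value whose misalignment wrt 4 bytes
   cannot be determined at compile time.  */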
/* Given REF (a MEM) and T, either the type of X or the expression
void
set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
- HOST_WIDE_INT bitpos)
+ poly_int64 bitpos)
{
- HOST_WIDE_INT apply_bitpos = 0;
+ poly_int64 apply_bitpos = 0;
tree type;
- struct mem_attrs attrs, *defattrs, *refattrs;
+ class mem_attrs attrs, *defattrs, *refattrs;
addr_space_t as;
/* It can happen that type_for_mode was given a mode for which there
set_mem_attributes. */
gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t));
- memset (&attrs, 0, sizeof (attrs));
-
/* Get the alias set from the expression or type (perhaps using a
front-end routine) and use it. */
attrs.alias = get_alias_set (t);
new_size = DECL_SIZE_UNIT (t);
}
- /* ??? If we end up with a constant here do record a MEM_EXPR. */
- else if (CONSTANT_CLASS_P (t))
+ /* ??? If we end up with a constant or a descriptor do not
+ record a MEM_EXPR. */
+ else if (CONSTANT_CLASS_P (t)
+ || TREE_CODE (t) == CONSTRUCTOR)
;
/* If this is a field reference, record it. */
new_size = DECL_SIZE_UNIT (TREE_OPERAND (t, 1));
}
- /* If this is an array reference, look for an outer field reference. */
- else if (TREE_CODE (t) == ARRAY_REF)
- {
- tree off_tree = size_zero_node;
- /* We can't modify t, because we use it at the end of the
- function. */
- tree t2 = t;
-
- do
- {
- tree index = TREE_OPERAND (t2, 1);
- tree low_bound = array_ref_low_bound (t2);
- tree unit_size = array_ref_element_size (t2);
-
- /* We assume all arrays have sizes that are a multiple of a byte.
- First subtract the lower bound, if any, in the type of the
- index, then convert to sizetype and multiply by the size of
- the array element. */
- if (! integer_zerop (low_bound))
- index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
- index, low_bound);
-
- off_tree = size_binop (PLUS_EXPR,
- size_binop (MULT_EXPR,
- fold_convert (sizetype,
- index),
- unit_size),
- off_tree);
- t2 = TREE_OPERAND (t2, 0);
- }
- while (TREE_CODE (t2) == ARRAY_REF);
-
- if (DECL_P (t2)
- || (TREE_CODE (t2) == COMPONENT_REF
- /* For trailing arrays t2 doesn't have a size that
- covers all valid accesses. */
- && ! array_at_struct_end_p (t)))
- {
- attrs.expr = t2;
- attrs.offset_known_p = false;
- if (tree_fits_uhwi_p (off_tree))
- {
- attrs.offset_known_p = true;
- attrs.offset = tree_to_uhwi (off_tree);
- apply_bitpos = bitpos;
- }
- }
- /* Else do not record a MEM_EXPR. */
- }
-
- /* If this is an indirect reference, record it. */
- else if (TREE_CODE (t) == MEM_REF
- || TREE_CODE (t) == TARGET_MEM_REF)
+ /* Else record it. */
+ else
{
+ gcc_assert (handled_component_p (t)
+ || TREE_CODE (t) == MEM_REF
+ || TREE_CODE (t) == TARGET_MEM_REF);
attrs.expr = t;
attrs.offset_known_p = true;
attrs.offset = 0;
apply_bitpos = bitpos;
}
+ /* If this is a reference based on a partitioned decl replace the
+ base with a MEM_REF of the pointer representative we created
+ during stack slot partitioning. */
+ if (attrs.expr
+ && VAR_P (base)
+ && ! is_global_var (base)
+ && cfun->gimple_df->decls_to_pointers != NULL)
+ {
+ tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
+ if (namep)
+ {
+ attrs.expr = unshare_expr (attrs.expr);
+ tree *orig_base = &attrs.expr;
+ while (handled_component_p (*orig_base))
+ orig_base = &TREE_OPERAND (*orig_base, 0);
+ tree aptrt = reference_alias_ptr_type (*orig_base);
+ *orig_base = build2 (MEM_REF, TREE_TYPE (*orig_base), *namep,
+ build_int_cst (aptrt, 0));
+ }
+ }
+
/* Compute the alignment. */
unsigned int obj_align;
unsigned HOST_WIDE_INT obj_bitpos;
get_object_alignment_1 (t, &obj_align, &obj_bitpos);
- obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
- if (obj_bitpos != 0)
- obj_align = least_bit_hwi (obj_bitpos);
+ unsigned int diff_align = known_alignment (obj_bitpos - bitpos);
+ if (diff_align != 0)
+ obj_align = MIN (obj_align, diff_align);
attrs.align = MAX (attrs.align, obj_align);
}
- if (tree_fits_uhwi_p (new_size))
+ poly_uint64 const_size;
+ if (poly_int_tree_p (new_size, &const_size))
{
attrs.size_known_p = true;
- attrs.size = tree_to_uhwi (new_size);
+ attrs.size = const_size;
}
/* If we modified OFFSET based on T, then subtract the outstanding
bit position offset. Similarly, increase the size of the accessed
object to contain the negative offset. */
- if (apply_bitpos)
+ if (maybe_ne (apply_bitpos, 0))
{
gcc_assert (attrs.offset_known_p);
- attrs.offset -= apply_bitpos / BITS_PER_UNIT;
+ poly_int64 bytepos = bits_to_bytes_round_down (apply_bitpos);
+ attrs.offset -= bytepos;
if (attrs.size_known_p)
- attrs.size += apply_bitpos / BITS_PER_UNIT;
+ attrs.size += bytepos;
}
/* Now set the attributes we computed above. */
void
set_mem_alias_set (rtx mem, alias_set_type set)
{
- struct mem_attrs attrs;
-
/* If the new and old alias sets don't conflict, something is wrong. */
gcc_checking_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem)));
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.alias = set;
set_mem_attrs (mem, &attrs);
}
void
set_mem_addr_space (rtx mem, addr_space_t addrspace)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.addrspace = addrspace;
set_mem_attrs (mem, &attrs);
}
void
set_mem_align (rtx mem, unsigned int align)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.align = align;
set_mem_attrs (mem, &attrs);
}
void
set_mem_expr (rtx mem, tree expr)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.expr = expr;
set_mem_attrs (mem, &attrs);
}
/* Set the offset of MEM to OFFSET. */
void
-set_mem_offset (rtx mem, HOST_WIDE_INT offset)
+set_mem_offset (rtx mem, poly_int64 offset)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.offset_known_p = true;
attrs.offset = offset;
set_mem_attrs (mem, &attrs);
void
clear_mem_offset (rtx mem)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.offset_known_p = false;
set_mem_attrs (mem, &attrs);
}
/* Set the size of MEM to SIZE. */
void
-set_mem_size (rtx mem, HOST_WIDE_INT size)
+set_mem_size (rtx mem, poly_int64 size)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.size_known_p = true;
attrs.size = size;
set_mem_attrs (mem, &attrs);
void
clear_mem_size (rtx mem)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.size_known_p = false;
set_mem_attrs (mem, &attrs);
}
{
rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
machine_mode mmode = GET_MODE (new_rtx);
- struct mem_attrs attrs, *defattrs;
+ class mem_attrs *defattrs;
- attrs = *get_mem_attrs (memref);
+ mem_attrs attrs (*get_mem_attrs (memref));
defattrs = mode_mem_attrs[(int) mmode];
attrs.expr = NULL_TREE;
attrs.offset_known_p = false;
has no inherent size. */
rtx
-adjust_address_1 (rtx memref, machine_mode mode, HOST_WIDE_INT offset,
+adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset,
int validate, int adjust_address, int adjust_object,
- HOST_WIDE_INT size)
+ poly_int64 size)
{
rtx addr = XEXP (memref, 0);
rtx new_rtx;
scalar_int_mode address_mode;
- int pbits;
- struct mem_attrs attrs = *get_mem_attrs (memref), *defattrs;
+ class mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
unsigned HOST_WIDE_INT max_align;
#ifdef POINTERS_EXTEND_UNSIGNED
scalar_int_mode pointer_mode
size = defattrs->size;
/* If there are no changes, just return the original memory reference. */
- if (mode == GET_MODE (memref) && !offset
- && (size == 0 || (attrs.size_known_p && attrs.size == size))
+ if (mode == GET_MODE (memref)
+ && known_eq (offset, 0)
+ && (known_eq (size, 0)
+ || (attrs.size_known_p && known_eq (attrs.size, size)))
&& (!validate || memory_address_addr_space_p (mode, addr,
attrs.addrspace)))
return memref;
/* Convert a possibly large offset to a signed value within the
range of the target address space. */
address_mode = get_address_mode (memref);
- pbits = GET_MODE_BITSIZE (address_mode);
- if (HOST_BITS_PER_WIDE_INT > pbits)
- {
- int shift = HOST_BITS_PER_WIDE_INT - pbits;
- offset = (((HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) offset << shift))
- >> shift);
- }
+ offset = trunc_int_for_mode (offset, address_mode);
if (adjust_address)
{
/* If MEMREF is a LO_SUM and the offset is within the alignment of the
object, we can merge it into the LO_SUM. */
- if (GET_MODE (memref) != BLKmode && GET_CODE (addr) == LO_SUM
- && offset >= 0
- && (unsigned HOST_WIDE_INT) offset
- < GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT)
+ if (GET_MODE (memref) != BLKmode
+ && GET_CODE (addr) == LO_SUM
+ && known_in_range_p (offset,
+ 0, (GET_MODE_ALIGNMENT (GET_MODE (memref))
+ / BITS_PER_UNIT)))
addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
plus_constant (address_mode,
XEXP (addr, 1), offset));
else if (POINTERS_EXTEND_UNSIGNED > 0
&& GET_CODE (addr) == ZERO_EXTEND
&& GET_MODE (XEXP (addr, 0)) == pointer_mode
- && trunc_int_for_mode (offset, pointer_mode) == offset)
+ && known_eq (trunc_int_for_mode (offset, pointer_mode), offset))
addr = gen_rtx_ZERO_EXTEND (address_mode,
plus_constant (pointer_mode,
XEXP (addr, 0), offset));
/* If the address is a REG, change_address_1 rightfully returns memref,
but this would destroy memref's MEM_ATTRS. */
- if (new_rtx == memref && offset != 0)
+ if (new_rtx == memref && maybe_ne (offset, 0))
new_rtx = copy_rtx (new_rtx);
/* Conservatively drop the object if we don't know where we start from. */
attrs.offset += offset;
/* Drop the object if the new left end is not within its bounds. */
- if (adjust_object && attrs.offset < 0)
+ if (adjust_object && maybe_lt (attrs.offset, 0))
{
attrs.expr = NULL_TREE;
attrs.alias = 0;
/* Compute the new alignment by taking the MIN of the alignment and the
lowest-order set bit in OFFSET, but don't change the alignment if OFFSET
is zero. */
- if (offset != 0)
+ if (maybe_ne (offset, 0))
{
- max_align = least_bit_hwi (offset) * BITS_PER_UNIT;
+ max_align = known_alignment (offset) * BITS_PER_UNIT;
attrs.align = MIN (attrs.align, max_align);
}
- if (size)
+ if (maybe_ne (size, 0))
{
/* Drop the object if the new right end is not within its bounds. */
- if (adjust_object && (offset + size) > attrs.size)
+ if (adjust_object && maybe_gt (offset + size, attrs.size))
{
attrs.expr = NULL_TREE;
attrs.alias = 0;
rtx
adjust_automodify_address_1 (rtx memref, machine_mode mode, rtx addr,
- HOST_WIDE_INT offset, int validate)
+ poly_int64 offset, int validate)
{
memref = change_address_1 (memref, VOIDmode, addr, validate, false);
return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
{
rtx new_rtx, addr = XEXP (memref, 0);
machine_mode address_mode;
- struct mem_attrs attrs, *defattrs;
+ class mem_attrs *defattrs;
- attrs = *get_mem_attrs (memref);
+ mem_attrs attrs (*get_mem_attrs (memref));
address_mode = get_address_mode (memref);
new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
operations plus masking logic. */
rtx
-widen_memory_access (rtx memref, machine_mode mode, HOST_WIDE_INT offset)
+widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
{
rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
- struct mem_attrs attrs;
- unsigned int size = GET_MODE_SIZE (mode);
+ poly_uint64 size = GET_MODE_SIZE (mode);
/* If there are no changes, just return the original memory reference. */
if (new_rtx == memref)
return new_rtx;
- attrs = *get_mem_attrs (new_rtx);
+ mem_attrs attrs (*get_mem_attrs (new_rtx));
/* If we don't know what offset we were at within the expression, then
we can't know if we've overstepped the bounds. */
/* Is the field at least as large as the access? If so, ok,
otherwise strip back to the containing structure. */
- if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST
- && compare_tree_int (DECL_SIZE_UNIT (field), size) >= 0
- && attrs.offset >= 0)
+ if (poly_int_tree_p (DECL_SIZE_UNIT (field))
+ && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (field)), size)
+ && known_ge (attrs.offset, 0))
break;
- if (! tree_fits_uhwi_p (offset))
+ poly_uint64 suboffset;
+ if (!poly_int_tree_p (offset, &suboffset))
{
attrs.expr = NULL_TREE;
break;
}
attrs.expr = TREE_OPERAND (attrs.expr, 0);
- attrs.offset += tree_to_uhwi (offset);
+ attrs.offset += suboffset;
attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
/ BITS_PER_UNIT);
}
/* Similarly for the decl. */
else if (DECL_P (attrs.expr)
&& DECL_SIZE_UNIT (attrs.expr)
- && TREE_CODE (DECL_SIZE_UNIT (attrs.expr)) == INTEGER_CST
- && compare_tree_int (DECL_SIZE_UNIT (attrs.expr), size) >= 0
- && (! attrs.offset_known_p || attrs.offset >= 0))
+ && poly_int_tree_p (DECL_SIZE_UNIT (attrs.expr))
+ && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (attrs.expr)),
+ size)
+ && known_ge (attrs.offset, 0))
break;
else
{
{
tree d = spill_slot_decl;
rtx rd;
- struct mem_attrs attrs;
if (d || !force_build_p)
return d;
rd = gen_rtx_MEM (BLKmode, frame_pointer_rtx);
MEM_NOTRAP_P (rd) = 1;
- attrs = *mode_mem_attrs[(int) BLKmode];
+ mem_attrs attrs (*mode_mem_attrs[(int) BLKmode]);
attrs.alias = new_alias_set ();
attrs.expr = d;
set_mem_attrs (rd, &attrs);
void
set_mem_attrs_for_spill (rtx mem)
{
- struct mem_attrs attrs;
rtx addr;
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.expr = get_spill_slot_decl (true);
attrs.alias = MEM_ALIAS_SET (DECL_RTL (attrs.expr));
attrs.addrspace = ADDR_SPACE_GENERIC;
with perhaps the plus missing for offset = 0. */
addr = XEXP (mem, 0);
attrs.offset_known_p = true;
- attrs.offset = 0;
- if (GET_CODE (addr) == PLUS
- && CONST_INT_P (XEXP (addr, 1)))
- attrs.offset = INTVAL (XEXP (addr, 1));
+ strip_offset (addr, &attrs.offset);
set_mem_attrs (mem, &attrs);
MEM_NOTRAP_P (mem) = 1;
set_last_insn (last);
cur_insn_uid = 0;
- if (MIN_NONDEBUG_INSN_UID || MAY_HAVE_DEBUG_INSNS)
+ if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS)
{
int debug_count = 0;
- cur_insn_uid = MIN_NONDEBUG_INSN_UID - 1;
+ cur_insn_uid = param_min_nondebug_insn_uid - 1;
cur_debug_insn_uid = 0;
for (insn = first; insn; insn = NEXT_INSN (insn))
- if (INSN_UID (insn) < MIN_NONDEBUG_INSN_UID)
+ if (INSN_UID (insn) < param_min_nondebug_insn_uid)
cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn));
else
{
}
if (debug_count)
- cur_debug_insn_uid = MIN_NONDEBUG_INSN_UID + debug_count;
+ cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count;
else
cur_debug_insn_uid++;
}
case LABEL_REF:
case CODE_LABEL:
case PC:
- case CC0:
case RETURN:
case SIMPLE_RETURN:
case SCRATCH:
/* SCRATCH must be shared because they represent distinct values. */
return;
case CLOBBER:
- /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ /* Share clobbers of hard registers, but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
if (REG_P (XEXP (x, 0))
case LABEL_REF:
case CODE_LABEL:
case PC:
- case CC0:
case RETURN:
case SIMPLE_RETURN:
case SCRATCH:
/* SCRATCH must be shared because they represent distinct values. */
return;
case CLOBBER:
- /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ /* Share clobbers of hard registers, but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
if (REG_P (XEXP (x, 0))
case SYMBOL_REF:
case CODE_LABEL:
case PC:
- case CC0:
case RETURN:
case SIMPLE_RETURN:
return;
differences due to debug insns, and not be affected by
-fmin-insn-uid, to avoid excessive table size and to simplify
debugging of -fcompare-debug failures. */
- if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+ if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
n -= cur_debug_insn_uid;
else
- n -= MIN_NONDEBUG_INSN_UID;
+ n -= param_min_nondebug_insn_uid;
return n;
}
return insn;
}
+/* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx_insn *
+next_real_insn (rtx_insn *insn)
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || INSN_P (insn))
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx_insn *
+prev_real_insn (rtx_insn *insn)
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || INSN_P (insn))
+ break;
+ }
+
+ return insn;
+}
+
/* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
or 0, if there is none. This routine does not look inside
SEQUENCEs. */
rtx_insn *
-next_real_insn (rtx uncast_insn)
+next_real_nondebug_insn (rtx uncast_insn)
{
rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
SEQUENCEs. */
rtx_insn *
-prev_real_insn (rtx_insn *insn)
+prev_real_nondebug_insn (rtx_insn *insn)
{
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
return insn;
}
\f
-/* Return the next insn that uses CC0 after INSN, which is assumed to
- set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter
- applied to the result of this function should yield INSN).
-
- Normally, this is simply the next insn. However, if a REG_CC_USER note
- is present, it contains the insn that uses CC0.
-
- Return 0 if we can't find the insn. */
-
-rtx_insn *
-next_cc0_user (rtx_insn *insn)
-{
- rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX);
-
- if (note)
- return safe_as_a <rtx_insn *> (XEXP (note, 0));
-
- insn = next_nonnote_insn (insn);
- if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
-
- if (insn && INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
- return insn;
-
- return 0;
-}
-
-/* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER
- note, it is the previous insn. */
-
-rtx_insn *
-prev_cc0_setter (rtx_insn *insn)
-{
- rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
-
- if (note)
- return safe_as_a <rtx_insn *> (XEXP (note, 0));
-
- insn = prev_nonnote_insn (insn);
- gcc_assert (sets_cc0_p (PATTERN (insn)));
-
- return insn;
-}
-
/* Find a RTX_AUTOINC class rtx which matches DATA. */
static int
int njumps = 0;
rtx_insn *call_insn = NULL;
- /* We're not good at redistributing frame information. */
- if (RTX_FRAME_RELATED_P (trial))
- return trial;
-
if (any_condjump_p (trial)
&& (note = find_reg_note (trial, REG_BR_PROB, 0)))
split_branch_probability
if (!seq)
return trial;
+ int split_insn_count = 0;
/* Avoid infinite loop if any insn of the result matches
the original pattern. */
insn_last = seq;
if (INSN_P (insn_last)
&& rtx_equal_p (PATTERN (insn_last), pat))
return trial;
+ split_insn_count++;
if (!NEXT_INSN (insn_last))
break;
insn_last = NEXT_INSN (insn_last);
}
+ /* We're not good at redistributing frame information if
+ the split occurs before reload or if it results in more
+ than one insn. */
+ if (RTX_FRAME_RELATED_P (trial))
+ {
+ if (!reload_completed || split_insn_count != 1)
+ return trial;
+
+ rtx_insn *new_insn = seq;
+ rtx_insn *old_insn = trial;
+ copy_frame_info_to_split_insn (old_insn, new_insn);
+ }
+
/* We will be adding the new sequence to the function. The splitters
may have introduced invalid RTL sharing, so unshare the sequence now. */
unshare_all_rtl_in_chain (seq);
for (insn = insn_last; insn ; insn = PREV_INSN (insn))
if (CALL_P (insn))
{
- rtx_insn *next;
- rtx *p;
-
gcc_assert (call_insn == NULL_RTX);
call_insn = insn;
/* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
target may have explicitly specified. */
- p = &CALL_INSN_FUNCTION_USAGE (insn);
+ rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
while (*p)
p = &XEXP (*p, 1);
*p = CALL_INSN_FUNCTION_USAGE (trial);
/* If the old call was a sibling call, the new one must
be too. */
SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
-
- /* If the new call is the last instruction in the sequence,
- it will effectively replace the old call in-situ. Otherwise
- we must move any following NOTE_INSN_CALL_ARG_LOCATION note
- so that it comes immediately after the new call. */
- if (NEXT_INSN (insn))
- for (next = NEXT_INSN (trial);
- next && NOTE_P (next);
- next = NEXT_INSN (next))
- if (NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
- {
- remove_insn (next);
- add_insn_after (next, insn, NULL);
- break;
- }
}
}
case REG_SETJMP:
case REG_TM:
case REG_CALL_NOCF_CHECK:
+ case REG_CALL_ARG_LOCATION:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (CALL_P (insn))
break;
case REG_NON_LOCAL_GOTO:
+ case REG_LABEL_TARGET:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (JUMP_P (insn))
break;
case REG_ARGS_SIZE:
- fixup_args_size_notes (NULL, insn_last, INTVAL (XEXP (note, 0)));
+ fixup_args_size_notes (NULL, insn_last, get_args_size (note));
break;
case REG_CALL_DECL:
+ case REG_UNTYPED_CALL:
gcc_assert (call_insn != NULL_RTX);
add_reg_note (call_insn, REG_NOTE_KIND (note), XEXP (note, 0));
break;
before = PREV_INSN (trial);
after = NEXT_INSN (trial);
- tem = emit_insn_after_setloc (seq, trial, INSN_LOCATION (trial));
+ emit_insn_after_setloc (seq, trial, INSN_LOCATION (trial));
delete_insn (trial);
insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
INSN_UID (insn) = cur_debug_insn_uid++;
- if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+ if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
INSN_UID (insn) = cur_insn_uid++;
PATTERN (insn) = pattern;
{
rtx_insn *prev = get_last_insn ();
link_insn_into_chain (insn, prev, NULL);
- if (NULL == get_insns ())
+ if (get_insns () == NULL)
set_first_insn (insn);
set_last_insn (insn);
}
they know how to update a SEQUENCE. */
void
-add_insn_after (rtx uncast_insn, rtx uncast_after, basic_block bb)
+add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *after = as_a <rtx_insn *> (uncast_after);
add_insn_after_nobb (insn, after);
if (!BARRIER_P (after)
&& !BARRIER_P (insn)
they know how to update a SEQUENCE. */
void
-add_insn_before (rtx uncast_insn, rtx uncast_before, basic_block bb)
+add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
add_insn_before_nobb (insn, before);
if (!bb
/* Replace INSN with a deleted instruction note. */
void
-set_insn_deleted (rtx insn)
+set_insn_deleted (rtx_insn *insn)
{
if (INSN_P (insn))
- df_insn_delete (as_a <rtx_insn *> (insn));
+ df_insn_delete (insn);
PUT_CODE (insn, NOTE);
NOTE_KIND (insn) = NOTE_INSN_DELETED;
}
To really delete an insn and related DF information, use delete_insn. */
void
-remove_insn (rtx uncast_insn)
+remove_insn (rtx_insn *insn)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
rtx_insn *next = NEXT_INSN (insn);
rtx_insn *prev = PREV_INSN (insn);
basic_block bb;
generated would almost certainly die right after it was created. */
static rtx_insn *
-emit_pattern_before_noloc (rtx x, rtx before, rtx last, basic_block bb,
+emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
+ basic_block bb,
rtx_insn *(*make_raw) (rtx))
{
rtx_insn *insn;
gcc_assert (before);
if (x == NULL_RTX)
- return safe_as_a <rtx_insn *> (last);
+ return last;
switch (GET_CODE (x))
{
break;
}
- return safe_as_a <rtx_insn *> (last);
+ return last;
}
/* Make X be output before the instruction BEFORE. */
emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
- emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ emit_pattern_before_noloc (x, before, NULL, NULL,
make_jump_insn_raw));
}
rtx_insn *
emit_call_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_call_insn_raw);
}
and output it before the instruction BEFORE. */
rtx_insn *
-emit_debug_insn_before_noloc (rtx x, rtx before)
+emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_debug_insn_raw);
}
and output it before the insn BEFORE. */
rtx_barrier *
-emit_barrier_before (rtx before)
+emit_barrier_before (rtx_insn *before)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL before the insn BEFORE. */
rtx_code_label *
-emit_label_before (rtx label, rtx_insn *before)
+emit_label_before (rtx_code_label *label, rtx_insn *before)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_before (label, before, NULL);
- return as_a <rtx_code_label *> (label);
+ return label;
}
\f
/* Helper for emit_insn_after, handles lists of instructions
efficiently. */
static rtx_insn *
-emit_insn_after_1 (rtx_insn *first, rtx uncast_after, basic_block bb)
+emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last;
rtx_insn *after_after;
if (!bb && !BARRIER_P (after))
}
static rtx_insn *
-emit_pattern_after_noloc (rtx x, rtx uncast_after, basic_block bb,
+emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
rtx_insn *(*make_raw)(rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = after;
gcc_assert (after);
BB is NULL, an attempt is made to infer the BB from AFTER. */
rtx_insn *
-emit_insn_after_noloc (rtx x, rtx after, basic_block bb)
+emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
{
return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
}
and output it after the insn AFTER. */
rtx_jump_insn *
-emit_jump_insn_after_noloc (rtx x, rtx after)
+emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
and output it after the instruction AFTER. */
rtx_insn *
-emit_call_insn_after_noloc (rtx x, rtx after)
+emit_call_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
}
and output it after the instruction AFTER. */
rtx_insn *
-emit_debug_insn_after_noloc (rtx x, rtx after)
+emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
}
and output it after the insn AFTER. */
rtx_barrier *
-emit_barrier_after (rtx after)
+emit_barrier_after (rtx_insn *after)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL after the insn AFTER. */
rtx_insn *
-emit_label_after (rtx label, rtx_insn *after)
+emit_label_after (rtx_insn *label, rtx_insn *after)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_after (label, after, NULL);
- return as_a <rtx_insn *> (label);
+ return label;
}
\f
/* Notes require a bit of special handling: Some notes need to have their
inside basic blocks. If the caller is emitting on the basic block
boundary, do not set BLOCK_FOR_INSN on the new note. */
case NOTE_INSN_VAR_LOCATION:
- case NOTE_INSN_CALL_ARG_LOCATION:
case NOTE_INSN_EH_REGION_BEG:
case NOTE_INSN_EH_REGION_END:
return on_bb_boundary_p;
MAKE_RAW indicates how to turn PATTERN into a real insn. */
static rtx_insn *
-emit_pattern_after_setloc (rtx pattern, rtx uncast_after, int loc,
+emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
if (pattern == NULL_RTX || !loc)
any DEBUG_INSNs. */
static rtx_insn *
-emit_pattern_after (rtx pattern, rtx uncast_after, bool skip_debug_insns,
+emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *prev = after;
if (skip_debug_insns)
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_insn_raw);
}
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_insn_after (rtx pattern, rtx after)
+emit_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_insn_raw);
}
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_setloc (pattern, after, loc, make_jump_insn_raw));
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_jump_insn *
-emit_jump_insn_after (rtx pattern, rtx after)
+emit_jump_insn_after (rtx pattern, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after (pattern, after, true, make_jump_insn_raw));
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
}
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_call_insn_after (rtx pattern, rtx after)
+emit_call_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_call_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_debug_insn_after (rtx pattern, rtx after)
+emit_debug_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
}
CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before_setloc (rtx pattern, rtx uncast_before, int loc, bool insnp,
- rtx_insn *(*make_raw) (rtx))
+emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc,
+ bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
rtx_insn *first = PREV_INSN (before);
rtx_insn *last = emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
if (pattern == NULL_RTX || !loc)
INSN as opposed to a JUMP_INSN, CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before (rtx pattern, rtx uncast_before, bool skip_debug_insns,
+emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns,
bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = safe_as_a <rtx_insn *> (uncast_before);
rtx_insn *next = before;
if (skip_debug_insns)
insnp, make_raw);
else
return emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
}
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, true,
make_insn_raw);
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_insn *
-emit_insn_before (rtx pattern, rtx before)
+emit_insn_before (rtx pattern, rtx_insn *before)
{
return emit_pattern_before (pattern, before, true, true, make_insn_raw);
}
/* like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before_setloc (pattern, before, loc, false,
/* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_jump_insn *
-emit_jump_insn_before (rtx pattern, rtx before)
+emit_jump_insn_before (rtx pattern, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before (pattern, before, true, false,
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_call_insn_raw);
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_before_setloc (rtx pattern, rtx before, int loc)
+emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_debug_insn_raw);
case SYMBOL_REF:
case CODE_LABEL:
case PC:
- case CC0:
case RETURN:
case SIMPLE_RETURN:
return orig;
case CLOBBER:
- /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ /* Share clobbers of hard registers, but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
if (REG_P (XEXP (orig, 0))
case 't':
case 'w':
case 'i':
+ case 'p':
case 's':
case 'S':
case 'u':
{
set_first_insn (NULL);
set_last_insn (NULL);
- if (MIN_NONDEBUG_INSN_UID)
- cur_insn_uid = MIN_NONDEBUG_INSN_UID;
+ if (param_min_nondebug_insn_uid)
+ cur_insn_uid = param_min_nondebug_insn_uid;
else
cur_insn_uid = 1;
cur_debug_insn_uid = 1;
#endif
}
-/* Return true if X is a valid element for a duplicated vector constant
- of the given mode. */
+/* Return the value of element I of CONST_VECTOR X as a wide_int. */
-bool
-valid_for_const_vec_duplicate_p (machine_mode, rtx x)
+wide_int
+const_vector_int_elt (const_rtx x, unsigned int i)
{
- return (CONST_SCALAR_INT_P (x)
- || CONST_DOUBLE_AS_FLOAT_P (x)
- || CONST_FIXED_P (x));
+ /* First handle elements that are directly encoded. */
+ machine_mode elt_mode = GET_MODE_INNER (GET_MODE (x));
+ if (i < (unsigned int) XVECLEN (x, 0))
+ return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, i), elt_mode);
+
+ /* Identify the pattern that contains element I and work out the index of
+ the last encoded element for that pattern. */
+ unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+ unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+ unsigned int count = i / npatterns;
+ unsigned int pattern = i % npatterns;
+ unsigned int final_i = encoded_nelts - npatterns + pattern;
+
+ /* If there are no steps, the final encoded value is the right one. */
+ if (!CONST_VECTOR_STEPPED_P (x))
+ return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, final_i), elt_mode);
+
+ /* Otherwise work out the value from the last two encoded elements. */
+ rtx v1 = CONST_VECTOR_ENCODED_ELT (x, final_i - npatterns);
+ rtx v2 = CONST_VECTOR_ENCODED_ELT (x, final_i);
+ wide_int diff = wi::sub (rtx_mode_t (v2, elt_mode),
+ rtx_mode_t (v1, elt_mode));
+ return wi::add (rtx_mode_t (v2, elt_mode), (count - 2) * diff);
}
-/* Like gen_const_vec_duplicate, but ignore const_tiny_rtx. */
+/* Return the value of element I of CONST_VECTOR X. */
-static rtx
-gen_const_vec_duplicate_1 (machine_mode mode, rtx el)
+rtx
+const_vector_elt (const_rtx x, unsigned int i)
+{
+ /* First handle elements that are directly encoded. */
+ if (i < (unsigned int) XVECLEN (x, 0))
+ return CONST_VECTOR_ENCODED_ELT (x, i);
+
+ /* If there are no steps, the final encoded value is the right one. */
+ if (!CONST_VECTOR_STEPPED_P (x))
+ {
+ /* Identify the pattern that contains element I and work out the index of
+ the last encoded element for that pattern. */
+ unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+ unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+ unsigned int pattern = i % npatterns;
+ unsigned int final_i = encoded_nelts - npatterns + pattern;
+ return CONST_VECTOR_ENCODED_ELT (x, final_i);
+ }
+
+ /* Otherwise work out the value from the last two encoded elements. */
+ return immed_wide_int_const (const_vector_int_elt (x, i),
+ GET_MODE_INNER (GET_MODE (x)));
+}
+
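For the unstepped branch, a multi-pattern example may help (again illustrative only):

/* Hypothetical unstepped CONST_VECTOR: NPATTERNS == 2, two elements per
   pattern, encoded as {a0, b0, a1, b1} and describing
   {a0, b0, a1, b1, a1, b1, ...}.  For i >= 4:

     pattern = i % 2
     final_i = 4 - 2 + pattern

   so every later element simply repeats the last encoded element of its
   pattern: a1 for even i, b1 for odd i.  */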
+/* Return true if X is a valid element for a CONST_VECTOR of the given
+ mode. */
+
+bool
+valid_for_const_vector_p (machine_mode, rtx x)
{
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits);
- for (int i = 0; i < nunits; ++i)
- RTVEC_ELT (v, i) = el;
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ return (CONST_SCALAR_INT_P (x)
+ || CONST_POLY_INT_P (x)
+ || CONST_DOUBLE_AS_FLOAT_P (x)
+ || CONST_FIXED_P (x));
}
/* Generate a vector constant of mode MODE in which every element has
rtx
gen_const_vec_duplicate (machine_mode mode, rtx elt)
{
- scalar_mode inner_mode = GET_MODE_INNER (mode);
- if (elt == CONST0_RTX (inner_mode))
- return CONST0_RTX (mode);
- else if (elt == CONST1_RTX (inner_mode))
- return CONST1_RTX (mode);
- else if (elt == CONSTM1_RTX (inner_mode))
- return CONSTM1_RTX (mode);
-
- return gen_const_vec_duplicate_1 (mode, elt);
+ rtx_vector_builder builder (mode, 1, 1);
+ builder.quick_push (elt);
+ return builder.build ();
}
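A duplicate is thus stored as one pattern containing a single element. A hedged usage sketch (the mode and value are arbitrary and target-dependent):

/* Illustrative only: build the V4SImode constant {5, 5, 5, 5}.  Only a
   single element is encoded; duplicates of 0, 1 and -1 are presumably
   still returned as the shared CONST0_RTX/CONST1_RTX/CONSTM1_RTX
   entries through the builder's cached-value lookup.  */
rtx dup = gen_const_vec_duplicate (V4SImode, gen_int_mode (5, SImode));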
/* Return a vector rtx of mode MODE in which every element has value X.
rtx
gen_vec_duplicate (machine_mode mode, rtx x)
{
- if (valid_for_const_vec_duplicate_p (mode, x))
+ if (valid_for_const_vector_p (mode, x))
return gen_const_vec_duplicate (mode, x);
return gen_rtx_VEC_DUPLICATE (mode, x);
}
-/* A subroutine of const_vec_series_p that handles the case in which
- X is known to be an integer CONST_VECTOR. */
+/* A subroutine of const_vec_series_p that handles the case in which:
+
+ (GET_CODE (X) == CONST_VECTOR
+ && CONST_VECTOR_NPATTERNS (X) == 1
+ && !CONST_VECTOR_DUPLICATE_P (X))
+
+ is known to hold. */
bool
const_vec_series_p_1 (const_rtx x, rtx *base_out, rtx *step_out)
{
- unsigned int nelts = CONST_VECTOR_NUNITS (x);
- if (nelts < 2)
+ /* Stepped sequences are only defined for integers, to avoid specifying
+ rounding behavior. */
+ if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
return false;
+ /* A non-duplicated vector with two elements can always be seen as a
+ series with a nonzero step. Longer vectors must have a stepped
+ encoding. */
+ if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
+ && !CONST_VECTOR_STEPPED_P (x))
+ return false;
+
+ /* Calculate the step between the first and second elements. */
scalar_mode inner = GET_MODE_INNER (GET_MODE (x));
rtx base = CONST_VECTOR_ELT (x, 0);
rtx step = simplify_binary_operation (MINUS, inner,
- CONST_VECTOR_ELT (x, 1), base);
+ CONST_VECTOR_ENCODED_ELT (x, 1), base);
if (rtx_equal_p (step, CONST0_RTX (inner)))
return false;
- for (unsigned int i = 2; i < nelts; ++i)
+ /* If we have a stepped encoding, check that the step between the
+ second and third elements is the same as STEP. */
+ if (CONST_VECTOR_STEPPED_P (x))
{
rtx diff = simplify_binary_operation (MINUS, inner,
- CONST_VECTOR_ELT (x, i),
- CONST_VECTOR_ELT (x, i - 1));
+ CONST_VECTOR_ENCODED_ELT (x, 2),
+ CONST_VECTOR_ENCODED_ELT (x, 1));
if (!rtx_equal_p (step, diff))
return false;
}
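A worked example of the new check (illustrative only, not part of the patch):

/* Hypothetical series {0, 1, 2, 3}: one stepped pattern whose encoded
   elements are {0, 1, 2}.  STEP is 1 - 0 = 1, and the stepped case only
   has to confirm that 2 - 1 == STEP; the remaining elements are implied
   by the encoding rather than stored, so no per-element loop is needed.  */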
rtx
gen_const_vec_series (machine_mode mode, rtx base, rtx step)
{
- gcc_assert (CONSTANT_P (base) && CONSTANT_P (step));
+ gcc_assert (valid_for_const_vector_p (mode, base)
+ && valid_for_const_vector_p (mode, step));
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits);
- scalar_mode inner_mode = GET_MODE_INNER (mode);
- RTVEC_ELT (v, 0) = base;
- for (int i = 1; i < nunits; ++i)
- RTVEC_ELT (v, i) = simplify_gen_binary (PLUS, inner_mode,
- RTVEC_ELT (v, i - 1), step);
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ rtx_vector_builder builder (mode, 1, 3);
+ builder.quick_push (base);
+ for (int i = 1; i < 3; ++i)
+ builder.quick_push (simplify_gen_binary (PLUS, GET_MODE_INNER (mode),
+ builder[i - 1], step));
+ return builder.build ();
}
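Conversely, building a series pushes exactly three encoded elements, which also works for variable-length vector modes where the element count is not a compile-time constant. A hedged caller sketch:

/* Illustrative only: the series {10, 13, 16, 19} in a hypothetical
   V4SImode.  Only {10, 13, 16} are encoded; the final element is
   implied by the stepped encoding.  */
rtx series = gen_const_vec_series (V4SImode,
                                   gen_int_mode (10, SImode),
                                   gen_int_mode (3, SImode));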
/* Generate a vector of mode MODE in which element I has the value
{
if (step == const0_rtx)
return gen_vec_duplicate (mode, base);
- if (CONSTANT_P (base) && CONSTANT_P (step))
+ if (valid_for_const_vector_p (mode, base)
+ && valid_for_const_vector_p (mode, step))
return gen_const_vec_series (mode, base, step);
return gen_rtx_VEC_SERIES (mode, base, step);
}
rtx el = const_tiny_rtx[constant][(int) inner];
gcc_assert (el);
- return gen_const_vec_duplicate_1 (mode, el);
+ return gen_const_vec_duplicate (mode, el);
}
/* Generate a vector like gen_rtx_raw_CONST_VECTOR, but use the zero vector when
rtx
gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
{
- gcc_assert (GET_MODE_NUNITS (mode) == GET_NUM_ELEM (v));
+ gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v)));
/* If the values are all the same, check to see if we can use one of the
standard constant vectors. */
if (rtvec_all_equal_p (v))
return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0));
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ unsigned int nunits = GET_NUM_ELEM (v);
+ rtx_vector_builder builder (mode, nunits, 1);
+ for (unsigned int i = 0; i < nunits; ++i)
+ builder.quick_push (RTVEC_ELT (v, i));
+ return builder.build (v);
}
/* Initialise global register information required by all functions. */
attrs = ggc_cleared_alloc<mem_attrs> ();
attrs->align = BITS_PER_UNIT;
attrs->addrspace = ADDR_SPACE_GENERIC;
- if (mode != BLKmode)
+ if (mode != BLKmode && mode != VOIDmode)
{
attrs->size_known_p = true;
attrs->size = GET_MODE_SIZE (mode);
#endif
const_double_htab = hash_table<const_double_hasher>::create_ggc (37);
+ if (NUM_POLY_INT_COEFFS > 1)
+ const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);
+
const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);
reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);
FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[3][(int) mode] = constm1_rtx;
+ /* For BImode, 1 and -1 are unsigned and signed interpretations
+ of the same value. */
+ const_tiny_rtx[0][(int) BImode] = const0_rtx;
+ const_tiny_rtx[1][(int) BImode] = const_true_rtx;
+ const_tiny_rtx[3][(int) BImode] = const_true_rtx;
+
for (mode = MIN_MODE_PARTIAL_INT;
mode <= MAX_MODE_PARTIAL_INT;
mode = (machine_mode)((int)(mode) + 1))
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
+ /* As for BImode, "all 1" and "all -1" are unsigned and signed
+ interpretations of the same value. */
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
+ {
+ const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
+ const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
+ const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode];
+ }
+
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
const_tiny_rtx[0][i] = const0_rtx;
- const_tiny_rtx[0][(int) BImode] = const0_rtx;
- if (STORE_FLAG_VALUE == 1)
- const_tiny_rtx[1][(int) BImode] = const1_rtx;
-
- FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
- {
- scalar_mode smode = smode_iter.require ();
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
- const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
- }
-
pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
- cc0_rtx = gen_rtx_fmt_ (CC0, VOIDmode);
invalid_insn_rtx = gen_rtx_INSN (VOIDmode,
/*prev_insn=*/NULL,
/*next_insn=*/NULL,
return curr_location;
}
+/* Set the location of the insn chain starting at INSN to LOC. */
+void
+set_insn_locations (rtx_insn *insn, location_t loc)
+{
+ while (insn)
+ {
+ if (INSN_P (insn))
+ INSN_LOCATION (insn) = loc;
+ insn = NEXT_INSN (insn);
+ }
+}
+
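A typical caller might stamp a freshly generated replacement sequence with the location of the insn it replaces; a hedged sketch with hypothetical locals:

/* Illustrative only: SEQ and OLD_INSN are hypothetical locals of some
   caller.  */
set_insn_locations (seq, INSN_LOCATION (old_insn));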
/* Return the lexical scope block that INSN belongs to. */
tree
insn_scope (const rtx_insn *insn)
}
}
+/* Return a constant shift amount for shifting a value of mode MODE
+ by VALUE bits. */
+
+rtx
+gen_int_shift_amount (machine_mode, poly_int64 value)
+{
+ /* Use a 64-bit mode, to avoid any truncation.
+
+ ??? Perhaps this should be automatically derived from the .md files
+ instead, or perhaps have a target hook. */
+ scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
+ ? DImode
+ : int_mode_for_size (64, 0).require ());
+ return gen_int_mode (value, shift_mode);
+}
+
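Callers are expected to use this instead of a plain GEN_INT when the shift amount is a poly_int64 that need not be a compile-time constant. A hedged sketch with hypothetical locals:

/* Illustrative only: OP, MODE and ELT_BITS (a poly_int64) are
   hypothetical; the shift amount always gets a 64-bit integer mode.  */
rtx amount = gen_int_shift_amount (mode, elt_bits);
rtx shifted = simplify_gen_binary (LSHIFTRT, mode, op, amount);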
/* Initialize fields of rtl_data related to stack alignment. */
void