/* Convert tree expression to rtl instructions, for GNU compiler.
- Copyright (C) 1988-2017 Free Software Foundation, Inc.
+ Copyright (C) 1988-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "reload.h"
#include "langhooks.h"
#include "common/common-target.h"
+#include "tree-dfa.h"
#include "tree-ssa-live.h"
#include "tree-outof-ssa.h"
#include "tree-ssa-address.h"
#include "builtins.h"
-#include "tree-chkp.h"
-#include "rtl-chkp.h"
#include "ccmp.h"
+#include "gimple-fold.h"
+#include "rtx-vector-builder.h"
/* If this is nonzero, we do not bother generating VOLATILE
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
-static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
- HOST_WIDE_INT, unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, machine_mode,
- tree, int, alias_set_type, bool);
-static void store_constructor (tree, rtx, int, HOST_WIDE_INT, bool);
-static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
+static void store_constructor (tree, rtx, int, poly_int64, bool);
+static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
machine_mode, tree, alias_set_type, bool, bool);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
&& (GET_MODE_PRECISION (subreg_promoted_mode (from))
>= GET_MODE_PRECISION (to_int_mode))
&& SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
- from = gen_lowpart (to_int_mode, from), from_mode = to_int_mode;
+ {
+ from = gen_lowpart (to_int_mode, SUBREG_REG (from));
+ from_mode = to_int_mode;
+ }
gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
{
- gcc_assert (GET_MODE_BITSIZE (from_mode) == GET_MODE_BITSIZE (to_mode));
+ gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
+ GET_MODE_BITSIZE (to_mode)));
if (VECTOR_MODE_P (to_mode))
from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
&& is_int_mode (oldmode, &int_oldmode)
&& GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
&& ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
+ || CONST_POLY_INT_P (x)
|| (REG_P (x)
&& (!HARD_REGISTER_P (x)
|| targetm.hard_regno_mode_ok (REGNO (x), int_mode))
subreg operation. */
if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
{
- gcc_assert (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (oldmode));
+ gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (oldmode)));
return simplify_gen_subreg (mode, x, oldmode, 0);
}
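The known_*/maybe_* predicates used throughout this patch compare sizes that
may depend on a runtime vector length. Loosely, such a size has the form
A + B * X for a nonnegative indeterminate X; known_eq requires the relation to
hold for every X, while maybe_gt asks whether it can hold for some X. A
minimal model of that idea - illustrative only, not the real poly_int API:

    /* Illustrative model only: sizes of the form A + B * X with X >= 0.  */
    struct poly_sketch { long a, b; };

    static int
    sketch_known_eq (struct poly_sketch p, struct poly_sketch q)
    {
      return p.a == q.a && p.b == q.b;   /* equal for every X */
    }

    static int
    sketch_maybe_gt (struct poly_sketch p, struct poly_sketch q)
    {
      return p.a > q.a || p.b > q.b;     /* greater for at least one X */
    }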
: op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
{
}
- rtx finish_endp (int);
+ rtx finish_retmode (memop_ret);
};
/* Return true if MODE can be used for a set of copies, given an
}
/* Perform the final adjustment at the end of a string to obtain the
- correct return value for the block operation. If ENDP is 1 return
- memory at the end ala mempcpy, and if ENDP is 2 return memory the
- end minus one byte ala stpcpy. */
+ correct return value for the block operation.
+ Return value is based on RETMODE argument. */
rtx
-move_by_pieces_d::finish_endp (int endp)
+move_by_pieces_d::finish_retmode (memop_ret retmode)
{
gcc_assert (!m_reverse);
- if (endp == 2)
+ if (retmode == RETURN_END_MINUS_ONE)
{
m_to.maybe_postinc (-1);
--m_offset;
ALIGN is maximum stack alignment we can assume.
- If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
- mempcpy, and if ENDP is 2 return memory the end minus one byte ala
- stpcpy. */
+ Return value is based on RETMODE argument. */
rtx
move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
- unsigned int align, int endp)
+ unsigned int align, memop_ret retmode)
{
#ifndef PUSH_ROUNDING
if (to == NULL)
data.run ();
- if (endp)
- return data.finish_endp (endp);
+ if (retmode != RETURN_BEGIN)
+ return data.finish_retmode (retmode);
else
return to;
}
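The old ENDP integer (0/1/2) becomes the memop_ret enumeration; as the deleted
comments say, RETURN_END matches mempcpy's return value and
RETURN_END_MINUS_ONE matches stpcpy's. A plain-pointer sketch of the three
conventions - the names mirror the patch, but the helper itself is not GCC
code:

    #include <stddef.h>
    #include <string.h>

    enum sketch_retmode { SK_RETURN_BEGIN, SK_RETURN_END,
                          SK_RETURN_END_MINUS_ONE };

    /* Sketch only: what the caller gets back under each return convention.  */
    static char *
    sketch_copy (char *to, const char *from, size_t len, enum sketch_retmode rm)
    {
      memcpy (to, from, len);
      if (rm == SK_RETURN_END)            /* like mempcpy's result */
        return to + len;
      if (rm == SK_RETURN_END_MINUS_ONE)  /* like stpcpy's result */
        return to + len - 1;
      return to;                          /* RETURN_BEGIN: TO unchanged */
    }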
: op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
{
}
- rtx finish_endp (int);
+ rtx finish_retmode (memop_ret);
};
/* Return true if MODE can be used for a set of stores, given an
}
/* Perform the final adjustment at the end of a string to obtain the
- correct return value for the block operation. If ENDP is 1 return
- memory at the end ala mempcpy, and if ENDP is 2 return memory the
- end minus one byte ala stpcpy. */
+ correct return value for the block operation.
+ Return value is based on RETMODE argument. */
rtx
-store_by_pieces_d::finish_endp (int endp)
+store_by_pieces_d::finish_retmode (memop_ret retmode)
{
gcc_assert (!m_reverse);
- if (endp == 2)
+ if (retmode == RETURN_END_MINUS_ONE)
{
m_to.maybe_postinc (-1);
--m_offset;
pointer which will be passed as argument in every CONSTFUN call.
ALIGN is maximum alignment we can assume. MEMSETP is true if this is
a memset operation and false if it's a copy of a constant string.
- If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
- mempcpy, and if ENDP is 2 return memory the end minus one byte ala
- stpcpy. */
+ Return value is based on RETMODE argument. */
rtx
store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
- void *constfundata, unsigned int align, bool memsetp, int endp)
+ void *constfundata, unsigned int align, bool memsetp,
+ memop_ret retmode)
{
if (len == 0)
{
- gcc_assert (endp != 2);
+ gcc_assert (retmode != RETURN_END_MINUS_ONE);
return to;
}
store_by_pieces_d data (to, constfun, constfundata, len, align);
data.run ();
- if (endp)
- return data.finish_endp (endp);
+ if (retmode != RETURN_BEGIN)
+ return data.finish_retmode (retmode);
else
return to;
}
ALIGN is the maximum alignment we can assume they have.
METHOD describes what kind of copy this is, and what mechanisms may be used.
MIN_SIZE is the minimal size of block to move
- MAX_SIZE is the maximal size of block to move, if it can not be represented
+ MAX_SIZE is the maximal size of block to move, if it cannot be represented
in unsigned HOST_WIDE_INT, then it is the mask of all ones.
Return the address of the new block, if memcpy is called and returns it,
unsigned int expected_align, HOST_WIDE_INT expected_size,
unsigned HOST_WIDE_INT min_size,
unsigned HOST_WIDE_INT max_size,
- unsigned HOST_WIDE_INT probable_max_size)
+ unsigned HOST_WIDE_INT probable_max_size,
+ bool bail_out_libcall, bool *is_move_done)
{
- bool may_use_call;
+ int may_use_call;
rtx retval = 0;
unsigned int align;
+ if (is_move_done)
+ *is_move_done = true;
+
gcc_assert (size);
if (CONST_INT_P (size) && INTVAL (size) == 0)
return 0;
{
case BLOCK_OP_NORMAL:
case BLOCK_OP_TAILCALL:
- may_use_call = true;
+ may_use_call = 1;
break;
case BLOCK_OP_CALL_PARM:
break;
case BLOCK_OP_NO_LIBCALL:
- may_use_call = false;
+ may_use_call = 0;
+ break;
+
+ case BLOCK_OP_NO_LIBCALL_RET:
+ may_use_call = -1;
break;
default:
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
- if (CONST_INT_P (size))
+ poly_int64 const_size;
+ if (poly_int_rtx_p (size, &const_size))
{
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
- set_mem_size (x, INTVAL (size));
- set_mem_size (y, INTVAL (size));
+ set_mem_size (x, const_size);
+ set_mem_size (y, const_size);
}
if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
- move_by_pieces (x, y, INTVAL (size), align, 0);
+ move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
else if (emit_block_move_via_movmem (x, y, size, align,
expected_align, expected_size,
min_size, max_size, probable_max_size))
&& ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
&& ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
{
- /* Since x and y are passed to a libcall, mark the corresponding
- tree EXPR as addressable. */
- tree y_expr = MEM_EXPR (y);
- tree x_expr = MEM_EXPR (x);
- if (y_expr)
- mark_addressable (y_expr);
- if (x_expr)
- mark_addressable (x_expr);
+ if (bail_out_libcall)
+ {
+ if (is_move_done)
+ *is_move_done = false;
+ return retval;
+ }
+
+ if (may_use_call < 0)
+ return pc_rtx;
+
retval = emit_block_copy_via_libcall (x, y, size,
method == BLOCK_OP_TAILCALL);
}
if (nops >= 8)
{
create_integer_operand (&ops[6], min_size);
- /* If we can not represent the maximal size,
+ /* If we cannot represent the maximal size,
make parameter NULL. */
if ((HOST_WIDE_INT) max_size != -1)
create_integer_operand (&ops[7], max_size);
}
if (nops == 9)
{
- /* If we can not represent the maximal size,
+ /* If we cannot represent the maximal size,
make parameter NULL. */
if ((HOST_WIDE_INT) probable_max_size != -1)
create_integer_operand (&ops[8], probable_max_size);
tree call_expr, dst_tree, src_tree, size_tree;
machine_mode size_mode;
+ /* Since dst and src are passed to a libcall, mark the corresponding
+ tree EXPR as addressable. */
+ tree dst_expr = MEM_EXPR (dst);
+ tree src_expr = MEM_EXPR (src);
+ if (dst_expr)
+ mark_addressable (dst_expr);
+ if (src_expr)
+ mark_addressable (src_expr);
+
dst_addr = copy_addr_to_reg (XEXP (dst, 0));
dst_addr = convert_memory_address (ptr_mode, dst_addr);
dst_tree = make_tree (ptr_type_node, dst_addr);
into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
static void
-emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
+ poly_int64 ssize)
{
rtx src;
int start, i;
for (i = start; i < XVECLEN (dst, 0); i++)
{
machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
- HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
- unsigned int bytelen = GET_MODE_SIZE (mode);
- int shift = 0;
+ poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
+ poly_int64 bytelen = GET_MODE_SIZE (mode);
+ poly_int64 shift = 0;
- /* Handle trailing fragments that run over the size of the struct. */
- if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+ /* Handle trailing fragments that run over the size of the struct.
+ It's the target's responsibility to make sure that the fragment
+ cannot be strictly smaller in some cases and strictly larger
+ in others. */
+ gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+ if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
{
/* Arrange to shift the fragment to where it belongs.
extract_bit_field loads to the lsb of the reg. */
)
shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
bytelen = ssize - bytepos;
- gcc_assert (bytelen > 0);
+ gcc_assert (maybe_gt (bytelen, 0));
}
/* If we won't be loading directly from memory, protect the real source
if (MEM_P (src)
&& (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
|| MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
- && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
- && bytelen == GET_MODE_SIZE (mode))
+ && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
+ && known_eq (bytelen, GET_MODE_SIZE (mode)))
{
tmps[i] = gen_reg_rtx (mode);
emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
}
else if (COMPLEX_MODE_P (mode)
&& GET_MODE (src) == mode
- && bytelen == GET_MODE_SIZE (mode))
+ && known_eq (bytelen, GET_MODE_SIZE (mode)))
/* Let emit_move_complex do the bulk of the work. */
tmps[i] = src;
else if (GET_CODE (src) == CONCAT)
{
- unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
- unsigned int slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
- unsigned int elt = bytepos / slen0;
- unsigned int subpos = bytepos % slen0;
+ poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
+ poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
+ unsigned int elt;
+ poly_int64 subpos;
- if (subpos + bytelen <= slen0)
+ if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
+ && known_le (subpos + bytelen, slen0))
{
/* The following assumes that the concatenated objects all
have the same size. In this case, a simple calculation
can be used to determine the object and the bit field
to be extracted. */
tmps[i] = XEXP (src, elt);
- if (subpos != 0
- || subpos + bytelen != slen0
+ if (maybe_ne (subpos, 0)
+ || maybe_ne (subpos + bytelen, slen0)
|| (!CONSTANT_P (tmps[i])
&& (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
{
rtx mem;
- gcc_assert (!bytepos);
+ gcc_assert (known_eq (bytepos, 0));
mem = assign_stack_temp (GET_MODE (src), slen);
emit_move_insn (mem, src);
tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
else if (VECTOR_MODE_P (GET_MODE (dst))
&& REG_P (src))
{
- int slen = GET_MODE_SIZE (GET_MODE (src));
+ poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
rtx mem;
mem = assign_stack_temp (GET_MODE (src), slen);
emit_move_insn (mem, src);
- tmps[i] = adjust_address (mem, mode, (int) bytepos);
+ tmps[i] = adjust_address (mem, mode, bytepos);
}
else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
&& XVECLEN (dst, 0) > 1)
tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
else if (CONSTANT_P (src))
{
- HOST_WIDE_INT len = (HOST_WIDE_INT) bytelen;
-
- if (len == ssize)
+ if (known_eq (bytelen, ssize))
tmps[i] = src;
else
{
rtx first, second;
/* TODO: const_wide_int can have sizes other than this... */
- gcc_assert (2 * len == ssize);
+ gcc_assert (known_eq (2 * bytelen, ssize));
split_double (src, &first, &second);
if (i)
tmps[i] = second;
bytepos * BITS_PER_UNIT, 1, NULL_RTX,
mode, mode, false, NULL);
- if (shift)
+ if (maybe_ne (shift, 0))
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
shift, tmps[i], 0);
}
if not known. */
void
-emit_group_load (rtx dst, rtx src, tree type, int ssize)
+emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
{
rtx *tmps;
int i;
in the right place. */
rtx
-emit_group_load_into_temps (rtx parallel, rtx src, tree type, int ssize)
+emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
{
rtvec vec;
int i;
known. */
void
-emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
+emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
+ poly_int64 ssize)
{
rtx *tmps, dst;
int start, finish, i;
{
machine_mode outer = GET_MODE (dst);
machine_mode inner;
- HOST_WIDE_INT bytepos;
+ poly_int64 bytepos;
bool done = false;
rtx temp;
{
inner = GET_MODE (tmps[start]);
bytepos = subreg_lowpart_offset (inner, outer);
- if (INTVAL (XEXP (XVECEXP (src, 0, start), 1)) == bytepos)
+ if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
+ bytepos))
{
temp = simplify_gen_subreg (outer, tmps[start],
inner, 0);
{
inner = GET_MODE (tmps[finish - 1]);
bytepos = subreg_lowpart_offset (inner, outer);
- if (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)) == bytepos)
+ if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
+ finish - 1), 1)),
+ bytepos))
{
temp = simplify_gen_subreg (outer, tmps[finish - 1],
inner, 0);
/* Process the pieces. */
for (i = start; i < finish; i++)
{
- HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+ poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
machine_mode mode = GET_MODE (tmps[i]);
- unsigned int bytelen = GET_MODE_SIZE (mode);
- unsigned int adj_bytelen;
+ poly_int64 bytelen = GET_MODE_SIZE (mode);
+ poly_uint64 adj_bytelen;
rtx dest = dst;
- /* Handle trailing fragments that run over the size of the struct. */
- if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+ /* Handle trailing fragments that run over the size of the struct.
+ It's the target's responsibility to make sure that the fragment
+ cannot be strictly smaller in some cases and strictly larger
+ in others. */
+ gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+ if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
adj_bytelen = ssize - bytepos;
else
adj_bytelen = bytelen;
if (GET_CODE (dst) == CONCAT)
{
- if (bytepos + adj_bytelen
- <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+ if (known_le (bytepos + adj_bytelen,
+ GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
dest = XEXP (dst, 0);
- else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+ else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
{
bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
dest = XEXP (dst, 1);
machine_mode dest_mode = GET_MODE (dest);
machine_mode tmp_mode = GET_MODE (tmps[i]);
- gcc_assert (bytepos == 0 && XVECLEN (src, 0));
+ gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
if (GET_MODE_ALIGNMENT (dest_mode)
>= GET_MODE_ALIGNMENT (tmp_mode))
}
/* Handle trailing fragments that run over the size of the struct. */
- if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+ if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
{
/* store_bit_field always takes its value from the lsb.
Move the fragment to the lsb if it's not already there. */
#endif
)
{
- int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+ poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
shift, tmps[i], 0);
}
else if (MEM_P (dest)
&& (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
|| MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
- && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
- && bytelen == GET_MODE_SIZE (mode))
+ && multiple_p (bytepos * BITS_PER_UNIT,
+ GET_MODE_ALIGNMENT (mode))
+ && known_eq (bytelen, GET_MODE_SIZE (mode)))
emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
else
/* No current ABI uses variable-sized modes to pass a BLKmode type. */
fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
fixed_size_mode dst_mode;
+ scalar_int_mode min_mode;
gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
x = expand_normal (src);
- bytes = int_size_in_bytes (TREE_TYPE (src));
+ bytes = arg_int_size_in_bytes (TREE_TYPE (src));
if (bytes == 0)
return NULL_RTX;
n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
dst_words = XALLOCAVEC (rtx, n_regs);
bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
+ min_mode = smallest_int_mode_for_size (bitsize);
/* Copy the structure BITSIZE bits at a time. */
for (bitpos = 0, xbitpos = padding_correction;
emit_move_insn (dst_word, CONST0_RTX (word_mode));
}
+ /* Find the largest integer mode that can be used to copy all or as
+ many bits as possible of the structure if the target supports larger
+ copies. There are too many corner cases here w.r.t. alignments on
+ the read/writes. So if there is any padding, just use single byte
+ operations. */
+ opt_scalar_int_mode mode_iter;
+ if (padding_correction == 0 && !STRICT_ALIGNMENT)
+ {
+ FOR_EACH_MODE_FROM (mode_iter, min_mode)
+ {
+ unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
+ if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
+ && msize <= BITS_PER_WORD)
+ bitsize = msize;
+ else
+ break;
+ }
+ }
+
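The loop above simply keeps widening the copy chunk while it still fits in
both the remaining source bits and one word; conceptually (an illustrative
sketch only - the candidate widths and starting point are assumptions, not
GCC code):

    /* Keep the widest chunk that fits the remaining data and a word.
       Assumes the narrowest width already fits, as min_mode does above.  */
    static unsigned int
    sketch_widest_chunk (unsigned int remaining_bits, unsigned int word_bits)
    {
      static const unsigned int widths[] = { 8, 16, 32, 64 };
      unsigned int best = widths[0];
      for (unsigned int i = 0; i < sizeof widths / sizeof widths[0]; i++)
        {
          if (widths[i] <= remaining_bits && widths[i] <= word_bits)
            best = widths[i];
          else
            break;
        }
      return best;
    }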
/* We need a new source operand each time bitpos is on a word
boundary. */
if (bitpos % BITS_PER_WORD == 0)
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
+ poly_int64 size_val;
if (mode != BLKmode
- && CONST_INT_P (size)
- && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode))
+ && poly_int_rtx_p (size, &size_val)
+ && known_eq (size_val, GET_MODE_SIZE (mode)))
{
rtx zero = CONST0_RTX (mode);
if (zero != NULL)
if (nops >= 8)
{
create_integer_operand (&ops[6], min_size);
- /* If we can not represent the maximal size,
+ /* If we cannot represent the maximal size,
make parameter NULL. */
if ((HOST_WIDE_INT) max_size != -1)
create_integer_operand (&ops[7], max_size);
}
if (nops == 9)
{
- /* If we can not represent the maximal size,
+ /* If we cannot represent the maximal size,
make parameter NULL. */
if ((HOST_WIDE_INT) probable_max_size != -1)
create_integer_operand (&ops[8], probable_max_size);
emit_move_resolve_push (machine_mode mode, rtx x)
{
enum rtx_code code = GET_CODE (XEXP (x, 0));
- HOST_WIDE_INT adjust;
rtx temp;
- adjust = GET_MODE_SIZE (mode);
+ poly_int64 adjust = GET_MODE_SIZE (mode);
#ifdef PUSH_ROUNDING
adjust = PUSH_ROUNDING (adjust);
#endif
else if (code == PRE_MODIFY || code == POST_MODIFY)
{
rtx expr = XEXP (XEXP (x, 0), 1);
- HOST_WIDE_INT val;
gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
- gcc_assert (CONST_INT_P (XEXP (expr, 1)));
- val = INTVAL (XEXP (expr, 1));
+ poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
if (GET_CODE (expr) == MINUS)
val = -val;
- gcc_assert (adjust == val || adjust == -val);
+ gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
adjust = val;
}
bool imag_first;
#ifdef PUSH_ROUNDING
- unsigned int submodesize = GET_MODE_SIZE (submode);
+ poly_int64 submodesize = GET_MODE_SIZE (submode);
/* In case we output to the stack, but the size is smaller than the
machine can push exactly, we need to use move instructions. */
- if (PUSH_ROUNDING (submodesize) != submodesize)
+ if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
{
x = emit_move_resolve_push (mode, x);
return emit_move_insn (x, y);
existing block move logic. */
if (MEM_P (x) && MEM_P (y))
{
- emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
+ emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
BLOCK_OP_NO_LIBCALL);
return get_last_insn ();
}
if (GET_CODE (op) != SUBREG)
return false;
machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
- HOST_WIDE_INT offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
- return (offset >= GET_MODE_SIZE (innermostmode)
- || offset <= -UNITS_PER_WORD);
+ poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
+ return (known_ge (offset, GET_MODE_SIZE (innermostmode))
+ || known_le (offset, -UNITS_PER_WORD));
}
/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
rtx_insn *seq;
rtx inner;
bool need_clobber;
- int i;
+ int i, mode_size;
- gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
+ /* This function can only handle cases where the number of words is
+ known at compile time. */
+ mode_size = GET_MODE_SIZE (mode).to_constant ();
+ gcc_assert (mode_size >= UNITS_PER_WORD);
/* If X is a push on the stack, do the push now and replace
X with a reference to the stack pointer. */
start_sequence ();
need_clobber = false;
- for (i = 0;
- i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
- i++)
+ for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
{
rtx xpart = operand_subword (x, i, 1, mode);
rtx ypart;
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (!CONSTANT_P (y)
+ || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
{
rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
otherwise, the padding comes at high addresses. */
rtx
-push_block (rtx size, int extra, int below)
+push_block (rtx size, poly_int64 extra, int below)
{
rtx temp;
size = convert_modes (Pmode, ptr_mode, size, 1);
if (CONSTANT_P (size))
anti_adjust_stack (plus_constant (Pmode, size, extra));
- else if (REG_P (size) && extra == 0)
+ else if (REG_P (size) && known_eq (extra, 0))
anti_adjust_stack (size);
else
{
temp = copy_to_mode_reg (Pmode, size);
- if (extra != 0)
+ if (maybe_ne (extra, 0))
temp = expand_binop (Pmode, add_optab, temp,
gen_int_mode (extra, Pmode),
temp, 0, OPTAB_LIB_WIDEN);
if (STACK_GROWS_DOWNWARD)
{
temp = virtual_outgoing_args_rtx;
- if (extra != 0 && below)
+ if (maybe_ne (extra, 0) && below)
temp = plus_constant (Pmode, temp, extra);
}
else
{
- if (CONST_INT_P (size))
+ poly_int64 csize;
+ if (poly_int_rtx_p (size, &csize))
temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
- -INTVAL (size) - (below ? 0 : extra));
- else if (extra != 0 && !below)
+ -csize - (below ? 0 : extra));
+ else if (maybe_ne (extra, 0) && !below)
temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
negate_rtx (Pmode, plus_constant (Pmode, size,
extra)));
The return value is the amount of adjustment that can be trivially
verified, via immediate operand or auto-inc. If the adjustment
- cannot be trivially extracted, the return value is INT_MIN. */
+ cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
-HOST_WIDE_INT
+poly_int64
find_args_size_adjust (rtx_insn *insn)
{
rtx dest, set, pat;
/* Look for a trivial adjustment, otherwise assume nothing. */
/* Note that the SPU restore_stack_block pattern refers to
the stack pointer in V4SImode. Consider that non-trivial. */
+ poly_int64 offset;
if (SCALAR_INT_MODE_P (GET_MODE (dest))
- && GET_CODE (SET_SRC (set)) == PLUS
- && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
- && CONST_INT_P (XEXP (SET_SRC (set), 1)))
- return INTVAL (XEXP (SET_SRC (set), 1));
+ && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
+ return offset;
/* ??? Reload can generate no-op moves, which will be cleaned
up later. Recognize it and continue searching. */
else if (rtx_equal_p (dest, SET_SRC (set)))
addr = XEXP (addr, 1);
gcc_assert (GET_CODE (addr) == PLUS);
gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
- gcc_assert (CONST_INT_P (XEXP (addr, 1)));
- return INTVAL (XEXP (addr, 1));
+ return rtx_to_poly_int64 (XEXP (addr, 1));
default:
gcc_unreachable ();
}
}
}
-int
-fixup_args_size_notes (rtx_insn *prev, rtx_insn *last, int end_args_size)
+poly_int64
+fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
+ poly_int64 end_args_size)
{
- int args_size = end_args_size;
+ poly_int64 args_size = end_args_size;
bool saw_unknown = false;
rtx_insn *insn;
for (insn = last; insn != prev; insn = PREV_INSN (insn))
{
- HOST_WIDE_INT this_delta;
-
if (!NONDEBUG_INSN_P (insn))
continue;
- this_delta = find_args_size_adjust (insn);
- if (this_delta == 0)
+ /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
+ a call argument containing a TLS address that itself requires
+ a call to __tls_get_addr. The handling of stack_pointer_delta
+ in emit_single_push_insn is supposed to ensure that any such
+ notes are already correct. */
+ rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
+ gcc_assert (!note || known_eq (args_size, get_args_size (note)));
+
+ poly_int64 this_delta = find_args_size_adjust (insn);
+ if (known_eq (this_delta, 0))
{
if (!CALL_P (insn)
|| ACCUMULATE_OUTGOING_ARGS
}
gcc_assert (!saw_unknown);
- if (this_delta == HOST_WIDE_INT_MIN)
+ if (known_eq (this_delta, HOST_WIDE_INT_MIN))
saw_unknown = true;
- add_reg_note (insn, REG_ARGS_SIZE, GEN_INT (args_size));
+ if (!note)
+ add_args_size_note (insn, args_size);
if (STACK_GROWS_DOWNWARD)
- this_delta = -(unsigned HOST_WIDE_INT) this_delta;
+ this_delta = -poly_uint64 (this_delta);
- args_size -= this_delta;
+ if (saw_unknown)
+ args_size = HOST_WIDE_INT_MIN;
+ else
+ args_size -= this_delta;
}
- return saw_unknown ? INT_MIN : args_size;
+ return args_size;
}
#ifdef PUSH_ROUNDING
emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
{
rtx dest_addr;
- unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
+ poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
rtx dest;
enum insn_code icode;
- stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
/* If there is a push pattern, use it. Otherwise try the old way of throwing
a MEM representing the push operation to the move expander. */
icode = optab_handler (push_optab, mode);
if (maybe_expand_insn (icode, 1, ops))
return;
}
- if (GET_MODE_SIZE (mode) == rounded_size)
+ if (known_eq (GET_MODE_SIZE (mode), rounded_size))
dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
/* If we are to pad downward, adjust the stack pointer first and
then store X into the stack location using an offset. This is
access to type. */
else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
{
- unsigned padding_size = rounded_size - GET_MODE_SIZE (mode);
- HOST_WIDE_INT offset;
-
emit_move_insn (stack_pointer_rtx,
expand_binop (Pmode,
STACK_GROWS_DOWNWARD ? sub_optab
gen_int_mode (rounded_size, Pmode),
NULL_RTX, 0, OPTAB_LIB_WIDEN));
- offset = (HOST_WIDE_INT) padding_size;
+ poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
/* We have already decremented the stack pointer, so get the
previous value. */
- offset += (HOST_WIDE_INT) rounded_size;
+ offset += rounded_size;
if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
/* We have already incremented the stack pointer, so get the
previous value. */
- offset -= (HOST_WIDE_INT) rounded_size;
+ offset -= rounded_size;
- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- gen_int_mode (offset, Pmode));
+ dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
}
else
{
if (STACK_GROWS_DOWNWARD)
/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- gen_int_mode (-(HOST_WIDE_INT) rounded_size,
- Pmode));
+ dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
else
/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- gen_int_mode (rounded_size, Pmode));
+ dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
}
static void
emit_single_push_insn (machine_mode mode, rtx x, tree type)
{
- int delta, old_delta = stack_pointer_delta;
+ poly_int64 delta, old_delta = stack_pointer_delta;
rtx_insn *prev = get_last_insn ();
rtx_insn *last;
emit_single_push_insn_1 (mode, x, type);
+ /* Adjust stack_pointer_delta to describe the situation after the push
+ we just performed. Note that we must do this after the push rather
+ than before the push in case calculating X needs pushes and pops of
+ its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
+ for such pushes and pops must not include the effect of the future
+ push of X. */
+ stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
+
last = get_last_insn ();
/* Notice the common case where we emitted exactly one insn. */
if (PREV_INSN (last) == prev)
{
- add_reg_note (last, REG_ARGS_SIZE, GEN_INT (stack_pointer_delta));
+ add_args_size_note (last, stack_pointer_delta);
return;
}
delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
- gcc_assert (delta == INT_MIN || delta == old_delta);
+ gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
+ || known_eq (delta, old_delta));
}
#endif
bool
emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
- unsigned int align, int partial, rtx reg, int extra,
+ unsigned int align, int partial, rtx reg, poly_int64 extra,
rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
rtx alignment_pad, bool sibcall_p)
{
/* A value is to be stored in an insufficiently aligned
stack slot; copy via a suitably aligned slot if
necessary. */
- size = GEN_INT (GET_MODE_SIZE (mode));
+ size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
if (!MEM_P (xinner))
{
temp = assign_temp (type, 1, 1);
and such small pushes do rounding that causes trouble. */
&& ((!targetm.slow_unaligned_access (word_mode, align))
|| align >= BIGGEST_ALIGNMENT
- || (PUSH_ROUNDING (align / BITS_PER_UNIT)
- == (align / BITS_PER_UNIT)))
- && (HOST_WIDE_INT) PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
+ align / BITS_PER_UNIT))
+ && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
{
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
But if space already allocated, this has already been done. */
- if (extra && args_addr == 0
- && where_pad != PAD_NONE && where_pad != stack_direction)
- anti_adjust_stack (GEN_INT (extra));
+ if (maybe_ne (extra, 0)
+ && args_addr == 0
+ && where_pad != PAD_NONE
+ && where_pad != stack_direction)
+ anti_adjust_stack (gen_int_mode (extra, Pmode));
- move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0);
+ move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
+ RETURN_BEGIN);
}
else
#endif /* PUSH_ROUNDING */
/* Get the address of the stack space.
In this case, we do not deal with EXTRA separately.
A single stack adjust will do. */
+ poly_int64 offset;
if (! args_addr)
{
temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
extra = 0;
}
- else if (CONST_INT_P (args_so_far))
+ else if (poly_int_rtx_p (args_so_far, &offset))
temp = memory_address (BLKmode,
plus_constant (Pmode, args_addr,
- skip + INTVAL (args_so_far)));
+ skip + offset));
else
temp = memory_address (BLKmode,
plus_constant (Pmode,
}
else if (partial > 0)
{
- /* Scalar partly in registers. */
-
- int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ /* Scalar partly in registers. This case is only supported
+ for fixed-width modes. */
+ int size = GET_MODE_SIZE (mode).to_constant ();
+ size /= UNITS_PER_WORD;
int i;
int not_stack;
/* # bytes of start of argument
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
But if space already allocated, this has already been done. */
- if (extra && args_addr == 0
- && where_pad != PAD_NONE && where_pad != stack_direction)
- anti_adjust_stack (GEN_INT (extra));
+ if (maybe_ne (extra, 0)
+ && args_addr == 0
+ && where_pad != PAD_NONE
+ && where_pad != stack_direction)
+ anti_adjust_stack (gen_int_mode (extra, Pmode));
/* If we make space by pushing it, we might as well push
the real data. Otherwise, we can leave OFFSET nonzero
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
But if space already allocated, this has already been done. */
- if (extra && args_addr == 0
- && where_pad != PAD_NONE && where_pad != stack_direction)
- anti_adjust_stack (GEN_INT (extra));
+ if (maybe_ne (extra, 0)
+ && args_addr == 0
+ && where_pad != PAD_NONE
+ && where_pad != stack_direction)
+ anti_adjust_stack (gen_int_mode (extra, Pmode));
#ifdef PUSH_ROUNDING
if (args_addr == 0 && PUSH_ARGS)
}
}
- if (extra && args_addr == 0 && where_pad == stack_direction)
- anti_adjust_stack (GEN_INT (extra));
+ if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
+ anti_adjust_stack (gen_int_mode (extra, Pmode));
if (alignment_pad && args_addr == 0)
anti_adjust_stack (alignment_pad);
and there's nothing else to do. */
static bool
-optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+optimize_bitfield_assignment_op (poly_uint64 pbitsize,
+ poly_uint64 pbitpos,
+ poly_uint64 pbitregion_start,
+ poly_uint64 pbitregion_end,
machine_mode mode1, rtx str_rtx,
tree to, tree src, bool reverse)
{
+ /* str_mode is not guaranteed to be a scalar type. */
machine_mode str_mode = GET_MODE (str_rtx);
- unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
+ unsigned int str_bitsize;
tree op0, op1;
rtx value, result;
optab binop;
gimple *srcstmt;
enum tree_code code;
+ unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
if (mode1 != VOIDmode
+ || !pbitsize.is_constant (&bitsize)
+ || !pbitpos.is_constant (&bitpos)
+ || !pbitregion_start.is_constant (&bitregion_start)
+ || !pbitregion_end.is_constant (&bitregion_end)
|| bitsize >= BITS_PER_WORD
+ || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
|| str_bitsize > BITS_PER_WORD
|| TREE_SIDE_EFFECTS (to)
|| TREE_THIS_VOLATILE (to))
}
else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
return false;
- else
- gcc_assert (!reverse);
/* If the bit field covers the whole REG/MEM, store_field
will likely generate better code. */
*BITSTART and *BITEND. */
void
-get_bit_range (unsigned HOST_WIDE_INT *bitstart,
- unsigned HOST_WIDE_INT *bitend,
- tree exp,
- HOST_WIDE_INT *bitpos,
- tree *offset)
+get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
+ poly_int64_pod *bitpos, tree *offset)
{
- HOST_WIDE_INT bitoffset;
+ poly_int64 bitoffset;
tree field, repr;
gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
if (handled_component_p (TREE_OPERAND (exp, 0)))
{
machine_mode rmode;
- HOST_WIDE_INT rbitsize, rbitpos;
+ poly_int64 rbitsize, rbitpos;
tree roffset;
int unsignedp, reversep, volatilep = 0;
get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
&roffset, &rmode, &unsignedp, &reversep,
&volatilep);
- if ((rbitpos % BITS_PER_UNIT) != 0)
+ if (!multiple_p (rbitpos, BITS_PER_UNIT))
{
*bitstart = *bitend = 0;
return;
relative to the representative. DECL_FIELD_OFFSET of field and
repr are the same by construction if they are not constants,
see finish_bitfield_layout. */
- if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
- && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
- bitoffset = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
- - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
+ poly_uint64 field_offset, repr_offset;
+ if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
+ && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
+ bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
else
bitoffset = 0;
bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
/* If the adjustment is larger than bitpos, we would have a negative bit
position for the lower bound and this may wreak havoc later. Adjust
offset and bitpos to make the lower bound non-negative in that case. */
- if (bitoffset > *bitpos)
+ if (maybe_gt (bitoffset, *bitpos))
{
- HOST_WIDE_INT adjust = bitoffset - *bitpos;
- gcc_assert ((adjust % BITS_PER_UNIT) == 0);
+ poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
+ poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
- *bitpos += adjust;
+ *bitpos += adjust_bits;
if (*offset == NULL_TREE)
- *offset = size_int (-adjust / BITS_PER_UNIT);
+ *offset = size_int (-adjust_bytes);
else
- *offset
- = size_binop (MINUS_EXPR, *offset, size_int (adjust / BITS_PER_UNIT));
+ *offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
*bitstart = 0;
}
else
*bitstart = *bitpos - bitoffset;
- *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1;
+ *bitend = *bitstart + tree_to_poly_uint64 (DECL_SIZE (repr)) - 1;
}
/* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
|| TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
{
machine_mode mode1;
- HOST_WIDE_INT bitsize, bitpos;
- unsigned HOST_WIDE_INT bitregion_start = 0;
- unsigned HOST_WIDE_INT bitregion_end = 0;
+ poly_int64 bitsize, bitpos;
+ poly_uint64 bitregion_start = 0;
+ poly_uint64 bitregion_end = 0;
tree offset;
int unsignedp, reversep, volatilep = 0;
tree tem;
&unsignedp, &reversep, &volatilep);
/* Make sure bitpos is not negative, it can wreak havoc later. */
- if (bitpos < 0)
+ if (maybe_lt (bitpos, 0))
{
gcc_assert (offset == NULL_TREE);
- offset = size_int (bitpos >> LOG2_BITS_PER_UNIT);
- bitpos &= BITS_PER_UNIT - 1;
+ offset = size_int (bits_to_bytes_round_down (bitpos));
+ bitpos = num_trailing_bits (bitpos);
}
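A worked example of the rebasing, with purely illustrative numbers: for
BITS_PER_UNIT == 8 and bitpos == -17, bits_to_bytes_round_down gives -3 and
num_trailing_bits gives 7, since -17 == (-3) * 8 + 7, so the access is
re-expressed as a byte offset of -3 plus a nonnegative bit position of 7 -
the same result the old shift-and-mask code produced for constant bitpos.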
if (TREE_CODE (to) == COMPONENT_REF
However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
BITSIZE are not byte-aligned, there is no need to limit the range
we can access. This can occur with packed structures in Ada. */
- else if (bitsize > 0
- && bitsize % BITS_PER_UNIT == 0
- && bitpos % BITS_PER_UNIT == 0)
+ else if (maybe_gt (bitsize, 0)
+ && multiple_p (bitsize, BITS_PER_UNIT)
+ && multiple_p (bitpos, BITS_PER_UNIT))
{
bitregion_start = bitpos;
bitregion_end = bitpos + bitsize - 1;
This is only done for aligned data values, as these can
be expected to result in single move instructions. */
+ poly_int64 bytepos;
if (mode1 != VOIDmode
- && bitpos != 0
- && bitsize > 0
- && (bitpos % bitsize) == 0
- && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+ && maybe_ne (bitpos, 0)
+ && maybe_gt (bitsize, 0)
+ && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+ && multiple_p (bitpos, bitsize)
+ && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
&& MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
{
- to_rtx = adjust_address (to_rtx, mode1, bitpos / BITS_PER_UNIT);
+ to_rtx = adjust_address (to_rtx, mode1, bytepos);
bitregion_start = 0;
- if (bitregion_end >= (unsigned HOST_WIDE_INT) bitpos)
+ if (known_ge (bitregion_end, poly_uint64 (bitpos)))
bitregion_end -= bitpos;
bitpos = 0;
}
code contains an out-of-bounds access to a small array. */
if (!MEM_P (to_rtx)
&& GET_MODE (to_rtx) != BLKmode
- && (unsigned HOST_WIDE_INT) bitpos
- >= GET_MODE_PRECISION (GET_MODE (to_rtx)))
+ && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
{
expand_normal (from);
result = NULL;
/* Handle expand_expr of a complex value returning a CONCAT. */
else if (GET_CODE (to_rtx) == CONCAT)
{
- unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
- if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from)))
- && bitpos == 0
- && bitsize == mode_bitsize)
+ machine_mode to_mode = GET_MODE (to_rtx);
+ gcc_checking_assert (COMPLEX_MODE_P (to_mode));
+ poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
+ unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
+ if (TYPE_MODE (TREE_TYPE (from)) == to_mode
+ && known_eq (bitpos, 0)
+ && known_eq (bitsize, mode_bitsize))
result = store_expr (from, to_rtx, false, nontemporal, reversep);
- else if (bitsize == mode_bitsize / 2
- && (bitpos == 0 || bitpos == mode_bitsize / 2))
- result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
- nontemporal, reversep);
- else if (bitpos + bitsize <= mode_bitsize / 2)
+ else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
+ && known_eq (bitsize, inner_bitsize)
+ && (known_eq (bitpos, 0)
+ || known_eq (bitpos, inner_bitsize)))
+ result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
+ false, nontemporal, reversep);
+ else if (known_le (bitpos + bitsize, inner_bitsize))
result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
bitregion_start, bitregion_end,
mode1, from, get_alias_set (to),
nontemporal, reversep);
- else if (bitpos >= mode_bitsize / 2)
+ else if (known_ge (bitpos, inner_bitsize))
result = store_field (XEXP (to_rtx, 1), bitsize,
- bitpos - mode_bitsize / 2,
+ bitpos - inner_bitsize,
bitregion_start, bitregion_end,
mode1, from, get_alias_set (to),
nontemporal, reversep);
- else if (bitpos == 0 && bitsize == mode_bitsize)
+ else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
{
- rtx from_rtx;
result = expand_normal (from);
- from_rtx = simplify_gen_subreg (GET_MODE (to_rtx), result,
- TYPE_MODE (TREE_TYPE (from)), 0);
- emit_move_insn (XEXP (to_rtx, 0),
- read_complex_part (from_rtx, false));
- emit_move_insn (XEXP (to_rtx, 1),
- read_complex_part (from_rtx, true));
+ if (GET_CODE (result) == CONCAT)
+ {
+ to_mode = GET_MODE_INNER (to_mode);
+ machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
+ rtx from_real
+ = simplify_gen_subreg (to_mode, XEXP (result, 0),
+ from_mode, 0);
+ rtx from_imag
+ = simplify_gen_subreg (to_mode, XEXP (result, 1),
+ from_mode, 0);
+ if (!from_real || !from_imag)
+ goto concat_store_slow;
+ emit_move_insn (XEXP (to_rtx, 0), from_real);
+ emit_move_insn (XEXP (to_rtx, 1), from_imag);
+ }
+ else
+ {
+ rtx from_rtx;
+ if (MEM_P (result))
+ from_rtx = change_address (result, to_mode, NULL_RTX);
+ else
+ from_rtx
+ = simplify_gen_subreg (to_mode, result,
+ TYPE_MODE (TREE_TYPE (from)), 0);
+ if (from_rtx)
+ {
+ emit_move_insn (XEXP (to_rtx, 0),
+ read_complex_part (from_rtx, false));
+ emit_move_insn (XEXP (to_rtx, 1),
+ read_complex_part (from_rtx, true));
+ }
+ else
+ {
+ machine_mode to_mode
+ = GET_MODE_INNER (GET_MODE (to_rtx));
+ rtx from_real
+ = simplify_gen_subreg (to_mode, result,
+ TYPE_MODE (TREE_TYPE (from)),
+ 0);
+ rtx from_imag
+ = simplify_gen_subreg (to_mode, result,
+ TYPE_MODE (TREE_TYPE (from)),
+ GET_MODE_SIZE (to_mode));
+ if (!from_real || !from_imag)
+ goto concat_store_slow;
+ emit_move_insn (XEXP (to_rtx, 0), from_real);
+ emit_move_insn (XEXP (to_rtx, 1), from_imag);
+ }
+ }
}
else
{
- rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ concat_store_slow:;
+ rtx temp = assign_stack_temp (to_mode,
GET_MODE_SIZE (GET_MODE (to_rtx)));
write_complex_part (temp, XEXP (to_rtx, 0), false);
write_complex_part (temp, XEXP (to_rtx, 1), true);
emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
}
}
+ /* For calls to functions returning variable length structures, if TO_RTX
+ is not a MEM, go through a MEM because we must not create temporaries
+ of the VLA type. */
+ else if (!MEM_P (to_rtx)
+ && TREE_CODE (from) == CALL_EXPR
+ && COMPLETE_TYPE_P (TREE_TYPE (from))
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
+ {
+ rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ GET_MODE_SIZE (GET_MODE (to_rtx)));
+ result = store_field (temp, bitsize, bitpos, bitregion_start,
+ bitregion_end, mode1, from, get_alias_set (to),
+ nontemporal, reversep);
+ emit_move_insn (to_rtx, temp);
+ }
else
{
if (MEM_P (to_rtx))
MEM_VOLATILE_P (to_rtx) = 1;
}
+ gcc_checking_assert (known_ge (bitpos, 0));
if (optimize_bitfield_assignment_op (bitsize, bitpos,
bitregion_start, bitregion_end,
mode1, to_rtx, to, from,
|| TREE_CODE (to) == SSA_NAME))
{
rtx value;
- rtx bounds;
push_temp_slots ();
value = expand_normal (from);
- /* Split value and bounds to store them separately. */
- chkp_split_slot (value, &value, &bounds);
-
if (to_rtx == 0)
to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
emit_move_insn (to_rtx, value);
}
- /* Store bounds if required. */
- if (bounds
- && (BOUNDED_P (to) || chkp_type_has_pointer (TREE_TYPE (to))))
- {
- gcc_assert (MEM_P (to_rtx));
- chkp_emit_bounds_store (bounds, value, to_rtx);
- }
-
preserve_temp_slots (to_rtx);
pop_temp_slots ();
return;
/* Compute FROM and store the value in the rtx we got. */
push_temp_slots ();
- result = store_expr_with_bounds (from, to_rtx, 0, nontemporal, false, to);
+ result = store_expr (from, to_rtx, 0, nontemporal, false);
preserve_temp_slots (result);
pop_temp_slots ();
return;
return maybe_expand_insn (code, 2, ops);
}
+/* Helper function for store_expr storing of STRING_CST. */
+
+static rtx
+string_cst_read_str (void *data, HOST_WIDE_INT offset, scalar_int_mode mode)
+{
+ tree str = (tree) data;
+
+ gcc_assert (offset >= 0);
+ if (offset >= TREE_STRING_LENGTH (str))
+ return const0_rtx;
+
+ if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
+ > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
+ {
+ char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
+ size_t l = TREE_STRING_LENGTH (str) - offset;
+ memcpy (p, TREE_STRING_POINTER (str) + offset, l);
+ memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
+ return c_readstr (p, mode, false);
+ }
+
+ return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
+}
+
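string_cst_read_str hands store_by_pieces one mode-sized piece of the
STRING_CST at a time, zero-filling anything past the string's end. The same
idea with plain pointers - a sketch under that reading, not GCC code:

    #include <string.h>

    /* Copy CHUNK bytes of STR starting at OFFSET into DST, padding with
       zeros once the string (of length STR_LEN) runs out.  */
    static void
    sketch_read_chunk (char *dst, const char *str, size_t str_len,
                       size_t offset, size_t chunk)
    {
      size_t avail = offset < str_len ? str_len - offset : 0;
      if (avail > chunk)
        avail = chunk;
      if (avail)
        memcpy (dst, str + offset, avail);
      memset (dst + avail, '\0', chunk - avail);
    }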
/* Generate code for computing expression EXP,
and storing the value into TARGET.
If NONTEMPORAL is true, try using a nontemporal store instruction.
- If REVERSE is true, the store is to be done in reverse order.
-
- If BTARGET is not NULL then computed bounds of EXP are
- associated with BTARGET. */
+ If REVERSE is true, the store is to be done in reverse order. */
rtx
-store_expr_with_bounds (tree exp, rtx target, int call_param_p,
- bool nontemporal, bool reverse, tree btarget)
+store_expr (tree exp, rtx target, int call_param_p,
+ bool nontemporal, bool reverse)
{
rtx temp;
rtx alt_rtl = NULL_RTX;
part. */
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- return store_expr_with_bounds (TREE_OPERAND (exp, 1), target,
- call_param_p, nontemporal, reverse,
- btarget);
+ return store_expr (TREE_OPERAND (exp, 1), target,
+ call_param_p, nontemporal, reverse);
}
else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
{
NO_DEFER_POP;
jumpifnot (TREE_OPERAND (exp, 0), lab1,
profile_probability::uninitialized ());
- store_expr_with_bounds (TREE_OPERAND (exp, 1), target, call_param_p,
- nontemporal, reverse, btarget);
+ store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
+ nontemporal, reverse);
emit_jump_insn (targetm.gen_jump (lab2));
emit_barrier ();
emit_label (lab1);
- store_expr_with_bounds (TREE_OPERAND (exp, 2), target, call_param_p,
- nontemporal, reverse, btarget);
+ store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
+ nontemporal, reverse);
emit_label (lab2);
OK_DEFER_POP;
temp = expand_expr (exp, inner_target, VOIDmode,
call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- /* Handle bounds returned by call. */
- if (TREE_CODE (exp) == CALL_EXPR)
- {
- rtx bounds;
- chkp_split_slot (temp, &temp, &bounds);
- if (bounds && btarget)
- {
- gcc_assert (TREE_CODE (btarget) == SSA_NAME);
- rtx tmp = targetm.calls.load_returned_bounds (bounds);
- chkp_set_rtl_bounds (btarget, tmp);
- }
- }
/* If TEMP is a VOIDmode constant, use convert_modes to make
sure that we properly convert it. */
if (TREE_STRING_LENGTH (str) <= 0)
goto normal_expr;
- str_copy_len = strlen (TREE_STRING_POINTER (str));
- if (str_copy_len < TREE_STRING_LENGTH (str) - 1)
- goto normal_expr;
+ if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
+ MEM_ALIGN (target), false))
+ {
+ store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
+ MEM_ALIGN (target), false, RETURN_BEGIN);
+ return NULL_RTX;
+ }
str_copy_len = TREE_STRING_LENGTH (str);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
- && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
+ if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
{
str_copy_len += STORE_MAX_PIECES - 1;
str_copy_len &= ~(STORE_MAX_PIECES - 1);
}
- str_copy_len = MIN (str_copy_len, exp_len);
- if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST (char *, TREE_STRING_POINTER (str)),
- MEM_ALIGN (target), false))
+ if (str_copy_len >= exp_len)
+ goto normal_expr;
+
+ if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
+ (void *) str, MEM_ALIGN (target), false))
goto normal_expr;
- dest_mem = target;
-
- dest_mem = store_by_pieces (dest_mem,
- str_copy_len, builtin_strncpy_read_str,
- CONST_CAST (char *,
- TREE_STRING_POINTER (str)),
- MEM_ALIGN (target), false,
- exp_len > str_copy_len ? 1 : 0);
- if (exp_len > str_copy_len)
- clear_storage (adjust_address (dest_mem, BLKmode, 0),
- GEN_INT (exp_len - str_copy_len),
- BLOCK_OP_NORMAL);
+ dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
+ (void *) str, MEM_ALIGN (target), false,
+ RETURN_END);
+ clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
+ exp_len - str_copy_len),
+ GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
return NULL_RTX;
}
else
(call_param_p
? EXPAND_STACK_PARM : EXPAND_NORMAL),
&alt_rtl, false);
-
- /* Handle bounds returned by call. */
- if (TREE_CODE (exp) == CALL_EXPR)
- {
- rtx bounds;
- chkp_split_slot (temp, &temp, &bounds);
- if (bounds && btarget)
- {
- gcc_assert (TREE_CODE (btarget) == SSA_NAME);
- rtx tmp = targetm.calls.load_returned_bounds (bounds);
- chkp_set_rtl_bounds (btarget, tmp);
- }
- }
}
/* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
&& TREE_CODE (exp) != ERROR_MARK
&& GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
- temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
- temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
+ {
+ if (GET_MODE_CLASS (GET_MODE (target))
+ != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
+ && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
+ GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
+ {
+ rtx t = simplify_gen_subreg (GET_MODE (target), temp,
+ TYPE_MODE (TREE_TYPE (exp)), 0);
+ if (t)
+ temp = t;
+ }
+ if (GET_MODE (temp) == VOIDmode)
+ temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
+ temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
+ }
/* If value was not generated in the target, store it there.
Convert the value to TARGET's type first if necessary and emit the
/* Figure out how much is left in TARGET that we have to clear.
Do all calculations in pointer_mode. */
- if (CONST_INT_P (copy_size_rtx))
+ poly_int64 const_copy_size;
+ if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
{
- size = plus_constant (address_mode, size,
- -INTVAL (copy_size_rtx));
- target = adjust_address (target, BLKmode,
- INTVAL (copy_size_rtx));
+ size = plus_constant (address_mode, size, -const_copy_size);
+ target = adjust_address (target, BLKmode, const_copy_size);
}
else
{
return NULL_RTX;
}
-
-/* Same as store_expr_with_bounds but ignoring bounds of EXP. */
-rtx
-store_expr (tree exp, rtx target, int call_param_p, bool nontemporal,
- bool reverse)
-{
- return store_expr_with_bounds (exp, target, call_param_p, nontemporal,
- reverse, NULL);
-}
\f
/* Return true if field F of structure TYPE is a flexible array. */
return 2;
case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (type);
+ {
+ unsigned HOST_WIDE_INT nelts;
+ if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return nelts;
+ else
+ return -1;
+ }
case INTEGER_TYPE:
case REAL_TYPE:
static bool
categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
+ HOST_WIDE_INT *p_unique_nz_elts,
HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
unsigned HOST_WIDE_INT idx;
- HOST_WIDE_INT nz_elts, init_elts, num_fields;
+ HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
tree value, purpose, elt_type;
/* Whether CTOR is a valid constant initializer, in accordance with what
bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
nz_elts = 0;
+ unique_nz_elts = 0;
init_elts = 0;
num_fields = 0;
elt_type = NULL_TREE;
{
case CONSTRUCTOR:
{
- HOST_WIDE_INT nz = 0, ic = 0;
+ HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
- bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &ic,
- p_complete);
+ bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
+ &ic, p_complete);
nz_elts += mult * nz;
+ unique_nz_elts += unz;
init_elts += mult * ic;
if (const_from_elts_p && const_p)
case REAL_CST:
case FIXED_CST:
if (!initializer_zerop (value))
- nz_elts += mult;
+ {
+ nz_elts += mult;
+ unique_nz_elts++;
+ }
init_elts += mult;
break;
case STRING_CST:
nz_elts += mult * TREE_STRING_LENGTH (value);
+ unique_nz_elts += TREE_STRING_LENGTH (value);
init_elts += mult * TREE_STRING_LENGTH (value);
break;
case COMPLEX_CST:
if (!initializer_zerop (TREE_REALPART (value)))
- nz_elts += mult;
+ {
+ nz_elts += mult;
+ unique_nz_elts++;
+ }
if (!initializer_zerop (TREE_IMAGPART (value)))
- nz_elts += mult;
- init_elts += mult;
+ {
+ nz_elts += mult;
+ unique_nz_elts++;
+ }
+ init_elts += 2 * mult;
break;
case VECTOR_CST:
{
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (value); ++i)
+ /* We can only construct constant-length vectors using
+ CONSTRUCTOR. */
+ unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
tree v = VECTOR_CST_ELT (value, i);
if (!initializer_zerop (v))
- nz_elts += mult;
+ {
+ nz_elts += mult;
+ unique_nz_elts++;
+ }
init_elts += mult;
}
}
{
HOST_WIDE_INT tc = count_type_elements (elt_type, false);
nz_elts += mult * tc;
+ unique_nz_elts += tc;
init_elts += mult * tc;
if (const_from_elts_p && const_p)
*p_complete = false;
*p_nz_elts += nz_elts;
+ *p_unique_nz_elts += unique_nz_elts;
*p_init_elts += init_elts;
return const_p;
/* Examine CTOR to discover:
* how many scalar fields are set to nonzero values,
and place it in *P_NZ_ELTS;
+ * the same, but counting RANGE_EXPRs as multiplier of 1 instead of
+ high - low + 1 (this can be useful for callers to determine ctors
+ that could be cheaply initialized with - perhaps nested - loops
+ compared to copied from huge read-only data),
+ and place it in *P_UNIQUE_NZ_ELTS;
* how many scalar fields in total are in CTOR,
and place it in *P_ELT_COUNT.
* whether the constructor is complete -- in the sense that every
bool
categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
+ HOST_WIDE_INT *p_unique_nz_elts,
HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
*p_nz_elts = 0;
+ *p_unique_nz_elts = 0;
*p_init_elts = 0;
*p_complete = true;
- return categorize_ctor_elements_1 (ctor, p_nz_elts, p_init_elts, p_complete);
+ return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
+ p_init_elts, p_complete);
}
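To make the new counter concrete with an illustrative (non-GCC) example: a
GNU C range initializer such as

    int a[100] = { [0 ... 99] = 7 };

contributes 100 to *P_NZ_ELTS, because the multiplier covers the whole range,
but only 1 to *P_UNIQUE_NZ_ELTS - which is exactly the distinction that lets
a caller prefer a small initialization loop over copying a large block of
read-only data.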
/* TYPE is initialized by a constructor with NUM_ELTS elements, the last
return count_type_elements (type, true) == num_elts;
}
-/* Return 1 if EXP contains mostly (3/4) zeros. */
+/* Return 1 if EXP contains mostly (3/4) zeros. */
static int
mostly_zeros_p (const_tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
{
- HOST_WIDE_INT nz_elts, init_elts;
+ HOST_WIDE_INT nz_elts, unz_elts, init_elts;
bool complete_p;
- categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
+ categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
+ &complete_p);
return !complete_p || nz_elts < init_elts / 4;
}
{
if (TREE_CODE (exp) == CONSTRUCTOR)
{
- HOST_WIDE_INT nz_elts, init_elts;
+ HOST_WIDE_INT nz_elts, unz_elts, init_elts;
bool complete_p;
- categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
+ categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
+ &complete_p);
return nz_elts == 0;
}
clear a substructure if the outer structure has already been cleared. */
static void
-store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize,
- HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
+ poly_uint64 bitregion_start,
+ poly_uint64 bitregion_end,
machine_mode mode,
tree exp, int cleared,
alias_set_type alias_set, bool reverse)
{
+ poly_int64 bytepos;
+ poly_uint64 bytesize;
if (TREE_CODE (exp) == CONSTRUCTOR
/* We can only call store_constructor recursively if the size and
bit position are on a byte boundary. */
- && bitpos % BITS_PER_UNIT == 0
- && (bitsize > 0 && bitsize % BITS_PER_UNIT == 0)
+ && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+ && maybe_ne (bitsize, 0U)
+ && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
/* If we have a nonzero bitpos for a register target, then we just
let store_field do the bitfield handling. This is unlikely to
generate unnecessary clear instructions anyways. */
- && (bitpos == 0 || MEM_P (target)))
+ && (known_eq (bitpos, 0) || MEM_P (target)))
{
if (MEM_P (target))
- target
- = adjust_address (target,
- GET_MODE (target) == BLKmode
- || 0 != (bitpos
- % GET_MODE_ALIGNMENT (GET_MODE (target)))
- ? BLKmode : VOIDmode, bitpos / BITS_PER_UNIT);
+ {
+ machine_mode target_mode = GET_MODE (target);
+ if (target_mode != BLKmode
+ && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
+ target_mode = BLKmode;
+ target = adjust_address (target, target_mode, bytepos);
+ }
/* Update the alias set, if required. */
set_mem_alias_set (target, alias_set);
}
- store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT,
- reverse);
+ store_constructor (exp, target, cleared, bytesize, reverse);
}
else
store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
If REVERSE is true, the store is to be done in reverse order. */
static void
-store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size,
+store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
bool reverse)
{
tree type = TREE_TYPE (exp);
HOST_WIDE_INT exp_size = int_size_in_bytes (type);
- HOST_WIDE_INT bitregion_end = size > 0 ? size * BITS_PER_UNIT - 1 : 0;
+ poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
switch (TREE_CODE (type))
{
reverse = TYPE_REVERSE_STORAGE_ORDER (type);
/* If size is zero or the target is already cleared, do nothing. */
- if (size == 0 || cleared)
+ if (known_eq (size, 0) || cleared)
cleared = 1;
/* We either clear the aggregate or indicate the value is dead. */
else if ((TREE_CODE (type) == UNION_TYPE
a constant. But if more than one register is involved,
this probably loses. */
else if (REG_P (target) && TREE_STATIC (exp)
- && (GET_MODE_SIZE (GET_MODE (target))
- <= REGMODE_NATURAL_SIZE (GET_MODE (target))))
+ && known_le (GET_MODE_SIZE (GET_MODE (target)),
+ REGMODE_NATURAL_SIZE (GET_MODE (target))))
{
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
cleared = 1;
the whole structure first. Don't do this if TARGET is a
register whose mode size isn't equal to SIZE since
clear_storage can't handle this case. */
- else if (size > 0
+ else if (known_size_p (size)
&& (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
|| mostly_zeros_p (exp))
&& (!REG_P (target)
- || ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target))
- == size)))
+ || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
{
- clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
+ clear_storage (target, gen_int_mode (size, Pmode),
+ BLOCK_OP_NORMAL);
cleared = 1;
}
need_to_clear = 1;
}
- if (need_to_clear && size > 0)
+ if (need_to_clear && maybe_gt (size, 0))
{
if (REG_P (target))
- emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
+ emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
else
- clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
+ clear_storage (target, gen_int_mode (size, Pmode),
+ BLOCK_OP_NORMAL);
cleared = 1;
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
{
machine_mode mode;
- HOST_WIDE_INT bitsize;
+ poly_int64 bitsize;
HOST_WIDE_INT bitpos;
rtx xtarget = target;
continue;
mode = TYPE_MODE (elttype);
- if (mode == BLKmode)
- bitsize = (tree_fits_uhwi_p (TYPE_SIZE (elttype))
- ? tree_to_uhwi (TYPE_SIZE (elttype))
- : -1);
- else
+ if (mode != BLKmode)
bitsize = GET_MODE_BITSIZE (mode);
+ else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
+ bitsize = -1;
if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
{
xtarget = adjust_address (xtarget, mode, 0);
if (TREE_CODE (value) == CONSTRUCTOR)
store_constructor (value, xtarget, cleared,
- bitsize / BITS_PER_UNIT, reverse);
+ exact_div (bitsize, BITS_PER_UNIT),
+ reverse);
else
store_expr (value, xtarget, 0, false, reverse);
constructor_elt *ce;
int i;
int need_to_clear;
- int icode = CODE_FOR_nothing;
+ insn_code icode = CODE_FOR_nothing;
+ tree elt;
tree elttype = TREE_TYPE (type);
int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
machine_mode eltmode = TYPE_MODE (elttype);
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
rtvec vector = NULL;
- unsigned n_elts;
+ poly_uint64 n_elts;
+ unsigned HOST_WIDE_INT const_n_elts;
alias_set_type alias;
bool vec_vec_init_p = false;
+ machine_mode mode = GET_MODE (target);
gcc_assert (eltmode != BLKmode);
+ /* Try using vec_duplicate_optab for uniform vectors. */
+ if (!TREE_SIDE_EFFECTS (exp)
+ && VECTOR_MODE_P (mode)
+ && eltmode == GET_MODE_INNER (mode)
+ && ((icode = optab_handler (vec_duplicate_optab, mode))
+ != CODE_FOR_nothing)
+ && (elt = uniform_vector_p (exp)))
+ {
+ struct expand_operand ops[2];
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], expand_normal (elt), eltmode);
+ expand_insn (icode, 2, ops);
+ if (!rtx_equal_p (target, ops[0].value))
+ emit_move_insn (target, ops[0].value);
+ break;
+ }
+
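For illustration (not part of the patch): uniform_vector_p returns the
repeated element when every element of the constructor is the same value,
so on a target that provides a vec_duplicate pattern for the vector mode,
something like

    typedef int v4si __attribute__ ((vector_size (16)));
    v4si broadcast (int x) { return (v4si) { x, x, x, x }; }

can be expanded as a single broadcast of x instead of four separate
element initializations.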
n_elts = TYPE_VECTOR_SUBPARTS (type);
- if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target)))
+ if (REG_P (target)
+ && VECTOR_MODE_P (mode)
+ && n_elts.is_constant (&const_n_elts))
{
- machine_mode mode = GET_MODE (target);
machine_mode emode = eltmode;
if (CONSTRUCTOR_NELTS (exp)
== VECTOR_TYPE))
{
tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
- gcc_assert (CONSTRUCTOR_NELTS (exp) * TYPE_VECTOR_SUBPARTS (etype)
- == n_elts);
+ gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
+ * TYPE_VECTOR_SUBPARTS (etype),
+ n_elts));
emode = TYPE_MODE (etype);
}
- icode = (int) convert_optab_handler (vec_init_optab, mode, emode);
+ icode = convert_optab_handler (vec_init_optab, mode, emode);
if (icode != CODE_FOR_nothing)
{
- unsigned int i, n = n_elts;
+ unsigned int i, n = const_n_elts;
if (emode != eltmode)
{
/* Clear the entire vector first if there are any missing elements,
or if the incidence of zero elements is >= 75%. */
- need_to_clear = (count < n_elts || 4 * zero_count >= 3 * count);
+ need_to_clear = (maybe_lt (count, n_elts)
+ || 4 * zero_count >= 3 * count);
}
- if (need_to_clear && size > 0 && !vector)
+ if (need_to_clear && maybe_gt (size, 0) && !vector)
{
if (REG_P (target))
- emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
+ emit_move_insn (target, CONST0_RTX (mode));
else
- clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
+ clear_storage (target, gen_int_mode (size, Pmode),
+ BLOCK_OP_NORMAL);
cleared = 1;
}
/* Inform later passes that the old value is dead. */
if (!cleared && !vector && REG_P (target))
- emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
+ emit_move_insn (target, CONST0_RTX (mode));
if (MEM_P (target))
alias = MEM_ALIAS_SET (target);
if (vector)
emit_insn (GEN_FCN (icode) (target,
- gen_rtx_PARALLEL (GET_MODE (target),
- vector)));
+ gen_rtx_PARALLEL (mode, vector)));
break;
}
If REVERSE is true, the store is to be done in reverse order. */
static rtx
-store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
machine_mode mode, tree exp,
alias_set_type alias_set, bool nontemporal, bool reverse)
{
/* If we have nothing to store, do nothing unless the expression has
side-effects. Don't do that for zero sized addressable lhs of
calls. */
- if (bitsize == 0
+ if (known_eq (bitsize, 0)
&& (!TREE_ADDRESSABLE (TREE_TYPE (exp))
|| TREE_CODE (exp) != CALL_EXPR))
return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
{
/* We're storing into a struct containing a single __complex. */
- gcc_assert (!bitpos);
+ gcc_assert (known_eq (bitpos, 0));
return store_expr (exp, target, 0, nontemporal, reverse);
}
is a bit field, we cannot use addressing to access it.
Use bit-field techniques or SUBREG to store in it. */
+ poly_int64 decl_bitsize;
if (mode == VOIDmode
|| (mode != BLKmode && ! direct_store[(int) mode]
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
store it as a bit field. */
|| (mode != BLKmode
&& ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
- || bitpos % GET_MODE_ALIGNMENT (mode))
+ || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
&& targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
- || (bitpos % BITS_PER_UNIT != 0)))
- || (bitsize >= 0 && mode != BLKmode
- && GET_MODE_BITSIZE (mode) > bitsize)
+ || !multiple_p (bitpos, BITS_PER_UNIT)))
+ || (known_size_p (bitsize)
+ && mode != BLKmode
+ && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
/* If the RHS and field are a constant size and the size of the
RHS isn't the same size as the bitfield, we must use bitfield
operations. */
- || (bitsize >= 0
- && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
- && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0
+ || (known_size_p (bitsize)
+ && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
+ && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
+ bitsize)
/* Except for initialization of full bytes from a CONSTRUCTOR, which
we will handle specially below. */
&& !(TREE_CODE (exp) == CONSTRUCTOR
- && bitsize % BITS_PER_UNIT == 0)
+ && multiple_p (bitsize, BITS_PER_UNIT))
/* And except for bitwise copying of TREE_ADDRESSABLE types,
where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
includes some extra padding. store_expr / expand_expr will in
get_base_address needs to live in memory. */
&& (!TREE_ADDRESSABLE (TREE_TYPE (exp))
|| TREE_CODE (exp) != COMPONENT_REF
- || TREE_CODE (DECL_SIZE (TREE_OPERAND (exp, 1))) != INTEGER_CST
- || (bitsize % BITS_PER_UNIT != 0)
- || (bitpos % BITS_PER_UNIT != 0)
- || (compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)), bitsize)
- != 0)))
+ || !multiple_p (bitsize, BITS_PER_UNIT)
+ || !multiple_p (bitpos, BITS_PER_UNIT)
+ || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
+ &decl_bitsize)
+ || maybe_ne (decl_bitsize, bitsize)))
/* If we are expanding a MEM_REF of a non-BLKmode non-addressable
decl we must use bitfield operations. */
- || (bitsize >= 0
+ || (known_size_p (bitsize)
&& TREE_CODE (exp) == MEM_REF
&& TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
&& DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
{
tree type = TREE_TYPE (exp);
if (INTEGRAL_TYPE_P (type)
- && TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
- && bitsize == TYPE_PRECISION (type))
+ && maybe_ne (TYPE_PRECISION (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type)))
+ && known_eq (bitsize, TYPE_PRECISION (type)))
{
tree op = gimple_assign_rhs1 (nop_def);
type = TREE_TYPE (op);
- if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) >= bitsize)
+ if (INTEGRAL_TYPE_P (type)
+ && known_ge (TYPE_PRECISION (type), bitsize))
exp = op;
}
}
temp = expand_normal (exp);
+ /* We don't support variable-sized BLKmode bitfields, since our
+ handling of BLKmode is bound up with the ability to break
+ things into words. */
+ gcc_assert (mode != BLKmode || bitsize.is_constant ());
+
/* Handle calls that return values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
if (GET_CODE (temp) == PARALLEL)
{
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
- scalar_int_mode temp_mode
- = smallest_int_mode_for_size (size * BITS_PER_UNIT);
+ machine_mode temp_mode = GET_MODE (temp);
+ if (temp_mode == BLKmode || temp_mode == VOIDmode)
+ temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
rtx temp_target = gen_reg_rtx (temp_mode);
emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
temp = temp_target;
if (reverse)
temp = flip_storage_order (temp_mode, temp);
- if (bitsize < size
+ gcc_checking_assert (known_le (bitsize, size));
+ if (maybe_lt (bitsize, size)
&& reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
- && !(mode == BLKmode && bitsize > BITS_PER_WORD))
+ /* Use of to_constant for BLKmode was checked above. */
+ && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
size - bitsize, NULL_RTX, 1);
}
&& (GET_MODE (target) == BLKmode
|| (MEM_P (target)
&& GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
- && (bitpos % BITS_PER_UNIT) == 0
- && (bitsize % BITS_PER_UNIT) == 0)))
+ && multiple_p (bitpos, BITS_PER_UNIT)
+ && multiple_p (bitsize, BITS_PER_UNIT))))
{
- gcc_assert (MEM_P (target) && MEM_P (temp)
- && (bitpos % BITS_PER_UNIT) == 0);
+ gcc_assert (MEM_P (target) && MEM_P (temp));
+ poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
+ poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
- target = adjust_address (target, VOIDmode, bitpos / BITS_PER_UNIT);
+ target = adjust_address (target, VOIDmode, bytepos);
emit_block_move (target, temp,
- GEN_INT ((bitsize + BITS_PER_UNIT - 1)
- / BITS_PER_UNIT),
+ gen_int_mode (bytesize, Pmode),
BLOCK_OP_NORMAL);
return const0_rtx;
/* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
word size, we need to load the value (see again store_bit_field). */
- if (GET_MODE (temp) == BLKmode && bitsize <= BITS_PER_WORD)
+ if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
{
scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
}
/* Store the value in the bitfield. */
+ gcc_checking_assert (known_ge (bitpos, 0));
store_bit_field (target, bitsize, bitpos,
bitregion_start, bitregion_end,
mode, temp, reverse);
else
{
/* Now build a reference to just the desired component. */
- rtx to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT);
+ rtx to_rtx = adjust_address (target, mode,
+ exact_div (bitpos, BITS_PER_UNIT));
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
/* Above we avoided using bitfield operations for storing a CONSTRUCTOR
into a target smaller than its type; handle that case now. */
- if (TREE_CODE (exp) == CONSTRUCTOR && bitsize >= 0)
+ if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
{
- gcc_assert (bitsize % BITS_PER_UNIT == 0);
- store_constructor (exp, to_rtx, 0, bitsize / BITS_PER_UNIT, reverse);
+ poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
+ store_constructor (exp, to_rtx, 0, bytesize, reverse);
return to_rtx;
}
this case, but the address of the object can be found. */
tree
-get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
- HOST_WIDE_INT *pbitpos, tree *poffset,
+get_inner_reference (tree exp, poly_int64_pod *pbitsize,
+ poly_int64_pod *pbitpos, tree *poffset,
machine_mode *pmode, int *punsignedp,
int *preversep, int *pvolatilep)
{
machine_mode mode = VOIDmode;
bool blkmode_bitfield = false;
tree offset = size_zero_node;
- offset_int bit_offset = 0;
+ poly_offset_int bit_offset = 0;
/* First get the mode, signedness, storage order and size. We do this from
just the outermost expression. */
size. */
mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
else if (!DECL_BIT_FIELD (field))
- mode = DECL_MODE (field);
+ {
+ mode = DECL_MODE (field);
+ /* For vector fields re-check the target flags, as DECL_MODE
+ could have been set with different target flags than
+ the current function has. */
+ if (mode == BLKmode
+ && VECTOR_TYPE_P (TREE_TYPE (field))
+ && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
+ mode = TYPE_MODE (TREE_TYPE (field));
+ }
else if (DECL_MODE (field) == BLKmode)
blkmode_bitfield = true;
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset += wi::to_offset (TREE_OPERAND (exp, 2));
+ bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
break;
offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
+ bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
/* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
}
tree off = TREE_OPERAND (exp, 1);
if (!integer_zerop (off))
{
- offset_int boff, coff = mem_ref_offset (exp);
- boff = coff << LOG2_BITS_PER_UNIT;
+ poly_offset_int boff = mem_ref_offset (exp);
+ boff <<= LOG2_BITS_PER_UNIT;
bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
/* If OFFSET is constant, see if we can return the whole thing as a
constant bit position. Make sure to handle overflow during
this conversion. */
- if (TREE_CODE (offset) == INTEGER_CST)
+ if (poly_int_tree_p (offset))
{
- offset_int tem = wi::sext (wi::to_offset (offset),
- TYPE_PRECISION (sizetype));
+ poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
+ TYPE_PRECISION (sizetype));
tem <<= LOG2_BITS_PER_UNIT;
tem += bit_offset;
- if (wi::fits_shwi_p (tem))
- {
- *pbitpos = tem.to_shwi ();
- *poffset = offset = NULL_TREE;
- }
+ if (tem.to_shwi (pbitpos))
+ *poffset = offset = NULL_TREE;
}
/* Otherwise, split it up. */
if (offset)
{
/* Avoid returning a negative bitpos as this may wreak havoc later. */
- if (wi::neg_p (bit_offset) || !wi::fits_shwi_p (bit_offset))
+ if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
{
- offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
- offset_int tem = wi::bit_and_not (bit_offset, mask);
- /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
- Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
- bit_offset -= tem;
- tem >>= LOG2_BITS_PER_UNIT;
+ *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
+ poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
offset = size_binop (PLUS_EXPR, offset,
- wide_int_to_tree (sizetype, tem));
+ build_int_cst (sizetype, bytes.force_shwi ()));
}
- *pbitpos = bit_offset.to_shwi ();
*poffset = offset;
}
/* We can use BLKmode for a byte-aligned BLKmode bitfield. */
if (mode == VOIDmode
&& blkmode_bitfield
- && (*pbitpos % BITS_PER_UNIT) == 0
- && (*pbitsize % BITS_PER_UNIT) == 0)
+ && multiple_p (*pbitpos, BITS_PER_UNIT)
+ && multiple_p (*pbitsize, BITS_PER_UNIT))
*pmode = BLKmode;
else
*pmode = mode;
{
rtx result, subtarget;
tree inner, offset;
- HOST_WIDE_INT bitsize, bitpos;
+ poly_int64 bitsize, bitpos;
int unsignedp, reversep, volatilep = 0;
machine_mode mode1;
return expand_expr (tem, target, tmode, modifier);
}
+ case TARGET_MEM_REF:
+ return addr_for_mem_ref (exp, as, true);
+
case CONST_DECL:
/* Expand the initializer like constants above. */
result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
/* We must have made progress. */
gcc_assert (inner != exp);
- subtarget = offset || bitpos ? NULL_RTX : target;
+ subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
/* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
inner alignment, force the inner to be sufficiently aligned. */
if (CONSTANT_CLASS_P (inner)
result = simplify_gen_binary (PLUS, tmode, result, tmp);
else
{
- subtarget = bitpos ? NULL_RTX : target;
+ subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
1, OPTAB_LIB_WIDEN);
}
}
- if (bitpos)
+ if (maybe_ne (bitpos, 0))
{
/* Someone beforehand should have rejected taking the address
- of such an object. */
- gcc_assert ((bitpos % BITS_PER_UNIT) == 0);
-
+ of an object that isn't byte-aligned. */
+ poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
result = convert_memory_address_addr_space (tmode, result, as);
- result = plus_constant (tmode, result, bitpos / BITS_PER_UNIT);
+ result = plus_constant (tmode, result, bytepos);
if (modifier < EXPAND_SUM)
result = force_operand (result, target);
}
if ((TREE_STATIC (exp)
&& ((mode == BLKmode
&& ! (target != 0 && safe_from_p (target, exp, 1)))
- || TREE_ADDRESSABLE (exp)
- || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
- && (! can_move_by_pieces
- (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
- TYPE_ALIGN (type)))
- && ! mostly_zeros_p (exp))))
+ || TREE_ADDRESSABLE (exp)
+ || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
+ && (! can_move_by_pieces
+ (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
+ TYPE_ALIGN (type)))
+ && ! mostly_zeros_p (exp))))
|| ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
&& TREE_CONSTANT (exp)))
{
&& !TYPE_REVERSE_STORAGE_ORDER (type));
/* Store this field into a union of the proper type. */
+ poly_uint64 op0_size
+ = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
+ poly_uint64 union_size = GET_MODE_BITSIZE (mode);
store_field (target,
- MIN ((int_size_in_bytes (TREE_TYPE
- (treeop0))
- * BITS_PER_UNIT),
- (HOST_WIDE_INT) GET_MODE_BITSIZE (mode)),
+ /* The conversion must be constructed so that
+ we know at compile time how many bits
+ to preserve. */
+ ordered_min (op0_size, union_size),
0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
false, false);
}
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
case MINUS_EXPR:
+ case POINTER_DIFF_EXPR:
do_minus:
/* For initializers, we are allowed to return a MINUS of two
symbolic constants. Here we handle all cases when both operands
!= INTEGER_CST check. Handle it. */
if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
{
- op0 = convert_modes (innermode, mode, op0, true);
- op1 = convert_modes (innermode, mode, op1, false);
+ op0 = convert_modes (mode, innermode, op0, true);
+ op1 = convert_modes (mode, innermode, op1, false);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
target, unsignedp));
}
if (TREE_CODE (treeop0) != INTEGER_CST)
{
if (find_widening_optab_handler (this_optab, mode, innermode)
- != CODE_FOR_nothing)
+ != CODE_FOR_nothing)
{
expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
EXPAND_NORMAL);
if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
{
widen_mult_const:
- op0 = convert_modes (innermode, mode, op0, zextend_p);
+ op0 = convert_modes (mode, innermode, op0, zextend_p);
op1
- = convert_modes (innermode, mode, op1,
+ = convert_modes (mode, innermode, op1,
TYPE_UNSIGNED (TREE_TYPE (treeop1)));
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
target,
return REDUCE_BIT_FIELD (temp);
}
if (find_widening_optab_handler (other_optab, mode, innermode)
- != CODE_FOR_nothing
+ != CODE_FOR_nothing
&& innermode == word_mode)
{
rtx htem, hipart;
op0 = expand_normal (treeop0);
- if (TREE_CODE (treeop1) == INTEGER_CST)
- op1 = convert_modes (word_mode, mode,
- expand_normal (treeop1),
- TYPE_UNSIGNED (TREE_TYPE (treeop1)));
- else
- op1 = expand_normal (treeop1);
- /* op0 and op1 might still be constant, despite the above
+ op1 = expand_normal (treeop1);
+ /* op0 and op1 might be constants, despite the above
!= INTEGER_CST check. Handle it. */
if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
goto widen_mult_const;
+ if (TREE_CODE (treeop1) == INTEGER_CST)
+ op1 = convert_modes (mode, word_mode, op1,
+ TYPE_UNSIGNED (TREE_TYPE (treeop1)));
temp = expand_binop (mode, other_optab, op0, op1, target,
unsignedp, OPTAB_LIB_WIDEN);
hipart = gen_highpart (word_mode, temp);
expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
- case FMA_EXPR:
- {
- optab opt = fma_optab;
- gimple *def0, *def2;
-
- /* If there is no insn for FMA, emit it as __builtin_fma{,f,l}
- call. */
- if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
- {
- tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
- tree call_expr;
-
- gcc_assert (fn != NULL_TREE);
- call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
- return expand_builtin (call_expr, target, subtarget, mode, false);
- }
-
- def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
- /* The multiplication is commutative - look at its 2nd operand
- if the first isn't fed by a negate. */
- if (!def0)
- {
- def0 = get_def_for_expr (treeop1, NEGATE_EXPR);
- /* Swap operands if the 2nd operand is fed by a negate. */
- if (def0)
- std::swap (treeop0, treeop1);
- }
- def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
-
- op0 = op2 = NULL;
-
- if (def0 && def2
- && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
- {
- opt = fnms_optab;
- op0 = expand_normal (gimple_assign_rhs1 (def0));
- op2 = expand_normal (gimple_assign_rhs1 (def2));
- }
- else if (def0
- && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
- {
- opt = fnma_optab;
- op0 = expand_normal (gimple_assign_rhs1 (def0));
- }
- else if (def2
- && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
- {
- opt = fms_optab;
- op2 = expand_normal (gimple_assign_rhs1 (def2));
- }
-
- if (op0 == NULL)
- op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
- if (op2 == NULL)
- op2 = expand_normal (treeop2);
- op1 = expand_normal (treeop1);
-
- return expand_ternary_op (TYPE_MODE (type), opt,
- op0, op1, op2, target, 0);
- }
-
case MULT_EXPR:
/* If this is a fixed-point operation, then we cannot use the code
below because "expand_mult" doesn't support sat/no-sat fixed-point
return REDUCE_BIT_FIELD (temp);
case ABS_EXPR:
+ case ABSU_EXPR:
op0 = expand_expr (treeop0, subtarget,
VOIDmode, EXPAND_NORMAL);
if (modifier == EXPAND_STACK_PARM)
/* Unsigned abs is simply the operand. Testing here means we don't
risk generating incorrect code below. */
- if (TYPE_UNSIGNED (type))
+ if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
return op0;
return expand_abs (mode, op0, target, unsignedp,
>= GET_MODE_BITSIZE (word_mode)))
{
rtx_insn *seq, *seq_old;
- unsigned int high_off = subreg_highpart_offset (word_mode,
- int_mode);
+ poly_uint64 high_off = subreg_highpart_offset (word_mode,
+ int_mode);
bool extend_unsigned
= TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
rtx low = lowpart_subreg (word_mode, op0, int_mode);
return target;
}
- case REDUC_MAX_EXPR:
- case REDUC_MIN_EXPR:
- case REDUC_PLUS_EXPR:
- {
- op0 = expand_normal (treeop0);
- this_optab = optab_for_tree_code (code, type, optab_default);
- machine_mode vec_mode = TYPE_MODE (TREE_TYPE (treeop0));
-
- struct expand_operand ops[2];
- enum insn_code icode = optab_handler (this_optab, vec_mode);
-
- create_output_operand (&ops[0], target, mode);
- create_input_operand (&ops[1], op0, vec_mode);
- expand_insn (icode, 2, ops);
- target = ops[0].value;
- if (GET_MODE (target) != mode)
- return gen_lowpart (tmode, target);
- return target;
- }
-
case VEC_UNPACK_HI_EXPR:
case VEC_UNPACK_LO_EXPR:
+ case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
+ case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
{
op0 = expand_normal (treeop0);
temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
gcc_assert (target);
return target;
- case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_SAT_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
mode = TYPE_MODE (TREE_TYPE (treeop0));
goto binop;
- case VEC_PERM_EXPR:
- expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (treeop2);
-
- /* Careful here: if the target doesn't support integral vector modes,
- a constant selection vector could wind up smooshed into a normal
- integral constant. */
- if (CONSTANT_P (op2) && !VECTOR_MODE_P (GET_MODE (op2)))
+ case VEC_PACK_TRUNC_EXPR:
+ if (VECTOR_BOOLEAN_TYPE_P (type)
+ && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0))
+ && mode == TYPE_MODE (TREE_TYPE (treeop0))
+ && SCALAR_INT_MODE_P (mode))
{
- tree sel_type = TREE_TYPE (treeop2);
- machine_mode vmode
- = mode_for_vector (SCALAR_TYPE_MODE (TREE_TYPE (sel_type)),
- TYPE_VECTOR_SUBPARTS (sel_type)).require ();
- gcc_assert (GET_MODE_CLASS (vmode) == MODE_VECTOR_INT);
- op2 = simplify_subreg (vmode, op2, TYPE_MODE (sel_type), 0);
- gcc_assert (op2 && GET_CODE (op2) == CONST_VECTOR);
+ struct expand_operand eops[4];
+ machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
+ expand_operands (treeop0, treeop1,
+ subtarget, &op0, &op1, EXPAND_NORMAL);
+ this_optab = vec_pack_sbool_trunc_optab;
+ enum insn_code icode = optab_handler (this_optab, imode);
+ create_output_operand (&eops[0], target, mode);
+ create_convert_operand_from (&eops[1], op0, imode, false);
+ create_convert_operand_from (&eops[2], op1, imode, false);
+ temp = GEN_INT (TYPE_VECTOR_SUBPARTS (type).to_constant ());
+ create_input_operand (&eops[3], temp, imode);
+ expand_insn (icode, 4, eops);
+ return eops[0].value;
}
- else
- gcc_assert (GET_MODE_CLASS (GET_MODE (op2)) == MODE_VECTOR_INT);
+ mode = TYPE_MODE (TREE_TYPE (treeop0));
+ goto binop;
- temp = expand_vec_perm (mode, op0, op1, op2, target);
- gcc_assert (temp);
- return temp;
+ case VEC_PACK_FLOAT_EXPR:
+ mode = TYPE_MODE (TREE_TYPE (treeop0));
+ expand_operands (treeop0, treeop1,
+ subtarget, &op0, &op1, EXPAND_NORMAL);
+ this_optab = optab_for_tree_code (code, TREE_TYPE (treeop0),
+ optab_default);
+ target = expand_binop (mode, this_optab, op0, op1, target,
+ TYPE_UNSIGNED (TREE_TYPE (treeop0)),
+ OPTAB_LIB_WIDEN);
+ gcc_assert (target);
+ return target;
+
+ case VEC_PERM_EXPR:
+ {
+ expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
+ vec_perm_builder sel;
+ if (TREE_CODE (treeop2) == VECTOR_CST
+ && tree_to_vec_perm_builder (&sel, treeop2))
+ {
+ machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
+ temp = expand_vec_perm_const (mode, op0, op1, sel,
+ sel_mode, target);
+ }
+ else
+ {
+ op2 = expand_normal (treeop2);
+ temp = expand_vec_perm_var (mode, op0, op1, op2, target);
+ }
+ gcc_assert (temp);
+ return temp;
+ }
case DOT_PROD_EXPR:
{
target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
return target;
+ case VEC_DUPLICATE_EXPR:
+ op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
+ target = expand_vector_broadcast (mode, op0);
+ gcc_assert (target);
+ return target;
+
+ case VEC_SERIES_EXPR:
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
+ return expand_vec_series_expr (mode, op0, op1, target);
+
case BIT_INSERT_EXPR:
{
unsigned bitpos = tree_to_uhwi (treeop2);
case VECTOR_CST:
{
tree tmp = NULL_TREE;
- if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FRACT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_UFRACT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_ACCUM
- || GET_MODE_CLASS (mode) == MODE_VECTOR_UACCUM)
+ if (VECTOR_MODE_P (mode))
return const_vector_from_tree (exp);
scalar_int_mode int_mode;
if (is_int_mode (mode, &int_mode))
if (!tmp)
{
vec<constructor_elt, va_gc> *v;
- unsigned i;
- vec_alloc (v, VECTOR_CST_NELTS (exp));
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* Constructors need to be fixed-length. FIXME. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ vec_alloc (v, nunits);
+ for (unsigned int i = 0; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
tmp = build_constructor (type, v);
}
copy_rtx (XEXP (temp, 0)));
return temp;
+ case POLY_INT_CST:
+ return immed_wide_int_const (poly_int_cst_value (exp), mode);
+
case SAVE_EXPR:
{
tree val = treeop0;
might end up in a register. */
if (mem_ref_refers_to_non_mem_p (exp))
{
- HOST_WIDE_INT offset = mem_ref_offset (exp).to_short_addr ();
+ poly_int64 offset = mem_ref_offset (exp).force_shwi ();
base = TREE_OPERAND (base, 0);
- if (offset == 0
+ poly_uint64 type_size;
+ if (known_eq (offset, 0)
&& !reverse
- && tree_fits_uhwi_p (TYPE_SIZE (type))
- && (GET_MODE_BITSIZE (DECL_MODE (base))
- == tree_to_uhwi (TYPE_SIZE (type))))
+ && poly_int_tree_p (TYPE_SIZE (type), &type_size)
+ && known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
target, tmode, modifier);
if (TYPE_MODE (type) == BLKmode)
normal_inner_ref:
{
machine_mode mode1, mode2;
- HOST_WIDE_INT bitsize, bitpos;
+ poly_int64 bitsize, bitpos, bytepos;
tree offset;
int reversep, volatilep = 0, must_force_mem;
tree tem
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
+ /* Make sure bitpos is not negative, as it can wreak havoc later. */
+ if (maybe_lt (bitpos, 0))
+ {
+ gcc_checking_assert (offset == NULL_TREE);
+ offset = size_int (bits_to_bytes_round_down (bitpos));
+ bitpos = num_trailing_bits (bitpos);
+ }
+
/* If we have either an offset, a BLKmode result, or a reference
outside the underlying object, we must force it to memory.
Such a case can occur in Ada if we have unchecked conversion
to a larger size. */
must_force_mem = (offset
|| mode1 == BLKmode
- || bitpos + bitsize > GET_MODE_BITSIZE (mode2));
+ || (mode == BLKmode
+ && !int_mode_for_size (bitsize, 1).exists ())
+ || maybe_gt (bitpos + bitsize,
+ GET_MODE_BITSIZE (mode2)));
/* Handle CONCAT first. */
if (GET_CODE (op0) == CONCAT && !must_force_mem)
{
- if (bitpos == 0
- && bitsize == GET_MODE_BITSIZE (GET_MODE (op0))
+ if (known_eq (bitpos, 0)
+ && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
&& COMPLEX_MODE_P (mode1)
&& COMPLEX_MODE_P (GET_MODE (op0))
&& (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
}
return op0;
}
- if (bitpos == 0
- && bitsize == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
- && bitsize)
+ if (known_eq (bitpos, 0)
+ && known_eq (bitsize,
+ GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
+ && maybe_ne (bitsize, 0))
{
op0 = XEXP (op0, 0);
mode2 = GET_MODE (op0);
}
- else if (bitpos == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
- && bitsize == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1)))
- && bitpos
- && bitsize)
+ else if (known_eq (bitpos,
+ GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
+ && known_eq (bitsize,
+ GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
+ && maybe_ne (bitpos, 0)
+ && maybe_ne (bitsize, 0))
{
op0 = XEXP (op0, 1);
bitpos = 0;
/* See the comment in expand_assignment for the rationale. */
if (mode1 != VOIDmode
- && bitpos != 0
- && bitsize > 0
- && (bitpos % bitsize) == 0
- && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+ && maybe_ne (bitpos, 0)
+ && maybe_gt (bitsize, 0)
+ && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+ && multiple_p (bitpos, bitsize)
+ && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
&& MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
{
- op0 = adjust_address (op0, mode1, bitpos / BITS_PER_UNIT);
+ op0 = adjust_address (op0, mode1, bytepos);
bitpos = 0;
}
/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
record its alignment as BIGGEST_ALIGNMENT. */
- if (MEM_P (op0) && bitpos == 0 && offset != 0
+ if (MEM_P (op0)
+ && known_eq (bitpos, 0)
+ && offset != 0
&& is_aligning_offset (offset, tem))
set_mem_align (op0, BIGGEST_ALIGNMENT);
|| (volatilep && TREE_CODE (exp) == COMPONENT_REF
&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
&& mode1 != BLKmode
- && bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)
+ && maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
&& (((MEM_P (op0)
? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
- || (bitpos % GET_MODE_ALIGNMENT (mode1) != 0)
+ || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
: TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
- || (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
+ || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
&& modifier != EXPAND_MEMORY
&& ((modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_INITIALIZER)
? STRICT_ALIGNMENT
: targetm.slow_unaligned_access (mode1,
MEM_ALIGN (op0))))
- || (bitpos % BITS_PER_UNIT != 0)))
+ || !multiple_p (bitpos, BITS_PER_UNIT)))
/* If the type and the field are a constant size and the
size of the type isn't the same size as the bitfield,
we must use bitfield operations. */
- || (bitsize >= 0
+ || (known_size_p (bitsize)
&& TYPE_SIZE (TREE_TYPE (exp))
- && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
- && 0 != compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)),
- bitsize)))
+ && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
+ && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
+ bitsize)))
{
machine_mode ext_mode = mode;
if (ext_mode == BLKmode
&& ! (target != 0 && MEM_P (op0)
&& MEM_P (target)
- && bitpos % BITS_PER_UNIT == 0))
+ && multiple_p (bitpos, BITS_PER_UNIT)))
ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
if (ext_mode == BLKmode)
/* ??? Unlike the similar test a few lines below, this one is
very likely obsolete. */
- if (bitsize == 0)
+ if (known_eq (bitsize, 0))
return target;
/* In this case, BITPOS must start at a byte boundary and
TARGET, if specified, must be a MEM. */
gcc_assert (MEM_P (op0)
- && (!target || MEM_P (target))
- && !(bitpos % BITS_PER_UNIT));
+ && (!target || MEM_P (target)));
+ bytepos = exact_div (bitpos, BITS_PER_UNIT);
+ poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
emit_block_move (target,
- adjust_address (op0, VOIDmode,
- bitpos / BITS_PER_UNIT),
- GEN_INT ((bitsize + BITS_PER_UNIT - 1)
- / BITS_PER_UNIT),
+ adjust_address (op0, VOIDmode, bytepos),
+ gen_int_mode (bytesize, Pmode),
(modifier == EXPAND_STACK_PARM
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
return 0 for the sake of consistency, as reading a zero-sized
bitfield is valid in Ada and the value is fully specified. */
- if (bitsize == 0)
+ if (known_eq (bitsize, 0))
return const0_rtx;
op0 = validize_mem (op0);
&& GET_MODE_CLASS (ext_mode) == MODE_INT)
reversep = TYPE_REVERSE_STORAGE_ORDER (type);
+ gcc_checking_assert (known_ge (bitpos, 0));
op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
(modifier == EXPAND_STACK_PARM
? NULL_RTX : target),
{
HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
- if (bitsize < size
+ gcc_checking_assert (known_le (bitsize, size));
+ if (maybe_lt (bitsize, size)
&& reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
size - bitsize, op0, 1);
mode1 = BLKmode;
/* Get a reference to just this component. */
+ bytepos = bits_to_bytes_round_down (bitpos);
if (modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
- op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT);
+ op0 = adjust_address_nv (op0, mode1, bytepos);
else
- op0 = adjust_address (op0, mode1, bitpos / BITS_PER_UNIT);
+ op0 = adjust_address (op0, mode1, bytepos);
if (op0 == orig_op0)
op0 = copy_rtx (op0);
tree fndecl = get_callee_fndecl (exp), attr;
if (fndecl
+ /* Don't diagnose the error attribute in thunks, which are
+ artificially created. */
+ && !CALL_FROM_THUNK_P (exp)
&& (attr = lookup_attribute ("error",
DECL_ATTRIBUTES (fndecl))) != NULL)
- error ("%Kcall to %qs declared with attribute error: %s",
- exp, identifier_to_locale (lang_hooks.decl_printable_name (fndecl, 1)),
- TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
+ {
+ const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
+ error ("%Kcall to %qs declared with attribute error: %s", exp,
+ identifier_to_locale (ident),
+ TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
+ }
if (fndecl
+ /* Don't diagnose the warning attribute in thunks, which are
+ artificially created. */
+ && !CALL_FROM_THUNK_P (exp)
&& (attr = lookup_attribute ("warning",
DECL_ATTRIBUTES (fndecl))) != NULL)
- warning_at (tree_nonartificial_location (exp),
- 0, "%Kcall to %qs declared with attribute warning: %s",
- exp, identifier_to_locale (lang_hooks.decl_printable_name (fndecl, 1)),
- TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
+ {
+ const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
+ warning_at (tree_nonartificial_location (exp),
+ OPT_Wattribute_warning,
+ "%Kcall to %qs declared with attribute warning: %s",
+ exp, identifier_to_locale (ident),
+ TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
+ }
/* Check for a built-in function. */
- if (fndecl && DECL_BUILT_IN (fndecl))
+ if (fndecl && fndecl_built_in_p (fndecl))
{
gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
- if (CALL_WITH_BOUNDS_P (exp))
- return expand_builtin_with_bounds (exp, target, subtarget,
- tmode, ignore);
- else
- return expand_builtin (exp, target, subtarget, tmode, ignore);
+ return expand_builtin (exp, target, subtarget, tmode, ignore);
}
}
return expand_call (exp, target, ignore);
/* If we are converting to BLKmode, try to avoid an intermediate
temporary by fetching an inner memory reference. */
if (mode == BLKmode
- && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && poly_int_tree_p (TYPE_SIZE (type))
&& TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
&& handled_component_p (treeop0))
{
machine_mode mode1;
- HOST_WIDE_INT bitsize, bitpos;
+ poly_int64 bitsize, bitpos, bytepos;
tree offset;
int unsignedp, reversep, volatilep = 0;
tree tem
/* ??? We should work harder and deal with non-zero offsets. */
if (!offset
- && (bitpos % BITS_PER_UNIT) == 0
+ && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
&& !reversep
- && bitsize >= 0
- && compare_tree_int (TYPE_SIZE (type), bitsize) == 0)
+ && known_size_p (bitsize)
+ && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
{
/* See the normal_inner_ref case for the rationale. */
orig_op0
if (modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_SUM
|| modifier == EXPAND_INITIALIZER)
- op0 = adjust_address_nv (op0, mode, bitpos / BITS_PER_UNIT);
+ op0 = adjust_address_nv (op0, mode, bytepos);
else
- op0 = adjust_address (op0, mode, bitpos / BITS_PER_UNIT);
+ op0 = adjust_address (op0, mode, bytepos);
if (op0 == orig_op0)
op0 = copy_rtx (op0);
;
/* If neither mode is BLKmode, and both modes are the same size
then we can use gen_lowpart. */
- else if (mode != BLKmode && GET_MODE (op0) != BLKmode
- && (GET_MODE_PRECISION (mode)
- == GET_MODE_PRECISION (GET_MODE (op0)))
+ else if (mode != BLKmode
+ && GET_MODE (op0) != BLKmode
+ && known_eq (GET_MODE_PRECISION (mode),
+ GET_MODE_PRECISION (GET_MODE (op0)))
&& !COMPLEX_MODE_P (GET_MODE (op0)))
{
if (GET_CODE (op0) == SUBREG)
}
else if (STRICT_ALIGNMENT)
{
- tree inner_type = TREE_TYPE (treeop0);
- HOST_WIDE_INT temp_size
- = MAX (int_size_in_bytes (inner_type),
- (HOST_WIDE_INT) GET_MODE_SIZE (mode));
+ poly_uint64 mode_size = GET_MODE_SIZE (mode);
+ poly_uint64 temp_size = mode_size;
+ if (GET_MODE (op0) != BLKmode)
+ temp_size = upper_bound (temp_size,
+ GET_MODE_SIZE (GET_MODE (op0)));
rtx new_rtx
= assign_stack_temp_for_type (mode, temp_size, type);
rtx new_with_op0_mode
gcc_assert (!TREE_ADDRESSABLE (exp));
if (GET_MODE (op0) == BLKmode)
- emit_block_move (new_with_op0_mode, op0,
- GEN_INT (GET_MODE_SIZE (mode)),
- (modifier == EXPAND_STACK_PARM
- ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
+ {
+ rtx size_rtx = gen_int_mode (mode_size, Pmode);
+ emit_block_move (new_with_op0_mode, op0, size_rtx,
+ (modifier == EXPAND_STACK_PARM
+ ? BLOCK_OP_CALL_PARM
+ : BLOCK_OP_NORMAL));
+ }
else
emit_move_insn (new_with_op0_mode, op0);
{
rtx_code_label *label = gen_label_rtx ();
int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
- do_jump (TREE_OPERAND (rhs, 1),
- value ? label : 0,
- value ? 0 : label,
- profile_probability::uninitialized ());
+ profile_probability prob = profile_probability::uninitialized ();
+ if (value)
+ jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
+ else
+ jumpif (TREE_OPERAND (rhs, 1), label, prob);
expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
false);
do_pending_stack_adjust ();
if (target && GET_MODE (target) != GET_MODE (exp))
target = 0;
/* For constant values, reduce using build_int_cst_type. */
- if (CONST_INT_P (exp))
+ poly_int64 const_exp;
+ if (poly_int_rtx_p (exp, &const_exp))
{
- HOST_WIDE_INT value = INTVAL (exp);
- tree t = build_int_cst_type (type, value);
+ tree t = build_int_cst_type (type, const_exp);
return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
}
else if (TYPE_UNSIGNED (type))
}
\f
/* Return the tree node if an ARG corresponds to a string constant or zero
- if it doesn't. If we return nonzero, set *PTR_OFFSET to the offset
- in bytes within the string that ARG is accessing. The type of the
- offset will be `sizetype'. */
+ if it doesn't. If we return nonzero, set *PTR_OFFSET to the (possibly
+ non-constant) offset in bytes within the string that ARG is accessing.
+ If MEM_SIZE is non-null, set *MEM_SIZE to the size in bytes of the
+ underlying storage. If DECL is non-null, set *DECL to the constant
+ declaration providing the string, if available. */
tree
-string_constant (tree arg, tree *ptr_offset)
+string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
{
- tree array, offset, lower_bound;
+ tree array;
STRIP_NOPS (arg);
+ /* Non-constant index into the character array in an ARRAY_REF
+ expression or null. */
+ tree varidx = NULL_TREE;
+
+ poly_int64 base_off = 0;
+
if (TREE_CODE (arg) == ADDR_EXPR)
{
- if (TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
- {
- *ptr_offset = size_zero_node;
- return TREE_OPERAND (arg, 0);
- }
- else if (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL)
- {
- array = TREE_OPERAND (arg, 0);
- offset = size_zero_node;
- }
- else if (TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF)
+ arg = TREE_OPERAND (arg, 0);
+ tree ref = arg;
+ if (TREE_CODE (arg) == ARRAY_REF)
{
- array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
- offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
- if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
- return 0;
-
- /* Check if the array has a nonzero lower bound. */
- lower_bound = array_ref_low_bound (TREE_OPERAND (arg, 0));
- if (!integer_zerop (lower_bound))
+ tree idx = TREE_OPERAND (arg, 1);
+ if (TREE_CODE (idx) != INTEGER_CST)
{
- /* If the offset and base aren't both constants, return 0. */
- if (TREE_CODE (lower_bound) != INTEGER_CST)
- return 0;
- if (TREE_CODE (offset) != INTEGER_CST)
- return 0;
- /* Adjust offset by the lower bound. */
- offset = size_diffop (fold_convert (sizetype, offset),
- fold_convert (sizetype, lower_bound));
+ /* From a pointer (but not array) argument extract the variable
+ index to prevent get_addr_base_and_unit_offset() from failing
+ due to it. Use it later to compute the non-constant offset
+ into the string and return it to the caller. */
+ varidx = idx;
+ ref = TREE_OPERAND (arg, 0);
+
+ if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE)
+ return NULL_TREE;
+
+ if (!integer_zerop (array_ref_low_bound (arg)))
+ return NULL_TREE;
+
+ if (!integer_onep (array_ref_element_size (arg)))
+ return NULL_TREE;
}
}
- else if (TREE_CODE (TREE_OPERAND (arg, 0)) == MEM_REF)
- {
- array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
- offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
- if (TREE_CODE (array) != ADDR_EXPR)
- return 0;
- array = TREE_OPERAND (array, 0);
- if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
- return 0;
- }
- else
- return 0;
+ array = get_addr_base_and_unit_offset (ref, &base_off);
+ if (!array
+ || (TREE_CODE (array) != VAR_DECL
+ && TREE_CODE (array) != CONST_DECL
+ && TREE_CODE (array) != STRING_CST))
+ return NULL_TREE;
}
else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
{
tree arg0 = TREE_OPERAND (arg, 0);
tree arg1 = TREE_OPERAND (arg, 1);
- STRIP_NOPS (arg0);
- STRIP_NOPS (arg1);
-
- if (TREE_CODE (arg0) == ADDR_EXPR
- && (TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST
- || TREE_CODE (TREE_OPERAND (arg0, 0)) == VAR_DECL))
+ tree offset;
+ tree str = string_constant (arg0, &offset, mem_size, decl);
+ if (!str)
{
- array = TREE_OPERAND (arg0, 0);
- offset = arg1;
+ str = string_constant (arg1, &offset, mem_size, decl);
+ arg1 = arg0;
}
- else if (TREE_CODE (arg1) == ADDR_EXPR
- && (TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST
- || TREE_CODE (TREE_OPERAND (arg1, 0)) == VAR_DECL))
+
+ if (str)
{
- array = TREE_OPERAND (arg1, 0);
- offset = arg0;
+ /* Avoid pointers to arrays (see bug 86622). */
+ if (POINTER_TYPE_P (TREE_TYPE (arg))
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == ARRAY_TYPE
+ && !(decl && !*decl)
+ && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
+ && mem_size && tree_fits_uhwi_p (*mem_size)
+ && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
+ return NULL_TREE;
+
+ tree type = TREE_TYPE (offset);
+ arg1 = fold_convert (type, arg1);
+ *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, arg1);
+ return str;
}
- else
- return 0;
+ return NULL_TREE;
}
+ else if (TREE_CODE (arg) == SSA_NAME)
+ {
+ gimple *stmt = SSA_NAME_DEF_STMT (arg);
+ if (!is_gimple_assign (stmt))
+ return NULL_TREE;
+
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree_code code = gimple_assign_rhs_code (stmt);
+ if (code == ADDR_EXPR)
+ return string_constant (rhs1, ptr_offset, mem_size, decl);
+ else if (code != POINTER_PLUS_EXPR)
+ return NULL_TREE;
+
+ tree offset;
+ if (tree str = string_constant (rhs1, &offset, mem_size, decl))
+ {
+ /* Avoid pointers to arrays (see bug 86622). */
+ if (POINTER_TYPE_P (TREE_TYPE (rhs1))
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1))) == ARRAY_TYPE
+ && !(decl && !*decl)
+ && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
+ && mem_size && tree_fits_uhwi_p (*mem_size)
+ && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
+ return NULL_TREE;
+
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ tree type = TREE_TYPE (offset);
+ rhs2 = fold_convert (type, rhs2);
+ *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, rhs2);
+ return str;
+ }
+ return NULL_TREE;
+ }
+ else if (DECL_P (arg))
+ array = arg;
else
- return 0;
+ return NULL_TREE;
+
+ tree offset = wide_int_to_tree (sizetype, base_off);
+ if (varidx)
+ {
+ if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE)
+ return NULL_TREE;
+
+ gcc_assert (TREE_CODE (arg) == ARRAY_REF);
+ tree chartype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 0)));
+ if (TREE_CODE (chartype) != INTEGER_TYPE)
+ return NULL;
+
+ offset = fold_convert (sizetype, varidx);
+ }
if (TREE_CODE (array) == STRING_CST)
{
*ptr_offset = fold_convert (sizetype, offset);
+ if (mem_size)
+ *mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
+ if (decl)
+ *decl = NULL_TREE;
+ gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array)))
+ >= TREE_STRING_LENGTH (array));
return array;
}
- else if (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
+
+ if (!VAR_P (array) && TREE_CODE (array) != CONST_DECL)
+ return NULL_TREE;
+
+ tree init = ctor_for_folding (array);
+
+ /* Handle variables initialized with string literals. */
+ if (!init || init == error_mark_node)
+ return NULL_TREE;
+ if (TREE_CODE (init) == CONSTRUCTOR)
{
- int length;
- tree init = ctor_for_folding (array);
+ /* Convert the 64-bit constant offset to a wider type to avoid
+ overflow. */
+ offset_int wioff;
+ if (!base_off.is_constant (&wioff))
+ return NULL_TREE;
- /* Variables initialized to string literals can be handled too. */
- if (init == error_mark_node
- || !init
- || TREE_CODE (init) != STRING_CST)
- return 0;
+ wioff *= BITS_PER_UNIT;
+ if (!wi::fits_uhwi_p (wioff))
+ return NULL_TREE;
- /* Avoid const char foo[4] = "abcde"; */
- if (DECL_SIZE_UNIT (array) == NULL_TREE
- || TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
- || (length = TREE_STRING_LENGTH (init)) <= 0
- || compare_tree_int (DECL_SIZE_UNIT (array), length) < 0)
- return 0;
+ base_off = wioff.to_uhwi ();
+ unsigned HOST_WIDE_INT fieldoff = 0;
+ init = fold_ctor_reference (NULL_TREE, init, base_off, 0, array,
+ &fieldoff);
+ HOST_WIDE_INT cstoff;
+ if (!base_off.is_constant (&cstoff))
+ return NULL_TREE;
- /* If variable is bigger than the string literal, OFFSET must be constant
- and inside of the bounds of the string literal. */
- offset = fold_convert (sizetype, offset);
- if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0
- && (! tree_fits_uhwi_p (offset)
- || compare_tree_int (offset, length) >= 0))
- return 0;
+ cstoff = (cstoff - fieldoff) / BITS_PER_UNIT;
+ tree off = build_int_cst (sizetype, cstoff);
+ if (varidx)
+ offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, off);
+ else
+ offset = off;
+ }
+
+ if (!init)
+ return NULL_TREE;
+
+ *ptr_offset = offset;
+
+ tree eltype = TREE_TYPE (init);
+ tree initsize = TYPE_SIZE_UNIT (eltype);
+ if (mem_size)
+ *mem_size = initsize;
+
+ if (decl)
+ *decl = array;
+
+ if (TREE_CODE (init) == INTEGER_CST
+ && (TREE_CODE (TREE_TYPE (array)) == INTEGER_TYPE
+ || TYPE_MAIN_VARIANT (eltype) == char_type_node))
+ {
+ /* For a reference to (address of) a single constant character,
+ store the native representation of the character in CHARBUF.
+ If the reference is to an element of an array or a member
+ of a struct, only consider narrow characters until ctors
+ for wide character arrays are transformed to STRING_CSTs
+ like those for narrow arrays. */
+ unsigned char charbuf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
+ int len = native_encode_expr (init, charbuf, sizeof charbuf, 0);
+ if (len > 0)
+ {
+ /* Construct a string literal with elements of ELTYPE and
+ the representation above. Then strip
+ the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST. */
+ init = build_string_literal (len, (char *)charbuf, eltype);
+ init = TREE_OPERAND (TREE_OPERAND (init, 0), 0);
+ }
+ }
+
+ if (TREE_CODE (init) != STRING_CST)
+ return NULL_TREE;
+
+ gcc_checking_assert (tree_to_shwi (initsize) >= TREE_STRING_LENGTH (init));
+
+ return init;
+}
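For illustration (not part of the patch): given

    const char str[] = "hello";
    ... strlen (&str[2]) ...

the address handed to string_constant resolves through
get_addr_base_and_unit_offset to the VAR_DECL str with a byte offset of 2,
and ctor_for_folding then yields the STRING_CST initializer, so the call
would be expected to return "hello" with *PTR_OFFSET == 2, *MEM_SIZE == 6
and *DECL == str.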
+\f
+/* Compute the modular multiplicative inverse of A modulo B
+ using the extended Euclidean algorithm. Assumes A and B are coprime. */
+static wide_int
+mod_inv (const wide_int &a, const wide_int &b)
+{
+ /* Verify the assumption. */
+ gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
+
+ unsigned int p = a.get_precision () + 1;
+ gcc_checking_assert (b.get_precision () + 1 == p);
+ wide_int c = wide_int::from (a, p, UNSIGNED);
+ wide_int d = wide_int::from (b, p, UNSIGNED);
+ wide_int x0 = wide_int::from (0, p, UNSIGNED);
+ wide_int x1 = wide_int::from (1, p, UNSIGNED);
+
+ if (wi::eq_p (b, 1))
+ return wide_int::from (1, p, UNSIGNED);
+
+ while (wi::gt_p (c, 1, UNSIGNED))
+ {
+ wide_int t = d;
+ wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
+ c = t;
+ wide_int s = x0;
+ x0 = wi::sub (x1, wi::mul (q, x0));
+ x1 = s;
+ }
+ if (wi::lt_p (x1, 0, SIGNED))
+ x1 += d;
+ return x1;
+}
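For illustration (not part of the patch): the multiplicative inverse of 3
modulo 256 is 171, since 3 * 171 == 513 == 2 * 256 + 1.  A trivial
stand-alone check of that identity in plain C:

    #include <assert.h>
    int main (void) { assert ((3u * 171u) % 256u == 1u); return 0; }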
+
+/* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
+ is non-zero. Using C3 = ((1<<(prec-1)) | (C1 - 1)), rewrite it:
+ for C2 > 0 to x & C3 == C2
+ for C2 < 0 to x & C3 == (C2 & C3). */
+enum tree_code
+maybe_optimize_pow2p_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
+{
+ gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
+ tree treeop0 = gimple_assign_rhs1 (stmt);
+ tree treeop1 = gimple_assign_rhs2 (stmt);
+ tree type = TREE_TYPE (*arg0);
+ scalar_int_mode mode;
+ if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
+ return code;
+ if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
+ || TYPE_PRECISION (type) <= 1
+ || TYPE_UNSIGNED (type)
+ /* Signed x % c == 0 should have been optimized into unsigned modulo
+ earlier. */
+ || integer_zerop (*arg1)
+ /* If c is known to be non-negative, modulo will be expanded as unsigned
+ modulo. */
+ || get_range_pos_neg (treeop0) == 1)
+ return code;
+
+ /* x % c == d where d < 0 && d <= -c should always be false. */
+ if (tree_int_cst_sgn (*arg1) == -1
+ && -wi::to_widest (treeop1) >= wi::to_widest (*arg1))
+ return code;
+
+ int prec = TYPE_PRECISION (type);
+ wide_int w = wi::to_wide (treeop1) - 1;
+ w |= wi::shifted_mask (0, prec - 1, true, prec);
+ tree c3 = wide_int_to_tree (type, w);
+ tree c4 = *arg1;
+ if (tree_int_cst_sgn (*arg1) == -1)
+ c4 = wide_int_to_tree (type, w & wi::to_wide (*arg1));
+
+ rtx op0 = expand_normal (treeop0);
+ treeop0 = make_tree (TREE_TYPE (treeop0), op0);
+
+ bool speed_p = optimize_insn_for_speed_p ();
+
+ do_pending_stack_adjust ();
+
+ location_t loc = gimple_location (stmt);
+ struct separate_ops ops;
+ ops.code = TRUNC_MOD_EXPR;
+ ops.location = loc;
+ ops.type = TREE_TYPE (treeop0);
+ ops.op0 = treeop0;
+ ops.op1 = treeop1;
+ ops.op2 = NULL_TREE;
+ start_sequence ();
+ rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
+ EXPAND_NORMAL);
+ rtx_insn *moinsns = get_insns ();
+ end_sequence ();
+
+ unsigned mocost = seq_cost (moinsns, speed_p);
+ mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
+ mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
+
+ ops.code = BIT_AND_EXPR;
+ ops.location = loc;
+ ops.type = TREE_TYPE (treeop0);
+ ops.op0 = treeop0;
+ ops.op1 = c3;
+ ops.op2 = NULL_TREE;
+ start_sequence ();
+ rtx mur = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
+ EXPAND_NORMAL);
+ rtx_insn *muinsns = get_insns ();
+ end_sequence ();
+
+ unsigned mucost = seq_cost (muinsns, speed_p);
+ mucost += rtx_cost (mur, mode, EQ, 0, speed_p);
+ mucost += rtx_cost (expand_normal (c4), mode, EQ, 1, speed_p);
+ if (mocost <= mucost)
+ {
+ emit_insn (moinsns);
+ *arg0 = make_tree (TREE_TYPE (*arg0), mor);
+ return code;
+ }
+
+ emit_insn (muinsns);
+ *arg0 = make_tree (TREE_TYPE (*arg0), mur);
+ *arg1 = c4;
+ return code;
+}
+
+/* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
+ If C1 is odd, transform it to:
+ (X - C2) * C3 <= C4 (or >), where
+ C3 is the modular multiplicative inverse of C1 modulo 1<<prec and
+ C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
+ if C2 > ((1<<prec) - 1) % C1).
+ If C1 is even and C2 is 0, with S = ctz (C1), use
+ ((X * C3) r>> S) <= C4, where C3 is the modular multiplicative
+ inverse of C1>>S modulo 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
+
+ For signed (X % C1) == 0, if C1 is odd, transform it to (all operations
+ in it unsigned):
+ (X * C3) + C4 <= 2 * C4, where
+ C3 is the modular multiplicative inverse of (unsigned) C1 modulo 1<<prec
+ and C4 is (((1<<(prec - 1)) - 1) / C1).
+ If C1 is even, with S = ctz (C1), use
+ ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
+ where C3 is the modular multiplicative inverse of (unsigned)(C1>>S)
+ modulo 1<<prec and C4 is ((((1<<(prec - 1)) - 1) / (C1>>S)) & (-1<<S)).
+
+ See the Hacker's Delight book, section 10-17. */
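+/* For instance, for unsigned 32-bit x, x % 6 == 0 (so S = 1, C1>>S = 3)
+ becomes ((x * 0xaaaaaaab) r>> 1) <= 0x2aaaaaaa, where 0xaaaaaaab is the
+ inverse of 3 modulo 1 << 32 and 0x2aaaaaaa is (0xffffffff / 3) >> 1. */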
+enum tree_code
+maybe_optimize_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
+{
+ gcc_checking_assert (code == EQ_EXPR || code == NE_EXPR);
+ gcc_checking_assert (TREE_CODE (*arg1) == INTEGER_CST);
+
+ if (optimize < 2)
+ return code;
+
+ gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
+ if (stmt == NULL)
+ return code;
+
+ tree treeop0 = gimple_assign_rhs1 (stmt);
+ tree treeop1 = gimple_assign_rhs2 (stmt);
+ if (TREE_CODE (treeop0) != SSA_NAME
+ || TREE_CODE (treeop1) != INTEGER_CST
+ /* Don't optimize the undefined behavior case x % 0;
+ x % 1 should have been optimized into zero, punt if
+ it makes it here for whatever reason;
+ x % -c should have been optimized into x % c. */
+ || compare_tree_int (treeop1, 2) <= 0
+ /* Likewise, x % c == d where d >= c is always false. */
+ || tree_int_cst_le (treeop1, *arg1))
+ return code;
+
+ /* Unsigned x % pow2 is already handled correctly; for signed
+ modulo, handle it in maybe_optimize_pow2p_mod_cmp. */
+ if (integer_pow2p (treeop1))
+ return maybe_optimize_pow2p_mod_cmp (code, arg0, arg1);
+
+ tree type = TREE_TYPE (*arg0);
+ scalar_int_mode mode;
+ if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
+ return code;
+ if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
+ || TYPE_PRECISION (type) <= 1)
+ return code;
+
+ signop sgn = UNSIGNED;
+ /* If both operands are known to have the sign bit clear, handle
+ even the signed modulo case as unsigned. treeop1 is always
+ positive >= 2, checked above. */
+ if (!TYPE_UNSIGNED (type) && get_range_pos_neg (treeop0) != 1)
+ sgn = SIGNED;
+
+ if (!TYPE_UNSIGNED (type))
+ {
+ if (tree_int_cst_sgn (*arg1) == -1)
+ return code;
+ type = unsigned_type_for (type);
+ if (!type || TYPE_MODE (type) != TYPE_MODE (TREE_TYPE (*arg0)))
+ return code;
+ }
+
+ int prec = TYPE_PRECISION (type);
+ wide_int w = wi::to_wide (treeop1);
+ int shift = wi::ctz (w);
+ /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
+ C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
+ If C1 is odd, we can handle all cases by subtracting
+ C4 below. We could also handle the even C1 and C2 > -1U % C1 cases,
+ e.g. by testing for overflow on the subtraction, but punt on that
+ for now. */
+ if ((sgn == SIGNED || shift) && !integer_zerop (*arg1))
+ {
+ if (sgn == SIGNED)
+ return code;
+ wide_int x = wi::umod_trunc (wi::mask (prec, false, prec), w);
+ if (wi::gtu_p (wi::to_wide (*arg1), x))
+ return code;
+ }
+
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, treeop0)
+ {
+ gimple *use_stmt = USE_STMT (use_p);
+ /* Punt if treeop0 is used in the same bb in a division
+ or another modulo with the same divisor. We should expect
+ the division and modulo combined together. */
+ if (use_stmt == stmt
+ || gimple_bb (use_stmt) != gimple_bb (stmt))
+ continue;
+ if (!is_gimple_assign (use_stmt)
+ || (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR
+ && gimple_assign_rhs_code (use_stmt) != TRUNC_MOD_EXPR))
+ continue;
+ if (gimple_assign_rhs1 (use_stmt) != treeop0
+ || !operand_equal_p (gimple_assign_rhs2 (use_stmt), treeop1, 0))
+ continue;
+ return code;
+ }
+
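+ /* Compute C3 (the inverse of the odd part of C1 modulo 1 << prec) and
+ the bound constants C4/C5 used by the transforms described in the
+ function comment above. */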
+ w = wi::lrshift (w, shift);
+ wide_int a = wide_int::from (w, prec + 1, UNSIGNED);
+ wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
+ wide_int m = wide_int::from (mod_inv (a, b), prec, UNSIGNED);
+ tree c3 = wide_int_to_tree (type, m);
+ tree c5 = NULL_TREE;
+ wide_int d, e;
+ if (sgn == UNSIGNED)
+ {
+ d = wi::divmod_trunc (wi::mask (prec, false, prec), w, UNSIGNED, &e);
+ /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
+ otherwise use < or subtract one from C4. E.g. for
+ x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
+ x % 3U == 1 already needs to be
+ (x - 1) * 0xaaaaaaabU <= 0x55555554. */
+ if (!shift && wi::gtu_p (wi::to_wide (*arg1), e))
+ d -= 1;
+ if (shift)
+ d = wi::lrshift (d, shift);
+ }
+ else
+ {
+ e = wi::udiv_trunc (wi::mask (prec - 1, false, prec), w);
+ if (!shift)
+ d = wi::lshift (e, 1);
+ else
+ {
+ e = wi::bit_and (e, wi::mask (shift, true, prec));
+ d = wi::lrshift (e, shift - 1);
+ }
+ c5 = wide_int_to_tree (type, e);
+ }
+ tree c4 = wide_int_to_tree (type, d);
+
+ rtx op0 = expand_normal (treeop0);
+ treeop0 = make_tree (TREE_TYPE (treeop0), op0);
+
+ bool speed_p = optimize_insn_for_speed_p ();
+
+ do_pending_stack_adjust ();
+
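+ /* As in maybe_optimize_pow2p_mod_cmp, expand both the original modulo
+ form and the multiply/rotate form, cost them, and keep the cheaper
+ sequence. */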
+ location_t loc = gimple_location (stmt);
+ struct separate_ops ops;
+ ops.code = TRUNC_MOD_EXPR;
+ ops.location = loc;
+ ops.type = TREE_TYPE (treeop0);
+ ops.op0 = treeop0;
+ ops.op1 = treeop1;
+ ops.op2 = NULL_TREE;
+ start_sequence ();
+ rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
+ EXPAND_NORMAL);
+ rtx_insn *moinsns = get_insns ();
+ end_sequence ();
+
+ unsigned mocost = seq_cost (moinsns, speed_p);
+ mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
+ mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
+
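+ /* Build the replacement expression (X - C2) * C3 [+ C5] [r>> S] as a
+ tree so that it can be expanded and costed against the plain modulo
+ above. */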
+ tree t = fold_convert_loc (loc, type, treeop0);
+ if (!integer_zerop (*arg1))
+ t = fold_build2_loc (loc, MINUS_EXPR, type, t, fold_convert (type, *arg1));
+ t = fold_build2_loc (loc, MULT_EXPR, type, t, c3);
+ if (sgn == SIGNED)
+ t = fold_build2_loc (loc, PLUS_EXPR, type, t, c5);
+ if (shift)
+ {
+ tree s = build_int_cst (NULL_TREE, shift);
+ t = fold_build2_loc (loc, RROTATE_EXPR, type, t, s);
+ }
+
+ start_sequence ();
+ rtx mur = expand_normal (t);
+ rtx_insn *muinsns = get_insns ();
+ end_sequence ();
+
+ unsigned mucost = seq_cost (muinsns, speed_p);
+ mucost += rtx_cost (mur, mode, LE, 0, speed_p);
+ mucost += rtx_cost (expand_normal (c4), mode, LE, 1, speed_p);
+
+ if (mocost <= mucost)
+ {
+ emit_insn (moinsns);
+ *arg0 = make_tree (TREE_TYPE (*arg0), mor);
+ return code;
+ }
+
+ emit_insn (muinsns);
+ *arg0 = make_tree (type, mur);
+ *arg1 = c4;
+ return code == EQ_EXPR ? LE_EXPR : GT_EXPR;
}
\f
/* Generate code to calculate OPS, and exploded expression
/* We won't bother with store-flag operations involving function pointers
when function pointers must be canonicalized before comparisons. */
if (targetm.have_canonicalize_funcptr_for_compare ()
- && ((TREE_CODE (TREE_TYPE (arg0)) == POINTER_TYPE
- && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg0)))
- == FUNCTION_TYPE))
- || (TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE
- && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg1)))
- == FUNCTION_TYPE))))
+ && ((POINTER_TYPE_P (TREE_TYPE (arg0))
+ && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
+ || (POINTER_TYPE_P (TREE_TYPE (arg1))
+ && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
return 0;
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
-
+
/* For vector typed comparisons emit code to generate the desired
all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
expander for this. */
}
}
+ /* Optimize (x % C1) == C2 or (x % C1) != C2, if it is beneficial,
+ into (x - C2) * C3 < C4. */
+ if ((ops->code == EQ_EXPR || ops->code == NE_EXPR)
+ && TREE_CODE (arg0) == SSA_NAME
+ && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ enum tree_code code = maybe_optimize_mod_cmp (ops->code, &arg0, &arg1);
+ if (code != ops->code)
+ {
+ struct separate_ops nops = *ops;
+ nops.code = ops->code = code;
+ nops.op0 = arg0;
+ nops.op1 = arg1;
+ nops.type = TREE_TYPE (arg0);
+ return do_store_flag (&nops, target, mode);
+ }
+ }
+
/* Get the rtx comparison code to use. We know that EXP is a comparison
operation of some type. Some comparisons against 1 and -1 can be
converted to comparisons with zero. Do so here so that the tests
emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
default_label, default_probability);
-
/* If index is in range, it must fit in Pmode.
Convert to Pmode so we can index with it. */
if (mode != Pmode)
- index = convert_to_mode (Pmode, index, 1);
+ {
+ unsigned int width;
+
+ /* We know the value of INDEX is between 0 and RANGE. If we have a
+ sign-extended subreg, and RANGE does not have the sign bit set, then
+ we have a value that is valid for both sign and zero extension. In
+ this case, we get better code if we sign extend. */
+ if (GET_CODE (index) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (index)
+ && SUBREG_PROMOTED_SIGNED_P (index)
+ && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
+ index = convert_to_mode (Pmode, index, 0);
+ else
+ index = convert_to_mode (Pmode, index, 1);
+ }
/* Don't let a MEM slip through, because then INDEX that comes
out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
static rtx
const_vector_mask_from_tree (tree exp)
{
- rtvec v;
- unsigned i, units;
- tree elt;
- machine_mode inner, mode;
-
- mode = TYPE_MODE (TREE_TYPE (exp));
- units = VECTOR_CST_NELTS (exp);
- inner = GET_MODE_INNER (mode);
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ machine_mode inner = GET_MODE_INNER (mode);
- v = rtvec_alloc (units);
-
- for (i = 0; i < units; ++i)
+ rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
+ VECTOR_CST_NELTS_PER_PATTERN (exp));
+ unsigned int count = builder.encoded_nelts ();
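+ /* Only the explicitly encoded elements need to be copied; the builder
+ recreates the full (possibly variable-length) vector from the
+ NPATTERNS * NELTS_PER_PATTERN encoding. */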
+ for (unsigned int i = 0; i < count; ++i)
{
- elt = VECTOR_CST_ELT (exp, i);
-
+ tree elt = VECTOR_CST_ELT (exp, i);
gcc_assert (TREE_CODE (elt) == INTEGER_CST);
if (integer_zerop (elt))
- RTVEC_ELT (v, i) = CONST0_RTX (inner);
+ builder.quick_push (CONST0_RTX (inner));
else if (integer_onep (elt)
|| integer_minus_onep (elt))
- RTVEC_ELT (v, i) = CONSTM1_RTX (inner);
+ builder.quick_push (CONSTM1_RTX (inner));
else
gcc_unreachable ();
}
-
- return gen_rtx_CONST_VECTOR (mode, v);
+ return builder.build ();
}
/* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
{
wide_int res = wi::zero (GET_MODE_PRECISION (mode));
tree elt;
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* The result has a fixed number of bits so the input must too. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
elt = VECTOR_CST_ELT (exp, i);
gcc_assert (TREE_CODE (elt) == INTEGER_CST);
static rtx
const_vector_from_tree (tree exp)
{
- rtvec v;
- unsigned i, units;
- tree elt;
- machine_mode inner, mode;
-
- mode = TYPE_MODE (TREE_TYPE (exp));
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
if (initializer_zerop (exp))
return CONST0_RTX (mode);
if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
return const_vector_mask_from_tree (exp);
- units = VECTOR_CST_NELTS (exp);
- inner = GET_MODE_INNER (mode);
-
- v = rtvec_alloc (units);
+ machine_mode inner = GET_MODE_INNER (mode);
- for (i = 0; i < units; ++i)
+ rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
+ VECTOR_CST_NELTS_PER_PATTERN (exp));
+ unsigned int count = builder.encoded_nelts ();
+ for (unsigned int i = 0; i < count; ++i)
{
- elt = VECTOR_CST_ELT (exp, i);
-
+ tree elt = VECTOR_CST_ELT (exp, i);
if (TREE_CODE (elt) == REAL_CST)
- RTVEC_ELT (v, i) = const_double_from_real_value (TREE_REAL_CST (elt),
- inner);
+ builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
+ inner));
else if (TREE_CODE (elt) == FIXED_CST)
- RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
- inner);
+ builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
+ inner));
else
- RTVEC_ELT (v, i) = immed_wide_int_const (wi::to_wide (elt), inner);
+ builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
+ inner));
}
-
- return gen_rtx_CONST_VECTOR (mode, v);
+ return builder.build ();
}
/* Build a decl for a personality function given a language prefix. */