+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (opt_mode::else_blk): New function.
+ (int_mode_for_mode): Declare.
+ * stor-layout.c (int_mode_for_mode): Return an opt_scalar_int_mode.
+ * builtins.c (expand_builtin_signbit): Adjust for new int_mode_for_mode
+ return type.
+ * cfgexpand.c (expand_debug_expr): Likewise.
+ * combine.c (gen_lowpart_or_truncate): Likewise.
+ (gen_lowpart_for_combine): Likewise.
+ * config/aarch64/aarch64.c (aarch64_float_const_rtx_p): Likewise.
+ (aarch64_can_const_movi_rtx_p): Likewise.
+ (aarch64_rtx_costs): Likewise.
+ (aarch64_emit_approx_sqrt): Likewise.
+ * config/avr/avr.c (avr_to_int_mode): Likewise.
+ (avr_out_plus_1): Likewise.
+ (avr_out_plus): Likewise.
+ (avr_out_round): Likewise.
+ * config/i386/i386.c (ix86_split_to_parts): Likewise.
+ * config/s390/s390.c (s390_expand_vec_compare_cc): Likewise.
+ (s390_expand_vcond): Likewise.
+ * config/spu/spu.c (spu_split_immediate): Likewise.
+ (spu_expand_mov): Likewise.
+ * dse.c (get_stored_val): Likewise.
+ * expmed.c (store_bit_field_1): Likewise.
+ (convert_extracted_bit_field): Use int_mode_for_mode instead of
+ int_mode_for_size.
+ (extract_bit_field_1): Adjust for new int_mode_for_mode return type.
+ (extract_low_bits): Likewise.
+ * expr.c (emit_group_load_1): Likewise. Separate out the BLKmode
+ handling rather than repeating the check.
+ (emit_group_store): Likewise.
+ (emit_move_via_integer): Adjust for new int_mode_for_mode return type.
+ * optabs.c (expand_absneg_bit): Likewise.
+ (expand_copysign_absneg): Likewise.
+ (expand_copysign_bit): Likewise.
+ * tree-if-conv.c (ifcvt_can_use_mask_load_store): Likewise.
+ * tree-vect-slp.c (vect_transform_slp_perm_load): Likewise.
+ * tree-vect-stmts.c (vect_gen_perm_mask_any): Likewise.
+ * var-tracking.c (prepare_call_arguments): Likewise.
+ * config/powerpcspe/powerpcspe.c (rs6000_do_expand_vec_perm): Use
+ int_mode_for_mode instead of mode_for_size.
+ * config/rs6000/rs6000.c (rs6000_do_expand_vec_perm): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
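
The conversions in the hunks below all follow the same pattern.  As a rough
illustration only (a hypothetical caller, not part of the patch), code that
previously compared the result of int_mode_for_mode against BLKmode now goes
through the opt_scalar_int_mode accessors:

  /* Hypothetical caller, shown only to illustrate the new idioms.  */
  scalar_int_mode imode;
  if (int_mode_for_mode (mode).exists (&imode))
    /* Replaces the old "imode != BLKmode" test.  */
    x = gen_lowpart (imode, x);

  /* Replaces "gcc_assert (imode != BLKmode)" where failure is impossible.  */
  imode = int_mode_for_mode (mode).require ();

  /* Keeps BLKmode as an explicit fallback value where one is still wanted,
     as in the adjust_bitfield_address_size calls in expmed.c.  */
  machine_mode mem_mode = int_mode_for_mode (mode).else_blk ();
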
if (GET_MODE_SIZE (fmode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (fmode);
- gcc_assert (imode != BLKmode);
+ imode = int_mode_for_mode (fmode).require ();
temp = gen_lowpart (imode, temp);
}
else
}
else
{
- machine_mode ifmode = int_mode_for_mode (mode);
- machine_mode ihmode = int_mode_for_mode (imode);
+ scalar_int_mode ifmode;
+ scalar_int_mode ihmode;
rtx halfsize;
- if (ifmode == BLKmode || ihmode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&ifmode)
+ || !int_mode_for_mode (imode).exists (&ihmode))
return NULL;
halfsize = GEN_INT (GET_MODE_BITSIZE (ihmode));
re = op0;
{
/* Bit-cast X into an integer mode. */
if (!SCALAR_INT_MODE_P (GET_MODE (x)))
- x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
- x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
+ x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
+ x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
x, GET_MODE (x));
}
if (imode == VOIDmode)
{
- imode = int_mode_for_mode (omode);
+ imode = int_mode_for_mode (omode).require ();
x = gen_lowpart_common (imode, x);
if (x == NULL)
goto fail;
&& SCALAR_FLOAT_MODE_P (mode)
&& aarch64_reinterpret_float_as_int (x, &ival))
{
- machine_mode imode = mode == HFmode ? SImode : int_mode_for_mode (mode);
+ machine_mode imode = (mode == HFmode
+ ? SImode
+ : int_mode_for_mode (mode).require ());
int num_instr = aarch64_internal_mov_immediate
(NULL_RTX, gen_int_mode (ival, imode), false, imode);
return num_instr < 3;
if (aarch64_float_const_zero_rtx_p (x))
return true;
- imode = int_mode_for_mode (mode);
+ imode = int_mode_for_mode (mode).require ();
}
else if (GET_CODE (x) == CONST_INT
&& SCALAR_INT_MODE_P (mode))
bool succeed = aarch64_reinterpret_float_as_int (x, &ival);
gcc_assert (succeed);
- machine_mode imode = mode == HFmode ? SImode
- : int_mode_for_mode (mode);
+ machine_mode imode = (mode == HFmode
+ ? SImode
+ : int_mode_for_mode (mode).require ());
int ncost = aarch64_internal_mov_immediate
(NULL_RTX, gen_int_mode (ival, imode), false, imode);
*cost += COSTS_N_INSNS (ncost);
}
machine_mode mmsk
- = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (mode)),
+ = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (mode)).require (),
GET_MODE_NUNITS (mode));
if (!recp)
{
return VOIDmode == mode
? x
- : simplify_gen_subreg (int_mode_for_mode (mode), x, mode, 0);
+ : simplify_gen_subreg (int_mode_for_mode (mode).require (), x, mode, 0);
}
namespace {
machine_mode mode = GET_MODE (xop[0]);
/* INT_MODE of the same size. */
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
/* Number of bytes to operate on. */
int n_bytes = GET_MODE_SIZE (mode);
rtx xpattern = INSN_P (insn) ? single_set (as_a <rtx_insn *> (insn)) : insn;
rtx xdest = SET_DEST (xpattern);
machine_mode mode = GET_MODE (xdest);
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
int n_bytes = GET_MODE_SIZE (mode);
enum rtx_code code_sat = GET_CODE (SET_SRC (xpattern));
enum rtx_code code
avr_out_round (rtx_insn *insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
{
machine_mode mode = GET_MODE (xop[0]);
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
// The smallest fractional bit not cleared by the rounding is 2^(-RP).
int fbit = (int) GET_MODE_FBIT (mode);
double_int i_add = double_int_zero.set_bit (fbit-1 - INTVAL (xop[2]));
if (GET_CODE (operand) == CONST_VECTOR)
{
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
/* Caution: if we looked through a constant pool memory above,
the operand may actually have a different mode now. That's
ok, since we want to pun this all the way back to an integer. */
imode = vmode;
if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
- {
- imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
- imode = mode_for_vector (imode, nelt);
- }
+ imode = mode_for_vector
+ (int_mode_for_mode (GET_MODE_INNER (vmode)).require (), nelt);
x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
x = expand_vec_perm (vmode, op0, op1, x, target);
imode = vmode;
if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
- {
- imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
- imode = mode_for_vector (imode, nelt);
- }
+ imode = mode_for_vector
+ (int_mode_for_mode (GET_MODE_INNER (vmode)).require (), nelt);
x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
x = expand_vec_perm (vmode, op0, op1, x, target);
case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
default: gcc_unreachable ();
}
- scratch_mode = mode_for_vector (
- int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
- GET_MODE_NUNITS (GET_MODE (cmp1)));
+ scratch_mode = mode_for_vector
+ (int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))).require (),
+ GET_MODE_NUNITS (GET_MODE (cmp1)));
gcc_assert (scratch_mode != BLKmode);
if (inv_p)
/* We always use an integral type vector to hold the comparison
result. */
- result_mode = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (cmp_mode)),
- GET_MODE_NUNITS (cmp_mode));
+ result_mode = mode_for_vector
+ (int_mode_for_mode (GET_MODE_INNER (cmp_mode)).require (),
+ GET_MODE_NUNITS (cmp_mode));
result_target = gen_reg_rtx (result_mode);
/* We allow vector immediates as comparison operands that
unsigned char arrlo[16];
rtx to, temp, hi, lo;
int i;
- machine_mode imode = mode;
/* We need to do reals as ints because the constant used in the
IOR might not be a legitimate real constant. */
- imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
constant_to_array (mode, ops[1], arrhi);
if (imode != mode)
to = simplify_gen_subreg (imode, ops[0], mode, 0);
unsigned char arr_andbi[16];
rtx to, reg_fsmbi, reg_and;
int i;
- machine_mode imode = mode;
/* We need to do reals as ints because the constant used in the
* AND might not be a legitimate real constant. */
- imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = int_mode_for_mode (mode).require ();
constant_to_array (mode, ops[1], arr_fsmbi);
if (imode != mode)
to = simplify_gen_subreg(imode, ops[0], GET_MODE (ops[0]), 0);
if (GET_CODE (ops[1]) == SUBREG && !valid_subreg (ops[1]))
{
rtx from = SUBREG_REG (ops[1]);
- machine_mode imode = int_mode_for_mode (GET_MODE (from));
+ scalar_int_mode imode = int_mode_for_mode (GET_MODE (from)).require ();
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_CLASS (imode) == MODE_INT
{
/* The store is a memset (addr, const_val, const_size). */
gcc_assert (CONST_INT_P (store_info->rhs));
- store_mode = int_mode_for_mode (read_mode);
- if (store_mode == BLKmode)
+ scalar_int_mode int_store_mode;
+ if (!int_mode_for_mode (read_mode).exists (&int_store_mode))
read_reg = NULL_RTX;
else if (store_info->rhs == const0_rtx)
- read_reg = extract_low_bits (read_mode, store_mode, const0_rtx);
- else if (GET_MODE_BITSIZE (store_mode) > HOST_BITS_PER_WIDE_INT
+ read_reg = extract_low_bits (read_mode, int_store_mode, const0_rtx);
+ else if (GET_MODE_BITSIZE (int_store_mode) > HOST_BITS_PER_WIDE_INT
|| BITS_PER_UNIT >= HOST_BITS_PER_WIDE_INT)
read_reg = NULL_RTX;
else
c |= (c << shift);
shift <<= 1;
}
- read_reg = gen_int_mode (c, store_mode);
- read_reg = extract_low_bits (read_mode, store_mode, read_reg);
+ read_reg = gen_int_mode (c, int_store_mode);
+ read_reg = extract_low_bits (read_mode, int_store_mode, read_reg);
}
}
else if (store_info->const_rhs
if we aren't. This must come after the entire register case above,
since that case is valid for any mode. The following cases are only
valid for integral modes. */
- {
- machine_mode imode = int_mode_for_mode (GET_MODE (op0));
- if (imode != GET_MODE (op0))
- {
- if (MEM_P (op0))
- op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
- else
- {
- gcc_assert (imode != BLKmode);
- op0 = gen_lowpart (imode, op0);
- }
- }
- }
+ opt_scalar_int_mode opt_imode = int_mode_for_mode (GET_MODE (op0));
+ scalar_int_mode imode;
+ if (!opt_imode.exists (&imode) || imode != GET_MODE (op0))
+ {
+ if (MEM_P (op0))
+ op0 = adjust_bitfield_address_size (op0, opt_imode.else_blk (),
+ 0, MEM_SIZE (op0));
+ else
+ op0 = gen_lowpart (opt_imode.require (), op0);
+ }
/* Storing an lsb-aligned field in a register
can be done with a movstrict instruction. */
&& GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
&& GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
{
- value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
+ value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)).require ());
emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
}
value via a SUBREG. */
if (!SCALAR_INT_MODE_P (tmode))
{
- scalar_int_mode int_mode
- = int_mode_for_size (GET_MODE_BITSIZE (tmode), 0).require ();
+ scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
x = convert_to_mode (int_mode, x, unsignedp);
x = force_reg (int_mode, x);
return gen_lowpart (tmode, x);
bool reverse, bool fallback_p, rtx *alt_rtl)
{
rtx op0 = str_rtx;
- machine_mode int_mode;
machine_mode mode1;
if (tmode == VOIDmode)
/* Make sure we are playing with integral modes. Pun with subregs
if we aren't. */
- {
- machine_mode imode = int_mode_for_mode (GET_MODE (op0));
- if (imode != GET_MODE (op0))
- {
- if (MEM_P (op0))
- op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
- else if (imode != BLKmode)
- {
- op0 = gen_lowpart (imode, op0);
+ opt_scalar_int_mode opt_imode = int_mode_for_mode (GET_MODE (op0));
+ scalar_int_mode imode;
+ if (!opt_imode.exists (&imode) || imode != GET_MODE (op0))
+ {
+ if (MEM_P (op0))
+ op0 = adjust_bitfield_address_size (op0, opt_imode.else_blk (),
+ 0, MEM_SIZE (op0));
+ else if (opt_imode.exists (&imode))
+ {
+ op0 = gen_lowpart (imode, op0);
- /* If we got a SUBREG, force it into a register since we
- aren't going to be able to do another SUBREG on it. */
- if (GET_CODE (op0) == SUBREG)
- op0 = force_reg (imode, op0);
- }
- else
- {
- HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
- rtx mem = assign_stack_temp (GET_MODE (op0), size);
- emit_move_insn (mem, op0);
- op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
- }
- }
- }
+ /* If we got a SUBREG, force it into a register since we
+ aren't going to be able to do another SUBREG on it. */
+ if (GET_CODE (op0) == SUBREG)
+ op0 = force_reg (imode, op0);
+ }
+ else
+ {
+ HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
+ rtx mem = assign_stack_temp (GET_MODE (op0), size);
+ emit_move_insn (mem, op0);
+ op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
+ }
+ }
/* ??? We currently assume TARGET is at least as big as BITSIZE.
If that's wrong, the solution is to test for it and set TARGET to 0
/* Find a correspondingly-sized integer field, so we can apply
shifts and masks to it. */
- int_mode = int_mode_for_mode (tmode);
- if (int_mode == BLKmode)
- int_mode = int_mode_for_mode (mode);
- /* Should probably push op0 out to memory and then do a load. */
- gcc_assert (int_mode != BLKmode);
+ scalar_int_mode int_mode;
+ if (!int_mode_for_mode (tmode).exists (&int_mode))
+ /* If this fails, we should probably push op0 out to memory and then
+ do a load. */
+ int_mode = int_mode_for_mode (mode).require ();
target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum, target,
unsignedp, reverse);
return x;
}
- src_int_mode = int_mode_for_mode (src_mode);
- int_mode = int_mode_for_mode (mode);
- if (src_int_mode == BLKmode || int_mode == BLKmode)
+ if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
+ || !int_mode_for_mode (mode).exists (&int_mode))
return NULL_RTX;
if (!MODES_TIEABLE_P (src_int_mode, src_mode))
&& !MEM_P (orig_src)
&& GET_CODE (orig_src) != CONCAT)
{
- machine_mode imode = int_mode_for_mode (GET_MODE (orig_src));
- if (imode == BLKmode)
- src = assign_stack_temp (GET_MODE (orig_src), ssize);
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
+ {
+ src = gen_reg_rtx (imode);
+ emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
+ }
else
- src = gen_reg_rtx (imode);
- if (imode != BLKmode)
- src = gen_lowpart (GET_MODE (orig_src), src);
- emit_move_insn (src, orig_src);
- /* ...and back again. */
- if (imode != BLKmode)
- src = gen_lowpart (imode, src);
+ {
+ src = assign_stack_temp (GET_MODE (orig_src), ssize);
+ emit_move_insn (src, orig_src);
+ }
emit_group_load_1 (tmps, dst, src, type, ssize);
return;
}
if (!SCALAR_INT_MODE_P (m)
&& !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
{
- machine_mode imode = int_mode_for_mode (GET_MODE (orig_dst));
- if (imode == BLKmode)
- dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
+ {
+ dst = gen_reg_rtx (imode);
+ emit_group_store (dst, src, type, ssize);
+ dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ }
else
- dst = gen_reg_rtx (imode);
- emit_group_store (dst, src, type, ssize);
- if (imode != BLKmode)
- dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ {
+ dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
+ emit_group_store (dst, src, type, ssize);
+ }
emit_move_insn (orig_dst, dst);
return;
}
static rtx_insn *
emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
{
- machine_mode imode;
+ scalar_int_mode imode;
enum insn_code code;
/* There must exist a mode of the exact size we require. */
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL;
/* The target must support moves in this mode. */
ALWAYS_INLINE opt_mode (from_int m) : m_mode (machine_mode (m)) {}
machine_mode else_void () const;
+ machine_mode else_blk () const;
T require () const;
bool exists () const;
return m_mode;
}
+/* If the T exists, return its enum value, otherwise return E_BLKmode. */
+
+template<typename T>
+inline machine_mode
+opt_mode<T>::else_blk () const
+{
+ return m_mode == E_VOIDmode ? E_BLKmode : m_mode;
+}
+
/* Assert that the object contains a T and return it. */
template<typename T>
enum mode_class);
-/* Return an integer mode of the exact same size as the input mode,
- or BLKmode on failure. */
+/* Return an integer mode of exactly the same size as the input mode,
+   if one exists. */
-extern machine_mode int_mode_for_mode (machine_mode);
+extern opt_scalar_int_mode int_mode_for_mode (machine_mode);
extern machine_mode bitwise_mode_for_mode (machine_mode);
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
word = 0;
nwords = 1;
{
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
op1 = gen_lowpart (imode, op1);
}
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
int bitpos, bool op0_is_abs)
{
- machine_mode imode;
+ scalar_int_mode imode;
int word, nwords, i;
rtx temp;
rtx_insn *insns;
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
word = 0;
nwords = 1;
return mode;
}
-/* Find an integer mode of the exact same size, or BLKmode on failure. */
+/* Return an integer mode of exactly the same size as MODE, if one exists. */
-machine_mode
+opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
switch (GET_MODE_CLASS (mode))
{
case MODE_INT:
case MODE_PARTIAL_INT:
- break;
+ return as_a <scalar_int_mode> (mode);
case MODE_COMPLEX_INT:
case MODE_COMPLEX_FLOAT:
case MODE_VECTOR_UFRACT:
case MODE_VECTOR_UACCUM:
case MODE_POINTER_BOUNDS:
- mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
- break;
+ return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);
case MODE_RANDOM:
if (mode == BLKmode)
- break;
+ return opt_scalar_int_mode ();
/* fall through */
default:
gcc_unreachable ();
}
-
- return mode;
}
/* Find a mode that can be used for efficient bitwise operations on MODE.
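
As a minimal sanity sketch of the new stor-layout.c implementation above
(illustrative only, assuming a typical target where SImode is 32 bits and
DImode is 64 bits):

  /* Not part of the patch; checking-style assertions for illustration.  */
  gcc_checking_assert (int_mode_for_mode (SFmode).require () == SImode);
  gcc_checking_assert (int_mode_for_mode (SCmode).require () == DImode);
  gcc_checking_assert (!int_mode_for_mode (BLKmode).exists ());
  gcc_checking_assert (int_mode_for_mode (BLKmode).else_blk () == BLKmode);
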
/* Mask should be integer mode of the same size as the load/store
mode. */
mode = TYPE_MODE (TREE_TYPE (lhs));
- if (int_mode_for_mode (mode) == BLKmode
- || VECTOR_MODE_P (mode))
+ if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
return false;
if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
/* The generic VEC_PERM_EXPR code always uses an integral type of the
same size as the vector element being permuted. */
mask_element_type = lang_hooks.types.type_for_mode
- (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
mask_type = get_vectype_for_scalar_type (mask_element_type);
nunits = TYPE_VECTOR_SUBPARTS (vectype);
mask = XALLOCAVEC (unsigned char, nunits);
nunits = TYPE_VECTOR_SUBPARTS (vectype);
mask_elt_type = lang_hooks.types.type_for_mode
- (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
mask_type = get_vectype_for_scalar_type (mask_elt_type);
mask_elts = XALLOCAVEC (tree, nunits);
{
/* For non-integer stack argument see also if they weren't
initialized by integers. */
- machine_mode imode = int_mode_for_mode (GET_MODE (mem));
- if (imode != GET_MODE (mem) && imode != BLKmode)
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (mem)).exists (&imode)
+ && imode != GET_MODE (mem))
{
val = cselib_lookup (adjust_address_nv (mem, imode, 0),
imode, 0, VOIDmode);