if (type_precision < BITS_PER_WORD)
{
const scalar_int_mode m
- = smallest_int_mode_for_size (type_precision + 1);
+ = smallest_int_mode_for_size (type_precision + 1).require ();
tree new_type = gnat_type_for_mode (m, 1);
op = fold_convert (new_type, op);
modulus = fold_convert (new_type, modulus);
for its mode since operations are ultimately performed in the mode. */
if (TYPE_PRECISION (type) < precision)
{
- const scalar_int_mode m = smallest_int_mode_for_size (precision);
+ const scalar_int_mode m
+ = smallest_int_mode_for_size (precision).require ();
op_type = gnat_type_for_mode (m, 1);
modulus = fold_convert (op_type, modulus);
lhs = fold_convert (op_type, lhs);
if (maybe_gt (bitsize, MAX_BITSIZE_MODE_ANY_INT))
return NULL;
/* Bitfield. */
- mode1 = smallest_int_mode_for_size (bitsize);
+ mode1 = smallest_int_mode_for_size (bitsize).require ();
}
poly_int64 bytepos = bits_to_bytes_round_down (bitpos);
if (maybe_ne (bytepos, 0))
{
/* Be careful not to go beyond the extracted object and maintain the
natural alignment of the memory. */
- wanted_inner_mode = smallest_int_mode_for_size (len);
+ wanted_inner_mode = smallest_int_mode_for_size (len).require ();
while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
> GET_MODE_BITSIZE (wanted_inner_mode))
wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
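As a worked example of the widening loop above, take a hypothetical
extraction of LEN = 8 bits at bit position POS = 12: QImode fails the
check (12 % 8 + 8 = 12 > 8), HImode fails (12 % 16 + 8 = 20 > 16), and
SImode succeeds (12 % 32 + 8 = 20 <= 32), so wanted_inner_mode settles
on SImode.  The new .require () simply preserves the old
assert-on-failure behaviour at this call site.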
(when !STRICT_ALIGNMENT) - this is smaller and faster. */
if (size > 0 && size < 16 && !STRICT_ALIGNMENT)
{
- next_mode = smallest_mode_for_size (size * BITS_PER_UNIT, MODE_INT);
+ next_mode = smallest_mode_for_size
+ (size * BITS_PER_UNIT, MODE_INT).require ();
int n_bytes = GET_MODE_SIZE (next_mode).to_constant ();
gcc_assert (n_bytes <= mode_bytes);
offset -= n_bytes - size;
(when !STRICT_ALIGNMENT) - this is smaller and faster. */
if (len > 0 && len < 16 && !STRICT_ALIGNMENT)
{
- next_mode = smallest_mode_for_size (len * BITS_PER_UNIT, MODE_INT);
+ next_mode = smallest_mode_for_size
+ (len * BITS_PER_UNIT, MODE_INT).require ();
int n_bytes = GET_MODE_SIZE (next_mode).to_constant ();
gcc_assert (n_bytes <= mode_bytes);
offset -= n_bytes - len;
while (piece > size)
piece >>= 1;
- mode = smallest_int_mode_for_size (piece * BITS_PER_UNIT);
+ mode = smallest_int_mode_for_size (piece * BITS_PER_UNIT).require ();
/* If we don't re-use temporaries, the scheduler gets carried away,
and the register pressure gets unnecessarily high. */
if (0 && tmpx[i] && GET_MODE (tmpx[i]) == mode)
gcc_assert (!TARGET_IDIV);
scalar_int_mode libval_mode
- = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
+ = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode)).require ();
rtx libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
libval_mode, op0, mode, op1, mode);
if (elem_size == 4
|| elem_size == 8
|| (TARGET_AVX512BW && (elem_size == 1 || elem_size == 2)))
- return smallest_int_mode_for_size (nunits);
+ return smallest_int_mode_for_size (nunits).require ();
}
scalar_int_mode elem_mode
- = smallest_int_mode_for_size (elem_size * BITS_PER_UNIT);
+ = smallest_int_mode_for_size (elem_size * BITS_PER_UNIT).require ();
gcc_assert (elem_size * nunits == vector_size);
elt += (BYTES_BIG_ENDIAN ? -1 : 1) * (sz - isz) / isz;
}
else if (isz > sz)
- inner = smallest_int_mode_for_size (sz * BITS_PER_UNIT);
+ inner = smallest_int_mode_for_size (sz * BITS_PER_UNIT).require ();
val = const_vector_elt_as_int (op, elt);
return EASY_VECTOR_MSB (val, inner);
})
| (small_val & mask)))
return false;
splat_val = small_val;
- inner = smallest_int_mode_for_size (bitsize);
+ inner = smallest_int_mode_for_size (bitsize).require ();
}
/* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
return true;
}
- smode = smallest_int_mode_for_size (bitsize);
+ smode = smallest_int_mode_for_size (bitsize).require ();
smode_bsize = GET_MODE_BITSIZE (smode);
mode_bsize = GET_MODE_BITSIZE (mode);
at the moment but may wish to revisit. */
if (intoffset % BITS_PER_WORD != 0)
mode = smallest_int_mode_for_size (BITS_PER_WORD
- - intoffset % BITS_PER_WORD);
+ - intoffset % BITS_PER_WORD).require ();
else
mode = word_mode;
get_gcov_type (void)
{
scalar_int_mode mode
- = smallest_int_mode_for_size (LONG_LONG_TYPE_SIZE > 32 ? 64 : 32);
+ = smallest_int_mode_for_size
+ (LONG_LONG_TYPE_SIZE > 32 ? 64 : 32).require ();
return lang_hooks.types.type_for_mode (mode, false);
}
static tree
get_gcov_unsigned_t (void)
{
- scalar_int_mode mode = smallest_int_mode_for_size (32);
+ scalar_int_mode mode = smallest_int_mode_for_size (32).require ();
return lang_hooks.types.type_for_mode (mode, true);
}
\f
if (store_info->const_rhs
&& known_le (access_size, GET_MODE_SIZE (MAX_MODE_INT)))
{
- auto new_mode = smallest_int_mode_for_size (access_size * BITS_PER_UNIT);
+ auto new_mode = smallest_int_mode_for_size
+ (access_size * BITS_PER_UNIT).require ();
auto byte = subreg_lowpart_offset (new_mode, store_mode);
rtx ret
= simplify_subreg (new_mode, store_info->const_rhs, store_mode, byte);
objects are meant to be handled before calling this function. */
fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
if (value_mode == VOIDmode)
- value_mode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);
+ value_mode
+ = smallest_int_mode_for_size (nwords * BITS_PER_WORD).require ();
last = get_last_insn ();
for (int i = 0; i < nwords; i++)
if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
{
scalar_int_mode full_mode
- = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
+ = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode)).require ();
gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
!= CODE_FOR_nothing);
{
rtx new_from;
scalar_int_mode full_mode
- = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
+ = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode)).require ();
convert_optab ctab = unsignedp ? zext_optab : sext_optab;
enum insn_code icode;
}
}
- return smallest_int_mode_for_size (size * BITS_PER_UNIT);
+ return smallest_int_mode_for_size (size * BITS_PER_UNIT).require ();
}
/* This function contains the main loop used for expanding a block
if (mode != GET_MODE (y_addr))
{
scalar_int_mode xmode
- = smallest_int_mode_for_size (GET_MODE_BITSIZE (mode));
+ = smallest_int_mode_for_size (GET_MODE_BITSIZE (mode)).require ();
scalar_int_mode ymode
= smallest_int_mode_for_size (GET_MODE_BITSIZE
- (GET_MODE (y_addr)));
+ (GET_MODE (y_addr))).require ();
if (GET_MODE_BITSIZE (xmode) < GET_MODE_BITSIZE (ymode))
mode = ymode;
else
n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
dst_words = XALLOCAVEC (rtx, n_regs);
bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
- min_mode = smallest_int_mode_for_size (bitsize);
+ min_mode = smallest_int_mode_for_size (bitsize).require ();
/* Copy the structure BITSIZE bits at a time. */
for (bitpos = 0, xbitpos = padding_correction;
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
machine_mode temp_mode = GET_MODE (temp);
if (temp_mode == BLKmode || temp_mode == VOIDmode)
- temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
+ temp_mode
+ = smallest_int_mode_for_size (size * BITS_PER_UNIT).require ();
rtx temp_target = gen_reg_rtx (temp_mode);
emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
temp = temp_target;
word size, we need to load the value (see again store_bit_field). */
if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
{
- temp_mode = smallest_int_mode_for_size (bitsize);
+ temp_mode = smallest_int_mode_for_size (bitsize).require ();
temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
temp_mode, false, NULL);
}
if (orig_precres == precres && precop <= BITS_PER_WORD)
{
int p = MAX (min_precision, precop);
- scalar_int_mode m = smallest_int_mode_for_size (p);
+ scalar_int_mode m = smallest_int_mode_for_size (p).require ();
tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
uns0_p && uns1_p
&& unsr_p);
if (orig_precres == precres)
{
int p = MAX (prec0, prec1);
- scalar_int_mode m = smallest_int_mode_for_size (p);
+ scalar_int_mode m = smallest_int_mode_for_size (p).require ();
tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
uns0_p && uns1_p
&& unsr_p);
(mode_for_size (size, MODE_DECIMAL_FLOAT, 0));
}
-extern machine_mode smallest_mode_for_size (poly_uint64, enum mode_class);
+extern opt_machine_mode smallest_mode_for_size (poly_uint64, enum mode_class);
-/* Find the narrowest integer mode that contains at least SIZE bits.
- Such a mode must exist. */
+/* Find the narrowest integer mode that contains at least SIZE bits,
+ if such a mode exists. */
-inline scalar_int_mode
+inline opt_scalar_int_mode
smallest_int_mode_for_size (poly_uint64 size)
{
- return as_a <scalar_int_mode> (smallest_mode_for_size (size, MODE_INT));
+ return dyn_cast <scalar_int_mode> (smallest_mode_for_size (size, MODE_INT));
}
extern opt_scalar_int_mode int_mode_for_mode (machine_mode);
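Since smallest_int_mode_for_size now returns an opt_scalar_int_mode,
call sites choose between two idioms.  A minimal sketch, not taken from
the patch (bitsize and the NULL_RTX fallback are placeholders; the
.require () and .exists () members of opt_mode already exist in
machmode.h):

  /* A caller that can prove a wide-enough mode exists keeps the old
     behaviour: .require () still asserts if the lookup fails.  */
  scalar_int_mode mode = smallest_int_mode_for_size (bitsize).require ();

  /* A caller that may ask for more bits than any MODE_INT mode holds
     can now test the result and bail out instead of tripping an
     assert inside stor-layout.cc.  */
  scalar_int_mode imode;
  if (!smallest_int_mode_for_size (bitsize).exists (&imode))
    return NULL_RTX;

This is why most of the patch is mechanical: every caller that relied
on the old must-exist guarantee simply gains a .require ().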
machine_mode field_mode)
{
opt_scalar_int_mode mode_iter;
+
FOR_EACH_MODE_FROM (mode_iter, smallest_int_mode_for_size (struct_bits))
{
scalar_int_mode mode = mode_iter.require ();
/* The value returned by the library function will have twice as
many bits as the nominal MODE. */
- libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
+ libval_mode
+ = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode)).require ();
start_sequence ();
libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
libval_mode,
}
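Spelling out the doubling arithmetic in the comment above: for a 32-bit
MODE the call requests the narrowest integer mode of at least
2 * 32 = 64 bits, i.e. DImode on typical targets, so the quotient and
remainder returned by the libcall fit in a single value.  The
.require () keeps the previous assert-on-failure behaviour should no
such mode exist.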
/* Return the narrowest mode of class MCLASS that contains at least
- SIZE bits. Abort if no such mode exists. */
+ SIZE bits, if such a mode exists. */
-machine_mode
+opt_machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
machine_mode mode = VOIDmode;
if (known_ge (GET_MODE_PRECISION (mode), size))
break;
- gcc_assert (mode != VOIDmode);
+ if (mode == VOIDmode)
+ return opt_machine_mode ();
if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
for (i = 0; i < NUM_INT_N_ENTS; i ++)
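Concretely, on a target whose widest integer mode is TImode (128 bits),
smallest_mode_for_size (256, MODE_INT) used to die on the gcc_assert;
it now returns an empty opt_machine_mode.  A hypothetical guarded
caller, sketched against the new interface (the names around the call
are placeholders):

  machine_mode mode;
  if (!smallest_mode_for_size (bits, MODE_INT).exists (&mode))
    return false;  /* No integer mode is wide enough; punt.  */

Callers that cannot usefully recover chain .require () instead, which
reinstates the assert at the call site, as the hunks above show.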
case ENUMERAL_TYPE:
{
scalar_int_mode mode
- = smallest_int_mode_for_size (TYPE_PRECISION (type));
+ = smallest_int_mode_for_size (TYPE_PRECISION (type)).require ();
SET_TYPE_MODE (type, mode);
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
/* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
bprecision
= MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
- bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
+ bprecision
+ = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision).require ());
if (bprecision > HOST_BITS_PER_DOUBLE_INT)
bprecision = HOST_BITS_PER_DOUBLE_INT;
TYPE_UNSIGNED (bitsizetype) = 1;
/* Now layout both types manually. */
- scalar_int_mode mode = smallest_int_mode_for_size (precision);
+ scalar_int_mode mode = smallest_int_mode_for_size (precision).require ();
SET_TYPE_MODE (sizetype, mode);
SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
TYPE_SIZE (sizetype) = bitsize_int (precision);
TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
- mode = smallest_int_mode_for_size (bprecision);
+ mode = smallest_int_mode_for_size (bprecision).require ();
SET_TYPE_MODE (bitsizetype, mode);
SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl256b -mabi=lp64d -O3" } */
+
+extern short a[];
+short b;
+int main() {
+ for (char c = 0; c < 18; c += 1)
+ a[c + 0] = b;
+}
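The new compile-only test is presumably the reduced reproducer for the
ICE motivating this change: with a 256-bit vector length, the 18 short
stores span 18 * 16 = 288 bits, and at -O3 some caller (not visible in
this excerpt) asks for an integer mode of at least that size, wider
than anything RISC-V provides; before the patch that query aborted
inside smallest_mode_for_size itself rather than reporting failure.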
precision = TYPE_PRECISION (type);
}
- scalar_int_mode mode = smallest_int_mode_for_size (precision);
+ scalar_int_mode mode = smallest_int_mode_for_size (precision).require ();
precision = GET_MODE_PRECISION (mode);
type = build_nonstandard_integer_type (precision, unsigned_p);