registers. Therefore divide the mode size by NF before checking if it is
in range. */
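/* Illustrative example: a tuple mode with NF = 4 that spans four vector
   registers reduces to a single register's worth of data once its size is
   divided by NF, which is the quantity the range check expects.  */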
int nf = 1;
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
nf = get_nf (mode);
return nunits.is_constant ()
bool vls_p = false;
if (m_vlmax_p)
{
- if (riscv_v_ext_vls_mode_p (vtype_mode))
+ if (riscv_vls_mode_p (vtype_mode))
{
/* VLS modes always set VSETVL by
"vsetvl zero, rs1/imm". */
return true;
}
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
if (GET_MODE_NUNITS (mode).to_constant () <= 31)
{
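/* An AVL of at most 31 fits the 5-bit unsigned immediate of vsetivli,
   so it can be encoded directly in the instruction.  */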
/* For VLS modes, the vlmul should be dynamically
calculated since we need to adjust VLMUL according
to TARGET_MIN_VLEN. */
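/* Illustrative: a 128-bit VLS mode maps to LMUL = 1 when TARGET_MIN_VLEN
   is 128 but to LMUL = 2 when TARGET_MIN_VLEN is 64, so the value cannot
   be a static property of the mode.  */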
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
int size = GET_MODE_BITSIZE (mode).to_constant ();
int inner_size = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
rtx
get_vlmax_rtx (machine_mode mode)
{
- gcc_assert (riscv_v_ext_vector_mode_p (mode));
+ gcc_assert (riscv_vla_mode_p (mode));
return gen_int_mode (GET_MODE_NUNITS (mode), Pmode);
}
get_nf (machine_mode mode)
{
/* We don't allow non-tuple modes to go through this function. */
- gcc_assert (riscv_v_ext_tuple_mode_p (mode));
+ gcc_assert (riscv_tuple_mode_p (mode));
return mode_vtype_infos.nf[mode];
}
get_subpart_mode (machine_mode mode)
{
/* We don't allow non-tuple modes to go through this function. */
- gcc_assert (riscv_v_ext_tuple_mode_p (mode));
+ gcc_assert (riscv_tuple_mode_p (mode));
return mode_vtype_infos.subpart_mode[mode];
}
unsigned int
get_ratio (machine_mode mode)
{
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
unsigned int sew = get_sew (mode);
vlmul_type vlmul = get_vlmul (mode);
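/* The ratio is essentially SEW / LMUL, e.g. SEW = 32 with LMUL = 2
   gives a ratio of 16 (illustrative).  */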
get_mask_mode (machine_mode mode)
{
poly_int64 nunits = GET_MODE_NUNITS (mode);
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
unsigned int nf = get_nf (mode);
nunits = exact_div (nunits, nf);
}
+
return get_vector_mode (BImode, nunits).require ();
}
else
mclass = MODE_VECTOR_INT;
machine_mode mode;
+
FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (inner_mode == GET_MODE_INNER (mode)
&& known_eq (nunits, GET_MODE_NUNITS (mode))
- && (riscv_v_ext_vector_mode_p (mode)
- || riscv_v_ext_vls_mode_p (mode)))
+ && (riscv_vla_mode_p (mode)
+ || riscv_vls_mode_p (mode)))
return mode;
return opt_machine_mode ();
}
FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (inner_mode == GET_MODE_INNER (mode)
&& known_eq (nunits, GET_MODE_NUNITS (mode))
- && riscv_v_ext_tuple_mode_p (mode)
+ && riscv_tuple_mode_p (mode)
&& get_subpart_mode (mode) == subpart_mode)
return mode;
return opt_machine_mode ();
{
if (!autovec_use_vlmax_p ())
return false;
- if (riscv_v_ext_vector_mode_p (vector_mode)
+ if (riscv_vla_mode_p (vector_mode)
&& multiple_p (BYTES_PER_RISCV_VECTOR * TARGET_MAX_LMUL,
GET_MODE_SIZE (element_mode), nunits))
return true;
- if (riscv_v_ext_vls_mode_p (vector_mode)
+ if (riscv_vls_mode_p (vector_mode)
&& multiple_p ((TARGET_MIN_VLEN * TARGET_MAX_LMUL) / BITS_PER_UNIT,
GET_MODE_SIZE (element_mode), nunits))
return true;
bool
cmp_lmul_le_one (machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
return known_le (GET_MODE_SIZE (mode), BYTES_PER_RISCV_VECTOR);
- else if (riscv_v_ext_vls_mode_p (mode))
+ else if (riscv_vls_mode_p (mode))
return known_le (GET_MODE_BITSIZE (mode), TARGET_MIN_VLEN);
return false;
}
bool
cmp_lmul_gt_one (machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
return known_gt (GET_MODE_SIZE (mode), BYTES_PER_RISCV_VECTOR);
- else if (riscv_v_ext_vls_mode_p (mode))
+ else if (riscv_vls_mode_p (mode))
return known_gt (GET_MODE_BITSIZE (mode), TARGET_MIN_VLEN);
return false;
}
Then we can have the condition for VLS mode in fixed-vlmax, aka:
PRECISION (VLSmode) < VLEN / (64 / PRECISION(VLS_inner_mode)). */
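/* Illustrative arithmetic: with a 32-bit inner mode and VLEN = 128 the
   bound is 128 / (64 / 32) = 64, so only VLS modes narrower than 64 bits
   satisfy the fixed-vlmax condition.  */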
bool
-vls_mode_valid_p (machine_mode vls_mode, bool allow_up_to_lmul_8)
+vls_mode_valid_p (machine_mode mode, bool allow_up_to_lmul_8)
{
if (!TARGET_VECTOR || TARGET_XTHEADVECTOR)
return false;
if (rvv_vector_bits == RVV_VECTOR_BITS_SCALABLE)
{
- if (GET_MODE_CLASS (vls_mode) != MODE_VECTOR_BOOL)
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_BOOL)
return true;
if (allow_up_to_lmul_8)
return true;
with size = 128 bits, we will end up with multiple ICEs in
middle-end generic code. */
return !ordered_p (TARGET_MAX_LMUL * BITS_PER_RISCV_VECTOR,
- GET_MODE_PRECISION (vls_mode));
+ GET_MODE_PRECISION (mode));
}
if (rvv_vector_bits == RVV_VECTOR_BITS_ZVL)
{
- machine_mode inner_mode = GET_MODE_INNER (vls_mode);
+ machine_mode inner_mode = GET_MODE_INNER (mode);
int precision = GET_MODE_PRECISION (inner_mode).to_constant ();
int min_vlmax_bitsize = TARGET_MIN_VLEN / (64 / precision);
- return GET_MODE_PRECISION (vls_mode).to_constant () < min_vlmax_bitsize;
+ return GET_MODE_PRECISION (mode).to_constant () < min_vlmax_bitsize;
}
return false;
whereas 'RVVM1SI' mode is enabled if MIN_VLEN == 32. */
bool
-riscv_v_ext_vector_mode_p (machine_mode mode)
+riscv_vla_mode_p (machine_mode mode)
{
#define ENTRY(MODE, REQUIREMENT, ...) \
case MODE##mode: \
/* Return true if mode is an enabled RVV tuple mode. */
bool
-riscv_v_ext_tuple_mode_p (machine_mode mode)
+riscv_tuple_mode_p (machine_mode mode)
{
#define TUPLE_ENTRY(MODE, REQUIREMENT, ...) \
case MODE##mode: \
/* Return true if mode is an enabled RVV VLS mode. */
bool
-riscv_v_ext_vls_mode_p (machine_mode mode)
+riscv_vls_mode_p (machine_mode mode)
{
#define VLS_ENTRY(MODE, REQUIREMENT) \
case MODE##mode: \
3. RVV vls mode. */
static bool
-riscv_v_ext_mode_p (machine_mode mode)
+riscv_vector_mode_p (machine_mode mode)
{
- return riscv_v_ext_vector_mode_p (mode) || riscv_v_ext_tuple_mode_p (mode)
- || riscv_v_ext_vls_mode_p (mode);
+ return riscv_vla_mode_p (mode) || riscv_tuple_mode_p (mode)
+ || riscv_vls_mode_p (mode);
}
static unsigned
riscv_v_adjust_nunits (machine_mode mode, int scale)
{
gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
if (TARGET_MIN_VLEN == 32)
scale = scale / 2;
poly_int64
riscv_v_adjust_nunits (machine_mode mode, bool fractional_p, int lmul, int nf)
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
scalar_mode smode = GET_MODE_INNER (mode);
int size = GET_MODE_SIZE (smode);
poly_int64
riscv_v_adjust_bytesize (machine_mode mode, int scale)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
if (TARGET_XTHEADVECTOR)
return BYTES_PER_RISCV_VECTOR;
case PLUS:
/* RVV loads/stores disallow any offset. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
info->type = ADDRESS_REG;
case LO_SUM:
/* RVV loads/stores disallow LO_SUM. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
info->type = ADDRESS_LO_SUM;
| vs1r.v v24,0(a0) |
+----------------------------------------------------------+
This behavior will benefit the underlying RVV auto vectorization. */
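/* Illustrative: RVV unit-stride loads/stores such as vle32.v/vs1r.v take
   only a base register and have no displacement field, so the sole constant
   address we accept here is literally zero; it is then materialized into a
   base register as in the sequence above.  */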
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return x == const0_rtx;
/* Small-integer addresses don't occur very often, but they
{
/* Disallow an RVV mode as the base address.
E.g. (mem:SI (subreg:DI (reg:V1DI 155) 0)). */
- if (SUBREG_P (x) && riscv_v_ext_mode_p (GET_MODE (SUBREG_REG (x))))
+ if (SUBREG_P (x) && riscv_vector_mode_p (GET_MODE (SUBREG_REG (x))))
return false;
struct riscv_address_info addr;
/* BLKmode is used for single unaligned loads and stores and should
not count as a multiword mode. */
- if (!riscv_v_ext_vector_mode_p (mode) && mode != BLKmode && might_split_p)
+ if (!riscv_vla_mode_p (mode) && mode != BLKmode && might_split_p)
n += (GET_MODE_SIZE (mode).to_constant () + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
if (addr.type == ADDRESS_LO_SUM)
out of the range [-16, 15].
3. const series vector.
...etc. */
- if (riscv_v_ext_mode_p (GET_MODE (x)))
+ if (riscv_vector_mode_p (GET_MODE (x)))
{
rtx elt;
if (const_vec_duplicate_p (x, &elt))
(set (reg:DI target) (subreg:DI (reg:V8QI reg) 0))
Since RVV mode and scalar mode are in different REG_CLASS,
we need to explicitly move data from V_REGS to GR_REGS by scalar move. */
- if (SUBREG_P (src) && riscv_v_ext_mode_p (GET_MODE (SUBREG_REG (src))))
+ if (SUBREG_P (src) && riscv_vector_mode_p (GET_MODE (SUBREG_REG (src))))
{
machine_mode vmode = GET_MODE (SUBREG_REG (src));
unsigned int mode_size = GET_MODE_SIZE (mode).to_constant ();
static int
riscv_binary_cost (rtx x, int single_insns, int double_insns)
{
- if (!riscv_v_ext_mode_p (GET_MODE (x))
+ if (!riscv_vector_mode_p (GET_MODE (x))
&& riscv_2x_xlen_mode_p (GET_MODE (x)))
return COSTS_N_INSNS (double_insns);
return COSTS_N_INSNS (single_insns);
{
/* TODO: We set the RVV instruction cost to 1 by default.
The cost model needs to be properly analyzed and supported in the future. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
int gr2vr_cost = get_gr2vr_cost ();
int fr2vr_cost = get_fr2vr_cost ();
riscv_pass_vls_aggregate_in_gpr (struct riscv_arg_info *info, machine_mode mode,
unsigned gpr_base)
{
- gcc_assert (riscv_v_ext_vls_mode_p (mode));
+ gcc_assert (riscv_vls_mode_p (mode));
unsigned count = 0;
unsigned regnum = 0;
riscv_get_vector_arg (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
machine_mode mode, bool return_p, bool vls_p = false)
{
- gcc_assert (riscv_v_ext_mode_p (mode));
+ gcc_assert (riscv_vector_mode_p (mode));
info->mr_offset = cum->num_mrs;
if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
register to pass. Just call TARGET_HARD_REGNO_NREGS for the number
information. */
int nregs = riscv_hard_regno_nregs (V_ARG_FIRST, mode);
- int LMUL = riscv_v_ext_tuple_mode_p (mode)
+ int LMUL = riscv_tuple_mode_p (mode)
? nregs / riscv_vector::get_nf (mode)
: nregs;
int arg_reg_start = V_ARG_FIRST - V_REG_FIRST;
riscv_pass_vls_in_vr (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
machine_mode mode, bool return_p)
{
- gcc_assert (riscv_v_ext_vls_mode_p (mode));
+ gcc_assert (riscv_vls_mode_p (mode));
unsigned int abi_vlen = riscv_get_cc_abi_vlen (cum->variant_cc);
unsigned int mode_size = GET_MODE_SIZE (mode).to_constant ();
info->fpr_offset = cum->num_fprs;
/* Passed by reference when the scalable vector argument is anonymous. */
- if (riscv_v_ext_mode_p (mode) && !named)
+ if (riscv_vector_mode_p (mode) && !named)
return NULL_RTX;
if (named)
}
/* For scalable vector argument. */
- if (riscv_vector_type_p (type) && riscv_v_ext_mode_p (mode))
+ if (riscv_vector_type_p (type) && riscv_vector_mode_p (mode))
return riscv_get_vector_arg (info, cum, mode, return_p);
if (riscv_vls_cc_p (cum->variant_cc))
{
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
return riscv_pass_vls_in_vr (info, cum, mode, return_p);
rtx ret = riscv_pass_aggregate_in_vr (info, cum, type, return_p);
}
/* For vls mode aggregated in gpr (for non-VLS-CC). */
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
return riscv_pass_vls_aggregate_in_gpr (info, mode, gpr_base);
}
return false;
/* Don't pass by reference if we can use general register(s) for vls. */
- if (info.num_gprs && riscv_v_ext_vls_mode_p (arg.mode))
+ if (info.num_gprs && riscv_vls_mode_p (arg.mode))
return false;
/* Don't pass by reference if we can use vector register groups. */
/* Passed by reference when:
1. The scalable vector argument is anonymous.
2. Args cannot be passed through vector registers. */
- if (riscv_v_ext_mode_p (arg.mode))
+ if (riscv_vector_mode_p (arg.mode))
return true;
/* Pass by reference if the data do not fit in two integer registers. */
{
machine_mode mode = TYPE_MODE (type);
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return TARGET_MIN_VLEN;
int element_bitsize = riscv_vector_element_bitsize (type);
but for a mask vector register, the register number can be any number. */
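/* Illustrative: a register group with LMUL = 4 must start at a register
   number that is a multiple of 4 (v8 is valid, v6 is not), whereas a mask
   value always fits in a single register.  */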
int lmul = 1;
machine_mode rvv_mode = mode;
- if (riscv_v_ext_vls_mode_p (rvv_mode))
+ if (riscv_vls_mode_p (rvv_mode))
{
int size = GET_MODE_BITSIZE (rvv_mode).to_constant ();
if (size < TARGET_MIN_VLEN)
else
return size / TARGET_MIN_VLEN;
}
- if (riscv_v_ext_tuple_mode_p (rvv_mode))
+ if (riscv_tuple_mode_p (rvv_mode))
rvv_mode = riscv_vector::get_subpart_mode (rvv_mode);
poly_int64 size = GET_MODE_SIZE (rvv_mode);
if (known_gt (size, UNITS_PER_V_REG))
1. If the operand is VECTOR REG, we print 'v'(vnsrl.wv).
2. If the operand is CONST_INT/CONST_VECTOR, we print 'i'(vnsrl.wi).
3. If the operand is SCALAR REG, we print 'x'(vnsrl.wx). */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
if (REG_P (op))
asm_fprintf (file, "v");
break;
}
case 'm': {
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
/* Calculate lmul according to mode and print the value. */
int lmul = riscv_get_v_regno_alignment (mode);
{
bool class1_is_fpr = class1 == FP_REGS || class1 == RVC_FP_REGS;
bool class2_is_fpr = class2 == FP_REGS || class2 == RVC_FP_REGS;
- return (!riscv_v_ext_mode_p (mode)
+ return (!riscv_vector_mode_p (mode)
&& GET_MODE_SIZE (mode).to_constant () > UNITS_PER_WORD
&& (class1_is_fpr != class2_is_fpr)
&& !TARGET_XTHEADFMV
static unsigned int
riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
/* Handle fractional LMUL: it only occupies part of a vector register but
still needs one whole vector register to hold it. */
}
/* For tuple modes, the number of registers = NF * LMUL. */
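/* Illustrative: an NF = 4 tuple whose subparts each use LMUL = 2 occupies
   4 * 2 = 8 vector registers.  */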
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
unsigned int nf = riscv_vector::get_nf (mode);
machine_mode subpart_mode = riscv_vector::get_subpart_mode (mode);
}
/* For VLS modes, we allocate registers according to TARGET_MIN_VLEN. */
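/* Illustrative: with TARGET_MIN_VLEN = 128, a 256-bit VLS mode needs
   256 / 128 = 2 registers, while a 64-bit VLS mode still occupies a
   single register.  */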
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
int size = GET_MODE_BITSIZE (mode).to_constant ();
if (size < TARGET_MIN_VLEN)
if (GP_REG_P (regno))
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
/* Zilsd requires load/store with an even-odd register pair. */
}
else if (FP_REG_P (regno))
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
if (!FP_REG_P (regno + nregs - 1))
}
else if (V_REG_P (regno))
{
- if (!riscv_v_ext_mode_p (mode))
+ if (!riscv_vector_mode_p (mode))
return false;
if (!V_REG_P (regno + nregs - 1))
/* We don't allow modes of different REG_CLASSes to be tieable since that
will cause an ICE in register allocation (RA).
E.g. V2SI and DI are not tieable. */
- if (riscv_v_ext_mode_p (mode1) != riscv_v_ext_mode_p (mode2))
+ if (riscv_vector_mode_p (mode1) != riscv_vector_mode_p (mode2))
return false;
return (mode1 == mode2
|| !(GET_MODE_CLASS (mode1) == MODE_FLOAT
we cannot, statically, determine which part of it to extract.
Therefore prevent that. */
if (reg_classes_intersect_p (V_REGS, rclass)
- && riscv_v_ext_vls_mode_p (from)
+ && riscv_vls_mode_p (from)
&& !ordered_p (BITS_PER_RISCV_VECTOR, GET_MODE_PRECISION (from)))
return false;
riscv_vector_mode_supported_p (machine_mode mode)
{
if (TARGET_VECTOR)
- return riscv_v_ext_mode_p (mode);
+ return riscv_vector_mode_p (mode);
return false;
}
/* ??? For now, only do this for variable-width RVV registers.
Doing it for constant-sized registers breaks lower-subreg.c. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
poly_uint64 size = GET_MODE_SIZE (mode);
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
size = GET_MODE_SIZE (riscv_vector::get_subpart_mode (mode));
if (known_lt (size, BYTES_PER_RISCV_VECTOR))
return size;
}
- else if (riscv_v_ext_vector_mode_p (mode))
+ else if (riscv_vla_mode_p (mode))
{
/* RVV mask modes always consume a single register. */
if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
}
if (!size.is_constant ())
return BYTES_PER_RISCV_VECTOR;
- else if (!riscv_v_ext_vls_mode_p (mode))
+ else if (!riscv_vls_mode_p (mode))
/* For -march=rv64gc_zve32f, the natural vector register size
is 32 bits, which is smaller than the scalar register size, so we
return the minimum size between the vector register size and the scalar
static opt_machine_mode
riscv_get_mask_mode (machine_mode mode)
{
- if (TARGET_VECTOR && riscv_v_ext_mode_p (mode))
+ if (TARGET_VECTOR && riscv_vector_mode_p (mode))
return riscv_vector::get_mask_mode (mode);
return default_get_mask_mode (mode);
static poly_uint64
riscv_vectorize_preferred_vector_alignment (const_tree type)
{
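/* RVV memory accesses only require element alignment, so preferring the
   element type's alignment over the whole vector's avoids over-aligning
   (illustrative rationale).  */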
- if (riscv_v_ext_mode_p (TYPE_MODE (type)))
+ if (riscv_vector_mode_p (TYPE_MODE (type)))
return TYPE_ALIGN (TREE_TYPE (type));
return TYPE_ALIGN (type);
}
rtx target, rtx op0, rtx op1,
const vec_perm_indices &sel)
{
- if (TARGET_VECTOR && riscv_v_ext_mode_p (vmode))
+ if (TARGET_VECTOR && riscv_vector_mode_p (vmode))
return riscv_vector::expand_vec_perm_const (vmode, op_mode, target, op0,
op1, sel);
{
gcc_assert (costs);
- if (vectype && riscv_v_ext_vls_mode_p (TYPE_MODE (vectype)))
+ if (vectype && riscv_vls_mode_p (TYPE_MODE (vectype)))
return costs->vls;
return costs->vla;
}
riscv_preferred_else_value (unsigned ifn, tree vectype, unsigned int nops,
tree *ops)
{
- if (riscv_v_ext_mode_p (TYPE_MODE (vectype)))
+ if (riscv_vector_mode_p (TYPE_MODE (vectype)))
{
tree tmp_var = create_tmp_var (vectype);
TREE_NO_WARNING (tmp_var) = 1;