(ss_plus "ssadd")
(us_plus "usadd")
(ss_minus "sssub")
- (us_minus "ussub")])
+ (us_minus "ussub")
+ (sign_extend "extend")
+ (zero_extend "zero_extend")])
;; <or_optab> code attributes
(define_code_attr or_optab [(ior "ior")
}
};
+/* Implements vsext.vf2/vsext.vf4/vsext.vf8/vzext.vf2/vzext.vf4/vzext.vf8. */
+template<rtx_code CODE>
+class ext : public function_base
+{
+public:
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_vf2:
+ return e.use_exact_insn (code_for_pred_vf2 (CODE, e.vector_mode ()));
+ case OP_TYPE_vf4:
+ return e.use_exact_insn (code_for_pred_vf4 (CODE, e.vector_mode ()));
+ case OP_TYPE_vf8:
+ return e.use_exact_insn (code_for_pred_vf8 (CODE, e.vector_mode ()));
+ default:
+ gcc_unreachable ();
+ }
+ }
+};
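+
+/* Illustrative sketch only (the intrinsic spelling is hypothetical and
+   depends on the registration below): a call such as vsext_vf2 (op1, vl)
+   on a vint8mf2_t source dispatches to ext<SIGN_EXTEND> with OP_TYPE_vf2
+   and expands the @pred_<optab><mode>_vf2 pattern from vector.md, which
+   emits vsext.vf2.  */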
+
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const binop<UMOD> vremu_obj;
static CONSTEXPR const unop<NEG> vneg_obj;
static CONSTEXPR const unop<NOT> vnot_obj;
+static CONSTEXPR const ext<SIGN_EXTEND> vsext_obj;
+static CONSTEXPR const ext<ZERO_EXTEND> vzext_obj;
static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
static CONSTEXPR const binop<SS_MINUS> vssub_obj;
static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
BASE (vremu)
BASE (vneg)
BASE (vnot)
+BASE (vsext)
+BASE (vzext)
BASE (vsadd)
BASE (vssub)
BASE (vsaddu)
extern const function_base *const vremu;
extern const function_base *const vneg;
extern const function_base *const vnot;
+extern const function_base *const vsext;
+extern const function_base *const vzext;
extern const function_base *const vsadd;
extern const function_base *const vssub;
extern const function_base *const vsaddu;
DEF_RVV_FUNCTION (vremu, alu, full_preds, u_vvx_ops)
DEF_RVV_FUNCTION (vneg, alu, full_preds, iu_v_ops)
DEF_RVV_FUNCTION (vnot, alu, full_preds, iu_v_ops)
+DEF_RVV_FUNCTION (vsext, alu, full_preds, i_vf2_ops)
+DEF_RVV_FUNCTION (vsext, alu, full_preds, i_vf4_ops)
+DEF_RVV_FUNCTION (vsext, alu, full_preds, i_vf8_ops)
+DEF_RVV_FUNCTION (vzext, alu, full_preds, u_vf2_ops)
+DEF_RVV_FUNCTION (vzext, alu, full_preds, u_vf4_ops)
+DEF_RVV_FUNCTION (vzext, alu, full_preds, u_vf8_ops)
/* 12. Vector Fixed-Point Arithmetic Instructions. */
DEF_RVV_FUNCTION (vsadd, alu, full_preds, i_vvv_ops)
DEF_RVV_FUNCTION (vssub, alu, full_preds, i_vvv_ops)
bool overloaded_p) const override
{
b.append_base_name (instance.base_name);
- /* vop<sew>_v --> vop<sew>_v_<type>. */
- if (!overloaded_p)
+
+  /* vop<sew> --> vop<sew>_<op>.  According to the rvv-intrinsic-doc, the
+     _vv/_vx/_v APIs carry no OP suffix in their overloaded names; for every
+     other OP type the suffix is always appended, e.g. vsext_vf2.  */
+ if (instance.op_info->op == OP_TYPE_vv || instance.op_info->op == OP_TYPE_vx
+ || instance.op_info->op == OP_TYPE_v)
{
- /* vop<sew> --> vop<sew>_v. */
- b.append_name (operand_suffixes[instance.op_info->op]);
- /* vop<sew>_v --> vop<sew>_v_<type>. */
- b.append_name (type_suffixes[instance.type.index].vector);
+ if (!overloaded_p)
+ b.append_name (operand_suffixes[instance.op_info->op]);
}
+ else
+ b.append_name (operand_suffixes[instance.op_info->op]);
+
+ /* vop<sew>_<op> --> vop<sew>_<op>_<type>. */
+ if (!overloaded_p)
+ b.append_name (type_suffixes[instance.type.index].vector);
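+
+  /* E.g. the non-overloaded name becomes vsext_vf2_i16mf4 while the
+     overloaded name stays vsext_vf2 (suffix spellings are illustrative;
+     they come from operand_suffixes and type_suffixes).  */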
+
/* According to rvv-intrinsic-doc, it does not add "_m" suffix
for vop_m C++ overloaded API. */
if (overloaded_p && instance.pred == PRED_TYPE_m)
#define DEF_RVV_B_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_WEXTI_OPS" macro include Double-Widening signed integer which
+ will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_WEXTI_OPS
+#define DEF_RVV_WEXTI_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_QEXTI_OPS" macro include Quad-Widening signed integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_QEXTI_OPS
+#define DEF_RVV_QEXTI_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_OEXTI_OPS" macro include Oct-Widening signed integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_OEXTI_OPS
+#define DEF_RVV_OEXTI_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_WEXTU_OPS" macro include Double-Widening unsigned integer which
+ will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_WEXTU_OPS
+#define DEF_RVV_WEXTU_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_QEXTU_OPS" macro include Quad-Widening unsigned integer which
+ will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_QEXTU_OPS
+#define DEF_RVV_QEXTU_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_OEXTU_OPS" macro include Oct-Widening unsigned integer which
+ will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_OEXTU_OPS
+#define DEF_RVV_OEXTU_OPS(TYPE, REQUIRE)
+#endif
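+
+/* Note: riscv-vector-builtins.cc redefines each of these macros before
+   re-including this file so that every listed type expands into a table
+   entry, e.g.
+     #define DEF_RVV_WEXTI_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+   (see the wexti_ops array and its siblings there).  */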
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ZVE64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
DEF_RVV_B_OPS (vbool2_t, 0)
DEF_RVV_B_OPS (vbool1_t, 0)
+DEF_RVV_WEXTI_OPS (vint16mf4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTI_OPS (vint16mf2_t, 0)
+DEF_RVV_WEXTI_OPS (vint16m1_t, 0)
+DEF_RVV_WEXTI_OPS (vint16m2_t, 0)
+DEF_RVV_WEXTI_OPS (vint16m4_t, 0)
+DEF_RVV_WEXTI_OPS (vint16m8_t, 0)
+DEF_RVV_WEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTI_OPS (vint32m1_t, 0)
+DEF_RVV_WEXTI_OPS (vint32m2_t, 0)
+DEF_RVV_WEXTI_OPS (vint32m4_t, 0)
+DEF_RVV_WEXTI_OPS (vint32m8_t, 0)
+DEF_RVV_WEXTI_OPS (vint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTI_OPS (vint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTI_OPS (vint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTI_OPS (vint64m8_t, RVV_REQUIRE_ZVE64)
+
+DEF_RVV_QEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTI_OPS (vint32m1_t, 0)
+DEF_RVV_QEXTI_OPS (vint32m2_t, 0)
+DEF_RVV_QEXTI_OPS (vint32m4_t, 0)
+DEF_RVV_QEXTI_OPS (vint32m8_t, 0)
+DEF_RVV_QEXTI_OPS (vint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTI_OPS (vint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTI_OPS (vint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTI_OPS (vint64m8_t, RVV_REQUIRE_ZVE64)
+
+DEF_RVV_OEXTI_OPS (vint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTI_OPS (vint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTI_OPS (vint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTI_OPS (vint64m8_t, RVV_REQUIRE_ZVE64)
+
+DEF_RVV_WEXTU_OPS (vuint16mf4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTU_OPS (vuint16mf2_t, 0)
+DEF_RVV_WEXTU_OPS (vuint16m1_t, 0)
+DEF_RVV_WEXTU_OPS (vuint16m2_t, 0)
+DEF_RVV_WEXTU_OPS (vuint16m4_t, 0)
+DEF_RVV_WEXTU_OPS (vuint16m8_t, 0)
+DEF_RVV_WEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTU_OPS (vuint32m1_t, 0)
+DEF_RVV_WEXTU_OPS (vuint32m2_t, 0)
+DEF_RVV_WEXTU_OPS (vuint32m4_t, 0)
+DEF_RVV_WEXTU_OPS (vuint32m8_t, 0)
+DEF_RVV_WEXTU_OPS (vuint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTU_OPS (vuint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_WEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ZVE64)
+
+DEF_RVV_QEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTU_OPS (vuint32m1_t, 0)
+DEF_RVV_QEXTU_OPS (vuint32m2_t, 0)
+DEF_RVV_QEXTU_OPS (vuint32m4_t, 0)
+DEF_RVV_QEXTU_OPS (vuint32m8_t, 0)
+DEF_RVV_QEXTU_OPS (vuint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTU_OPS (vuint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_QEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ZVE64)
+
+DEF_RVV_OEXTU_OPS (vuint64m1_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTU_OPS (vuint64m2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_OEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ZVE64)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
#undef DEF_RVV_B_OPS
+#undef DEF_RVV_WEXTI_OPS
+#undef DEF_RVV_QEXTI_OPS
+#undef DEF_RVV_OEXTI_OPS
+#undef DEF_RVV_WEXTU_OPS
+#undef DEF_RVV_QEXTU_OPS
+#undef DEF_RVV_OEXTU_OPS
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of double-widening signed integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info wexti_ops[] = {
+#define DEF_RVV_WEXTI_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of quad-widening signed integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info qexti_ops[] = {
+#define DEF_RVV_QEXTI_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of oct-widening signed integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info oexti_ops[] = {
+#define DEF_RVV_OEXTI_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of double-widening unsigned integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info wextu_ops[] = {
+#define DEF_RVV_WEXTU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of quad-widening unsigned integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info qextu_ops[] = {
+#define DEF_RVV_QEXTU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of oct-widening unsigned integer types that will be registered
+   for intrinsic functions. */
+static const rvv_type_info oextu_ops[] = {
+#define DEF_RVV_OEXTU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
= rvv_arg_type_info (NUM_BASE_TYPES);
= {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_size),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (double demote type) function. */
+static CONSTEXPR const rvv_arg_type_info vf2_args[]
+ = {rvv_arg_type_info (RVV_BASE_double_trunc_vector), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (quad demote type) function. */
+static CONSTEXPR const rvv_arg_type_info vf4_args[]
+ = {rvv_arg_type_info (RVV_BASE_quad_trunc_vector), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (oct demote type) function. */
+static CONSTEXPR const rvv_arg_type_info vf8_args[]
+ = {rvv_arg_type_info (RVV_BASE_oct_trunc_vector), rvv_arg_type_info_end};
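+
+/* The RVV_BASE_*_trunc_vector argument bases above are resolved to concrete
+   source vector types in get_base_vector_type by narrowing the element mode
+   (see the corresponding cases there).  */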
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
v_args /* Args */};
+/* Static operand information for vector_type func (double demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info i_vf2_ops
+ = {wexti_ops, /* Types */
+ OP_TYPE_vf2, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf2_args /* Args */};
+
+/* Static operand information for vector_type func (quad demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info i_vf4_ops
+ = {qexti_ops, /* Types */
+ OP_TYPE_vf4, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf4_args /* Args */};
+
+/* Static operand information for vector_type func (oct demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info i_vf8_ops
+ = {oexti_ops, /* Types */
+ OP_TYPE_vf8, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf8_args /* Args */};
+
+/* Static operand information for vector_type func (double demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info u_vf2_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_vf2, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf2_args /* Args */};
+
+/* Static operand information for vector_type func (quad demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info u_vf4_ops
+ = {qextu_ops, /* Types */
+ OP_TYPE_vf4, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf4_args /* Args */};
+
+/* Static operand information for vector_type func (oct demote type)
+   function registration. */
+static CONSTEXPR const rvv_op_info u_vf8_ops
+ = {oextu_ops, /* Types */
+ OP_TYPE_vf8, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vf8_args /* Args */};
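+
+/* Putting it together, i_vf2_ops registers, for each type in wexti_ops, an
+   intrinsic returning the full-width vector and taking the double-demoted
+   vector as its data argument, roughly:
+     vint16mf4_t vsext_vf2 (vint8mf8_t op1, size_t vl);
+   (a sketch only; the exact prototype and name suffix depend on the
+   predication variant and the shape code above).  */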
+
/* A list of all RVV intrinsic functions. */
static function_group_info function_groups[] = {
#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \
if (!type)
return NUM_VECTOR_TYPES;
poly_int64 nunits = GET_MODE_NUNITS (TYPE_MODE (type));
- machine_mode inner_mode;
+ machine_mode inner_mode = GET_MODE_INNER (TYPE_MODE (type));
bool unsigned_p = TYPE_UNSIGNED (type);
switch (base_type)
{
inner_mode = GET_MODE_INNER (TYPE_MODE (type));
unsigned_p = true;
break;
+ case RVV_BASE_double_trunc_vector:
+ if (inner_mode == DImode)
+ inner_mode = SImode;
+ else if (inner_mode == SImode)
+ inner_mode = HImode;
+ else if (inner_mode == HImode)
+ inner_mode = QImode;
+ else
+ gcc_unreachable ();
+ break;
+ case RVV_BASE_quad_trunc_vector:
+ if (inner_mode == DImode)
+ inner_mode = HImode;
+ else if (inner_mode == SImode)
+ inner_mode = QImode;
+ else
+ gcc_unreachable ();
+ break;
+ case RVV_BASE_oct_trunc_vector:
+ if (inner_mode == DImode)
+ inner_mode = QImode;
+ else
+ gcc_unreachable ();
+ break;
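+      /* E.g. for RVV_BASE_quad_trunc_vector on vint32m4_t (SImode elements)
+	 the demoted element mode is QImode, i.e. the vint8m1_t source type
+	 of vsext.vf4/vzext.vf4.  */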
default:
return NUM_VECTOR_TYPES;
}
case RVV_BASE_uint32_index:
case RVV_BASE_uint64_index:
case RVV_BASE_shift_vector:
+ case RVV_BASE_double_trunc_vector:
+ case RVV_BASE_quad_trunc_vector:
+ case RVV_BASE_oct_trunc_vector:
if (get_base_vector_type (builtin_types[type_idx].vector)
!= NUM_VECTOR_TYPES)
return builtin_types[get_base_vector_type (
RVV_BASE_uint32_index,
RVV_BASE_uint64_index,
RVV_BASE_shift_vector,
+ RVV_BASE_double_trunc_vector,
+ RVV_BASE_quad_trunc_vector,
+ RVV_BASE_oct_trunc_vector,
NUM_BASE_TYPES
};
(VNx64BI "TARGET_MIN_VLEN > 32")
])
+(define_mode_iterator VWEXTI [
+ VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
+ VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx1DI "TARGET_MIN_VLEN > 32") (VNx2DI "TARGET_MIN_VLEN > 32")
+ (VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VQEXTI [
+ VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx1DI "TARGET_MIN_VLEN > 32") (VNx2DI "TARGET_MIN_VLEN > 32")
+ (VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VOEXTI [
+ (VNx1DI "TARGET_MIN_VLEN > 32") (VNx2DI "TARGET_MIN_VLEN > 32")
+ (VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
+])
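+
+;; The iterator ranges follow from the 8-bit minimum element width: a
+;; double-widened result has at least 16-bit elements (VWEXTI starts at HI),
+;; a quad-widened result at least 32-bit (VQEXTI starts at SI), and an
+;; oct-widened result can only have 64-bit elements (VOEXTI is DI only).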
+
(define_mode_attr VM [
(VNx1QI "VNx1BI") (VNx2QI "VNx2BI") (VNx4QI "VNx4BI") (VNx8QI "VNx8BI") (VNx16QI "VNx16BI") (VNx32QI "VNx32BI") (VNx64QI "VNx64BI")
(VNx1HI "VNx1BI") (VNx2HI "VNx2BI") (VNx4HI "VNx4BI") (VNx8HI "VNx8BI") (VNx16HI "VNx16BI") (VNx32HI "VNx32BI")
(VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64")
])
+(define_mode_attr V_DOUBLE_TRUNC [
+ (VNx1HI "VNx1QI") (VNx2HI "VNx2QI") (VNx4HI "VNx4QI") (VNx8HI "VNx8QI")
+ (VNx16HI "VNx16QI") (VNx32HI "VNx32QI")
+ (VNx1SI "VNx1HI") (VNx2SI "VNx2HI") (VNx4SI "VNx4HI") (VNx8SI "VNx8HI")
+ (VNx16SI "VNx16HI")
+ (VNx1DI "VNx1SI") (VNx2DI "VNx2SI") (VNx4DI "VNx4SI") (VNx8DI "VNx8SI")
+])
+
+(define_mode_attr V_QUAD_TRUNC [
+ (VNx1SI "VNx1QI") (VNx2SI "VNx2QI") (VNx4SI "VNx4QI") (VNx8SI "VNx8QI")
+ (VNx16SI "VNx16QI")
+ (VNx1DI "VNx1HI") (VNx2DI "VNx2HI")
+ (VNx4DI "VNx4HI") (VNx8DI "VNx8HI")
+])
+
+(define_mode_attr V_OCT_TRUNC [
+ (VNx1DI "VNx1QI") (VNx2DI "VNx2QI") (VNx4DI "VNx4QI") (VNx8DI "VNx8QI")
+])
+
(define_int_iterator ORDER [UNSPEC_ORDERED UNSPEC_UNORDERED])
(define_int_attr order [
(umin "%3,%4")
(umax "%3,%4")
(mult "%3,%4")])
+
+(define_code_attr sz [(sign_extend "s") (zero_extend "z")])
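+
+;; E.g. "v<sz>ext.vf2" in the output templates below becomes "vsext.vf2"
+;; for sign_extend and "vzext.vf2" for zero_extend.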
;; It is valid for instruction that require sew/lmul ratio.
(define_attr "ratio" ""
(cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox,\
- vialu,vshift,vicmp,vimul,vidiv,vsalu")
+ vialu,vshift,vicmp,vimul,vidiv,vsalu,vext")
(const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "VNx1QI,VNx1BI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
;; The index of operand[] to get the merge op.
(define_attr "merge_op_idx" ""
(cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,\
- vialu,vshift,vicmp,vimul,vidiv,vsalu")
+ vialu,vshift,vicmp,vimul,vidiv,vsalu,vext")
(const_int 2)]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] to get the avl op.
(define_attr "vl_op_idx" ""
- (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,vstox")
+ (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,\
+ vstox,vext")
(const_int 4)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
;; The tail policy op value.
(define_attr "ta" ""
- (cond [(eq_attr "type" "vlde,vimov,vfmov")
+ (cond [(eq_attr "type" "vlde,vimov,vfmov,vext")
(symbol_ref "riscv_vector::get_ta(operands[5])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
;; The mask policy op value.
(define_attr "ma" ""
- (cond [(eq_attr "type" "vlde")
+ (cond [(eq_attr "type" "vlde,vext")
(symbol_ref "riscv_vector::get_ma(operands[6])")
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
;; The avl type value.
(define_attr "avl_type" ""
- (cond [(eq_attr "type" "vlde,vlde,vste,vimov,vimov,vimov,vfmov")
+ (cond [(eq_attr "type" "vlde,vlde,vste,vimov,vimov,vimov,vfmov,vext")
(symbol_ref "INTVAL (operands[7])")
(eq_attr "type" "vldm,vstm,vimov,vmalu,vmalu")
(symbol_ref "INTVAL (operands[5])")
(set (attr "ta") (symbol_ref "riscv_vector::get_ta(operands[5])"))
(set (attr "ma") (symbol_ref "riscv_vector::get_ta(operands[6])"))
(set (attr "avl_type") (symbol_ref "INTVAL (operands[7])"))])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated integer widening operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 11.3 Vector Integer Extension
+;; -------------------------------------------------------------------------------
+
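+;; Operand layout shared by the three patterns below, matching the attribute
+;; updates above: 0 = destination, 1 = mask, 2 = merge, 3 = narrow source,
+;; 4 = vl, 5/6 = tail/mask policies, 7 = avl type.
+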
+;; Vector Double-Widening Sign-extend and Zero-extend.
+(define_insn "@pred_<optab><mode>_vf2"
+ [(set (match_operand:VWEXTI 0 "register_operand" "=&vr")
+ (if_then_else:VWEXTI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (any_extend:VWEXTI
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr"))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " 0vu")))]
+ "TARGET_VECTOR"
+ "v<sz>ext.vf2\t%0,%3%p1"
+ [(set_attr "type" "vext")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Quad-Widening Sign-extend and Zero-extend.
+(define_insn "@pred_<optab><mode>_vf4"
+ [(set (match_operand:VQEXTI 0 "register_operand" "=&vr")
+ (if_then_else:VQEXTI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (any_extend:VQEXTI
+ (match_operand:<V_QUAD_TRUNC> 3 "register_operand" " vr"))
+ (match_operand:VQEXTI 2 "vector_merge_operand" " 0vu")))]
+ "TARGET_VECTOR"
+ "v<sz>ext.vf4\t%0,%3%p1"
+ [(set_attr "type" "vext")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Oct-Widening Sign-extend and Zero-extend.
+(define_insn "@pred_<optab><mode>_vf8"
+ [(set (match_operand:VOEXTI 0 "register_operand" "=&vr")
+ (if_then_else:VOEXTI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (any_extend:VOEXTI
+ (match_operand:<V_OCT_TRUNC> 3 "register_operand" " vr"))
+ (match_operand:VOEXTI 2 "vector_merge_operand" " 0vu")))]
+ "TARGET_VECTOR"
+ "v<sz>ext.vf8\t%0,%3%p1"
+ [(set_attr "type" "vext")
+ (set_attr "mode" "<MODE>")])