}
};
+/* Implements Andes vln8.v/vlnu8.v.  */
+template <bool SIGN>
+class nds_nibbleload : public function_base
+{
+public:
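+ /* The builtin only reads memory; it has no other side effects.  */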
+ unsigned int call_properties (const function_instance &) const override
+ {
+ return CP_READ_MEMORY;
+ }
+
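+ /* The unpredicated form takes only a pointer and a vl, so the result
+ type cannot be deduced for an overloaded call; only the predicated
+ variants can be overloaded.  */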
+ bool can_be_overloaded_p (enum predication_type_index pred) const override
+ {
+ return pred != PRED_TYPE_none;
+ }
+
+ rtx expand (function_expander &e) const override
+ {
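+ /* Emit the sign-extending (nds.vln8.v) or zero-extending (nds.vlnu8.v)
+ variant of the predicated INT4 load pattern.  */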
+ if (SIGN)
+ return e.use_contiguous_load_insn (
+ code_for_pred_intload_mov (SIGN_EXTEND, e.vector_mode ()));
+ return e.use_contiguous_load_insn (
+ code_for_pred_intload_mov (ZERO_EXTEND, e.vector_mode ()));
+ }
+};
+
static CONSTEXPR const nds_vfwcvtbf16_f nds_vfwcvt_s_obj;
static CONSTEXPR const nds_vfncvtbf16_f<NO_FRM> nds_vfncvt_bf16_obj;
static CONSTEXPR const nds_vfncvtbf16_f<HAS_FRM> nds_vfncvt_bf16_frm_obj;
+static CONSTEXPR const nds_nibbleload<true> nds_vln8_obj;
+static CONSTEXPR const nds_nibbleload<false> nds_vlnu8_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
BASE (nds_vfwcvt_s)
BASE (nds_vfncvt_bf16)
BASE (nds_vfncvt_bf16_frm)
-
+BASE (nds_vln8)
+BASE (nds_vlnu8)
} // end namespace riscv_vector
extern const function_base *const nds_vfwcvt_s;
extern const function_base *const nds_vfncvt_bf16;
extern const function_base *const nds_vfncvt_bf16_frm;
+extern const function_base *const nds_vln8;
+extern const function_base *const nds_vlnu8;
}
} // end namespace riscv_vector
DEF_RVV_FUNCTION (nds_vfncvt_bf16_frm, narrow_alu_frm, none_tu_preds, f32_to_bf16_nf_w_ops)
#undef REQUIRED_EXTENSIONS
+#define REQUIRED_EXTENSIONS XANDESVSINTLOAD_EXT
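+/* Andes INT4 vector loads: nds_vln8 sign-extends and nds_vlnu8 zero-extends
+ the loaded 4-bit elements to int8.  */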
+DEF_RVV_FUNCTION (nds_vln8, alu, full_preds, q_v_void_const_ptr_ops)
+DEF_RVV_FUNCTION (nds_vlnu8, alu, full_preds, qu_v_void_const_ptr_ops)
+#undef REQUIRED_EXTENSIONS
+
#undef DEF_RVV_FUNCTION
(define_c_enum "unspec" [
UNSPEC_NDS_VFWCVTBF16
UNSPEC_NDS_VFNCVTBF16
+ UNSPEC_NDS_INTLOAD
])
;; ....................
(set (attr "avl_type_idx") (const_int 5))
(set (attr "ma") (const_int INVALID_ATTRIBUTE))
(set_attr "mode" "<NDS_V_DOUBLE_TRUNC_BF>")])
+
+;; Vector INT4 Load Extension.
+
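+;; Predicated INT4 load: load 4-bit elements from memory and sign/zero-extend
+;; them into 8-bit vector elements.  Alternative 0 ties the merge operand to
+;; the destination, alternative 1 is unmasked and alternative 2 is masked.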
+(define_insn "@pred_intload_mov<su><mode>"
+ [(set (match_operand:NDS_QVI 0 "nonimmediate_operand" "=vr, vr, vd")
+ (if_then_else:NDS_QVI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:NDS_QVI
+ [(any_extend:NDS_QVI (match_operand:VOID 3 "memory_operand" "m, m, m"))]
+ UNSPEC_NDS_INTLOAD)
+ (match_operand:NDS_QVI 2 "vector_merge_operand" "0, vu, vu")))]
+ "(TARGET_VECTOR && TARGET_XANDESVSINTLOAD
+ && register_operand (operands[0], <MODE>mode))"
+ "@
+ nds.vln<u>8.v\t%0,%3%p1
+ nds.vln<u>8.v\t%0,%3
+ nds.vln<u>8.v\t%0,%3,%1.t"
+ [(set_attr "type" "vlde,vlde,vlde")
+ (set_attr "mode" "<MODE>")])
#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_Q_OPS" macro include all quad signed integer which will be
+ iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_Q_OPS
+#define DEF_RVV_Q_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_QU_OPS" macro include all quad unsigned integer which will be
+ iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_QU_OPS
+#define DEF_RVV_QU_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
DEF_RVV_X2_WU_OPS (vuint32m2_t, 0)
DEF_RVV_X2_WU_OPS (vuint32m4_t, 0)
+DEF_RVV_Q_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_Q_OPS (vint8mf4_t, 0)
+DEF_RVV_Q_OPS (vint8mf2_t, 0)
+DEF_RVV_Q_OPS (vint8m1_t, 0)
+DEF_RVV_Q_OPS (vint8m2_t, 0)
+DEF_RVV_Q_OPS (vint8m4_t, 0)
+DEF_RVV_Q_OPS (vint8m8_t, 0)
+
+DEF_RVV_QU_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_QU_OPS (vuint8mf4_t, 0)
+DEF_RVV_QU_OPS (vuint8mf2_t, 0)
+DEF_RVV_QU_OPS (vuint8m1_t, 0)
+DEF_RVV_QU_OPS (vuint8m2_t, 0)
+DEF_RVV_QU_OPS (vuint8m4_t, 0)
+DEF_RVV_QU_OPS (vuint8m8_t, 0)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
#undef DEF_RVV_XFQF_OPS
#undef DEF_RVV_X2_U_OPS
#undef DEF_RVV_X2_WU_OPS
+#undef DEF_RVV_Q_OPS
+#undef DEF_RVV_QU_OPS
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of all vint8*_t types that will be registered for intrinsic functions. */
+static const rvv_type_info q_ops[] = {
+#define DEF_RVV_Q_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of all vuint8*_t types that will be registered for intrinsic functions. */
+static const rvv_type_info qu_ops[] = {
+#define DEF_RVV_QU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
= rvv_arg_type_info (NUM_BASE_TYPES);
rvv_arg_type_info (RVV_BASE_scalar_float),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (const void_type *) function. */
+static CONSTEXPR const rvv_arg_type_info void_const_ptr_args[]
+ = {rvv_arg_type_info (RVV_BASE_void_const_ptr), rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
bf_w_v_args /* Args */};
+/* A static operand information for vector_type func (const void_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info q_v_void_const_ptr_ops
+ = {q_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ void_const_ptr_args /* Args */};
+
+/* A static operand information for vector_type func (const void_type *)
+ * function registration. */
+static CONSTEXPR const rvv_op_info qu_v_void_const_ptr_ops
+ = {qu_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ void_const_ptr_args /* Args */};
+
/* A static operand information for vector_type func (vector_type).
Some insns just supports SEW=32, such as the crypto vector Zvkg extension.
* function registration. */
DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx))
DEF_RVV_BASE_TYPE (xfqf_float, get_xfqf_float_type (type_idx))
DEF_RVV_BASE_TYPE (scalar_float, get_scalar_float_type (type_idx))
+DEF_RVV_BASE_TYPE (void_const_ptr, const_ptr_type_node)
DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU)
DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE)
XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */
XSFVCP_EXT, /* XSFVCP extension*/
XANDESVBFHCVT_EXT, /* XANDESVBFHCVT extension */
+ XANDESVSINTLOAD_EXT, /* XANDESVSINTLOAD extension */
/* Please update below to isa_name func when add or remove enum type(s). */
};
return "xsfvcp";
case XANDESVBFHCVT_EXT:
return "xandesvbfhcvt";
+ case XANDESVSINTLOAD_EXT:
+ return "xandesvsintload";
default:
gcc_unreachable ();
}
return TARGET_XSFVCP;
case XANDESVBFHCVT_EXT:
return TARGET_XANDESVBFHCVT;
+ case XANDESVSINTLOAD_EXT:
+ return TARGET_XANDESVSINTLOAD;
default:
gcc_unreachable ();
}
(RVVM8SF "RVVM4BF") (RVVM4SF "RVVM2BF") (RVVM2SF "RVVM1BF")
(RVVM1SF "RVVMF2BF") (RVVMF2SF "RVVMF4BF")
])
+
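+;; QImode vector modes that can be the destination of the Andes INT4 loads.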
+(define_mode_iterator NDS_QVI [
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI
+ RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+])
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvsintload -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvsintload -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint8mf8_t test_vln8_v_i8mf8(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8(rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4(rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2(rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1(rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2(rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4(rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8(const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8(rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_m(vbool64_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8_m(vm, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_m(vbool32_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4_m(vm, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_m(vbool16_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2_m(vm, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_m(vbool8_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1_m(vm, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_m(vbool4_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2_m(vm, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_m(vbool2_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4_m(vm, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_m(vbool1_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8_m(vm, rs1, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 14 } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvsintload -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvsintload -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint8mf8_t test_vln8_v_i8mf8_m(vbool64_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_m(vbool32_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_m(vbool16_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_m(vbool8_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_m(vbool4_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_m(vbool2_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_m(vbool1_t vm, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8(vm, rs1, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 7 } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvsintload -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvsintload -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint8mf8_t test_vln8_v_i8mf8_tu(vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8_tu(vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tu(vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4_tu(vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tu(vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2_tu(vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tu(vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1_tu(vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tu(vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2_tu(vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tu(vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4_tu(vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tu(vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8_tu(vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8_tum(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4_tum(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2_tum(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1_tum(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2_tum(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4_tum(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8_tum(vm, vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2_tumu(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1_tumu(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2_tumu(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4_tumu(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf8_mu(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf4_mu(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8mf2_mu(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m1_mu(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m2_mu(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m4_mu(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_v_i8m8_mu(vm, vd, rs1, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 28 } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvsintload -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvsintload -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint8mf8_t test_vln8_v_i8mf8_tu(vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tu(vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tu(vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tu(vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tu(vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tu(vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tu(vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tu(vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tum(vm, vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_tumu(vm, vd, rs1, vl);
+}
+
+vint8mf8_t test_vln8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8mf4_t test_vln8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8mf2_t test_vln8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8m1_t test_vln8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8m2_t test_vln8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8m4_t test_vln8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+
+vint8m8_t test_vln8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) {
+ return __riscv_nds_vln8_mu(vm, vd, rs1, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 28 } } */