fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew,
inttype (eew, LMUL1_LOG2, /* unsigned_p */false).c_str ());
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew,
+ inttype (eew, LMUL1_LOG2, /* unsigned_p */true).c_str ());
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
eew);
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
+ eew);
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
for (unsigned eew : EEW_SIZE_LIST)
fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew);
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
+ eew);
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_signed_eew16_lmul1_interpret_ops)
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_signed_eew32_lmul1_interpret_ops)
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_signed_eew64_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_unsigned_eew8_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_unsigned_eew16_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_unsigned_eew32_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, b_v_unsigned_eew64_lmul1_interpret_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x2_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x4_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x8_ops)
#define DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
#endif
+/* Use the "DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS" macro to include all
+   types for UINT8M1 vreinterpret, which will be iterated and registered as
+   intrinsic functions. */
+#ifndef DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS
+#define DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use the "DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS" macro to include all
+   types for UINT16M1 vreinterpret, which will be iterated and registered as
+   intrinsic functions. */
+#ifndef DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS
+#define DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use the "DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS" macro to include all
+   types for UINT32M1 vreinterpret, which will be iterated and registered as
+   intrinsic functions. */
+#ifndef DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS
+#define DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use the "DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS" macro to include all
+   types for UINT64M1 vreinterpret, which will be iterated and registered as
+   intrinsic functions. */
+#ifndef DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS
+#define DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
/* Use "DEF_RVV_X2_VLMUL_EXT_OPS" macro include all types for X2 VLMUL EXT
which will be iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_X2_VLMUL_EXT_OPS
DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool32_t, 0)
DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool1_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool2_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool4_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool8_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool16_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool32_t, 0)
+DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool1_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool2_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool4_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool8_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool16_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool32_t, 0)
+DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool1_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool2_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool4_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool8_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool16_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool32_t, 0)
+DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool1_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool2_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool4_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool8_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool16_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool32_t, 0)
+DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
+
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf2_t, 0)
#undef DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS
#undef DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS
#undef DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS
#undef DEF_RVV_X2_VLMUL_EXT_OPS
#undef DEF_RVV_X4_VLMUL_EXT_OPS
#undef DEF_RVV_X8_VLMUL_EXT_OPS
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of vuint8m1 interpret ops that will be registered for intrinsic
+   functions. */
+static const rvv_type_info unsigned_eew8_lmul1_interpret_ops[] = {
+#define DEF_RVV_UNSIGNED_EEW8_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vuint16m1 interpret ops that will be registered for intrinsic
+   functions. */
+static const rvv_type_info unsigned_eew16_lmul1_interpret_ops[] = {
+#define DEF_RVV_UNSIGNED_EEW16_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vuint32m1 interpret ops that will be registered for intrinsic
+   functions. */
+static const rvv_type_info unsigned_eew32_lmul1_interpret_ops[] = {
+#define DEF_RVV_UNSIGNED_EEW32_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vuint64m1 interpret ops that will be registered for intrinsic
+   functions. */
+static const rvv_type_info unsigned_eew64_lmul1_interpret_ops[] = {
+#define DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of x2 vlmul ext will be registered for intrinsic functions. */
static const rvv_type_info vlmul_ext_x2_ops[] = {
#define DEF_RVV_X2_VLMUL_EXT_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
rvv_arg_type_info (RVV_BASE_signed_eew64_lmul1_interpret),/* Return type */
v_args /* Args */};
+/* A static operand information for vuint8m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_unsigned_eew8_lmul1_interpret_ops
+ = {unsigned_eew8_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_unsigned_eew8_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vuint16m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_unsigned_eew16_lmul1_interpret_ops
+ = {unsigned_eew16_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_unsigned_eew16_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vuint32m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_unsigned_eew32_lmul1_interpret_ops
+ = {unsigned_eew32_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_unsigned_eew32_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vuint64m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_unsigned_eew64_lmul1_interpret_ops
+ = {unsigned_eew64_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_unsigned_eew64_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
/* A static operand information for vector_type func (vector_type)
* function registration. */
static CONSTEXPR const rvv_op_info all_v_vlmul_ext_x2_ops
BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \
SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \
SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \
+ UNSIGNED_EEW8_LMUL1_INTERPRET, UNSIGNED_EEW16_LMUL1_INTERPRET, \
+ UNSIGNED_EEW32_LMUL1_INTERPRET, UNSIGNED_EEW64_LMUL1_INTERPRET, \
X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \
X64_VLMUL_EXT, TUPLE_SUBPART) \
{ \
VECTOR_TYPE_##SIGNED_EEW16_LMUL1_INTERPRET, \
VECTOR_TYPE_##SIGNED_EEW32_LMUL1_INTERPRET, \
VECTOR_TYPE_##SIGNED_EEW64_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##UNSIGNED_EEW8_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##UNSIGNED_EEW16_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##UNSIGNED_EEW32_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##UNSIGNED_EEW64_LMUL1_INTERPRET, \
VECTOR_TYPE_##X2_VLMUL_EXT, \
VECTOR_TYPE_##X4_VLMUL_EXT, \
VECTOR_TYPE_##X8_VLMUL_EXT, \
case RVV_BASE_signed_eew16_lmul1_interpret:
case RVV_BASE_signed_eew32_lmul1_interpret:
case RVV_BASE_signed_eew64_lmul1_interpret:
+ case RVV_BASE_unsigned_eew8_lmul1_interpret:
+ case RVV_BASE_unsigned_eew16_lmul1_interpret:
+ case RVV_BASE_unsigned_eew32_lmul1_interpret:
+ case RVV_BASE_unsigned_eew64_lmul1_interpret:
case RVV_BASE_vlmul_ext_x2:
case RVV_BASE_vlmul_ext_x4:
case RVV_BASE_vlmul_ext_x8:
BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \
SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \
SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \
+ UNSIGNED_EEW8_LMUL1_INTERPRET, UNSIGNED_EEW16_LMUL1_INTERPRET, \
+ UNSIGNED_EEW32_LMUL1_INTERPRET, UNSIGNED_EEW64_LMUL1_INTERPRET, \
X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \
X64_VLMUL_EXT, TUPLE_SUBPART)
#endif
DEF_RVV_BASE_TYPE (signed_eew16_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (signed_eew32_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (signed_eew64_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (unsigned_eew8_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (unsigned_eew16_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (unsigned_eew32_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (unsigned_eew64_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x2, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x4, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x8, get_vector_type (type_idx))
return __riscv_vreinterpret_v_b1_i64m1 (src);
}
-/* { dg-final { scan-assembler-times {vlm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 24 } } */
+vuint8m1_t test_vreinterpret_v_b1_vuint8m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_u8m1 (src);
+}
+
+vuint16m1_t test_vreinterpret_v_b1_vuint16m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_u16m1 (src);
+}
+
+vuint32m1_t test_vreinterpret_v_b1_vuint32m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_u32m1 (src);
+}
+
+vuint64m1_t test_vreinterpret_v_b1_vuint64m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_u64m1 (src);
+}
+
+/* { dg-final { scan-assembler-times {vlm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 28 } } */
/* { dg-final { scan-assembler-times {vsm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 20 } } */
-/* { dg-final { scan-assembler-times {vs1r\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 4 } } */
+/* { dg-final { scan-assembler-times {vs1r\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 8 } } */
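
For reference, a minimal usage sketch of the newly exposed mask-to-unsigned
reinterpret intrinsics (not part of the patch).  It assumes <riscv_vector.h>,
the pre-existing vuint8m1_t -> vbool1_t reinterpret from the earlier bool
interpret ops, and a hypothetical helper name flip_mask_bits:

#include <riscv_vector.h>

/* Flip every raw mask bit by round-tripping through a vuint8m1_t view.  */
vbool1_t flip_mask_bits (vbool1_t mask)
{
  /* View the mask register's raw bits as bytes (added by this patch).  */
  vuint8m1_t raw = __riscv_vreinterpret_v_b1_u8m1 (mask);
  /* Invert the bits with an ordinary unsigned vector op over VLMAX bytes.  */
  size_t vl = __riscv_vsetvlmax_e8m1 ();
  raw = __riscv_vxor_vx_u8m1 (raw, 0xff, vl);
  /* Cast back to a mask, assuming the existing u8m1 -> b1 reinterpret.  */
  return __riscv_vreinterpret_v_u8m1_b1 (raw);
}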