#include <math.h>
#define BOOL_SIZE_LIST {1, 2, 4, 8, 16, 32, 64}
+#define EEW_SIZE_LIST {8, 16, 32, 64}
+#define LMUL1_LOG2 0
std::string
to_lmul (int lmul_log2)
for (unsigned boolsize : BOOL_SIZE_LIST)
fprintf (fp, " /*BOOL%d_INTERPRET*/ INVALID,\n", boolsize);
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew,
+ inttype (eew, LMUL1_LOG2, /* unsigned_p */false).c_str ());
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
: "INVALID");
}
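+ /* Only the mask (vbool*_t) types map to the LMUL1 signed integer types,
+    so every base type in this section emits INVALID.  */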
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
+ eew);
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
for (unsigned boolsize : BOOL_SIZE_LIST)
fprintf (fp, " /*BOOL%d_INTERPRET*/ INVALID,\n", boolsize);
+ for (unsigned eew : EEW_SIZE_LIST)
+ fprintf (fp, " /*SIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew);
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, iu_v_bool16_interpret_ops)
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, iu_v_bool32_interpret_ops)
DEF_RVV_FUNCTION (vreinterpret, misc, none_preds, iu_v_bool64_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds,
+                  b_v_signed_eew8_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds,
+                  b_v_signed_eew16_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds,
+                  b_v_signed_eew32_lmul1_interpret_ops)
+DEF_RVV_FUNCTION (vreinterpret, misc, none_preds,
+                  b_v_signed_eew64_lmul1_interpret_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x2_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x4_ops)
DEF_RVV_FUNCTION (vlmul_ext, misc, none_preds, all_v_vlmul_ext_x8_ops)
#define DEF_RVV_BOOL64_INTERPRET_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS" macro include all types for
+ INT8M1 vinterpret which will be iterated and registered as intrinsic
+ functions. */
+#ifndef DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS
+#define DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS" macro include all types for
+ INT16M1 vinterpret which will be iterated and registered as intrinsic
+ functions. */
+#ifndef DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS
+#define DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS" macro include all types for
+ INT32M1 vinterpret which will be iterated and registered as intrinsic
+ functions. */
+#ifndef DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS
+#define DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS" macro include all types for
+ INT64M1 vinterpret which will be iterated and registered as intrinsic
+ functions. */
+#ifndef DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS
+#define DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS(TYPE, REQUIRE)
+#endif
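+
+/* Each guard above follows the usual .def idiom: a file that includes
+   riscv-vector-builtins-types.def defines only the DEF_RVV_* macros it
+   cares about, and every macro left undefined expands to nothing.  */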
+
/* Use "DEF_RVV_X2_VLMUL_EXT_OPS" macro include all types for X2 VLMUL EXT
which will be iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_X2_VLMUL_EXT_OPS
DEF_RVV_BOOL64_INTERPRET_OPS (vuint32m1_t, 0)
DEF_RVV_BOOL64_INTERPRET_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool1_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool2_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool4_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool8_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool16_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool32_t, 0)
+DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool1_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool2_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool4_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool8_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool16_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool32_t, 0)
+DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool1_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool2_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool4_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool8_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool16_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool32_t, 0)
+DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
+
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool1_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool2_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool4_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool8_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool16_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool32_t, 0)
+DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
+
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf2_t, 0)
#undef DEF_RVV_BOOL16_INTERPRET_OPS
#undef DEF_RVV_BOOL32_INTERPRET_OPS
#undef DEF_RVV_BOOL64_INTERPRET_OPS
+#undef DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS
+#undef DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS
#undef DEF_RVV_X2_VLMUL_EXT_OPS
#undef DEF_RVV_X4_VLMUL_EXT_OPS
#undef DEF_RVV_X8_VLMUL_EXT_OPS
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of vint8m1 interpret will be registered for intrinsic functions. */
+static const rvv_type_info signed_eew8_lmul1_interpret_ops[] = {
+#define DEF_RVV_SIGNED_EEW8_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vint16m1 interpret will be registered for intrinsic functions. */
+static const rvv_type_info signed_eew16_lmul1_interpret_ops[] = {
+#define DEF_RVV_SIGNED_EEW16_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vint32m1 interpret will be registered for intrinsic functions. */
+static const rvv_type_info signed_eew32_lmul1_interpret_ops[] = {
+#define DEF_RVV_SIGNED_EEW32_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of vint64m1 interpret will be registered for intrinsic functions. */
+static const rvv_type_info signed_eew64_lmul1_interpret_ops[] = {
+#define DEF_RVV_SIGNED_EEW64_LMUL1_INTERPRET_OPS(TYPE, REQUIRE) \
+ {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
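+
+/* Via riscv-vector-builtins-types.def, each table above collects the seven
+   vbool*_t entries defined earlier, e.g. (a sketch):
+     {VECTOR_TYPE_vbool1_t, 0}, ..., {VECTOR_TYPE_vbool64_t,
+     RVV_REQUIRE_ELEN_64}, {NUM_VECTOR_TYPES, 0}.  */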
+
/* A list of x2 vlmul ext will be registered for intrinsic functions. */
static const rvv_type_info vlmul_ext_x2_ops[] = {
#define DEF_RVV_X2_VLMUL_EXT_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
rvv_arg_type_info (RVV_BASE_bool64_interpret), /* Return type */
v_args /* Args */};
+/* A static operand information for vint8m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_signed_eew8_lmul1_interpret_ops
+ = {signed_eew8_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_signed_eew8_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vint16m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_signed_eew16_lmul1_interpret_ops
+ = {signed_eew16_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_signed_eew16_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vint32m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_signed_eew32_lmul1_interpret_ops
+ = {signed_eew32_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_signed_eew32_lmul1_interpret),/* Return type */
+ v_args /* Args */};
+
+/* A static operand information for vint64m1_t func (vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info b_v_signed_eew64_lmul1_interpret_ops
+ = {signed_eew64_lmul1_interpret_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_signed_eew64_lmul1_interpret),/* Return type */
+ v_args /* Args */};
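+
+/* Together with the vreinterpret DEF_RVV_FUNCTION entries, each op_info
+   above is expected to register intrinsics of the shape (a sketch; the
+   concrete names are exercised in the tests below):
+     vint8m1_t __riscv_vreinterpret_v_b1_i8m1 (vbool1_t);  */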
+
/* A static operand information for vector_type func (vector_type)
* function registration. */
static CONSTEXPR const rvv_op_info all_v_vlmul_ext_x2_ops
EEW8_INTERPRET, EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, \
BOOL1_INTERPRET, BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, \
BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \
+ SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \
+ SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \
X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \
X64_VLMUL_EXT, TUPLE_SUBPART) \
{ \
VECTOR_TYPE_##BOOL16_INTERPRET, \
VECTOR_TYPE_##BOOL32_INTERPRET, \
VECTOR_TYPE_##BOOL64_INTERPRET, \
+ VECTOR_TYPE_##SIGNED_EEW8_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##SIGNED_EEW16_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##SIGNED_EEW32_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##SIGNED_EEW64_LMUL1_INTERPRET, \
VECTOR_TYPE_##X2_VLMUL_EXT, \
VECTOR_TYPE_##X4_VLMUL_EXT, \
VECTOR_TYPE_##X8_VLMUL_EXT, \
case RVV_BASE_bool16_interpret:
case RVV_BASE_bool32_interpret:
case RVV_BASE_bool64_interpret:
+ case RVV_BASE_signed_eew8_lmul1_interpret:
+ case RVV_BASE_signed_eew16_lmul1_interpret:
+ case RVV_BASE_signed_eew32_lmul1_interpret:
+ case RVV_BASE_signed_eew64_lmul1_interpret:
case RVV_BASE_vlmul_ext_x2:
case RVV_BASE_vlmul_ext_x4:
case RVV_BASE_vlmul_ext_x8:
EEW8_INTERPRET, EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, \
BOOL1_INTERPRET, BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, \
BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \
+ SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \
+ SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \
X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \
X64_VLMUL_EXT, TUPLE_SUBPART)
#endif
DEF_RVV_BASE_TYPE (bool16_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (bool32_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (bool64_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (signed_eew8_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (signed_eew16_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (signed_eew32_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (signed_eew64_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x2, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x4, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x8, get_vector_type (type_idx))
return __riscv_vreinterpret_v_u8m1_b64 (src);
}
-/* { dg-final { scan-assembler-times {vlm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 20 } } */
+vint8m1_t test_vreinterpret_v_b1_vint8m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_i8m1 (src);
+}
+
+vint16m1_t test_vreinterpret_v_b1_vint16m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_i16m1 (src);
+}
+
+vint32m1_t test_vreinterpret_v_b1_vint32m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_i32m1 (src);
+}
+
+vint64m1_t test_vreinterpret_v_b1_vint64m1 (vbool1_t src) {
+ return __riscv_vreinterpret_v_b1_i64m1 (src);
+}
+
+/* { dg-final { scan-assembler-times {vlm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 24 } } */
/* { dg-final { scan-assembler-times {vsm\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 20 } } */
+/* { dg-final { scan-assembler-times {vs1r\.v\s+v[0-9]+,\s*0\([a-x][0-9]+\)} 4 } } */
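+
+/* A usage sketch, kept in a comment so the scan-assembler counts above stay
+   accurate: the new mask-to-integer direction composes with the existing
+   integer-to-mask vreinterpret (assuming __riscv_vreinterpret_v_i8m1_b1 is
+   available):
+
+     vbool1_t roundtrip (vbool1_t m)
+     {
+       vint8m1_t bits = __riscv_vreinterpret_v_b1_i8m1 (m);
+       return __riscv_vreinterpret_v_i8m1_b1 (bits);
+     }  */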