int inner_size = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
if (size < TARGET_MIN_VLEN)
{
+ /* Follow rule LMUL >= SEW / ELEN. */
+ int elen = TARGET_VECTOR_ELEN_64 ? 1 : 2;
int factor = TARGET_MIN_VLEN / size;
if (inner_size == 8)
- factor = MIN (factor, 8);
+ factor = MIN (factor, 8 / elen);
else if (inner_size == 16)
- factor = MIN (factor, 4);
+ factor = MIN (factor, 4 / elen);
else if (inner_size == 32)
- factor = MIN (factor, 2);
+ factor = MIN (factor, 2 / elen);
else if (inner_size == 64)
factor = MIN (factor, 1);
else
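For reference, the clamp above encodes LMUL >= SEW / ELEN: when ELEN is 32 the divisor `elen` becomes 2, halving the largest fractional-LMUL denominator allowed for every SEW. A minimal standalone sketch of the rule (not part of the patch; the max_lmul_denominator helper is hypothetical):

#include <assert.h>
#include <stdio.h>

/* Largest fractional-LMUL denominator for a given SEW, mirroring the
   clamp above: it enforces 1/denominator >= SEW / ELEN.  */
static int
max_lmul_denominator (int sew, int elen)
{
  int div = (elen == 64) ? 1 : 2;	/* the patch's `elen` variable */
  switch (sew)
    {
    case 8:  return 8 / div;
    case 16: return 4 / div;
    case 32: return 2 / div;
    default: return 1;			/* SEW = 64 caps at LMUL = 1 */
    }
}

int
main (void)
{
  /* ELEN = 64: SEW = 8 may shrink to LMUL = 1/8, so mf8 types exist.  */
  assert (max_lmul_denominator (8, 64) == 8);
  /* ELEN = 32: SEW = 8 stops at LMUL = 1/4, so vint8mf8_t is unavailable,
     matching the RVV_REQUIRE_ELEN_64 gating in the lists below.  */
  assert (max_lmul_denominator (8, 32) == 4);
  /* ELEN = 32: SEW = 32 stops at LMUL = 1, so vint32mf2_t is unavailable.  */
  assert (max_lmul_denominator (32, 32) == 1);
  printf ("LMUL >= SEW / ELEN clamps hold\n");
  return 0;
}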
#define DEF_RVV_XFQF_OPS(TYPE, REQUIRE)
#endif
-DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
DEF_RVV_I_OPS (vint8m1_t, 0)
DEF_RVV_I_OPS (vint8m2_t, 0)
DEF_RVV_I_OPS (vint8m4_t, 0)
DEF_RVV_I_OPS (vint8m8_t, 0)
-DEF_RVV_I_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint16mf2_t, 0)
DEF_RVV_I_OPS (vint16m1_t, 0)
DEF_RVV_I_OPS (vint16m2_t, 0)
DEF_RVV_I_OPS (vint16m4_t, 0)
DEF_RVV_I_OPS (vint16m8_t, 0)
-DEF_RVV_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint32m1_t, 0)
DEF_RVV_I_OPS (vint32m2_t, 0)
DEF_RVV_I_OPS (vint32m4_t, 0)
DEF_RVV_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint8mf4_t, 0)
DEF_RVV_U_OPS (vuint8mf2_t, 0)
DEF_RVV_U_OPS (vuint8m1_t, 0)
DEF_RVV_U_OPS (vuint8m2_t, 0)
DEF_RVV_U_OPS (vuint8m4_t, 0)
DEF_RVV_U_OPS (vuint8m8_t, 0)
-DEF_RVV_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint16mf2_t, 0)
DEF_RVV_U_OPS (vuint16m1_t, 0)
DEF_RVV_U_OPS (vuint16m2_t, 0)
DEF_RVV_U_OPS (vuint16m4_t, 0)
DEF_RVV_U_OPS (vuint16m8_t, 0)
-DEF_RVV_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint32m1_t, 0)
DEF_RVV_U_OPS (vuint32m2_t, 0)
DEF_RVV_U_OPS (vuint32m4_t, 0)
DEF_RVV_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_F_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_F_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_F_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_F_OPS (vbfloat16m2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_F_OPS (vbfloat16m4_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_F_OPS (vbfloat16m8_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_F_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_F_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_F_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_F_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_F_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_F_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_F_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_F_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_B_OPS (vbool64_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_B_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_B_OPS (vbool32_t, 0)
DEF_RVV_B_OPS (vbool16_t, 0)
DEF_RVV_B_OPS (vbool8_t, 0)
DEF_RVV_B_OPS (vbool2_t, 0)
DEF_RVV_B_OPS (vbool1_t, 0)
-DEF_RVV_WEXTI_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTI_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTI_OPS (vint16mf2_t, 0)
DEF_RVV_WEXTI_OPS (vint16m1_t, 0)
DEF_RVV_WEXTI_OPS (vint16m2_t, 0)
DEF_RVV_WEXTI_OPS (vint16m4_t, 0)
DEF_RVV_WEXTI_OPS (vint16m8_t, 0)
-DEF_RVV_WEXTI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTI_OPS (vint32m1_t, 0)
DEF_RVV_WEXTI_OPS (vint32m2_t, 0)
DEF_RVV_WEXTI_OPS (vint32m4_t, 0)
DEF_RVV_WEXTI_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTI_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_QEXTI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_QEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_QEXTI_OPS (vint32m1_t, 0)
DEF_RVV_QEXTI_OPS (vint32m2_t, 0)
DEF_RVV_QEXTI_OPS (vint32m4_t, 0)
DEF_RVV_OEXTI_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_OEXTI_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_WEXTU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTU_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTU_OPS (vuint16mf2_t, 0)
DEF_RVV_WEXTU_OPS (vuint16m1_t, 0)
DEF_RVV_WEXTU_OPS (vuint16m2_t, 0)
DEF_RVV_WEXTU_OPS (vuint16m4_t, 0)
DEF_RVV_WEXTU_OPS (vuint16m8_t, 0)
-DEF_RVV_WEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTU_OPS (vuint32m1_t, 0)
DEF_RVV_WEXTU_OPS (vuint32m2_t, 0)
DEF_RVV_WEXTU_OPS (vuint32m4_t, 0)
DEF_RVV_WEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_QEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_QEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_QEXTU_OPS (vuint32m1_t, 0)
DEF_RVV_QEXTU_OPS (vuint32m2_t, 0)
DEF_RVV_QEXTU_OPS (vuint32m4_t, 0)
DEF_RVV_OEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_OEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_FULL_V_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_I_OPS (vint8mf4_t, 0)
DEF_RVV_FULL_V_I_OPS (vint8mf2_t, 0)
DEF_RVV_FULL_V_I_OPS (vint8m1_t, 0)
DEF_RVV_FULL_V_I_OPS (vint8m2_t, 0)
DEF_RVV_FULL_V_I_OPS (vint8m4_t, 0)
DEF_RVV_FULL_V_I_OPS (vint8m8_t, 0)
-DEF_RVV_FULL_V_I_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_I_OPS (vint16mf2_t, 0)
DEF_RVV_FULL_V_I_OPS (vint16m1_t, 0)
DEF_RVV_FULL_V_I_OPS (vint16m2_t, 0)
DEF_RVV_FULL_V_I_OPS (vint16m4_t, 0)
DEF_RVV_FULL_V_I_OPS (vint16m8_t, 0)
-DEF_RVV_FULL_V_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_I_OPS (vint32m1_t, 0)
DEF_RVV_FULL_V_I_OPS (vint32m2_t, 0)
DEF_RVV_FULL_V_I_OPS (vint32m4_t, 0)
DEF_RVV_FULL_V_I_OPS (vint64m4_t, RVV_REQUIRE_FULL_V)
DEF_RVV_FULL_V_I_OPS (vint64m8_t, RVV_REQUIRE_FULL_V)
-DEF_RVV_FULL_V_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_U_OPS (vuint8mf4_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint8mf2_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint8m1_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint8m2_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint8m4_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint8m8_t, 0)
-DEF_RVV_FULL_V_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_U_OPS (vuint16mf2_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint16m1_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint16m2_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint16m4_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint16m8_t, 0)
-DEF_RVV_FULL_V_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_FULL_V_U_OPS (vuint32m1_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint32m2_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint32m4_t, 0)
DEF_RVV_FULL_V_U_OPS (vuint64m4_t, RVV_REQUIRE_FULL_V)
DEF_RVV_FULL_V_U_OPS (vuint64m8_t, RVV_REQUIRE_FULL_V)
-DEF_RVV_WEXTF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WEXTF_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WEXTF_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WEXTF_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WEXTF_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_WEXTF_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_CONVERT_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_I_OPS (vint16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_I_OPS (vint16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_I_OPS (vint16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_I_OPS (vint16m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_I_OPS (vint16m8_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_CONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_I_OPS (vint32m1_t, 0)
DEF_RVV_CONVERT_I_OPS (vint32m2_t, 0)
DEF_RVV_CONVERT_I_OPS (vint32m4_t, 0)
DEF_RVV_CONVERT_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_CONVERT_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_U_OPS (vuint16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_U_OPS (vuint16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_U_OPS (vuint16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_U_OPS (vuint16m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_CONVERT_U_OPS (vuint16m8_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_CONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_U_OPS (vuint32m1_t, 0)
DEF_RVV_CONVERT_U_OPS (vuint32m2_t, 0)
DEF_RVV_CONVERT_U_OPS (vuint32m4_t, 0)
DEF_RVV_CONVERT_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CONVERT_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_WCONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WCONVERT_I_OPS (vint32m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_I_OPS (vint32m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_I_OPS (vint32m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WCONVERT_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
-DEF_RVV_WCONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WCONVERT_U_OPS (vuint32m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_U_OPS (vuint32m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_U_OPS (vuint32m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WCONVERT_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WCONVERT_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
-DEF_RVV_WCONVERT_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WCONVERT_F_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WCONVERT_F_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WCONVERT_F_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WCONVERT_F_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_WCONVERT_F_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_F32_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F32_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_F32_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F32_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F32_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_F32_OPS (vfloat32m8_t, RVV_REQUIRE_ELEN_FP_32)
-DEF_RVV_WI_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WI_OPS (vint8mf4_t, 0)
DEF_RVV_WI_OPS (vint8mf2_t, 0)
DEF_RVV_WI_OPS (vint8m1_t, 0)
DEF_RVV_WI_OPS (vint8m2_t, 0)
DEF_RVV_WI_OPS (vint8m4_t, 0)
DEF_RVV_WI_OPS (vint8m8_t, 0)
-DEF_RVV_WI_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WI_OPS (vint16mf2_t, 0)
DEF_RVV_WI_OPS (vint16m1_t, 0)
DEF_RVV_WI_OPS (vint16m2_t, 0)
DEF_RVV_WI_OPS (vint16m4_t, 0)
DEF_RVV_WI_OPS (vint16m8_t, 0)
-DEF_RVV_WI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WI_OPS (vint32m1_t, 0)
DEF_RVV_WI_OPS (vint32m2_t, 0)
DEF_RVV_WI_OPS (vint32m4_t, 0)
DEF_RVV_WI_OPS (vint32m8_t, 0)
-DEF_RVV_WU_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WU_OPS (vuint8mf4_t, 0)
DEF_RVV_WU_OPS (vuint8mf2_t, 0)
DEF_RVV_WU_OPS (vuint8m1_t, 0)
DEF_RVV_WU_OPS (vuint8m2_t, 0)
DEF_RVV_WU_OPS (vuint8m4_t, 0)
DEF_RVV_WU_OPS (vuint8m8_t, 0)
-DEF_RVV_WU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WU_OPS (vuint16mf2_t, 0)
DEF_RVV_WU_OPS (vuint16m1_t, 0)
DEF_RVV_WU_OPS (vuint16m2_t, 0)
DEF_RVV_WU_OPS (vuint16m4_t, 0)
DEF_RVV_WU_OPS (vuint16m8_t, 0)
-DEF_RVV_WU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_WU_OPS (vuint32m1_t, 0)
DEF_RVV_WU_OPS (vuint32m2_t, 0)
DEF_RVV_WU_OPS (vuint32m4_t, 0)
DEF_RVV_WU_OPS (vuint32m8_t, 0)
-DEF_RVV_WF_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WF_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WF_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WF_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WF_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WF_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_WF_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_WF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_WF_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WF_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WF_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_WF_OPS (vfloat32m8_t, RVV_REQUIRE_ELEN_FP_32)
-DEF_RVV_EI16_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vint8mf4_t, 0)
DEF_RVV_EI16_OPS (vint8mf2_t, 0)
DEF_RVV_EI16_OPS (vint8m1_t, 0)
DEF_RVV_EI16_OPS (vint8m2_t, 0)
DEF_RVV_EI16_OPS (vint8m4_t, 0)
-DEF_RVV_EI16_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vint16mf2_t, 0)
DEF_RVV_EI16_OPS (vint16m1_t, 0)
DEF_RVV_EI16_OPS (vint16m2_t, 0)
DEF_RVV_EI16_OPS (vint16m4_t, 0)
DEF_RVV_EI16_OPS (vint16m8_t, 0)
-DEF_RVV_EI16_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vint32m1_t, 0)
DEF_RVV_EI16_OPS (vint32m2_t, 0)
DEF_RVV_EI16_OPS (vint32m4_t, 0)
DEF_RVV_EI16_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_EI16_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vuint8mf4_t, 0)
DEF_RVV_EI16_OPS (vuint8mf2_t, 0)
DEF_RVV_EI16_OPS (vuint8m1_t, 0)
DEF_RVV_EI16_OPS (vuint8m2_t, 0)
DEF_RVV_EI16_OPS (vuint8m4_t, 0)
-DEF_RVV_EI16_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vuint16mf2_t, 0)
DEF_RVV_EI16_OPS (vuint16m1_t, 0)
DEF_RVV_EI16_OPS (vuint16m2_t, 0)
DEF_RVV_EI16_OPS (vuint16m4_t, 0)
DEF_RVV_EI16_OPS (vuint16m8_t, 0)
-DEF_RVV_EI16_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vuint32m1_t, 0)
DEF_RVV_EI16_OPS (vuint32m2_t, 0)
DEF_RVV_EI16_OPS (vuint32m4_t, 0)
DEF_RVV_EI16_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_EI16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_EI16_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_EI16_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_EI16_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_EI16_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_EI16_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_EI16_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_EI16_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_EI16_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_EI16_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_EI16_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_EEW8_INTERPRET_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vint16mf2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint16m1_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint16m2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint16m4_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint16m8_t, 0)
-DEF_RVV_EEW8_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vint32m1_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint32m2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint32m4_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint16m1_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint16m2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint16m4_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint16m8_t, 0)
-DEF_RVV_EEW8_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW8_INTERPRET_OPS (vuint32m1_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint32m2_t, 0)
DEF_RVV_EEW8_INTERPRET_OPS (vuint32m4_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vint8m2_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vint8m4_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vint8m8_t, 0)
-DEF_RVV_EEW16_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW16_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW16_INTERPRET_OPS (vint32m1_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vint32m2_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vint32m4_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vuint8m2_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vuint8m4_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vuint8m8_t, 0)
-DEF_RVV_EEW16_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW16_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_EEW16_INTERPRET_OPS (vuint32m1_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vuint32m2_t, 0)
DEF_RVV_EEW16_INTERPRET_OPS (vuint32m4_t, 0)
DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool32_t, 0)
DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint8m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint16m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint16m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint16m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint32m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint32m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint32m4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m1_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m2_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m4_t, 0)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m4_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_X2_VLMUL_EXT_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint8m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint8m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint16m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint16m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vint32m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint32m2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf4_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint8m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint8m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint16m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint16m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint32m1_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint32m2_t, 0)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16m2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat64m1_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_X4_VLMUL_EXT_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf2_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vint8m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf2_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vint16m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vint32m1_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf4_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf2_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint8m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf2_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint16m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint32m1_t, 0)
DEF_RVV_X8_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_X8_VLMUL_EXT_OPS (vfloat64m1_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf4_t, 0)
DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf4_t, 0)
DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X16_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf4_t, 0)
-DEF_RVV_X32_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf4_t, 0)
-DEF_RVV_X32_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
-DEF_RVV_X64_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X64_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X64_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X64_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_LMUL1_OPS (vint8m1_t, 0)
DEF_RVV_LMUL1_OPS (vint16m1_t, 0)
DEF_RVV_LMUL4_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_LMUL4_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vint8mf4x2_t, 0)
DEF_RVV_TUPLE_OPS (vuint8mf4x2_t, 0)
DEF_RVV_TUPLE_OPS (vint8mf4x3_t, 0)
DEF_RVV_TUPLE_OPS (vuint8m2x4_t, 0)
DEF_RVV_TUPLE_OPS (vint8m4x2_t, 0)
DEF_RVV_TUPLE_OPS (vuint8m4x2_t, 0)
-DEF_RVV_TUPLE_OPS (vint16mf4x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vint16mf2x2_t, 0)
DEF_RVV_TUPLE_OPS (vuint16mf2x2_t, 0)
DEF_RVV_TUPLE_OPS (vint16mf2x3_t, 0)
DEF_RVV_TUPLE_OPS (vuint16m2x4_t, 0)
DEF_RVV_TUPLE_OPS (vint16m4x2_t, 0)
DEF_RVV_TUPLE_OPS (vuint16m4x2_t, 0)
-DEF_RVV_TUPLE_OPS (vint32mf2x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vint32m1x2_t, 0)
DEF_RVV_TUPLE_OPS (vuint32m1x2_t, 0)
DEF_RVV_TUPLE_OPS (vint32m1x3_t, 0)
DEF_RVV_TUPLE_OPS (vuint64m2x4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vint64m4x2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vuint64m4x2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x2_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x3_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x5_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x6_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x7_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x8_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x2_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x3_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x5_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x6_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x7_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x8_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vbfloat16mf2x2_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_TUPLE_OPS (vbfloat16mf2x3_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_TUPLE_OPS (vbfloat16mf2x4_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_TUPLE_OPS (vbfloat16m2x3_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_TUPLE_OPS (vbfloat16m2x4_t, RVV_REQUIRE_ELEN_BF_16)
DEF_RVV_TUPLE_OPS (vbfloat16m4x2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x3_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x5_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x6_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x7_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x8_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x3_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x5_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x6_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x7_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x8_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vfloat16mf2x2_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_TUPLE_OPS (vfloat16mf2x3_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_TUPLE_OPS (vfloat16mf2x4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_TUPLE_OPS (vfloat16m2x3_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_TUPLE_OPS (vfloat16m2x4_t, RVV_REQUIRE_ELEN_FP_16)
DEF_RVV_TUPLE_OPS (vfloat16m4x2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x3_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x5_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x6_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x7_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x3_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x5_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x6_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x7_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
DEF_RVV_TUPLE_OPS (vfloat32m1x2_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_TUPLE_OPS (vfloat32m1x3_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_TUPLE_OPS (vfloat32m1x4_t, RVV_REQUIRE_ELEN_FP_32)
DEF_RVV_TUPLE_OPS (vfloat64m2x4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
-DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m1_t, 0)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m2_t, 0)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m4_t, 0)
|BI |RVVM1BI|RVVMF2BI|RVVMF4BI|RVVMF8BI|RVVMF16BI|RVVMF32BI|RVVMF64BI| */
/* Return 'REQUIREMENT' for machine_mode 'MODE'.
- For example: 'MODE' = RVVMF64BImode needs TARGET_MIN_VLEN > 32. */
+ For example: 'MODE' = RVVMF64BImode needs TARGET_VECTOR_ELEN_64. */
#ifndef ENTRY
#define ENTRY(MODE, REQUIREMENT, VLMUL, RATIO)
#endif
-/* Disable modes if TARGET_MIN_VLEN == 32. */
+/* Disable modes if !TARGET_VECTOR_ELEN_64. */
-ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F8, 64)
+ENTRY (RVVMF64BI, TARGET_VECTOR_ELEN_64, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F8, 64)
ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F4, 32)
ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2, 16)
ENTRY (RVVMF8BI, true, LMUL_1, 8)
ENTRY (RVVM1QI, true, LMUL_1, 8)
ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
+ENTRY (RVVMF8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
-/* Disable modes if TARGET_MIN_VLEN == 32. */
+/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8HI, true, LMUL_8, 2)
ENTRY (RVVM2HI, true, LMUL_2, 8)
ENTRY (RVVM1HI, true, LMUL_1, 16)
ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
-/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_BF_16. */
+/* Disable modes if !TARGET_VECTOR_ELEN_64 or !TARGET_VECTOR_ELEN_BF_16. */
ENTRY (RVVM8BF, TARGET_VECTOR_ELEN_BF_16, LMUL_8, 2)
ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
-/* Disable modes if TARGET_MIN_VLEN == 32. */
+/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8SI, true, LMUL_8, 4)
ENTRY (RVVM4SI, true, LMUL_4, 8)
ENTRY (RVVM2SI, true, LMUL_2, 16)
ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
-/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32. */
+/* Disable modes if !TARGET_VECTOR_ELEN_64 or !TARGET_VECTOR_ELEN_FP_32. */
ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
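A cross-check on the table (an observation, not part of the patch): the RATIO column is SEW divided by LMUL, and the entries rewritten to TARGET_VECTOR_ELEN_64 are exactly those with RATIO = 64. For example, RVVMF4HI has SEW = 16 and LMUL = 1/4, so SEW / LMUL = 16 / (1/4) = 64, and by LMUL >= SEW / ELEN such a mode needs ELEN >= 64. The old TARGET_MIN_VLEN > 32 test was weaker: a configuration such as Zve32x with VLEN = 64 passes it while still having ELEN = 32, which is presumably the case this change fixes.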
TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8BF, TARGET_VECTOR_ELEN_BF_16, RVVM1BF, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x8BF, TARGET_VECTOR_ELEN_BF_16, RVVMF2BF, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x8SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x8SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x7SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x7SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 64)
TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 64)
TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 64)
TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 8)
TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 64)
TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 64)
TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 64)
TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 8)
TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 16)
TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 32)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 64)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 64)
TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 64)
TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 64)
;; Subset of VI with fractional LMUL types
(define_mode_iterator VI_FRAC [
- RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
- RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+ RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
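;; Fractional LMUL modes depend on ELEN rather than on the minimum VLEN: the
;; RVV spec requires LMUL >= SEW / ELEN.  For RVVMF8QI that means
;; 1/8 >= 8 / ELEN, i.e. ELEN >= 64; likewise RVVMF4HI needs 1/4 >= 16 / ELEN
;; and RVVMF2SI needs 1/2 >= 32 / ELEN, so all three are gated on
;; TARGET_VECTOR_ELEN_64.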
(define_mode_iterator VF [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
(RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
(RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VF_ZVFHMIN [
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator VEEWEXT2 [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
(RVVM8BF "TARGET_VECTOR_ELEN_BF_16") (RVVM4BF "TARGET_VECTOR_ELEN_BF_16") (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
(RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VEEWEXT4 [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VEEWTRUNC2 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
(RVVM4BF "TARGET_VECTOR_ELEN_BF_16") (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
(RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
(RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
(RVVM4SI "TARGET_64BIT")
(RVVM2SI "TARGET_64BIT")
(RVVM1SI "TARGET_64BIT")
- (RVVMF2SI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC4 [
- RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
(RVVM2HI "TARGET_64BIT")
(RVVM1HI "TARGET_64BIT")
(RVVMF2HI "TARGET_64BIT")
- (RVVMF4HI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF4HI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
(RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
(RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
(RVVMF2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator VEEWTRUNC8 [
(RVVM1QI "TARGET_64BIT")
(RVVMF2QI "TARGET_64BIT")
(RVVMF4QI "TARGET_64BIT")
- (RVVMF8QI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+ (RVVMF8QI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator VEI16 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
(RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
(RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VFULLI [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_FULL_V") (RVVM4DI "TARGET_FULL_V") (RVVM2DI "TARGET_FULL_V") (RVVM1DI "TARGET_FULL_V")
])
(define_mode_iterator VI_QH [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
])
(define_mode_iterator VI_QHS_NO_M8 [
- RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
])
(define_mode_iterator VF_HS [
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
(RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH")
(RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
(V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
])
(define_mode_iterator V_VLSI_QHS [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
(V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
])
;; E.g. when the index mode is RVVM8QImode and Pmode is SImode, such a
;; gather/scatter is not allowed unless the index is a zero_extend and the
;; scale is 1, since we don't have RVVM32SImode.
(define_mode_iterator RATIO64 [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM1DI "TARGET_VECTOR_ELEN_64")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
- (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
+ (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator RATIO64I [
- (RVVMF8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4HI "TARGET_MIN_VLEN > 32")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
])
(define_mode_iterator V_FRACT [
- RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
- (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VWEXTI [
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM4SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
])
(define_mode_iterator VQEXTI [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
(RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator V1T [
- (RVVMF8x2QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x3QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x4QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x5QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x6QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x7QI "TARGET_MIN_VLEN > 32")
- (RVVMF8x8QI "TARGET_MIN_VLEN > 32")
- (RVVMF4x2HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x3HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x4HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x5HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x6HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x7HI "TARGET_MIN_VLEN > 32")
- (RVVMF4x8HI "TARGET_MIN_VLEN > 32")
- (RVVMF2x2SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x3SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x4SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x5SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x6SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x7SI "TARGET_MIN_VLEN > 32")
- (RVVMF2x8SI "TARGET_MIN_VLEN > 32")
+ (RVVMF8x2QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x3QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x4QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x5QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x6QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x7QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF8x8QI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x2HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x3HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x4HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x5HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x6HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x7HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF4x8HI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x2SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x3SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x4SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x5SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x6SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x7SI "TARGET_VECTOR_ELEN_64")
+ (RVVMF2x8SI "TARGET_VECTOR_ELEN_64")
(RVVM1x2DI "TARGET_VECTOR_ELEN_64")
(RVVM1x3DI "TARGET_VECTOR_ELEN_64")
(RVVM1x4DI "TARGET_VECTOR_ELEN_64")
(RVVM1x5DI "TARGET_VECTOR_ELEN_64")
(RVVM1x6DI "TARGET_VECTOR_ELEN_64")
(RVVM1x7DI "TARGET_VECTOR_ELEN_64")
(RVVM1x8DI "TARGET_VECTOR_ELEN_64")
- (RVVMF4x2BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x3BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x4BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x5BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x6BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x7BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x8BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4x2HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x3HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x4HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x5HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x6HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x7HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF4x8HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
- (RVVMF2x2SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x3SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x4SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x5SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x6SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x7SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2x8SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF4x2BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x3BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x4BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x5BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x6BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x7BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x8BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+ (RVVMF4x2HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x3HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x4HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x5HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x6HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x7HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF4x8HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+ (RVVMF2x2SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x3SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x4SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x5SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x6SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x7SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+ (RVVMF2x8SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
(RVVM1x2DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM1x3DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM1x4DF "TARGET_VECTOR_ELEN_FP_64")
(V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
(define_mode_iterator VB [
- (RVVMF64BI "TARGET_MIN_VLEN > 32") RVVMF32BI RVVMF16BI RVVMF8BI RVVMF4BI RVVMF2BI RVVM1BI
+ (RVVMF64BI "TARGET_VECTOR_ELEN_64") RVVMF32BI RVVMF16BI RVVMF8BI RVVMF4BI RVVMF2BI RVVM1BI
])
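;; RVVMF64BI is the ratio-64 mask mode; it pairs with data modes such as
;; RVVM1DI, RVVMF2SI and RVVMF8QI, all of which themselves require ELEN >= 64,
;; hence the TARGET_VECTOR_ELEN_64 gate.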
;; Iterator for indexed loads and stores.  We must disallow 64-bit indices on
;; rv32 targets, i.e. VINDEXED is effectively [VI8 VI16 VI32 (VI64 "TARGET_64BIT")].
(define_mode_iterator VINDEXED [
- RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
- RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
(RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
(RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
(RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
(RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
- (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+ (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
(RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
(RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
(define_mode_iterator V_VLS_F_CONVERT_SI [
(RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH")
- (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(define_mode_iterator V_VLS_F_CONVERT_DI [
(RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
- (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+ (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
(RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
(RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
- (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+ (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
(RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
(RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
(V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
(define_mode_iterator VSI [
- RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VLMULX2_SI [
- RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VLMULX4_SI [
- RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VLMULX8_SI [
- RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
(define_mode_iterator VLMULX16_SI [
- (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVMF2SI "TARGET_VECTOR_ELEN_64")
])
(define_mode_attr VSIX2 [
])
(define_mode_iterator SF_XF [
- RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
])
#include "pr111391-1.c"
-/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 } } */
/* { dg-final { scan-assembler-times {vmv\.x\.s} 2 } } */
/* { dg-final { scan-assembler-times {vslidedown.vi\s+v[0-9]+,\s*v[0-9]+,\s*1} 1 } } */
/* { dg-final { scan-assembler-times {slli\s+[a-x0-9]+,[a-x0-9]+,32} 1 } } */
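/* Note: with an ELEN=32 configuration such as zve32x, e32,mf2 would violate
   LMUL >= SEW / ELEN (1/2 < 32/32), so the expected vsetivli above now uses
   m1 instead of mf2.  */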
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b -mabi=ilp32d" } */
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc_zve32f_zvl64b -mabi=ilp32d" } */
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
void f___rvv_float16m2_t () {__rvv_float16m2_t t;} /* { dg-error {unknown type name '__rvv_float16m2_t'} } */
void f___rvv_float16m4_t () {__rvv_float16m4_t t;} /* { dg-error {unknown type name '__rvv_float16m4_t'} } */
void f___rvv_float16m8_t () {__rvv_float16m8_t t;} /* { dg-error {unknown type name '__rvv_float16m8_t'} } */
-void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
-void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
-void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
-void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
-void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
-void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
-void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
+void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
+void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
+void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
+void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
+void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
+void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
+void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
/* { dg-do compile } */
/* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b_zvfhmin -mabi=ilp32d" } */
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
void f___rvv_uint64m2x4_t () {__rvv_uint64m2x4_t t;} /* { dg-error {unknown type name '__rvv_uint64m2x4_t'} } */
void f___rvv_int64m4x2_t () {__rvv_int64m4x2_t t;} /* { dg-error {unknown type name '__rvv_int64m4x2_t'} } */
void f___rvv_uint64m4x2_t () {__rvv_uint64m4x2_t t;} /* { dg-error {unknown type name '__rvv_uint64m4x2_t'} } */
-void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;}
-void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;}
-void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;}
-void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;}
-void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;}
-void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;}
-void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;}
+void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x2_t'} } */
+void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x3_t'} } */
+void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x4_t'} } */
+void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x5_t'} } */
+void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x6_t'} } */
+void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x7_t'} } */
+void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x8_t'} } */
void f___rvv_float16mf2x2_t () {__rvv_float16mf2x2_t t;}
void f___rvv_float16mf2x3_t () {__rvv_float16mf2x3_t t;}
void f___rvv_float16mf2x4_t () {__rvv_float16mf2x4_t t;}
void f___rvv_float16m2x3_t () {__rvv_float16m2x3_t t;}
void f___rvv_float16m2x4_t () {__rvv_float16m2x4_t t;}
void f___rvv_float16m4x2_t () {__rvv_float16m4x2_t t;}
-void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
-void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
-void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
-void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
-void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
-void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
-void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
+void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
+void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
+void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
+void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
+void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
+void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
+void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32imafc_zve32f_zvl128b -mabi=ilp32 -O2" } */
+
+struct S0
+{
+ unsigned a : 15;
+ int b;
+ int c;
+};
+
+struct S1
+{
+ struct S0 s0;
+ int e;
+};
+
+struct Z
+{
+ char c;
+ int z;
+} __attribute__((packed));
+
+union U
+{
+ struct S1 s1;
+ struct Z z;
+};
+
+int __attribute__((noinline, noclone))
+return_zero (void)
+{
+ return 0;
+}
+
+volatile union U gu;
+struct S0 gs;
+
+int __attribute__((noinline, noclone))
+check_outcome ()
+{
+ if (gs.a != 6
+ || gs.b != 80000)
+ __builtin_abort ();
+}
+
+int
+main (int argc, char *argv[])
+{
+ union U u;
+ struct S1 m;
+ struct S0 l;
+
+ if (return_zero ())
+ u.z.z = 20000;
+ else
+ {
+ u.s1.s0.a = 6;
+ u.s1.s0.b = 80000;
+ u.s1.e = 2;
+
+ m = u.s1;
+ m.s0.c = 0;
+ l = m.s0;
+ gs = l;
+ }
+
+ gu = u;
+ check_outcome ();
+ return 0;
+}
+
+/* { dg-final { scan-assembler {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
+/* { dg-final { scan-assembler {vsetivli\s+zero,\s*4,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
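+
+/* With ELEN=32 (zve32f), the fractional e32,mf2 mode is unavailable, so the
+   struct and union copies above are expected to be vectorized with e32,m1,
+   matching the two vsetivli patterns.  */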
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64g_zve32x_zvl128b -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+typedef unsigned int V2SI __attribute__((vector_size(8)));
+
+V2SI v1, v2;
+
+/* Make sure we won't use the mf2 mode even though half of a vector register
+   could hold the data, because ELEN=32 does not permit e32,mf2.  */
+void foo1()
+{
+/*
+** foo1:
+** ...
+** vsetivli zero,2,e32,m1,ta,ma
+** ...
+** vle32\.v v[0-9]+,0\([a-x][0-9]+\)
+** ...
+** vse32\.v v[0-9]+,0\([a-x][0-9]+\)
+** ...
+** ret
+*/
+ v1 = v2;
+}