#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO)
#endif
+#define REQUIRED_EXTENSIONS VECTOR_EXT_NO_XTHEAD
+// 7.4. Vector Unit-Stride Instructions
+DEF_RVV_FUNCTION (vlm, loadstore, none_preds, b_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (vsm, loadstore, none_preds, b_v_scalar_ptr_ops)
+
+// 11.3. Vector Integer Extension
+DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf2_ops)
+DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf4_ops)
+DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf8_ops)
+DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf2_ops)
+DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf4_ops)
+DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf8_ops)
+
+// 12.2. Vector Single-Width Averaging Add and Subtract
+DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvv_ops)
+DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvx_ops)
+DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvv_ops)
+DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvx_ops)
+DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvv_ops)
+DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvx_ops)
+
+// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
+DEF_RVV_FUNCTION (vfrsqrt7, alu, full_preds, f_v_ops)
+
+// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
+DEF_RVV_FUNCTION (vfrec7, alu, full_preds, f_v_ops)
+
+DEF_RVV_FUNCTION (vfrec7_frm, alu_frm, full_preds, f_v_ops)
+
+// 16.3. Vector Slide Instructions
+DEF_RVV_FUNCTION (vfslide1up, alu, full_preds, f_vvf_ops)
+DEF_RVV_FUNCTION (vfslide1down, alu, full_preds, f_vvf_ops)
+
+// 7.6. Vector Indexed Instructions
+DEF_RVV_FUNCTION (vluxei8, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vluxei16, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vluxei32, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vluxei64, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vsuxei8, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vsuxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vsuxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vsuxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew64_index_ops)
+
+// 7.8. Vector Load/Store Segment Instructions
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
+
+// 16.4. Vector Register Gather Instructions
+DEF_RVV_FUNCTION (vrgatherei16, alu, full_preds, all_gatherei16_vvv_ops)
+
+#undef REQUIRED_EXTENSIONS
+
#define REQUIRED_EXTENSIONS VECTOR_EXT
/* Internal helper functions for gimple fold use. */
DEF_RVV_FUNCTION (read_vl, read_vl, none_preds, p_none_void_ops)
// 7.4. Vector Unit-Stride Instructions
DEF_RVV_FUNCTION (vle, loadstore, full_preds, all_v_scalar_const_ptr_ops)
DEF_RVV_FUNCTION (vse, loadstore, none_m_preds, all_v_scalar_ptr_ops)
-DEF_RVV_FUNCTION (vlm, loadstore, none_preds, b_v_scalar_const_ptr_ops)
-DEF_RVV_FUNCTION (vsm, loadstore, none_preds, b_v_scalar_ptr_ops)
// 7.5. Vector Strided Instructions
DEF_RVV_FUNCTION (vlse, loadstore, full_preds, all_v_scalar_const_ptr_ptrdiff_ops)
DEF_RVV_FUNCTION (vsse, loadstore, none_m_preds, all_v_scalar_ptr_ptrdiff_ops)
// 7.6. Vector Indexed Instructions
-DEF_RVV_FUNCTION (vluxei8, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vluxei16, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vluxei32, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vluxei64, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew64_index_ops)
DEF_RVV_FUNCTION (vloxei8, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew8_index_ops)
DEF_RVV_FUNCTION (vloxei16, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew16_index_ops)
DEF_RVV_FUNCTION (vloxei32, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew32_index_ops)
DEF_RVV_FUNCTION (vloxei64, indexed_loadstore, full_preds, all_v_scalar_const_ptr_eew64_index_ops)
-DEF_RVV_FUNCTION (vsuxei8, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vsuxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vsuxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vsuxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew64_index_ops)
DEF_RVV_FUNCTION (vsoxei8, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew8_index_ops)
DEF_RVV_FUNCTION (vsoxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew16_index_ops)
DEF_RVV_FUNCTION (vsoxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_eew32_index_ops)
// 7.7. Unit-stride Fault-Only-First Loads
DEF_RVV_FUNCTION (vleff, fault_load, full_preds, all_v_scalar_const_ptr_size_ptr_ops)
+// 7.8. Vector Load/Store Segment Instructions
+DEF_RVV_FUNCTION (vlseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ops)
+DEF_RVV_FUNCTION (vsseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ops)
+DEF_RVV_FUNCTION (vlsseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ptrdiff_ops)
+DEF_RVV_FUNCTION (vssseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ptrdiff_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
+DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
+DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
+
/* 11. Vector Integer Arithmetic Instructions. */
// 11.1. Vector Single-Width Integer Add and Subtract
DEF_RVV_FUNCTION (vwcvt_x, alu, full_preds, i_x_x_v_ops)
DEF_RVV_FUNCTION (vwcvtu_x, alu, full_preds, u_x_x_v_ops)
-// 11.3. Vector Integer Extension
-DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf2_ops)
-DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf4_ops)
-DEF_RVV_FUNCTION (vzext, widen_alu, full_preds, u_vf8_ops)
-DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf2_ops)
-DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf4_ops)
-DEF_RVV_FUNCTION (vsext, widen_alu, full_preds, i_vf8_ops)
-
// 11.4. Vector Integer Add-with-Carry/Subtract-with-Borrow Instructions
DEF_RVV_FUNCTION (vadc, no_mask_policy, none_tu_preds, iu_vvvm_ops)
DEF_RVV_FUNCTION (vadc, no_mask_policy, none_tu_preds, iu_vvxm_ops)
DEF_RVV_FUNCTION (vssub, alu, full_preds, i_vvv_ops)
DEF_RVV_FUNCTION (vssub, alu, full_preds, i_vvx_ops)
-// 12.2. Vector Single-Width Averaging Add and Subtract
-DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvv_ops)
-DEF_RVV_FUNCTION (vaaddu, alu, full_preds, u_vvx_ops)
-DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvv_ops)
-DEF_RVV_FUNCTION (vaadd, alu, full_preds, i_vvx_ops)
-DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvv_ops)
-DEF_RVV_FUNCTION (vasubu, alu, full_preds, u_vvx_ops)
-DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvv_ops)
-DEF_RVV_FUNCTION (vasub, alu, full_preds, i_vvx_ops)
-
// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
DEF_RVV_FUNCTION (vsmul, alu, full_preds, full_v_i_vvv_ops)
DEF_RVV_FUNCTION (vsmul, alu, full_preds, full_v_i_vvx_ops)
DEF_RVV_FUNCTION (vfsqrt_frm, alu_frm, full_preds, f_v_ops)
-// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
-DEF_RVV_FUNCTION (vfrsqrt7, alu, full_preds, f_v_ops)
-
-// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
-DEF_RVV_FUNCTION (vfrec7, alu, full_preds, f_v_ops)
-
-DEF_RVV_FUNCTION (vfrec7_frm, alu_frm, full_preds, f_v_ops)
-
// 13.11. Vector Floating-Point MIN/MAX Instructions
DEF_RVV_FUNCTION (vfmin, alu, full_preds, f_vvv_ops)
DEF_RVV_FUNCTION (vfmin, alu, full_preds, f_vvf_ops)
DEF_RVV_FUNCTION (vslidedown, alu, full_preds, all_vvx_ops)
DEF_RVV_FUNCTION (vslide1up, alu, full_preds, iu_vvx_ops)
DEF_RVV_FUNCTION (vslide1down, alu, full_preds, iu_vvx_ops)
-DEF_RVV_FUNCTION (vfslide1up, alu, full_preds, f_vvf_ops)
-DEF_RVV_FUNCTION (vfslide1down, alu, full_preds, f_vvf_ops)
// 16.4. Vector Register Gather Instructions
DEF_RVV_FUNCTION (vrgather, alu, full_preds, all_gather_vvv_ops)
DEF_RVV_FUNCTION (vrgather, alu, full_preds, all_gather_vvx_ops)
-DEF_RVV_FUNCTION (vrgatherei16, alu, full_preds, all_gatherei16_vvv_ops)
// 16.5. Vector Compress Instruction
DEF_RVV_FUNCTION (vcompress, alu, none_tu_preds, all_vvm_ops)
DEF_RVV_FUNCTION (vcreate, vcreate, none_preds, all_v_vcreate_tuple_ops)
DEF_RVV_FUNCTION (vundefined, vundefined, none_preds, all_none_void_tuple_ops)
-// 7.8. Vector Load/Store Segment Instructions
-DEF_RVV_FUNCTION (vlseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ops)
-DEF_RVV_FUNCTION (vsseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ops)
-DEF_RVV_FUNCTION (vlsseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ptrdiff_ops)
-DEF_RVV_FUNCTION (vssseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ptrdiff_ops)
-DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vluxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
-DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vloxseg, seg_indexed_loadstore, full_preds, tuple_v_scalar_const_ptr_eew64_index_ops)
-DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vsuxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
-DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew8_index_ops)
-DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew16_index_ops)
-DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew32_index_ops)
-DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_ptr_eew64_index_ops)
-DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
#undef REQUIRED_EXTENSIONS
/* Definition of crypto vector intrinsic functions */
extern GTY (()) rvv_builtin_types_t builtin_types[NUM_VECTOR_TYPES + 1];
rvv_builtin_types_t builtin_types[NUM_VECTOR_TYPES + 1];
-/* The list of all registered function decls, indexed by code. */
-static GTY (()) vec<registered_function *, va_gc> *registered_functions;
+/* All registered function decls, grouped by extension partition and
+   indexed within each partition by their per-partition subcode.  */
+static GTY (()) vec<registered_function *, va_gc>
+ *partition_functions[NUM_RVV_EXT_PARTITIONS];
+
+/* Return true if TYPE uses fractional LMUL (mf2, mf4, mf8). */
+static bool
+is_fractional_lmul (vector_type_index type)
+{
+ /* Check if the type suffix contains "mf" indicating fractional LMUL. */
+ if (type >= NUM_VECTOR_TYPES)
+ return false;
+ const char *suffix = type_suffixes[type].vector;
+ return suffix && strstr (suffix, "mf") != NULL;
+}
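+
+/* For instance, a vector suffix such as "i32mf2" (illustrative; the
+   actual strings come from type_suffixes) contains "mf" and so counts
+   as fractional, while "i32m2" does not, meaning only fractional-LMUL
+   types are routed away from XTheadVector.  */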
+
+/* Return true if BASE is an intrinsic unsupported by XTheadVector.
+   According to
+   https://github.com/XUANTIE-RV/thead-extension-spec/blob/master/xtheadvector/intrinsics.adoc#xtheadvector
+
+   XTheadVector lacks support for:
+   - Fractional LMUL types (checked separately via is_fractional_lmul)
+   - vlm/vsm
+   - vzext/vsext
+   - vaaddu/vasubu
+   - vfrsqrt7/vfrec7 (including the vfrec7_frm variant)
+   - vfslide1up/vfslide1down
+   - vluxseg/vsuxseg
+   - vluxei8/vluxei16/vluxei32/vluxei64
+   - vrgatherei16.  */
+
+static bool
+xthvector_unsupported_p (const function_base *base)
+{
+ return base == bases::vlm
+ || base == bases::vsm
+ || base == bases::vzext
+ || base == bases::vsext
+ || base == bases::vaaddu
+ || base == bases::vasubu
+ || base == bases::vfrsqrt7
+ || base == bases::vfrec7
+ || base == bases::vfrec7_frm
+ || base == bases::vfslide1up
+ || base == bases::vfslide1down
+ || base == bases::vluxseg
+ || base == bases::vsuxseg
+ || base == bases::vluxei8
+ || base == bases::vluxei16
+ || base == bases::vluxei32
+ || base == bases::vluxei64
+ || base == bases::vrgatherei16;
+}
+
+/* Return the partition for a function with required extension EXT.
+   Functions requiring plain VECTOR_EXT are split based on INSTANCE:
+   instances that XTheadVector cannot handle go to the NO_XTHEAD
+   partition.  */
+static rvv_builtin_partition
+get_builtin_partition (required_ext ext, const function_instance &instance)
+{
+ switch (ext)
+ {
+ case VECTOR_EXT:
+ if (is_fractional_lmul (instance.type.index)
+ || xthvector_unsupported_p (instance.base))
+ return RVV_PARTITION_VECTOR_NO_XTHEAD;
+ else
+ return RVV_PARTITION_VECTOR;
+ case VECTOR_EXT_NO_XTHEAD:
+ return RVV_PARTITION_VECTOR_NO_XTHEAD;
+ case XTHEADVECTOR_EXT:
+ return RVV_PARTITION_XTHEADVECTOR;
+ case ZVBB_EXT:
+ return RVV_PARTITION_ZVBB;
+ case ZVBB_OR_ZVKB_EXT:
+ return RVV_PARTITION_ZVBB_OR_ZVKB;
+ case ZVBC_EXT:
+ return RVV_PARTITION_ZVBC;
+ case ZVKG_EXT:
+ return RVV_PARTITION_ZVKG;
+ case ZVKNED_EXT:
+ return RVV_PARTITION_ZVKNED;
+ case ZVKNHA_OR_ZVKNHB_EXT:
+ return RVV_PARTITION_ZVKNHA_OR_ZVKNHB;
+ case ZVKNHB_EXT:
+ return RVV_PARTITION_ZVKNHB;
+ case ZVKSED_EXT:
+ return RVV_PARTITION_ZVKSED;
+ case ZVKSH_EXT:
+ return RVV_PARTITION_ZVKSH;
+ case ZVFBFMIN_EXT:
+ return RVV_PARTITION_ZVFBFMIN;
+ case ZVFBFWMA_EXT:
+ return RVV_PARTITION_ZVFBFWMA;
+ case XSFVQMACCQOQ_EXT:
+ return RVV_PARTITION_XSFVQMACCQOQ;
+ case XSFVQMACCDOD_EXT:
+ return RVV_PARTITION_XSFVQMACCDOD;
+ case XSFVFNRCLIPXFQF_EXT:
+ return RVV_PARTITION_XSFVFNRCLIPXFQF;
+ case XSFVCP_EXT:
+ return RVV_PARTITION_XSFVCP;
+ case XANDESVBFHCVT_EXT:
+ return RVV_PARTITION_XANDESVBFHCVT;
+ case XANDESVSINTLOAD_EXT:
+ return RVV_PARTITION_XANDESVSINTLOAD;
+ case XANDESVPACKFPH_EXT:
+ return RVV_PARTITION_XANDESVPACKFPH;
+ case XANDESVDOT_EXT:
+ return RVV_PARTITION_XANDESVDOT;
+ default:
+ gcc_unreachable ();
+ }
+}
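+
+/* For example, vle is registered with plain VECTOR_EXT in
+   riscv-vector-builtins-functions.def, so its vint32m1_t instance stays
+   in RVV_PARTITION_VECTOR while a fractional instance such as
+   vint32mf2_t lands in RVV_PARTITION_VECTOR_NO_XTHEAD via
+   is_fractional_lmul (illustrative types; any fractional-LMUL instance
+   behaves the same).  */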
/* All registered function decls, hashed on the function_instance
that they implement. This is used for looking up implementations of
enum required_ext required,
bool overloaded_p = false)
{
- unsigned int code = vec_safe_length (registered_functions);
- code = (code << RISCV_BUILTIN_SHIFT) + RISCV_BUILTIN_VECTOR;
+ /* Compute the partition for this function. The per-partition index
+ is determined by the current length of that partition's vector. */
+ rvv_builtin_partition ext_partition
+ = get_builtin_partition (required, instance);
+ unsigned int index = vec_safe_length (partition_functions[ext_partition]);
+
+ unsigned int code = (index << RVV_SUBCODE_SHIFT)
+ | (ext_partition << RVV_EXT_PARTITION_SHIFT)
+ | RISCV_BUILTIN_VECTOR;
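+
+  /* Illustrative layout, assuming RVV_EXT_PARTITION_SHIFT equals
+     RISCV_BUILTIN_SHIFT and RVV_SUBCODE_SHIFT equals
+     RVV_EXT_PARTITION_SHIFT + RVV_EXT_PARTITION_BITS:
+
+       [ per-partition index | partition | class ]
+
+     lookup_registered_function depends on this layout: it masks off
+     RVV_EXT_PARTITION_BITS to recover the partition and shifts the
+     remaining bits down to recover the index.  */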
/* We need to be able to generate placeholders to ensure that we have a
consistent numbering scheme for function codes between the C and C++
rfn.overload_name = overload_name ? xstrdup (overload_name) : NULL;
rfn.argument_types = argument_types;
rfn.overloaded_p = overloaded_p;
- rfn.required = required;
- vec_safe_push (registered_functions, &rfn);
+  /* Update the required extension based on the partition.  Functions
+     placed in the NO_XTHEAD partition because of fractional LMUL or an
+     XTheadVector-unsupported operation need VECTOR_EXT_NO_XTHEAD so
+     that diagnostics report the correct requirement.  */
+ if (ext_partition == RVV_PARTITION_VECTOR_NO_XTHEAD
+ && required == VECTOR_EXT)
+ rfn.required = VECTOR_EXT_NO_XTHEAD;
+ else
+ rfn.required = required;
+ vec_safe_push (partition_functions[ext_partition], &rfn);
return rfn;
}
builder.register_function_group (function_groups[i]);
}
+/* Find the registered_function with the given subcode (code without
+ the class bit), or return NULL. */
+static registered_function *
+lookup_registered_function (unsigned int subcode)
+{
+ unsigned int partition = subcode & ((1u << RVV_EXT_PARTITION_BITS) - 1);
+ unsigned int index = subcode >> RVV_EXT_PARTITION_BITS;
+
+ if (partition >= NUM_RVV_EXT_PARTITIONS)
+ return NULL;
+
+ vec<registered_function *, va_gc> *funcs = partition_functions[partition];
+ if (!funcs || index >= funcs->length ())
+ return NULL;
+
+ return (*funcs)[index];
+}
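+
+/* Worked example with hypothetical values: if RVV_EXT_PARTITION_BITS
+   were 5, subcode 0x64 would decode to partition 0x64 & 0x1f == 4 and
+   index 0x64 >> 5 == 3, i.e. the function registered at index 3 in
+   partition 4.  */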
+
/* Return the function decl with RVV function subcode CODE, or error_mark_node
if no such function exists. */
tree
builtin_decl (unsigned int code, bool)
{
- if (code >= vec_safe_length (registered_functions))
+ registered_function *rfn = lookup_registered_function (code);
+ if (!rfn)
return error_mark_node;
- return (*registered_functions)[code]->decl;
+ return rfn->decl;
}
/* Attempt to fold STMT, given that it's a call to the RVV function
gimple *
gimple_fold_builtin (unsigned int code, gimple_stmt_iterator *gsi, gcall *stmt)
{
- registered_function &rfn = *(*registered_functions)[code];
- return gimple_folder (rfn.instance, rfn.decl, gsi, stmt).fold ();
+ registered_function *rfn = lookup_registered_function (code);
+ if (!rfn)
+ return NULL;
+ return gimple_folder (rfn->instance, rfn->decl, gsi, stmt).fold ();
}
static bool
rtx
expand_builtin (unsigned int code, tree exp, rtx target)
{
- registered_function &rfn = *(*registered_functions)[code];
+ registered_function *rfn = lookup_registered_function (code);
+ if (!rfn)
+ {
+      error_at (EXPR_LOCATION (exp),
+		"unrecognized built-in function %qE", exp);
+ return target;
+ }
- if (!required_extensions_specified (rfn.required))
+ if (!required_extensions_specified (rfn->required))
{
error_at (EXPR_LOCATION (exp),
"built-in function %qE requires the %qs ISA extension",
exp,
- required_ext_to_isa_name (rfn.required));
+ required_ext_to_isa_name (rfn->required));
return target;
}
- if (!validate_instance_type_required_extensions (rfn.instance.type, exp))
+ if (!validate_instance_type_required_extensions (rfn->instance.type, exp))
return target;
- return function_expander (rfn.instance, rfn.decl, exp, target).expand ();
+ return function_expander (rfn->instance, rfn->decl, exp, target).expand ();
}
/* Perform any semantic checks needed for a call to the RVV function
check_builtin_call (location_t location, vec<location_t>, unsigned int code,
tree fndecl, unsigned int nargs, tree *args)
{
- const registered_function &rfn = *(*registered_functions)[code];
- return function_checker (location, rfn.instance, fndecl,
- TREE_TYPE (rfn.decl), nargs, args).check ();
+ registered_function *rfn = lookup_registered_function (code);
+ if (!rfn)
+ return false;
+ return function_checker (location, rfn->instance, fndecl,
+ TREE_TYPE (rfn->decl), nargs, args).check ();
}
tree
resolve_overloaded_builtin (location_t loc, unsigned int code, tree fndecl,
vec<tree, va_gc> *arglist)
{
- if (code >= vec_safe_length (registered_functions))
- return NULL_TREE;
-
- registered_function *rfun = (*registered_functions)[code];
+ registered_function *rfun = lookup_registered_function (code);
if (!rfun || !rfun->overloaded_p)
return NULL_TREE;
#endif
/* Disable modes if TARGET_MIN_VLEN == 32. */
-ENTRY (RVVMF64BI, TARGET_VECTOR_ELEN_64, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F8, 64)
-ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F4, 32)
-ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2 , 16)
+ENTRY (RVVMF64BI, TARGET_VECTOR_ELEN_64, LMUL_F8, 64)
+ENTRY (RVVMF32BI, true, LMUL_F4, 32)
+ENTRY (RVVMF16BI, true, LMUL_F2, 16)
ENTRY (RVVMF8BI, true, LMUL_1, 8)
ENTRY (RVVMF4BI, true, LMUL_2, 4)
ENTRY (RVVMF2BI, true, LMUL_4, 2)
ENTRY (RVVM4QI, true, LMUL_4, 2)
ENTRY (RVVM2QI, true, LMUL_2, 4)
ENTRY (RVVM1QI, true, LMUL_1, 8)
-ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
-ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
+ENTRY (RVVMF2QI, true, LMUL_F2, 16)
+ENTRY (RVVMF4QI, true, LMUL_F4, 32)
+ENTRY (RVVMF8QI, TARGET_VECTOR_ELEN_64, LMUL_F8, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8HI, true, LMUL_8, 2)
ENTRY (RVVM4HI, true, LMUL_4, 4)
ENTRY (RVVM2HI, true, LMUL_2, 8)
ENTRY (RVVM1HI, true, LMUL_1, 16)
-ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF2HI, true, LMUL_F2, 32)
+ENTRY (RVVMF4HI, TARGET_VECTOR_ELEN_64, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_BF_16. */
ENTRY (RVVM8BF, TARGET_VECTOR_ELEN_BF_16, LMUL_8, 2)
ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
-ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_F2, 32)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, LMUL_F4, 64)
/* Disable modes if TARGET_MIN_VLEN == 32. */
ENTRY (RVVM8SI, true, LMUL_8, 4)
ENTRY (RVVM4SI, true, LMUL_4, 8)
ENTRY (RVVM2SI, true, LMUL_2, 16)
ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_VECTOR_ELEN_64, LMUL_F2, 64)
/* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32. */
ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, LMUL_F2, 64)
/* Disable modes if !TARGET_VECTOR_ELEN_64. */
ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
#endif
TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x8QI, true, RVVMF2QI, 8, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x8QI, true, RVVMF4QI, 8, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 8, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x7QI, true, RVVMF2QI, 7, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x7QI, true, RVVMF4QI, 7, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 7, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x6QI, true, RVVMF2QI, 6, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x6QI, true, RVVMF4QI, 6, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 6, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x5QI, true, RVVMF2QI, 5, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x5QI, true, RVVMF4QI, 5, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 5, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x4QI, true, RVVMF2QI, 4, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x4QI, true, RVVMF4QI, 4, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 4, LMUL_F8, 64)
TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x3QI, true, RVVMF2QI, 3, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x3QI, true, RVVMF4QI, 3, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 3, LMUL_F8, 64)
TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x2QI, true, RVVMF2QI, 2, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x2QI, true, RVVMF4QI, 2, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_VECTOR_ELEN_64, RVVMF8QI, 2, LMUL_F8, 64)
TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HI, true, RVVMF2HI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HI, true, RVVMF2HI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HI, true, RVVMF2HI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HI, true, RVVMF2HI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HI, true, RVVMF2HI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HI, true, RVVMF2HI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HI, true, RVVMF2HI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_VECTOR_ELEN_64, RVVMF4HI, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8BF, TARGET_VECTOR_ELEN_BF_16, RVVM1BF, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVMF2x8BF, TARGET_VECTOR_ELEN_BF_16, RVVMF2BF, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVMF4x2BF, TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32, RVVMF4BF, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 8, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 7, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 6, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 5, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 4, LMUL_F4, 64)
TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 3, LMUL_F4, 64)
TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64, RVVMF4HF, 2, LMUL_F4, 64)
TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_VECTOR_ELEN_64, RVVMF2SI, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 8, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 7, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 6, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 5, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 4, LMUL_F2, 32)
TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 3, LMUL_F2, 32)
TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64, RVVMF2SF, 2, LMUL_F2, 32)
TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)