(and (match_code "const_vector")
(match_test "riscv_vector::const_vec_all_same_in_range_p (op, -16, 15)")))
+(define_constraint "vj"
+ "A vector negated 5-bit signed immediate."
+ (and (match_code "const_vector")
+ (match_test "riscv_vector::const_vec_all_same_in_range_p (op, -15, 16)")))
+
+(define_constraint "vk"
+ "A vector 5-bit unsigned immediate."
+ (and (match_code "const_vector")
+ (match_test "riscv_vector::const_vec_all_same_in_range_p (op, 0, 31)")))
+
(define_constraint "Wc0"
"@internal
A constraint that matches a vector of immediate all zeros."
(xor "xor")
(and "and")
(plus "add")
- (minus "sub")])
+ (minus "sub")
+ (smin "smin")
+ (smax "smax")
+ (umin "umin")
+ (umax "umax")
+ (mult "mul")])
;; <or_optab> code attributes
(define_code_attr or_optab [(ior "ior")
(xor "xor")
(and "and")
(plus "add")
- (minus "sub")])
+ (minus "sub")
+ (smin "min")
+ (smax "max")
+ (umin "minu")
+ (umax "maxu")
+ (mult "mul")])
; atomics code attribute
(define_code_attr atomic_optab
(match_test "GET_CODE (op) == UNSPEC
&& (XINT (op, 1) == UNSPEC_VUNDEF)"))))
+(define_predicate "vector_arith_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_vector")
+ (match_test "riscv_vector::const_vec_all_same_in_range_p (op, -16, 15)"))))
+
+(define_predicate "vector_neg_arith_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_vector")
+ (match_test "riscv_vector::const_vec_all_same_in_range_p (op, -15, 16)"))))
+
+(define_predicate "vector_shift_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_vector")
+ (match_test "riscv_vector::const_vec_all_same_in_range_p (op, 0, 31)"))))
+
(define_special_predicate "pmode_reg_or_0_operand"
(ior (match_operand 0 "const_0_operand")
(match_operand 0 "pmode_register_operand")))
}
};
+/* Implements
+ * vadd/vsub/vrsub/vand/vor/vxor/vsll/vsra/vsrl/vmin/vmax/vminu/vmaxu/
+ * vdiv/vrem/vdivu/vremu/vsadd/vsaddu/vssub/vssubu.  */
+template<rtx_code CODE>
+class binop : public function_base
+{
+public:
+ rtx expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
+ }
+};
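+
+/* For example, binop<PLUS> expands through code_for_pred (PLUS,
+   e.vector_mode ()) to the @pred_<optab><mode> insn added in vector.md
+   (pred_add<mode> for PLUS), so each operation below only differs in its
+   rtx_code template argument; the mask, merge and vl operands all come from
+   that common predicated pattern.  */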
+
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei64_obj;
+static CONSTEXPR const binop<PLUS> vadd_obj;
+static CONSTEXPR const binop<MINUS> vsub_obj;
+static CONSTEXPR const binop<MINUS> vrsub_obj;
+static CONSTEXPR const binop<AND> vand_obj;
+static CONSTEXPR const binop<IOR> vor_obj;
+static CONSTEXPR const binop<XOR> vxor_obj;
+static CONSTEXPR const binop<ASHIFT> vsll_obj;
+static CONSTEXPR const binop<ASHIFTRT> vsra_obj;
+static CONSTEXPR const binop<LSHIFTRT> vsrl_obj;
+static CONSTEXPR const binop<SMIN> vmin_obj;
+static CONSTEXPR const binop<SMAX> vmax_obj;
+static CONSTEXPR const binop<UMIN> vminu_obj;
+static CONSTEXPR const binop<UMAX> vmaxu_obj;
+static CONSTEXPR const binop<MULT> vmul_obj;
+static CONSTEXPR const binop<DIV> vdiv_obj;
+static CONSTEXPR const binop<MOD> vrem_obj;
+static CONSTEXPR const binop<UDIV> vdivu_obj;
+static CONSTEXPR const binop<UMOD> vremu_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
BASE (vsoxei16)
BASE (vsoxei32)
BASE (vsoxei64)
+BASE (vadd)
+BASE (vsub)
+BASE (vand)
+BASE (vor)
+BASE (vxor)
+BASE (vsll)
+BASE (vsra)
+BASE (vsrl)
+BASE (vmin)
+BASE (vmax)
+BASE (vminu)
+BASE (vmaxu)
+BASE (vmul)
+BASE (vdiv)
+BASE (vrem)
+BASE (vdivu)
+BASE (vremu)
} // end namespace riscv_vector
extern const function_base *const vsoxei16;
extern const function_base *const vsoxei32;
extern const function_base *const vsoxei64;
+extern const function_base *const vadd;
+extern const function_base *const vsub;
+extern const function_base *const vand;
+extern const function_base *const vor;
+extern const function_base *const vxor;
+extern const function_base *const vsll;
+extern const function_base *const vsra;
+extern const function_base *const vsrl;
+extern const function_base *const vmin;
+extern const function_base *const vmax;
+extern const function_base *const vminu;
+extern const function_base *const vmaxu;
+extern const function_base *const vmul;
+extern const function_base *const vdiv;
+extern const function_base *const vrem;
+extern const function_base *const vdivu;
+extern const function_base *const vremu;
}
} // end namespace riscv_vector
DEF_RVV_FUNCTION (vsoxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint16_index_ops)
DEF_RVV_FUNCTION (vsoxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint32_index_ops)
DEF_RVV_FUNCTION (vsoxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint64_index_ops)
+/* 11. Vector Integer Arithmetic Instructions. */
+DEF_RVV_FUNCTION (vadd, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vsub, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vand, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vor, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vxor, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vsll, binop, full_preds, iu_shift_vvv_ops)
+DEF_RVV_FUNCTION (vsra, binop, full_preds, iu_shift_vvv_ops)
+DEF_RVV_FUNCTION (vsrl, binop, full_preds, iu_shift_vvv_ops)
+DEF_RVV_FUNCTION (vmin, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vmax, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vminu, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vmaxu, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vmul, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vdiv, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vrem, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vdivu, binop, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vremu, binop, full_preds, iu_vvv_ops)
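+
+/* As a usage sketch (assuming the vadd_vv_<type> naming produced by the binop
+   shape and the rvv-intrinsic-doc): with riscv_vector.h included, the entries
+   above enable calls such as
+
+     vint32m1_t f (vint32m1_t a, vint32m1_t b, size_t vl)
+     {
+       return vadd_vv_i32m1 (a, b, vl);
+     }  */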
#undef DEF_RVV_FUNCTION
}
};
+/* binop_def class. */
+struct binop_def : public build_base
+{
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ b.append_base_name (instance.base_name);
+ /* Non-overloaded API: vop --> vop_<op>_<type>.  */
+ if (!overloaded_p)
+ {
+ /* vop --> vop_<op>.  */
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ /* vop_<op> --> vop_<op>_<type>.  */
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+ /* According to the rvv-intrinsic-doc, the "_m" suffix is not added
+ to the C++ overloaded API for the masked (vop_m) variants. */
+ if (overloaded_p && instance.pred == PRED_TYPE_m)
+ return b.finish_name ();
+ b.append_name (predication_suffixes[instance.pred]);
+ return b.finish_name ();
+ }
+};
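+
+/* For instance, taking vadd with the "vv" operand suffix and the int32m1 type
+   (assuming the usual _vv/_i32m1 suffix spellings), get_name above produces
+   "vadd_vv_i32m1" (and "vadd_vv_i32m1_m" when masked), while the overloaded
+   C++ name stays "vadd" in both cases because the "_m" suffix is dropped by
+   the early return.  */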
+
SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
SHAPE(indexed_loadstore, indexed_loadstore)
+SHAPE(binop, binop)
} // end namespace riscv_vector
extern const function_shape *const vsetvlmax;
extern const function_shape *const loadstore;
extern const function_shape *const indexed_loadstore;
+extern const function_shape *const binop;
}
} // end namespace riscv_vector
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of all integer types will be registered for intrinsic functions. */
+static const rvv_type_info iu_ops[] = {
+#define DEF_RVV_I_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#define DEF_RVV_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of all types will be registered for intrinsic functions. */
static const rvv_type_info all_ops[] = {
#define DEF_RVV_I_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
rvv_arg_type_info (RVV_BASE_uint64_index),
rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+/* A list of args for vector_type func (vector_type, vector_type) function. */
+static CONSTEXPR const rvv_arg_type_info vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (vector_type, shift_type) function. */
+static CONSTEXPR const rvv_arg_type_info shift_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_shift_vector), rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
rvv_arg_type_info (RVV_BASE_void), /* Return type */
scalar_ptr_uint64_index_args /* Args */};
+/* Static operand information for vector_type func (vector_type, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu_vvv_ops
+ = {iu_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vv_args /* Args */};
+
+/* Static operand information for vector_type func (vector_type, shift_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info iu_shift_vvv_ops
+ = {iu_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ shift_vv_args /* Args */};
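+
+/* As an illustration of how these records combine (naming per the
+   rvv-intrinsic-doc): with the int32m1 type, iu_vvv_ops describes
+   vint32m1_t vadd_vv_i32m1 (vint32m1_t, vint32m1_t, size_t vl), while
+   iu_shift_vvv_ops gives the vector-vector shifts an unsigned shift-amount
+   vector, e.g. vint32m1_t vsll_vv_i32m1 (vint32m1_t, vuint32m1_t, size_t vl).  */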
+
/* A list of all RVV intrinsic functions. */
static function_group_info function_groups[] = {
#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \
inner_mode = E_DImode;
unsigned_p = true;
break;
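+ /* The shift-amount vector type has the same element width as the operand
+ vector but is always unsigned (the intrinsics pass the shift counts as an
+ unsigned vector of the same SEW). */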
+ case RVV_BASE_shift_vector:
+ inner_mode = GET_MODE_INNER (TYPE_MODE (type));
+ unsigned_p = true;
+ break;
default:
return NUM_VECTOR_TYPES;
}
case RVV_BASE_uint16_index:
case RVV_BASE_uint32_index:
case RVV_BASE_uint64_index:
+ case RVV_BASE_shift_vector:
if (get_base_vector_type (builtin_types[type_idx].vector)
!= NUM_VECTOR_TYPES)
return builtin_types[get_base_vector_type (
RVV_BASE_uint16_index,
RVV_BASE_uint32_index,
RVV_BASE_uint64_index,
+ RVV_BASE_shift_vector,
NUM_BASE_TYPES
};
output_operand_lossage ("invalid vector constant");
break;
}
+ case 'V': {
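+ /* For example, a CONST_VECTOR duplicating -5 satisfies constraint "vj",
+ and "%V" prints 5, so a subtract of such a constant can be emitted as
+ vadd.vi with the negated immediate. */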
+ rtx elt;
+ if (!const_vec_duplicate_p (op, &elt))
+ output_operand_lossage ("invalid vector constant");
+ else if (satisfies_constraint_vj (op))
+ asm_fprintf (file, "%wd", -INTVAL (elt));
+ else
+ output_operand_lossage ("invalid vector constant");
+ break;
+ }
case 'm': {
if (riscv_v_ext_vector_mode_p (mode))
{
(VNx8DF "TARGET_VECTOR_ELEN_FP_64")
])
+(define_mode_iterator VI [
+ VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
+ VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
+ VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx1DI "TARGET_MIN_VLEN > 32") (VNx2DI "TARGET_MIN_VLEN > 32")
+ (VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
+])
+
(define_mode_iterator VNX1_QHSD [
VNx1QI VNx1HI VNx1SI
(VNx1DI "TARGET_MIN_VLEN > 32")
(define_int_attr order [
(UNSPEC_ORDERED "o") (UNSPEC_UNORDERED "u")
])
+
+(define_code_iterator any_int_binop [plus minus and ior xor ashift ashiftrt lshiftrt
+ smax umax smin umin mult div udiv mod umod
+])
+
+(define_code_attr binop_rhs1_predicate [
+ (plus "register_operand")
+ (minus "vector_arith_operand")
+ (ior "register_operand")
+ (xor "register_operand")
+ (and "register_operand")
+ (ashift "register_operand")
+ (ashiftrt "register_operand")
+ (lshiftrt "register_operand")
+ (smin "register_operand")
+ (smax "register_operand")
+ (umin "register_operand")
+ (umax "register_operand")
+ (mult "register_operand")
+ (div "register_operand")
+ (mod "register_operand")
+ (udiv "register_operand")
+ (umod "register_operand")])
+
+(define_code_attr binop_rhs2_predicate [
+ (plus "vector_arith_operand")
+ (minus "vector_neg_arith_operand")
+ (ior "vector_arith_operand")
+ (xor "vector_arith_operand")
+ (and "vector_arith_operand")
+ (ashift "vector_shift_operand")
+ (ashiftrt "vector_shift_operand")
+ (lshiftrt "vector_shift_operand")
+ (smin "register_operand")
+ (smax "register_operand")
+ (umin "register_operand")
+ (umax "register_operand")
+ (mult "register_operand")
+ (div "register_operand")
+ (mod "register_operand")
+ (udiv "register_operand")
+ (umod "register_operand")])
+
+(define_code_attr binop_rhs1_constraint [
+ (plus "vr,vr,vr")
+ (minus "vr,vr,vi")
+ (ior "vr,vr,vr")
+ (xor "vr,vr,vr")
+ (and "vr,vr,vr")
+ (ashift "vr,vr,vr")
+ (ashiftrt "vr,vr,vr")
+ (lshiftrt "vr,vr,vr")
+ (smin "vr,vr,vr")
+ (smax "vr,vr,vr")
+ (umin "vr,vr,vr")
+ (umax "vr,vr,vr")
+ (mult "vr,vr,vr")
+ (div "vr,vr,vr")
+ (mod "vr,vr,vr")
+ (udiv "vr,vr,vr")
+ (umod "vr,vr,vr")])
+
+(define_code_attr binop_rhs2_constraint [
+ (plus "vr,vi,vr")
+ (minus "vr,vj,vr")
+ (ior "vr,vi,vr")
+ (xor "vr,vi,vr")
+ (and "vr,vi,vr")
+ (ashift "vr,vk,vr")
+ (ashiftrt "vr,vk,vr")
+ (lshiftrt "vr,vk,vr")
+ (smin "vr,vr,vr")
+ (smax "vr,vr,vr")
+ (umin "vr,vr,vr")
+ (umax "vr,vr,vr")
+ (mult "vr,vr,vr")
+ (div "vr,vr,vr")
+ (mod "vr,vr,vr")
+ (udiv "vr,vr,vr")
+ (umod "vr,vr,vr")])
+
+(define_code_attr int_binop_insn_type [
+ (plus "vialu")
+ (minus "vialu")
+ (ior "vialu")
+ (xor "vialu")
+ (and "vialu")
+ (ashift "vshift")
+ (ashiftrt "vshift")
+ (lshiftrt "vshift")
+ (smin "vicmp")
+ (smax "vicmp")
+ (umin "vicmp")
+ (umax "vicmp")
+ (mult "vimul")
+ (div "vidiv")
+ (mod "vidiv")
+ (udiv "vidiv")
+ (umod "vidiv")])
+
+;; <binop_alt1_insn> expands to the insn mnemonic used for constraint alternative 1,
+;; where the second source may be an immediate: minus is emitted as vadd with the
+;; negated immediate; codes without an immediate form keep their .vv mnemonic.
+(define_code_attr binop_alt1_insn [(ashift "sll.vi")
+ (ashiftrt "sra.vi")
+ (lshiftrt "srl.vi")
+ (div "div.vv")
+ (mod "rem.vv")
+ (udiv "divu.vv")
+ (umod "remu.vv")
+ (ior "or.vv")
+ (xor "xor.vv")
+ (and "and.vv")
+ (plus "add.vi")
+ (minus "add.vi")
+ (smin "min.vv")
+ (smax "max.vv")
+ (umin "minu.vv")
+ (umax "maxu.vv")
+ (mult "mul.vv")])
+
+;; <binop_alt2_insn> expands to the insn mnemonic used for constraint alternative 2,
+;; where the first source may be an immediate: minus is reversed and emitted as
+;; vrsub; the other codes keep their .vv mnemonic.
+(define_code_attr binop_alt2_insn [(ashift "sll.vv")
+ (ashiftrt "sra.vv")
+ (lshiftrt "srl.vv")
+ (div "div.vv")
+ (mod "rem.vv")
+ (udiv "divu.vv")
+ (umod "remu.vv")
+ (ior "or.vv")
+ (xor "xor.vv")
+ (and "and.vv")
+ (plus "add.vv")
+ (minus "rsub.vi")
+ (smin "min.vv")
+ (smax "max.vv")
+ (umin "minu.vv")
+ (umax "maxu.vv")
+ (mult "mul.vv")])
+
+(define_code_attr binop_alt1_op [(ashift "%3,%4")
+ (ashiftrt "%3,%4")
+ (lshiftrt "%3,%4")
+ (div "%3,%4")
+ (mod "%3,%4")
+ (udiv "%3,%4")
+ (umod "%3,%4")
+ (ior "%3,%4")
+ (xor "%3,%4")
+ (and "%3,%4")
+ (plus "%3,%4")
+ (minus "%3,%V4")
+ (smin "%3,%4")
+ (smax "%3,%4")
+ (umin "%3,%4")
+ (umax "%3,%4")
+ (mult "%3,%4")])
+
+(define_code_attr binop_alt2_op [(ashift "%3,%4")
+ (ashiftrt "%3,%4")
+ (lshiftrt "%3,%4")
+ (div "%3,%4")
+ (mod "%3,%4")
+ (udiv "%3,%4")
+ (umod "%3,%4")
+ (ior "%3,%4")
+ (xor "%3,%4")
+ (and "%3,%4")
+ (plus "%3,%4")
+ (minus "%4,%v3")
+ (smin "%3,%4")
+ (smax "%3,%4")
+ (umin "%3,%4")
+ (umax "%3,%4")
+ (mult "%3,%4")])
;; It is valid for instruction that require sew/lmul ratio.
(define_attr "ratio" ""
- (cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox")
+ (cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox,\
+ vialu,vshift,vicmp,vimul,vidiv,vsalu")
(const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "VNx1QI,VNx1BI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
;; The index of operand[] to get the merge op.
(define_attr "merge_op_idx" ""
- (cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox")
+ (cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,\
+ vialu,vshift,vicmp,vimul,vidiv,vsalu")
(const_int 2)]
(const_int INVALID_ATTRIBUTE)))
(const_int 5)
(const_int 4))
- (eq_attr "type" "vldux,vldox")
+ (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu")
(const_int 5)]
(const_int INVALID_ATTRIBUTE)))
(symbol_ref "riscv_vector::get_ta(operands[6])")
(symbol_ref "riscv_vector::get_ta(operands[5])"))
- (eq_attr "type" "vldux,vldox")
+ (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu")
(symbol_ref "riscv_vector::get_ta(operands[6])")]
(const_int INVALID_ATTRIBUTE)))
(symbol_ref "riscv_vector::get_ma(operands[7])")
(symbol_ref "riscv_vector::get_ma(operands[6])"))
- (eq_attr "type" "vldux,vldox")
+ (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu")
(symbol_ref "riscv_vector::get_ma(operands[7])")]
(const_int INVALID_ATTRIBUTE)))
(const_int INVALID_ATTRIBUTE)
(symbol_ref "INTVAL (operands[7])"))
- (eq_attr "type" "vldux,vldox")
+ (eq_attr "type" "vldux,vldox,vialu,vshift,vicmp,vimul,vidiv,vsalu")
(symbol_ref "INTVAL (operands[8])")
(eq_attr "type" "vstux,vstox")
(symbol_ref "INTVAL (operands[5])")]
"vs<order>xei<VNX64_Q:sew>.v\t%3,(%1),%2%p0"
[(set_attr "type" "vst<order>x")
(set_attr "mode" "<VNX64_Q:MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated integer binary operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 11.1 Vector Single-Width Integer Add and Subtract
+;; - 11.5 Vector Bitwise Logical Instructions
+;; - 11.6 Vector Single-Width Bit Shift Instructions
+;; - 11.9 Vector Integer Min/Max Instructions
+;; - 11.10 Vector Single-Width Integer Multiply Instructions
+;; - 11.11 Vector Integer Divide Instructions
+;; -------------------------------------------------------------------------------
+
+(define_insn "@pred_<optab><mode>"
+ [(set (match_operand:VI 0 "register_operand" "=vr, vr, vr")
+ (if_then_else:VI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vmWc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (any_int_binop:VI
+ (match_operand:VI 3 "<binop_rhs1_predicate>" "<binop_rhs1_constraint>")
+ (match_operand:VI 4 "<binop_rhs2_predicate>" "<binop_rhs2_constraint>"))
+ (match_operand:VI 2 "vector_merge_operand" " 0vu, 0vu, 0vu")))]
+ "TARGET_VECTOR"
+ "@
+ v<insn>.vv\t%0,%3,%4%p1
+ v<binop_alt1_insn>\t%0,<binop_alt1_op>%p1
+ v<binop_alt2_insn>\t%0,<binop_alt2_op>%p1"
+ [(set_attr "type" "<int_binop_insn_type>")
+ (set_attr "mode" "<MODE>")])