(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_single_widen_<plus_minus:optab><any_extend:su><mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=vr, vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd,vd, vr, vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,vm,Wc1,Wc1")
+ (match_operand 5 "vector_length_operand" " rK,rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus_minus:VWEXTI
- (match_operand:VWEXTI 3 "register_operand" " vr, vr")
+ (match_operand:VWEXTI 3 "register_operand" " vr,vr, vr, vr")
(any_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ,rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vw<plus_minus:insn><any_extend:u>.wx\t%0,%3,%z4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
+(define_insn "@pred_single_widen_add<any_extend:su><mode>_extended_scalar"
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd,vd, vr, vr")
+ (if_then_else:VWEXTI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,vm,Wc1,Wc1")
+ (match_operand 5 "vector_length_operand" " rK,rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (plus:VWEXTI
+ (vec_duplicate:VWEXTI
+ (any_extend:<VEL>
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ,rJ, rJ, rJ")))
+ (match_operand:VWEXTI 3 "register_operand" " vr,vr, vr, vr"))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
+ "TARGET_VECTOR"
+ "vwadd<any_extend:u>.wx\t%0,%3,%z4%p1"
+ [(set_attr "type" "viwalu")
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+
+(define_insn "@pred_single_widen_sub<any_extend:su><mode>_extended_scalar"
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd,vd, vr, vr")
+ (if_then_else:VWEXTI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,vm,Wc1,Wc1")
+ (match_operand 5 "vector_length_operand" " rK,rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (minus:VWEXTI
+ (match_operand:VWEXTI 3 "register_operand" " vr,vr, vr, vr")
+ (vec_duplicate:VWEXTI
+ (any_extend:<VEL>
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ,rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0, vu, 0")))]
+ "TARGET_VECTOR"
+ "vwsub<any_extend:u>.wx\t%0,%3,%z4%p1"
+ [(set_attr "type" "viwalu")
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+
(define_insn "@pred_widen_mulsu<mode>"
[(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
(if_then_else:VWEXTI
return __riscv_vfwadd_wf_f64m8_m (vm, vs2, rs1, vl);
}
+vint64m8_t
+test_vwadd_wx_i64m8_m (vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl)
+{
+ return __riscv_vwadd_wx_i64m8_m (vm, vs2, rs1, vl);
+}
+
+vint64m8_t
+test_vwsub_wx_i64m8_m (vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl)
+{
+ return __riscv_vwsub_wx_i64m8_m (vm, vs2, rs1, vl);
+}
+
char global_memory[1024];
void *fake_memory = (void *) global_memory;
__riscv_vundefined_f64m8 (), 1.0, __riscv_vsetvlmax_e64m8 ());
asm volatile ("" ::"vr"(vfwadd_wf_f64m8_m_vd) : "memory");
+ asm volatile ("fence" ::: "memory");
+ vint64m8_t vwadd_wx_i64m8_m_vd = test_vwadd_wx_i64m8_m (
+ __riscv_vreinterpret_v_i8m1_b8 (__riscv_vundefined_i8m1 ()),
+    __riscv_vundefined_i64m8 (), 1, __riscv_vsetvlmax_e64m8 ());
+ asm volatile ("" ::"vr"(vwadd_wx_i64m8_m_vd) : "memory");
+
+ asm volatile ("fence" ::: "memory");
+ vint64m8_t vwsub_wx_i64m8_m_vd = test_vwsub_wx_i64m8_m (
+ __riscv_vreinterpret_v_i8m1_b8 (__riscv_vundefined_i8m1 ()),
+    __riscv_vundefined_i64m8 (), 1, __riscv_vsetvlmax_e64m8 ());
+ asm volatile ("" ::"vr"(vwsub_wx_i64m8_m_vd) : "memory");
+
return 0;
}
/* { dg-final { scan-assembler-not "vfwadd.wf\tv0.*v0" } } */
+/* { dg-final { scan-assembler-not "vwadd.wx\tv0.*v0" } } */
+/* { dg-final { scan-assembler-not "vwsub.wx\tv0.*v0" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-add-options riscv_v } */
+/* { dg-additional-options "-std=gnu99 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include <stdint.h>
+#include <riscv_vector.h>
+
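+/* Check the code generated for the masked vwadd.wx and vwsub.wx
+   intrinsics: a vsetvli for the narrow element type followed by the
+   masked instruction.  */
+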
+/*
+** vwadd_wx_i64m8_m:
+** vsetvli\s+zero,[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]
+** vwadd\.wx\tv8,v8,a0,v0.t
+** ret
+*/
+vint64m8_t
+vwadd_wx_i64m8_m (vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl)
+{
+ return __riscv_vwadd_wx_i64m8_m (vm, vs2, rs1, vl);
+}
+
+/*
+** vwsub_wx_i64m8_m:
+** vsetvli\s+zero,[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]
+** vwsub\.wx\tv8,v8,a0,v0.t
+** ret
+*/
+vint64m8_t
+vwsub_wx_i64m8_m (vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl)
+{
+ return __riscv_vwsub_wx_i64m8_m (vm, vs2, rs1, vl);
+}
+
+/*
+** vwadd_wx_i32m8_m:
+** ...
+** vsetvli\s+zero,[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]
+** vwadd\.wx\tv8,v8,a5,v0.t
+** ret
+*/
+
+extern int8_t bla;
+
+vint32m8_t
+vwadd_wx_i32m8_m (vbool4_t vm, vint32m8_t vs2, int16_t rs1, size_t vl)
+{
+ return __riscv_vwadd_wx_i32m8_m (vm, vs2, bla, vl);
+}
+
+/* { dg-final { check-function-bodies "**" "" } } */