}
)
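+
+;; Use NBSL for unpredicated vector NOR, including Advanced SIMD modes.
+;; NBSL Zdn, Zdn, Zm, Zk computes ~((Zdn & Zk) | (Zm & ~Zk)); using the
+;; first input as the select mask Zk gives ~(%1 | %2), i.e. vector NOR.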
+(define_insn "*aarch64_sve2_unpred_nor<mode>"
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (and:VDQ_I
+ (not:VDQ_I
+ (match_operand:VDQ_I 1 "register_operand"))
+ (not:VDQ_I
+ (match_operand:VDQ_I 2 "register_operand"))))]
+ "TARGET_SVE2"
+ {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] nbsl\t%Z0.d, %Z0.d, %Z2.d, %Z0.d
+ [ ?&w , w , w ; yes ] movprfx\t%Z0, %Z1\;nbsl\t%Z0.d, %Z0.d, %Z2.d, %Z1.d
+ }
+)
+
;; Use NBSL for vector NAND.
(define_insn_and_rewrite "*aarch64_sve2_nand<mode>"
[(set (match_operand:SVE_FULL_I 0 "register_operand")
}
)
+;; Same as above but unpredicated and including Advanced SIMD modes.
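+;; Here the second input also serves as the select mask, so NBSL
+;; computes ~((%1 & %2) | (%2 & ~%2)) == ~(%1 & %2), i.e. vector NAND.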
+(define_insn "*aarch64_sve2_nand_unpred<mode>"
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (ior:VDQ_I
+ (not:VDQ_I
+ (match_operand:VDQ_I 1 "register_operand"))
+ (not:VDQ_I
+ (match_operand:VDQ_I 2 "register_operand"))))]
+ "TARGET_SVE2"
+ {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+ [ w , 0 , w ; * ] nbsl\t%Z0.d, %Z0.d, %Z2.d, %Z2.d
+ [ ?&w , w , w ; yes ] movprfx\t%Z0, %Z1\;nbsl\t%Z0.d, %Z0.d, %Z2.d, %Z2.d
+ }
+)
+
;; Unpredicated bitwise select.
;; (op3 ? bsl_mov : bsl_dup) == (((bsl_mov ^ bsl_dup) & op3) ^ bsl_dup)
(define_expand "@aarch64_sve2_bsl<mode>"
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+#define NAND(x, y) (~((x) & (y)))
+#define NOR(x, y) (~((x) | (y)))
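+
+/* The SVE z registers extend the Advanced SIMD v registers, so NBSL in
+   its .d form operates directly on Neon values held in v0-v31.  The
+   unused leading parameter in the *_mp variants occupies the return
+   register v0/z0, so the result cannot be tied to either input and the
+   movprfx alternative must be used.  */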
+
+/*
+** nand_d:
+**	nbsl	z0.d, z0.d, z1.d, z1.d
+**	ret
+*/
+uint32x2_t nand_d(uint32x2_t a, uint32x2_t b) { return NAND(a, b); }
+
+/*
+** nand_d_mp:
+**	movprfx	z0, z1
+**	nbsl	z0.d, z0.d, z2.d, z2.d
+**	ret
+*/
+uint32x2_t nand_d_mp(uint32x2_t c, uint32x2_t a, uint32x2_t b) { return NAND(a, b); }
+
+/*
+** nor_d:
+**	nbsl	z0.d, z0.d, z1.d, z0.d
+**	ret
+*/
+uint32x2_t nor_d(uint32x2_t a, uint32x2_t b) { return NOR(a, b); }
+
+/*
+** nor_d_mp:
+**	movprfx	z0, z1
+**	nbsl	z0.d, z0.d, z2.d, z1.d
+**	ret
+*/
+uint32x2_t nor_d_mp(uint32x2_t c, uint32x2_t a, uint32x2_t b) { return NOR(a, b); }
+
+/*
+** nand_q:
+**	nbsl	z0.d, z0.d, z1.d, z1.d
+**	ret
+*/
+uint64x2_t nand_q(uint64x2_t a, uint64x2_t b) { return NAND(a, b); }
+
+/*
+** nand_q_mp:
+**	movprfx	z0, z1
+**	nbsl	z0.d, z0.d, z2.d, z2.d
+**	ret
+*/
+uint32x4_t nand_q_mp(uint32x4_t c, uint32x4_t a, uint32x4_t b) { return NAND(a, b); }
+
+/*
+** nor_q:
+**	nbsl	z0.d, z0.d, z1.d, z0.d
+**	ret
+*/
+uint64x2_t nor_q(uint64x2_t a, uint64x2_t b) { return NOR(a, b); }
+
+/*
+** nor_q_mp:
+**	movprfx	z0, z1
+**	nbsl	z0.d, z0.d, z2.d, z1.d
+**	ret
+*/
+uint32x4_t nor_q_mp(uint32x4_t c, uint32x4_t a, uint32x4_t b) { return NOR(a, b); }