(set_attr "type" "alu, alu, msklog")
(set_attr "mode" "<MODE>")])
+;; ~(a ^ b) for HI/SI/DImode.  Kept as one insn until after reload so a
+;; mask-register destination can be turned into a single kxnor (mask
+;; logic insns do not clobber FLAGS); GPR/mem destinations are split
+;; into xor (clobbering FLAGS) followed by not.
+(define_insn_and_split "*notxor<mode>_1"
+ [(set (match_operand:SWI248 0 "nonimmediate_operand" "=rm,r,?k")
+ (not:SWI248
+ (xor:SWI248
+ (match_operand:SWI248 1 "nonimmediate_operand" "%0,0,k")
+ (match_operand:SWI248 2 "<general_operand>" "r<i>,<m>,k"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (XOR, <MODE>mode, operands)"
+ "#"
+ "&& reload_completed"
+ [(parallel
+ [(set (match_dup 0)
+ (xor:SWI248 (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0)
+ (not:SWI248 (match_dup 0)))]
+{
+  /* Alternative 0 allows a memory destination ("=rm"), so operands[0]
+     may be a MEM here; REGNO is only valid on a REG.  Use MASK_REG_P,
+     which checks REG_P first — consistent with *notxorqi_1 below.  */
+  if (MASK_REG_P (operands[0]))
+ {
+ emit_insn (gen_kxnor<mode> (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}
+ [(set (attr "isa")
+ (cond [(eq_attr "alternative" "2")
+ (if_then_else (eq_attr "mode" "SI,DI")
+ (const_string "avx512bw")
+ (const_string "avx512f"))
+ ]
+ (const_string "*")))
+ (set_attr "type" "alu, alu, msklog")
+ (set_attr "mode" "<MODE>")])
+
(define_insn_and_split "*iordi_1_bts"
[(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
(ior:DI
(symbol_ref "!TARGET_PARTIAL_REG_STALL")]
(symbol_ref "true")))])
+;; QImode variant of *notxor<mode>_1: ~(a ^ b) kept whole until after
+;; reload, then split to xor+not for GPR/mem destinations, or emitted
+;; as a single kxnor for a mask-register destination.
+(define_insn_and_split "*notxorqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,r,?k")
+ (not:QI
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0,k")
+ (match_operand:QI 2 "general_operand" "qn,m,rn,k"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (XOR, QImode, operands)"
+ "#"
+ "&& reload_completed"
+ [(parallel
+ [(set (match_dup 0)
+ (xor:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0)
+ (not:QI (match_dup 0)))]
+{
+  /* mask_reg_operand is REG-safe (operand 0 may be a MEM in
+     alternative 0).  Mask logic insns do not clobber FLAGS, so a
+     single kxnor replaces the xor+not sequence.  */
+  if (mask_reg_operand (operands[0], QImode))
+ {
+ emit_insn (gen_kxnorqi (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}
+ [(set_attr "isa" "*,*,*,avx512f")
+ (set_attr "type" "alu,alu,alu,msklog")
+;; Alternative 3: without AVX512DQ there is no byte kxnorb, so the
+;; insn is emitted in HImode (kxnorw).
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "SI")
+ (and (eq_attr "alternative" "3")
+ (match_test "!TARGET_AVX512DQ"))
+ (const_string "HI")
+ ]
+ (const_string "QI")))
+ ;; Potential partial reg stall on alternative 2.
+ (set (attr "preferred_for_speed")
+ (cond [(eq_attr "alternative" "2")
+ (symbol_ref "!TARGET_PARTIAL_REG_STALL")]
+ (symbol_ref "true")))])
+
;; Alternative 1 is needed to work around LRA limitation, see PR82524.
(define_insn_and_split "*<code><mode>_1_slp"
[(set (strict_low_part (match_operand:SWI12 0 "register_operand" "+<r>,&<r>"))
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512bw -O2 -mavx512vl" } */
+/* { dg-final { scan-assembler-times {(?n)kxnor[bwqd]} 4 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times {(?n)kxnor[bwdq]} 3 { target ia32 } } } */
+
+#include<immintrin.h>
+
+/* ~(k1 ^ k2) on a 32-bit mask — should be combined into a single
+   kxnor mask instruction (counted by the scan-assembler above).  */
+__m512i
+foo (__m512i a, __m512i b, __m512i c, __m512i d)
+{
+  __mmask32 k1 = _mm512_cmp_epi16_mask (a, b, 1);
+  __mmask32 k2 = _mm512_cmp_epi16_mask (c, d, 2);
+  return _mm512_mask_mov_epi16 (a, ~(k1 ^ k2), c);
+}
+
+/* Same as foo but with a 16-bit mask (__mmask16); ~(k1 ^ k2) should
+   again become one kxnor mask instruction.  */
+__m512i
+foo1 (__m512i a, __m512i b, __m512i c, __m512i d)
+{
+  __mmask16 k1 = _mm512_cmp_epi32_mask (a, b, 1);
+  __mmask16 k2 = _mm512_cmp_epi32_mask (c, d, 2);
+  return _mm512_mask_mov_epi32 (a, ~(k1 ^ k2), c);
+}
+
+/* 64-bit mask (__mmask64) variant; on ia32 the expected kxnor count
+   differs (see the two dg-final directives above).  */
+__m512i
+foo2 (__m512i a, __m512i b, __m512i c, __m512i d)
+{
+  __mmask64 k1 = _mm512_cmp_epi8_mask (a, b, 1);
+  __mmask64 k2 = _mm512_cmp_epi8_mask (c, d, 2);
+  return _mm512_mask_mov_epi8 (a, ~(k1 ^ k2), c);
+}
+
+/* 8-bit mask (__mmask8) variant; ~(k1 ^ k2) should be emitted as a
+   single kxnor mask instruction.  */
+__m512i
+foo3 (__m512i a, __m512i b, __m512i c, __m512i d)
+{
+  __mmask8 k1 = _mm512_cmp_epi64_mask (a, b, 1);
+  __mmask8 k2 = _mm512_cmp_epi64_mask (c, d, 2);
+  return _mm512_mask_mov_epi64 (a, ~(k1 ^ k2), c);
+}