(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SI")])
+;; Optimize pxor/pcmpeqb/pmovmskb/cmp 0xffff to ptest.
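+;; When operand 0 is all zeros, pcmpeqb against zero yields all-ones bytes,
+;; so pmovmskb returns 0xffff (0xffffffff for the 32-byte mode); that is the
+;; same condition ptest reports through ZF when testing the operand against
+;; itself, so the whole sequence can collapse to a single ptest.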
+(define_mode_attr vi1avx2const
+  [(V32QI "0xffffffff") (V16QI "0xffff")])
+
+(define_split
+  [(set (reg:CCZ FLAGS_REG)
+        (compare:CCZ (unspec:SI
+                       [(eq:VI1_AVX2
+                          (match_operand:VI1_AVX2 0 "vector_operand")
+                          (match_operand:VI1_AVX2 1 "const0_operand"))]
+                       UNSPEC_MOVMSK)
+                     (match_operand 2 "const_int_operand")))]
+  "TARGET_SSE4_1 && (INTVAL (operands[2]) == (int) (<vi1avx2const>))"
+  [(set (reg:CC FLAGS_REG)
+        (unspec:CC [(match_dup 0)
+                    (match_dup 0)]
+                   UNSPEC_PTEST))])
+
(define_expand "sse2_maskmovdqu"
[(set (match_operand:V16QI 0 "memory_operand")
(unspec:V16QI [(match_operand:V16QI 1 "register_operand")
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse4" } */
+/* { dg-final { scan-assembler "ptest\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "pxor\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "pcmpeqb\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "pmovmskb\[ \\t\]" } } */
+
+#include <smmintrin.h>
+
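+/* The compare-to-zero/movemask/compare-with-0xffff sequence should collapse
+   to a single ptest, as the scan-assembler directives above verify.  */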
+int is_zero(__m128i x)
+{
+ return _mm_movemask_epi8(_mm_cmpeq_epi8(x, _mm_setzero_si128())) == 0xffff;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx2" } */
+/* { dg-final { scan-assembler "vptest\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "vpxor\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "vpcmpeqb\[ \\t\]" } } */
+/* { dg-final { scan-assembler-not "vpmovmskb\[ \\t\]" } } */
+
+#include <immintrin.h>
+
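+/* Same check for the 256-bit case: the sequence should collapse to a single
+   vptest, with no vpxor/vpcmpeqb/vpmovmskb left in the output.  */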
+int is_zero256(__m256i x)
+{
+ return _mm256_movemask_epi8(_mm256_cmpeq_epi8(x, _mm256_setzero_si256())) == 0xffffffff;
+}