((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
(int) (C), (__mmask8) (-1))) \
-#define _mm_mask_fpclass_ss_mask(X, C, U) \
+/* Mask operand U moves to the first position so the -O0 macro matches
+   the parameter order of the inline (optimized) intrinsic.  */
+#define _mm_mask_fpclass_ss_mask(U, X, C) \
((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \
(int) (C), (__mmask8) (U)))
-#define _mm_mask_fpclass_sd_mask(X, C, U) \
+/* Mask operand U moves to the first position, same reordering as the
+   _ss variant of this macro.  */
+#define _mm_mask_fpclass_sd_mask(U, X, C) \
((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \
(int) (C), (__mmask8) (U)))
/* NOTE(review): the next line appears to be a stray duplicate left by
   patch extraction (it repeats the tail of the macro above, or belongs
   to a maskz variant whose earlier lines were lost) -- verify against
   the original hunk before applying.  */
(__mmask8)(U)))
+/* Unmasked reduce: the old expansion called a nonexistent
+   __builtin_ia32_reducesd_round and referenced U, which is not a
+   parameter of this macro.  Route through the masked builtin with a
+   zeroed merge source and an all-ones (-1) mask instead.  */
#define _mm_reduce_round_sd(A, B, C, R) \
-  ((__m128d) __builtin_ia32_reducesd_round ((__v2df)(__m128d)(A), \
-  (__v2df)(__m128d)(B), (int)(C), (__mmask8)(U), (int)(R)))
+  ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+  (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_avx512_setzero_pd (), \
+  (__mmask8)(-1), (int)(R)))
#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
(__mmask8)(U)))
+/* Unmasked reduce (single precision): same fix as the _sd variant --
+   the old expansion used a nonexistent builtin and an undefined U;
+   use the masked builtin with a zero merge source and full mask.  */
#define _mm_reduce_round_ss(A, B, C, R) \
-  ((__m128) __builtin_ia32_reducess_round ((__v4sf)(__m128)(A), \
-  (__v4sf)(__m128)(B), (int)(C), (__mmask8)(U), (int)(R)))
+  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
+  (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_avx512_setzero_ps (), \
+  (__mmask8)(-1), (int)(R)))
#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
(__mmask8)(U), (int)(R)))
+/* Fix copy/paste typo: the single-precision maskz macro must call the
+   reducess builtin, not reducesd.  */
#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
-  ((__m128) __builtin_ia32_reducesd_mask_round ((__v4sf)(__m128)(A), \
+  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
(__mmask8)(U), (int)(R)))
+/* Merge-masking must take masked-off elements from W (the writemask
+   source operand), not from X.  */
#define _mm256_mask_alignr_epi8(W, U, X, Y, N) \
((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
(__v4di)(__m256i)(Y), (int)((N) * 8), \
-  (__v4di)(__m256i)(X), (__mmask32)(U)))
+  (__v4di)(__m256i)(W), (__mmask32)(U)))
#define _mm256_mask_srli_epi16(W, U, A, B) \
((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A), \
+/* 128-bit form of the same fix: merge from W, not X.  */
#define _mm_mask_alignr_epi8(W, U, X, Y, N) \
((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
(__v2di)(__m128i)(Y), (int)((N) * 8), \
-  (__v2di)(__m128i)(X), (__mmask16)(U)))
+  (__v2di)(__m128i)(W), (__mmask16)(U)))
#define _mm_maskz_alignr_epi8(U, X, Y, N) \
((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
+/* Pass W as the merge source and the real mask U; the old expansion
+   wrongly used X and a constant -1 mask, making the masking a no-op.  */
#define _mm_mask_alignr_epi64(W, U, X, Y, C) \
((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X), \
-  (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(X), (__mmask8)-1))
+  (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_alignr_epi64(U, X, Y, C) \
((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X), \
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O0 -mavx512bw -mavx512vl" } */
+/* { dg-final { scan-assembler-times "vpalignr\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vpalignr\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+/* Compile-only check that the masked alignr byte intrinsics expand to
+   a write-masked vpalignr ({%k}) at -O0 (i.e. through the macro
+   fallbacks), once for the 256-bit and once for the 128-bit form.  */
+volatile __m256i y;
+volatile __m128i x;
+volatile __mmask32 m2;
+volatile __mmask16 m3;
+
+void extern
+avx512bw_test (void)
+{
+ y = _mm256_mask_alignr_epi8 (y, m2, y, y, 10);
+ x = _mm_mask_alignr_epi8 (x, m3, x, x, 10);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O0" } */
+/* { dg-final { scan-assembler-times "vfpclasssd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[0-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+/* Compile-only check of _mm_mask_fpclass_sd_mask with the (U, X, C)
+   argument order at -O0; expects a mask-register write-masked
+   vfpclasssd.  */
+volatile __m128d x128;
+volatile __mmask8 m8;
+
+void extern
+avx512dq_test (void)
+{
+ m8 = _mm_mask_fpclass_sd_mask (m8, x128, 13);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O0" } */
+/* { dg-final { scan-assembler-times "vfpclassss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[0-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+/* Compile-only check of _mm_mask_fpclass_ss_mask with the (U, X, C)
+   argument order at -O0; expects a mask-register write-masked
+   vfpclassss.  */
+volatile __m128 x128;
+volatile __mmask8 m8;
+
+void extern
+avx512dq_test (void)
+{
+ m8 = _mm_mask_fpclass_ss_mask (m8, x128, 13);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O0" } */
+/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+#define IMM 123
+
+/* Compile-only check that the unmasked _mm_reduce_round_sd macro
+   expands cleanly at -O0 (it routes through the masked builtin).
+   NOTE(review): x1, x2 and m are unused here -- presumably kept from
+   the testsuite template; confirm or prune.  */
+volatile __m128d x1, x2, xx1, xx2;
+volatile __mmask8 m;
+
+void extern
+avx512dq_test (void)
+{
+ xx1 = _mm_reduce_round_sd (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O0" } */
+/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+#define IMM 123
+
+/* Compile-only check that the unmasked _mm_reduce_round_ss macro
+   expands cleanly at -O0 (it routes through the masked builtin).
+   NOTE(review): x1, x2 and m are unused here -- presumably kept from
+   the testsuite template; confirm or prune.  */
+volatile __m128 x1, x2, xx1, xx2;
+volatile __mmask8 m;
+
+void extern
+avx512dq_test (void)
+{
+ xx1 = _mm_reduce_round_ss (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O0 -mavx512vl" } */
+/* { dg-final { scan-assembler-times "valignq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+/* Compile-only check that _mm_mask_alignr_epi64 emits a write-masked
+   ({%k}) valignq at -O0, i.e. the macro passes the real mask operand.
+   NOTE(review): y is declared but unused -- presumably template
+   leftover; confirm or prune.  */
+volatile __m256i y;
+volatile __m128i x;
+volatile __mmask8 m;
+
+void extern
+avx512vl_test (void)
+{
+ x = _mm_mask_alignr_epi64 (x, m, x, x, 1);
+}