(__mmask8) __U,
__R);
}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fnmsub_round_pd (__m256d __A, __m256d __B, __m256d __D, const int __R)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __D,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmsub_round_pd (__m256d __A, __mmask8 __U, __m256d __B,
+ __m256d __D, const int __R)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __D,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmsub_round_pd (__m256d __A, __m256d __B, __m256d __D,
+ __mmask8 __U, const int __R)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask3_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __D,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmsub_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+ __m256d __D, const int __R)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_maskz_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __D,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fnmsub_round_ph (__m256h __A, __m256h __B, __m256h __D, const int __R)
+{
+ return (__m256h)
+ __builtin_ia32_vfnmsubph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmsub_round_ph (__m256h __A, __mmask16 __U, __m256h __B,
+ __m256h __D, const int __R)
+{
+ return (__m256h)
+ __builtin_ia32_vfnmsubph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmsub_round_ph (__m256h __A, __m256h __B, __m256h __D,
+ __mmask16 __U, const int __R)
+{
+ return (__m256h)
+ __builtin_ia32_vfnmsubph256_mask3_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmsub_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+ __m256h __D, const int __R)
+{
+ return (__m256h)
+ __builtin_ia32_vfnmsubph256_maskz_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __D,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fnmsub_round_ps (__m256 __A, __m256 __B, __m256 __D, const int __R)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __D,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmsub_round_ps (__m256 __A, __mmask8 __U, __m256 __B,
+ __m256 __D, const int __R)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __D,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmsub_round_ps (__m256 __A, __m256 __B, __m256 __D,
+ __mmask8 __U, const int __R)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask3_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __D,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmsub_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+ __m256 __D, const int __R)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_maskz_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __D,
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
#define _mm256_maskz_fnmadd_round_ps(U, A, B, D, R) \
(__m256)__builtin_ia32_vfnmaddps256_maskz_round (A, B, D, U, R)
+
+#define _mm256_fnmsub_round_pd(A, B, D, R) \
+ (__m256d)__builtin_ia32_vfnmsubpd256_mask_round (A, B, D, -1, R)
+
+#define _mm256_mask_fnmsub_round_pd(A, U, B, D, R) \
+ (__m256d)__builtin_ia32_vfnmsubpd256_mask_round (A, B, D, U, R)
+
+#define _mm256_mask3_fnmsub_round_pd(A, B, D, U, R) \
+ (__m256d)__builtin_ia32_vfnmsubpd256_mask3_round (A, B, D, U, R)
+
+#define _mm256_maskz_fnmsub_round_pd(U, A, B, D, R) \
+ (__m256d)__builtin_ia32_vfnmsubpd256_maskz_round (A, B, D, U, R)
+
+#define _mm256_fnmsub_round_ph(A, B, D, R) \
+ ((__m256h)__builtin_ia32_vfnmsubph256_mask_round ((A), (B), (D), -1, (R)))
+
+#define _mm256_mask_fnmsub_round_ph(A, U, B, D, R) \
+ ((__m256h)__builtin_ia32_vfnmsubph256_mask_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_mask3_fnmsub_round_ph(A, B, D, U, R) \
+ ((__m256h)__builtin_ia32_vfnmsubph256_mask3_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_maskz_fnmsub_round_ph(U, A, B, D, R) \
+ ((__m256h)__builtin_ia32_vfnmsubph256_maskz_round ((A), (B), (D), (U), (R)))
+
+#define _mm256_fnmsub_round_ps(A, B, D, R) \
+ (__m256)__builtin_ia32_vfnmsubps256_mask_round (A, B, D, -1, R)
+
+#define _mm256_mask_fnmsub_round_ps(A, U, B, D, R) \
+ (__m256)__builtin_ia32_vfnmsubps256_mask_round (A, B, D, U, R)
+
+#define _mm256_mask3_fnmsub_round_ps(A, B, D, U, R) \
+ (__m256)__builtin_ia32_vfnmsubps256_mask3_round (A, B, D, U, R)
+
+#define _mm256_maskz_fnmsub_round_ps(U, A, B, D, R) \
+ (__m256)__builtin_ia32_vfnmsubps256_maskz_round (A, B, D, U, R)
#endif
#define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
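A quick illustration of what the new intrinsics compute (a minimal sketch, not part of the patch; the function and variable names are invented, and an AVX10.2-256-enabled target is assumed). FNMSUB evaluates -(a*b) - c per element; the _mask form merges inactive lanes from the first source, _mask3 merges from the addend, and _maskz zeroes them:

#include <immintrin.h>

__m256d
fnmsub_round_demo (__m256d a, __m256d b, __m256d c, __mmask8 u)
{
  /* Round to nearest with exceptions suppressed; the rounding
     argument must be a compile-time constant.  */
  __m256d full = _mm256_fnmsub_round_pd
    (a, b, c, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  /* Merge masking: lanes with a zero mask bit keep the value of A.  */
  __m256d merged = _mm256_mask_fnmsub_round_pd
    (a, u, b, c, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  /* Zero masking: lanes with a zero mask bit become 0.0.  */
  __m256d zeroed = _mm256_maskz_fnmsub_round_pd
    (u, a, b, c, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  return _mm256_add_pd (full, _mm256_add_pd (merged, zeroed));
}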
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_mask_round, "__builtin_ia32_vfnmaddps256_mask_round", IX86_BUILTIN_VFNMADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_mask3_round, "__builtin_ia32_vfnmaddps256_mask3_round", IX86_BUILTIN_VFNMADDPS512_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmadd_v8sf_maskz_round, "__builtin_ia32_vfnmaddps256_maskz_round", IX86_BUILTIN_VFNMADDPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_mask_round, "__builtin_ia32_vfnmsubpd256_mask_round", IX86_BUILTIN_VFNMSUBPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_mask3_round, "__builtin_ia32_vfnmsubpd256_mask3_round", IX86_BUILTIN_VFNMSUBPD256_MASK3_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v4df_maskz_round, "__builtin_ia32_vfnmsubpd256_maskz_round", IX86_BUILTIN_VFNMSUBPD256_MASKZ_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_mask_round, "__builtin_ia32_vfnmsubph256_mask_round", IX86_BUILTIN_VFNMSUBPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_mask3_round, "__builtin_ia32_vfnmsubph256_mask3_round", IX86_BUILTIN_VFNMSUBPH256_MASK3_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v16hf_maskz_round, "__builtin_ia32_vfnmsubph256_maskz_round", IX86_BUILTIN_VFNMSUBPH256_MASKZ_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_mask_round, "__builtin_ia32_vfnmsubps256_mask_round", IX86_BUILTIN_VFNMSUBPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_mask3_round, "__builtin_ia32_vfnmsubps256_mask3_round", IX86_BUILTIN_VFNMSUBPS256_MASK3_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fnmsub_v8sf_maskz_round, "__builtin_ia32_vfnmsubps256_maskz_round", IX86_BUILTIN_VFNMSUBPS256_MASKZ_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
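Each BDESC entry binds a builtin name to an insn pattern and a function-type code. As an illustration (a sketch, not literal GCC source; parameter names are invented), V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT corresponds to an effective prototype of roughly:

/* Signature implied by V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT.  */
__v4df
__builtin_ia32_vfnmsubpd256_mask_round (__v4df a, __v4df b, __v4df c,
                                        unsigned char mask /* __mmask8 */,
                                        const int rounding);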
(match_operand:VFH_AVX512VL 3 "register_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_mode_condition>"
"vfnmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
#define __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, 8)
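/* These wrappers pin the rounding operand to the compile-time constant
   8 (_MM_FROUND_NO_EXC), so tests that invoke every builtin with
   non-constant arguments still satisfy the immediate-operand
   constraint; e.g. a call written
     __builtin_ia32_vfnmsubpd256_mask_round (a, b, d, u, r);
   compiles as if the last argument were the literal 8.  */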
#include <wmmintrin.h>
#include <immintrin.h>
/* { dg-final { scan-assembler-times "vfnmadd...ps\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vfnmadd231ps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vfnmadd...ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ps\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub231ps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...pd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...pd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub231pd\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...pd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ph\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ph\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub231ph\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vfnmsub...ph\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
x = _mm256_mask3_fnmadd_round_ps (x, x, x, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
x = _mm256_maskz_fnmadd_round_ps (m8, x, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
+
+void extern
+avx10_2_test_15 (void)
+{
+ xd = _mm256_fnmsub_round_pd (xd, xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_fnmsub_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xd = _mm256_mask3_fnmsub_round_pd (xd, xd, xd, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_fnmsub_round_pd (m8, xd, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xh = _mm256_fnmsub_round_ph (xh, xh, xh, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xh = _mm256_mask_fnmsub_round_ph (xh, m16, xh, xh, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xh = _mm256_mask3_fnmsub_round_ph (xh, xh, xh, m16, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ xh = _mm256_maskz_fnmsub_round_ph (m16, xh, xh, xh, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ x = _mm256_fnmsub_round_ps (x, x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ x = _mm256_mask_fnmsub_round_ps (x, m8, x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ x = _mm256_mask3_fnmsub_round_ps (x, x, x, m8, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ x = _mm256_maskz_fnmsub_round_ps (m8, x, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
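For reference, a hedged sketch of the semantics the masked pd calls above exercise (this file is compile/scan-assembler only; the helpers below and their names are hypothetical, in the style of the corresponding runtime tests):

/* Scalar reference: FNMSUB negates the product, then subtracts.  */
static double
fnmsub_ref (double a, double b, double d)
{
  return -(a * b) - d;
}

/* Merge-masked reference for one 256-bit vector (4 doubles):
   inactive lanes keep the first source operand.  */
static void
mask_fnmsub_pd_ref (const double *a, const double *b, const double *d,
                    unsigned char mask, double *ref)
{
  int i;
  for (i = 0; i < 4; i++)
    ref[i] = ((mask >> i) & 1) ? fnmsub_ref (a[i], b[i], d[i]) : a[i];
}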
#define __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, 8)
#include <x86intrin.h>
test_3 (_mm256_fnmadd_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
test_3 (_mm256_fnmadd_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
test_3 (_mm256_fnmadd_round_ps, __m256, __m256, __m256, __m256, 9)
+test_3 (_mm256_fnmsub_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
+test_3 (_mm256_fnmsub_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
+test_3 (_mm256_fnmsub_round_ps, __m256, __m256, __m256, __m256, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
test_4 (_mm256_mask_fnmadd_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4 (_mm256_mask3_fnmadd_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
test_4 (_mm256_maskz_fnmadd_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_fnmsub_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask3_fnmsub_round_pd, __m256d, __m256d, __m256d, __m256d, __mmask8, 9)
+test_4 (_mm256_maskz_fnmsub_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256d, 9)
+test_4 (_mm256_mask_fnmsub_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fnmsub_round_ph, __m256h, __m256h, __m256h, __m256h, __mmask16, 9)
+test_4 (_mm256_maskz_fnmsub_round_ph, __m256h, __mmask16, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fnmsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask3_fnmsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
+test_4 (_mm256_maskz_fnmsub_round_ps, __m256, __mmask8, __m256, __m256, __m256, 9)
test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
test_3 (_mm256_fnmadd_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
test_3 (_mm256_fnmadd_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
test_3 (_mm256_fnmadd_round_ps, __m256, __m256, __m256, __m256, 9)
+test_3 (_mm256_fnmsub_round_pd, __m256d, __m256d, __m256d, __m256d, 9)
+test_3 (_mm256_fnmsub_round_ph, __m256h, __m256h, __m256h, __m256h, 9)
+test_3 (_mm256_fnmsub_round_ps, __m256, __m256, __m256, __m256, 9)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
test_4 (_mm256_mask_fnmadd_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
test_4 (_mm256_mask3_fnmadd_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
test_4 (_mm256_maskz_fnmadd_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_fnmsub_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask3_fnmsub_round_pd, __m256d, __m256d, __m256d, __m256d, __mmask8, 9)
+test_4 (_mm256_maskz_fnmsub_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256d, 9)
+test_4 (_mm256_mask_fnmsub_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask3_fnmsub_round_ph, __m256h, __m256h, __m256h, __m256h, __mmask16, 9)
+test_4 (_mm256_maskz_fnmsub_round_ph, __m256h, __mmask16, __m256h, __m256h, __m256h, 9)
+test_4 (_mm256_mask_fnmsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask3_fnmsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
+test_4 (_mm256_maskz_fnmsub_round_ps, __m256, __mmask8, __m256, __m256, __m256, 9)
test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
#define __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_mask3_round(A, B, C, D, 8)
#define __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmaddps256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubpd256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubph256_maskz_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_mask3_round(A, B, C, D, 8)
+#define __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, E) __builtin_ia32_vfnmsubps256_maskz_round(A, B, C, D, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")