(__mmask8) __U,
__R);
}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_epi32 (__m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_epi64 (__m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_epi64 (__mmask8 __U, __m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_epu32 (__m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_undefined_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_epu64 (__m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_epu64 (__mmask8 __U, __m128 __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
						 (__v4df) (B), \
						 (__v4df) \
						 (_mm256_undefined_pd ()), \
						 (__mmask8) (-1), \
						 (R)))
+
+#define _mm256_cvt_roundps_epi32(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundps_epi32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundps_epi32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvtps2dq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundps_epi64(A, R) \
+ ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundps_epi64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundps_epi64(U, A, R) \
+ ((__m256i) __builtin_ia32_cvtps2qq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundps_epu32(A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundps_epu32(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundps_epu32(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvtps2udq256_mask_round ((__v8sf) (A), \
+ (__v8si) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvt_roundps_epu64(A, R) \
+  ((__m256i) \
+   __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundps_epu64(W, U, A, R) \
+ ((__m256i) __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundps_epu64(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_cvtps2uqq256_mask_round ((__v4sf) (A), \
+ (__v4di) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask8) (U), \
+ (R)))
#endif
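
A minimal usage sketch, not part of the patch: it assumes a compiler that
provides these intrinsics under -mavx10.2-256 (AVX2, implied by AVX10, is
used for the final adds) and illustrates the unmasked, merge-masking, and
zero-masking conventions the three variants of each conversion follow.

#include <immintrin.h>

/* Illustration only: SRC supplies the floats, OLD the pass-through lanes
   for merge-masking, M the per-lane mask.  */
static __m256i
cvt_roundps_demo (__m256 src, __m256i old, __mmask8 m)
{
  /* Unmasked: all eight lanes are converted; the rounding mode comes
     from the immediate rather than from MXCSR.  */
  __m256i a = _mm256_cvt_roundps_epi32
    (src, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  /* Merge-masking: lanes whose mask bit is zero keep the value from OLD.  */
  __m256i b = _mm256_mask_cvt_roundps_epi32
    (old, m, src, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
  /* Zero-masking: lanes whose mask bit is zero are cleared.  */
  __m256i c = _mm256_maskz_cvt_roundps_epi32
    (m, src, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
  return _mm256_add_epi32 (a, _mm256_add_epi32 (b, c));
}
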
#ifdef __DISABLE_AVX10_2_256__
DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI, INT)
DEF_FUNCTION_TYPE (V4DF, V4SF, V4DF, UQI, INT)
DEF_FUNCTION_TYPE (V8HF, V8SF, V8HF, UQI, INT)
+DEF_FUNCTION_TYPE (V8SI, V8SF, V8SI, UQI, INT)
+DEF_FUNCTION_TYPE (V4DI, V4SF, V4DI, UQI, INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask_round, "__builtin_ia32_vcvtph2w256_mask_round", IX86_BUILTIN_VCVTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtps2pd256_mask_round, "__builtin_ia32_vcvtps2pd256_mask_round", IX86_BUILTIN_VCVTPS2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4SF_V4DF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtps2ph_v8sf_mask_round, "__builtin_ia32_vcvtps2phx256_mask_round", IX86_BUILTIN_VCVTPS2PHX256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_fix_notruncv8sfv8si_mask_round, "__builtin_ia32_vcvtps2dq256_mask_round", IX86_BUILTIN_VCVTPS2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_cvtps2qqv4di_mask_round, "__builtin_ia32_cvtps2qq256_mask_round", IX86_BUILTIN_VCVTPS2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_fixuns_notruncv8sfv8si_mask_round, "__builtin_ia32_cvtps2udq256_mask_round", IX86_BUILTIN_VCVTPS2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8SF_V8SI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512dq_cvtps2uqqv4di_mask_round, "__builtin_ia32_cvtps2uqq256_mask_round", IX86_BUILTIN_VCVTPS2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4SF_V4DI_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
case V16SF_FTYPE_V16HI_V16SF_HI_INT:
case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
+ case V8SI_FTYPE_V8SF_V8SI_UQI_INT:
case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
case V4DF_FTYPE_V4SF_V4DF_UQI_INT:
case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
+ case V4DI_FTYPE_V4SF_V4DI_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_INT:
case V4SI_FTYPE_V4DF_V4SI_UQI_INT:
case V4SF_FTYPE_V4DF_V4SF_UQI_INT:
(define_mode_attr sf2simodelower
[(V16SI "v16sf") (V8SI "v8sf") (V4SI "v4sf")])
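+;; Tagging the pattern name with <round_name> lets the "round" subst stamp
+;; out an embedded-rounding twin of this insn; <round_mode_condition> then
+;; gates the 256-bit rounding form on AVX10.2 while leaving the existing
+;; 512-bit form unchanged.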
-(define_insn "<sse2_avx_avx512f>_fix_notrunc<sf2simodelower><mode><mask_name>"
+(define_insn "<sse2_avx_avx512f>_fix_notrunc<sf2simodelower><mode><mask_name><round_name>"
[(set (match_operand:VI4_AVX 0 "register_operand" "=v")
(unspec:VI4_AVX
- [(match_operand:<ssePSmode> 1 "vector_operand" "vBm")]
+ [(match_operand:<ssePSmode> 1 "<round_nimm_predicate>" "<round_constraint4>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_SSE2 && <mask_mode512bit_condition>"
- "%vcvtps2dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
+ "%vcvtps2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
(unspec:VI4_AVX512VL
[(match_operand:<ssePSmode> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && <round_mode_condition>"
"vcvtps2udq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(define_subst_attr "bcst_round_constraint" "round" "vmBr" "v")
(define_subst_attr "round_constraint2" "round" "m" "v")
(define_subst_attr "round_constraint3" "round" "rm" "r")
+(define_subst_attr "round_constraint4" "round" "vBm" "v")
(define_subst_attr "round_nimm_predicate" "round" "vector_operand" "register_operand")
(define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand")
(define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand")
#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
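+/* The rounding operand must be a compile-time constant, so this
+   compile-only harness pins it to 8 (_MM_FROUND_NO_EXC).  */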
+#define __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, 8)
#include <wmmintrin.h>
#include <immintrin.h>
/* { dg-final { scan-assembler-times "vcvtps2phxy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2dq\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2dq\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2dq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2qq\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2qq\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2qq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2udq\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2udq\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2udq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2uqq\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2uqq\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2uqq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
hxh = _mm256_mask_cvtx_roundps_ph (hxh, m8, x, 8);
hxh = _mm256_maskz_cvtx_roundps_ph (m8, x, 11);
}
+
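+/* The rounding immediates below (0|8, 2|8, 1|8, 3|8) select the rn-sae,
+   ru-sae, rd-sae, and rz-sae forms that the scan-assembler patterns
+   above expect.  */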
+void extern
+avx10_2_test_10 (void)
+{
+ xi = _mm256_cvt_roundps_epi32 (x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvt_roundps_epi32 (xi, m8, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvt_roundps_epi32 (m8, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvt_roundps_epi64 (hx, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvt_roundps_epi64 (xi, m8, hx, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvt_roundps_epi64 (m8, hx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvt_roundps_epu32 (x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvt_roundps_epu32 (xi, m8, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvt_roundps_epu32 (m8, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xi = _mm256_cvt_roundps_epu64 (hx, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xi = _mm256_mask_cvt_roundps_epu64 (xi, m8, hx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xi = _mm256_maskz_cvt_roundps_epu64 (m8, hx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, 8)
#include <x86intrin.h>
test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
+test_1 (_mm256_cvt_roundps_epi32, __m256i, __m256, 9)
+test_1 (_mm256_cvt_roundps_epu32, __m256i, __m256, 9)
+test_1 (_mm256_cvt_roundps_epi64, __m256i, __m128, 8)
+test_1 (_mm256_cvt_roundps_epu64, __m256i, __m128, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvt_roundps_epi32, __m256i, __mmask8, __m256, 9)
+test_2 (_mm256_maskz_cvt_roundps_epu32, __m256i, __mmask8, __m256, 9)
+test_2 (_mm256_maskz_cvt_roundps_epi64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvt_roundps_epu64, __m256i, __mmask8, __m128, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 9)
+test_3 (_mm256_mask_cvt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 9)
+test_3 (_mm256_mask_cvt_roundps_epi64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvt_roundps_epu64, __m256i, __m256i, __mmask8, __m128, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
+test_1 (_mm256_cvt_roundps_epi32, __m256i, __m256, 9)
+test_1 (_mm256_cvt_roundps_epu32, __m256i, __m256, 9)
+test_1 (_mm256_cvt_roundps_epi64, __m256i, __m128, 8)
+test_1 (_mm256_cvt_roundps_epu64, __m256i, __m128, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
+test_2 (_mm256_maskz_cvt_roundps_epi32, __m256i, __mmask8, __m256, 9)
+test_2 (_mm256_maskz_cvt_roundps_epu32, __m256i, __mmask8, __m256, 9)
+test_2 (_mm256_maskz_cvt_roundps_epi64, __m256i, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvt_roundps_epu64, __m256i, __mmask8, __m128, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
+test_3 (_mm256_mask_cvt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 9)
+test_3 (_mm256_mask_cvt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 9)
+test_3 (_mm256_mask_cvt_roundps_epi64, __m256i, __m256i, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvt_roundps_epu64, __m256i, __m256i, __mmask8, __m128, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2dq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2qq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2udq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtps2uqq256_mask_round(A, B, C, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")