(__mmask8) __U,
__R);
}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epu16 (__m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_undefined_si256 (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epi16 (__m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_undefined_si256 (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
+ const int __R)
+{
+ return (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+ (__v16hi) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
+{
+ return
+ (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_pd (__m128 __A, const int __R)
+{
+ return
+ (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+ (__v4df)
+ _mm256_undefined_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_pd (__m256d __W, __mmask8 __U, __m128 __A,
+ const int __R)
+{
+ return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+ (__v4df) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_pd (__mmask8 __U, __m128 __A, const int __R)
+{
+ return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtx_roundps_ph (__m256 __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtx_roundps_ph (__m128h __W, __mmask8 __U, __m256 __A,
+ const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+ (__v8hf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtx_roundps_ph (__mmask8 __U, __m256 __A, const int __R)
+{
+ return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+ (__v8hf)
+ _mm_setzero_ph (),
+ (__mmask8) __U,
+ __R);
+}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
(__v4df) (B), \
(__v4df) \
(_mm256_undefined_pd ()), \
(__mmask8) (-1), \
(R)))
+
+#define _mm256_cvt_roundph_epu16(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundph_epu16(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundph_epu16(U, A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_cvt_roundph_epi16(A, R) \
+ ((__m256i) \
+ __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_undefined_si256 ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundph_epi16(W, U, A, R) \
+ ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+ (__v16hi) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundph_epi16(U, A, R) \
+ ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+ (__v16hi) \
+ (_mm256_setzero_si256 ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_cvt_roundps_pd(A, R) \
+ ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+ (__v4df) \
+ (_mm256_undefined_pd ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvt_roundps_pd(W, U, A, R) \
+ ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+ (__v4df) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvt_roundps_pd(U, A, R) \
+ ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cvtx_roundps_ph(A, R) \
+ ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cvtx_roundps_ph(W, U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+ (__v8hf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_cvtx_roundps_ph(U, A, R) \
+ ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+ (__v8hf) \
+ (_mm_setzero_ph ()), \
+ (__mmask8) (U), \
+ (R)))
#endif
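/* Usage sketch (not part of the patch): how the new intrinsics are
   meant to be called.  Assumes a GCC with this series applied and the
   AVX10.2 256-bit ISA enabled (e.g. -mavx10.2-256); the function
   names below are illustrative only.  */
#include <immintrin.h>

/* 16 _Float16 lanes -> unsigned 16-bit integers, forcing
   round-to-nearest-even with exceptions suppressed ({rn-sae}).  */
__m256i
ph_to_epu16_rne (__m256h v)
{
  return _mm256_cvt_roundph_epu16 (v, _MM_FROUND_TO_NEAREST_INT
				      | _MM_FROUND_NO_EXC);
}

/* vcvtps2pd only widens, so no rounding ever occurs; the immediate
   can only suppress exceptions (SAE), as the tests below do with
   _MM_FROUND_NO_EXC.  */
__m256d
ps_to_pd_sae (__m128 v)
{
  return _mm256_cvt_roundps_pd (v, _MM_FROUND_NO_EXC);
}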
#ifdef __DISABLE_AVX10_2_256__
DEF_FUNCTION_TYPE (V4DF, V8HF, V4DF, UQI, INT)
DEF_FUNCTION_TYPE (V8SF, V8HF, V8SF, UQI, INT)
DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI, INT)
+DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI, INT)
+DEF_FUNCTION_TYPE (V4DF, V4SF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V8HF, V8SF, V8HF, UQI, INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask_round, "__builtin_ia32_vcvtph2qq256_mask_round", IX86_BUILTIN_VCVTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask_round, "__builtin_ia32_vcvtph2udq256_mask_round", IX86_BUILTIN_VCVTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask_round, "__builtin_ia32_vcvtph2uqq256_mask_round", IX86_BUILTIN_VCVTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uw_v16hi_mask_round, "__builtin_ia32_vcvtph2uw256_mask_round", IX86_BUILTIN_VCVTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask_round, "__builtin_ia32_vcvtph2w256_mask_round", IX86_BUILTIN_VCVTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtps2pd256_mask_round, "__builtin_ia32_vcvtps2pd256_mask_round", IX86_BUILTIN_VCVTPS2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4SF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtps2ph_v8sf_mask_round, "__builtin_ia32_vcvtps2phx256_mask_round", IX86_BUILTIN_VCVTPS2PHX256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
case V16SI_FTYPE_V16SF_V16SI_HI_INT:
case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
case V16HF_FTYPE_V16SI_V16HF_UHI_INT:
+ case V16HI_FTYPE_V16HF_V16HI_UHI_INT:
case V8DF_FTYPE_V8SF_V8DF_QI_INT:
case V16SF_FTYPE_V16HI_V16SF_HI_INT:
case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
+ case V4DF_FTYPE_V4SF_V4DF_UQI_INT:
case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_INT:
case V8HF_FTYPE_V8DI_V8HF_UQI_INT:
case V8HF_FTYPE_V8DF_V8HF_UQI_INT:
+ case V8HF_FTYPE_V8SF_V8HF_UQI_INT:
case V8HF_FTYPE_V8SI_V8HF_UQI_INT:
case V8HF_FTYPE_V4DF_V8HF_UQI_INT:
case V16HF_FTYPE_V16SF_V16HF_UHI_INT:
[(set (match_operand:<ssePHmode> 0 "register_operand" "=v")
(float_truncate:<ssePHmode>
(match_operand:VF48H_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX512FP16"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
"vcvt<castmode>2ph<ph2pssuffix><round_qq2phsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
|| <MODE>mode == V4DFmode
|| <MODE>mode == V4DImode
|| <MODE>mode == V8SImode
+ || <MODE>mode == V16HImode
|| <MODE>mode == V16HFmode)))")
(define_subst_attr "round_applied" "round" "false" "true")
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
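/* The wrappers above follow the usual i386 testsuite pattern; a
   sketch of their effect, assuming the standard sse-13.c-style
   harness.  The test macros pass a non-constant rounding argument,
   which the builtin's immediate-operand check would reject; the
   #define rewrites each call so the last operand is the literal 8
   (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):

     __builtin_ia32_vcvtps2pd256_mask_round (a, b, c, r);
   becomes
     __builtin_ia32_vcvtps2pd256_mask_round (a, b, c, 8);  */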
#include <wmmintrin.h>
#include <immintrin.h>
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2phxy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
x = _mm256_mask_cvtx_roundph_ps (x, m8, hxh, 8);
x = _mm256_maskz_cvtx_roundph_ps (m8, hxh, 8);
}
+
+void extern
+avx10_2_test_8 (void)
+{
+ xi = _mm256_cvt_roundph_epu16 (xh, 4);
+ xi = _mm256_mask_cvt_roundph_epu16 (xi, m16, xh, 8);
+ xi = _mm256_maskz_cvt_roundph_epu16 (m16, xh, 11);
+
+ xi = _mm256_cvt_roundph_epi16 (xh, 4);
+ xi = _mm256_mask_cvt_roundph_epi16 (xi, m16, xh, 8);
+ xi = _mm256_maskz_cvt_roundph_epi16 (m16, xh, 11);
+}
+
+void extern
+avx10_2_test_9 (void)
+{
+ xd = _mm256_cvt_roundps_pd (hx, _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_cvt_roundps_pd (xd, m8, hx, _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_cvt_roundps_pd (m8, hx, _MM_FROUND_NO_EXC);
+
+ hxh = _mm256_cvtx_roundps_ph (x, 4);
+ hxh = _mm256_mask_cvtx_roundps_ph (hxh, m8, x, 8);
+ hxh = _mm256_maskz_cvtx_roundps_ph (m8, x, 11);
+}
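/* For reference, the rounding immediates used above decode (per
   <immintrin.h>) as:
     4  == _MM_FROUND_CUR_DIRECTION  (use MXCSR; no EVEX rounding
				      annotation is emitted)
     8  == _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC  ({rn-sae})
     11 == _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC	  ({rz-sae})
   which is what the scan-assembler directives above match.  */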
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
#include <x86intrin.h>
test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu32, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")