AVX10.2 ymm rounding: Support vcvtph2{,u}w and vcvtps2p{d,hx} intrins
Author:     Hu, Lin1 <lin1.hu@intel.com>
AuthorDate: Mon, 19 Aug 2024 02:08:57 +0000 (10:08 +0800)
Commit:     Haochen Jiang <haochen.jiang@intel.com>
CommitDate: Mon, 19 Aug 2024 02:10:46 +0000 (10:08 +0800)
gcc/ChangeLog:

* config/i386/avx10_2roundingintrin.h: New intrins.
* config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE.
* config/i386/i386-builtin.def (BDESC): Add new builtins.
* config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle
V16HI_FTYPE_V16HF_V16HI_UHI_INT, V4DF_FTYPE_V4SF_V4DF_UQI_INT and
V8HF_FTYPE_V8SF_V8HF_UQI_INT.
* config/i386/sse.md
(avx512fp16_vcvt<castmode>2ph_<mode><mask_name><round_name>):
Add round condition check.
* config/i386/subst.md (round_mode_condition): Add V16HI check for
256-bit.

gcc/testsuite/ChangeLog:

* gcc.target/i386/avx-1.c: Add new builtin test.
* gcc.target/i386/sse-13.c: Ditto.
* gcc.target/i386/sse-14.c: Ditto.
* gcc.target/i386/sse-22.c: Add new macro test.
* gcc.target/i386/sse-23.c: Ditto.
* gcc.target/i386/avx10_2-rounding-1.c: Add test.

12 files changed:
gcc/config/i386/avx10_2roundingintrin.h
gcc/config/i386/i386-builtin-types.def
gcc/config/i386/i386-builtin.def
gcc/config/i386/i386-expand.cc
gcc/config/i386/sse.md
gcc/config/i386/subst.md
gcc/testsuite/gcc.target/i386/avx-1.c
gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
gcc/testsuite/gcc.target/i386/sse-13.c
gcc/testsuite/gcc.target/i386/sse-14.c
gcc/testsuite/gcc.target/i386/sse-22.c
gcc/testsuite/gcc.target/i386/sse-23.c
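
For context, a minimal usage sketch of the intrinsics this commit adds. The
function and variable names below are illustrative assumptions, not part of
the commit; the rounding arguments use the standard _MM_FROUND_* constants
from <immintrin.h>, and building such code is assumed to require a GCC with
AVX10.2-256 support enabled.

#include <immintrin.h>

/* vcvtph2uw: convert 16 x FP16 to 16 x uint16, round-to-nearest with
   exceptions suppressed ({rn-sae}).  */
__m256i
cvt_ph_to_epu16 (__m256h src)
{
  return _mm256_cvt_roundph_epu16 (src, _MM_FROUND_TO_NEAREST_INT
					| _MM_FROUND_NO_EXC);
}

/* vcvtps2pd: the float-to-double widening is exact, so only SAE applies;
   lanes not set in m keep the values from fallback.  */
__m256d
cvt_ps_to_pd (__m256d fallback, __mmask8 m, __m128 src)
{
  return _mm256_mask_cvt_roundps_pd (fallback, m, src, _MM_FROUND_NO_EXC);
}

/* vcvtps2phx: narrow 8 x float to 8 x FP16 with round-toward-zero
   ({rz-sae}); zero-masking clears lanes not set in m.  */
__m128h
cvt_ps_to_ph (__mmask8 m, __m256 src)
{
  return _mm256_maskz_cvtx_roundps_ph (m, src, _MM_FROUND_TO_ZERO
					       | _MM_FROUND_NO_EXC);
}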

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 29966f5e1bf817d39f0c6c93a6d177313650b80c..bc3f92a7d1ae943a6cc987fb128ef6b2e9517797 100644
@@ -726,6 +726,143 @@ _mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
                                                       (__mmask8) __U,
                                                       __R);
 }
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epu16 (__m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+                                                     (__v16hi)
+                                                     _mm256_undefined_si256 (),
+                                                     (__mmask16) -1,
+                                                     __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
+                              const int __R)
+{
+  return (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+                                                          (__v16hi) __W,
+                                                          (__mmask16) __U,
+                                                          __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+                                                     (__v16hi)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask16) __U,
+                                                     __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epi16 (__m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+                                                    (__v16hi)
+                                                    _mm256_undefined_si256 (),
+                                                    (__mmask16) -1,
+                                                    __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
+                              const int __R)
+{
+  return (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+                                                         (__v16hi) __W,
+                                                         (__mmask16) __U,
+                                                         __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __U,
+                                                    __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_pd (__m128 __A, const int __R)
+{
+  return
+    (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+                                                     (__v4df)
+                                                     _mm256_undefined_pd (),
+                                                     (__mmask8) -1,
+                                                     __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_pd (__m256d __W, __mmask8 __U, __m128 __A,
+                           const int __R)
+{
+  return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+                                                          (__v4df) __W,
+                                                          (__mmask8) __U,
+                                                          __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_pd (__mmask8 __U, __m128 __A, const int __R)
+{
+  return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+                                                          (__v4df)
+                                                          _mm256_setzero_pd (),
+                                                          (__mmask8) __U,
+                                                          __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtx_roundps_ph (__m256 __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+                                                           (__v8hf)
+                                                           _mm_setzero_ph (),
+                                                           (__mmask8) -1,
+                                                           __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtx_roundps_ph (__m128h __W, __mmask8 __U, __m256 __A,
+                            const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+                                                           (__v8hf) __W,
+                                                           (__mmask8) __U,
+                                                           __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtx_roundps_ph (__mmask8 __U, __m256 __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+                                                           (__v8hf)
+                                                           _mm_setzero_ph (),
+                                                           (__mmask8) __U,
+                                                           __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -1139,6 +1276,89 @@ _mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
                                            (_mm256_setzero_si256 ()), \
                                            (__mmask8) (U), \
                                            (R)))
+
+#define _mm256_cvt_roundph_epu16(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+                                          (__v16hi) \
+                                          (_mm256_undefined_si256 ()), \
+                                          (__mmask16) (-1), \
+                                          (R)))
+
+#define _mm256_mask_cvt_roundph_epu16(W, U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+                                                    (__v16hi) (W), \
+                                                    (__mmask16) (U), \
+                                                    (R)))
+
+#define _mm256_maskz_cvt_roundph_epu16(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+                                          (__v16hi) \
+                                          (_mm256_setzero_si256 ()), \
+                                          (__mmask16) (U), \
+                                          (R)))
+
+#define _mm256_cvt_roundph_epi16(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+                                         (__v16hi) \
+                                         (_mm256_undefined_si256 ()), \
+                                         (__mmask16) (-1), \
+                                         (R)))
+
+#define _mm256_mask_cvt_roundph_epi16(W, U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+                                                   (__v16hi) (W), \
+                                                   (__mmask16) (U), \
+                                                   (R)))
+
+#define _mm256_maskz_cvt_roundph_epi16(U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+                                                   (__v16hi) \
+                                                   (_mm256_setzero_si256 ()), \
+                                                   (__mmask16) (U), \
+                                                   (R)))
+
+#define _mm256_cvt_roundps_pd(A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+                                                    (__v4df) \
+                                                    (_mm256_undefined_pd ()), \
+                                                    (__mmask8) (-1),  \
+                                                    (R)))
+
+#define _mm256_mask_cvt_roundps_pd(W, U, A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+                                                    (__v4df) (W), \
+                                                    (__mmask8) (U), \
+                                                    (R)))
+
+#define _mm256_maskz_cvt_roundps_pd(U, A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+                                                    (__v4df) \
+                                                    (_mm256_setzero_pd ()), \
+                                                    (__mmask8) (U), \
+                                                    (R)))
+
+#define _mm256_cvtx_roundps_ph(A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+                                                     (__v8hf) \
+                                                     (_mm_setzero_ph ()), \
+                                                     (__mmask8) (-1), \
+                                                     (R)))
+
+#define _mm256_mask_cvtx_roundps_ph(W, U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+                                                     (__v8hf) (W), \
+                                                     (__mmask8) (U), \
+                                                     (R)))
+
+#define _mm256_maskz_cvtx_roundps_ph(U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+                                                     (__v8hf) \
+                                                     (_mm_setzero_ph ()), \
+                                                     (__mmask8) (U), \
+                                                     (R)))
 #endif
 
 #ifdef __DISABLE_AVX10_2_256__
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index a660828228d775e3f7fda93e7977286ed6e9ec8f..b850ee0c2f618e88fd77ecc2385518989375e977 100644
@@ -1431,3 +1431,6 @@ DEF_FUNCTION_TYPE (V8SI, V8HF, V8SI, UQI, INT)
 DEF_FUNCTION_TYPE (V4DF, V8HF, V4DF, UQI, INT)
 DEF_FUNCTION_TYPE (V8SF, V8HF, V8SF, UQI, INT)
 DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI, INT)
+DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI, INT)
+DEF_FUNCTION_TYPE (V4DF, V4SF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V8HF, V8SF, V8HF, UQI, INT)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index e1979e757b0b53c5dae34630bfa4788788501c5c..2a6c46f17a0029858309a8fdca650fc26fc1028f 100644
@@ -3340,6 +3340,10 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv8sf2
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask_round, "__builtin_ia32_vcvtph2qq256_mask_round", IX86_BUILTIN_VCVTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask_round, "__builtin_ia32_vcvtph2udq256_mask_round", IX86_BUILTIN_VCVTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask_round, "__builtin_ia32_vcvtph2uqq256_mask_round", IX86_BUILTIN_VCVTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uw_v16hi_mask_round, "__builtin_ia32_vcvtph2uw256_mask_round", IX86_BUILTIN_VCVTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask_round, "__builtin_ia32_vcvtph2w256_mask_round", IX86_BUILTIN_VCVTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtps2pd256_mask_round, "__builtin_ia32_vcvtps2pd256_mask_round", IX86_BUILTIN_VCVTPS2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4SF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtps2ph_v8sf_mask_round, "__builtin_ia32_vcvtps2phx256_mask_round", IX86_BUILTIN_VCVTPS2PHX256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SF_V8HF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index b62eb336e19e8a3bb497aa652994c80936b31792..10318a76fd13e53a719bf2cf947ccfb9049bcd9c 100644
@@ -12399,11 +12399,13 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V16SI_FTYPE_V16SF_V16SI_HI_INT:
     case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
     case V16HF_FTYPE_V16SI_V16HF_UHI_INT:
+    case V16HI_FTYPE_V16HF_V16HI_UHI_INT:
     case V8DF_FTYPE_V8SF_V8DF_QI_INT:
     case V16SF_FTYPE_V16HI_V16SF_HI_INT:
     case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
     case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
     case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
+    case V4DF_FTYPE_V4SF_V4DF_UQI_INT:
     case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
     case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
     case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
@@ -12413,6 +12415,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V4SF_FTYPE_V4SF_V4SF_V4SF_INT:
     case V8HF_FTYPE_V8DI_V8HF_UQI_INT:
     case V8HF_FTYPE_V8DF_V8HF_UQI_INT:
+    case V8HF_FTYPE_V8SF_V8HF_UQI_INT:
     case V8HF_FTYPE_V8SI_V8HF_UQI_INT:
     case V8HF_FTYPE_V4DF_V8HF_UQI_INT:
     case V16HF_FTYPE_V16SF_V16HF_UHI_INT:
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index f1c0d08a90e12a4516dd4d459bd4b7d47b53c4bf..cf27af27a5a80fa124ec1d1a19a590ae40c0a254 100644
   [(set (match_operand:<ssePHmode> 0 "register_operand" "=v")
        (float_truncate:<ssePHmode>
          (match_operand:VF48H_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")))]
-  "TARGET_AVX512FP16"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
   "vcvt<castmode>2ph<ph2pssuffix><round_qq2phsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index 716a47f6cf4ab04de11effd797199fc13c38ebf7..7bb25d19eb52c4f2e41084a573329a6d570906bc 100644
                                                                       || <MODE>mode == V4DFmode
                                                                       || <MODE>mode == V4DImode
                                                                       || <MODE>mode == V8SImode
+                                                                      || <MODE>mode == V16HImode
                                                                       || <MODE>mode == V16HFmode)))")
 (define_subst_attr "round_applied" "round" "false" "true")
 
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index bc8a72559bb367f83e2a23baae8fe1cab615b4bb..5eaad5c5250c95e5f3a2573aad2218bba8b2f39e 100644
 #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #include <wmmintrin.h>
 #include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
index 9f7ada455dfc0eabddb2383d35ce5c5bae7b8352..c35c667569a68104f063d34c22bdacd6b5ce58a5 100644
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2phxy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
 
 #include <immintrin.h>
 
@@ -182,3 +194,27 @@ avx10_2_test_7 (void)
   x = _mm256_mask_cvtx_roundph_ps (x, m8, hxh, 8);
   x = _mm256_maskz_cvtx_roundph_ps (m8, hxh, 8);
 }
+
+void extern
+avx10_2_test_8 (void)
+{
+  xi = _mm256_cvt_roundph_epu16 (xh, 4);
+  xi = _mm256_mask_cvt_roundph_epu16 (xi, m16, xh, 8);
+  xi = _mm256_maskz_cvt_roundph_epu16 (m16, xh, 11);
+
+  xi = _mm256_cvt_roundph_epi16 (xh, 4);
+  xi = _mm256_mask_cvt_roundph_epi16 (xi, m16, xh, 8);
+  xi = _mm256_maskz_cvt_roundph_epi16 (m16, xh, 11);
+}
+
+void extern
+avx10_2_test_9 (void)
+{
+  xd = _mm256_cvt_roundps_pd (hx, _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_cvt_roundps_pd (xd, m8, hx, _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_cvt_roundps_pd (m8, hx, _MM_FROUND_NO_EXC);
+
+  hxh = _mm256_cvtx_roundps_ph (x, 4);
+  hxh = _mm256_mask_cvtx_roundps_ph (hxh, m8, x, 8);
+  hxh = _mm256_maskz_cvtx_roundps_ph (m8, x, 11);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 51c5c89817c6f417e68bd364ac26cb84cc94debf..2c91c66268875178537b4587e290a601022107da 100644
 #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 344b7d7d7f1f4a5c74413ce609208d88e629afc9..8f3271048ad1c06741fd39dc9e8e0c6432785d66 100644
@@ -1035,6 +1035,10 @@ test_1 (_mm256_cvt_roundph_ps, __m256, __m128i, 8)
 test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1053,6 +1057,10 @@ test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1074,6 +1082,10 @@ test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 0b51b067821b2246537117983f74216b8e4ed3af..f51345476d351c979064149bc8071e91e484d5d8 100644
@@ -1077,6 +1077,10 @@ test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu32, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1095,6 +1099,10 @@ test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1116,6 +1124,10 @@ test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 2c74d651336a2829f2c450b584d62304c44ee3ec..93a0904ba28818ac9968ed09595a2fcd0b665f6f 100644
 #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")