git.ipfire.org Git - thirdparty/gcc.git/commitdiff
Revert "AVX10.2 ymm rounding: Support vadd{s,d,h} and vcmp{s,d,h} intrins"
author     Haochen Jiang <haochen.jiang@intel.com>
           Mon, 24 Mar 2025 06:24:33 +0000 (14:24 +0800)
committer  Haochen Jiang <haochen.jiang@intel.com>
           Mon, 24 Mar 2025 06:24:33 +0000 (14:24 +0800)
This reverts commit e22e3af1954469c40b139b7cfa8e7708592f4bfd.
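
For context, the snippet below is a minimal sketch of how the reverted ymm rounding intrinsics were used, mirroring the deleted avx10_2-rounding-1.c testcase further down in this diff. It assumes a GCC build that still ships avx10_2roundingintrin.h and is compiled with -mavx10.2-256; after this revert it no longer builds. The function names add_rn and cmp_sae are illustrative only.

    /* Minimal sketch of the reverted AVX10.2 ymm rounding intrinsics
       (illustrative; requires -mavx10.2-256 on a pre-revert GCC).  */
    #include <immintrin.h>

    __m256d
    add_rn (__m256d a, __m256d b)
    {
      /* 256-bit packed-double add with an explicit rounding mode:
         round-to-nearest with exceptions suppressed.  */
      return _mm256_add_round_pd (a, b, _MM_FROUND_TO_NEAREST_INT
                                        | _MM_FROUND_NO_EXC);
    }

    __mmask8
    cmp_sae (__m256d a, __m256d b)
    {
      /* Packed-double compare producing a mask, with SAE
         (suppress-all-exceptions) semantics.  */
      return _mm256_cmp_round_pd_mask (a, b, _CMP_FALSE_OQ,
                                       _MM_FROUND_NO_EXC);
    }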

14 files changed:
gcc/config.gcc
gcc/config/i386/avx10_2roundingintrin.h [deleted file]
gcc/config/i386/i386-builtin-types.def
gcc/config/i386/i386-builtin.def
gcc/config/i386/i386-expand.cc
gcc/config/i386/immintrin.h
gcc/config/i386/sse.md
gcc/config/i386/subst.md
gcc/testsuite/gcc.target/i386/avx-1.c
gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c [deleted file]
gcc/testsuite/gcc.target/i386/sse-13.c
gcc/testsuite/gcc.target/i386/sse-14.c
gcc/testsuite/gcc.target/i386/sse-22.c
gcc/testsuite/gcc.target/i386/sse-23.c

index a518e976b82e8feaecf5c31a6293e04a0519f6cd..f7f2002a45f33fe39f8ea4a47273bc7eb4db7d41 100644 (file)
@@ -450,8 +450,7 @@ i[34567]86-*-* | x86_64-*-*)
                       avxvnniint8intrin.h avxneconvertintrin.h
                       cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
                       raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
-                      sm3intrin.h sha512intrin.h sm4intrin.h
-                      usermsrintrin.h avx10_2roundingintrin.h
+                      sm3intrin.h sha512intrin.h sm4intrin.h usermsrintrin.h
                       avx10_2mediaintrin.h avx10_2-512mediaintrin.h
                       avx10_2convertintrin.h avx10_2-512convertintrin.h
                       avx10_2bf16intrin.h avx10_2-512bf16intrin.h
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
deleted file mode 100644 (file)
index 9d6a497..0000000
+++ /dev/null
@@ -1,337 +0,0 @@
-/* Copyright (C) 2024-2025 Free Software Foundation, Inc.
-
-   This file is part of GCC.
-
-   GCC is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 3, or (at your option)
-   any later version.
-
-   GCC is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   Under Section 7 of GPL version 3, you are granted additional
-   permissions described in the GCC Runtime Library Exception, version
-   3.1, as published by the Free Software Foundation.
-
-   You should have received a copy of the GNU General Public License and
-   a copy of the GCC Runtime Library Exception along with this program;
-   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _IMMINTRIN_H_INCLUDED
-#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-
-#ifndef __AVX10_2_256__
-#pragma GCC push_options
-#pragma GCC target("avx10.2-256")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
-
-#ifdef  __OPTIMIZE__
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
-{
-  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
-                                                      (__v4df) __B,
-                                                      (__v4df)
-                                                      _mm256_undefined_pd (),
-                                                      (__mmask8) -1,
-                                                      __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
-                         __m256d __B, const int __R)
-{
-  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
-                                                      (__v4df) __B,
-                                                      (__v4df) __W,
-                                                      (__mmask8) __U,
-                                                      __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
-                          const int __R)
-{
-  return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
-                                                      (__v4df) __B,
-                                                      (__v4df)
-                                                      _mm256_setzero_pd (),
-                                                      (__mmask8) __U,
-                                                      __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
-{
-  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
-                                                      (__v16hf) __B,
-                                                      (__v16hf)
-                                                      _mm256_undefined_ph (),
-                                                      (__mmask16) -1,
-                                                      __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
-                         __m256h __B, const int __R)
-{
-  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
-                                                      (__v16hf) __B,
-                                                      (__v16hf) __W,
-                                                      (__mmask16) __U,
-                                                      __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
-                          const int __R)
-{
-  return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
-                                                      (__v16hf) __B,
-                                                      (__v16hf)
-                                                      _mm256_setzero_ph (),
-                                                      (__mmask16) __U,
-                                                      __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
-{
-  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
-                                                     (__v8sf) __B,
-                                                     (__v8sf)
-                                                     _mm256_undefined_ps (),
-                                                     (__mmask8) -1,
-                                                     __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
-                         const int __R)
-{
-  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
-                                                     (__v8sf) __B,
-                                                     (__v8sf) __W,
-                                                     (__mmask8) __U,
-                                                     __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
-                          const int __R)
-{
-  return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
-                                                     (__v8sf) __B,
-                                                     (__v8sf)
-                                                     _mm256_setzero_ps (),
-                                                     (__mmask8) __U,
-                                                     __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
-                         const int __R)
-{
-  return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
-                                                       (__v4df) __B,
-                                                       __C,
-                                                       (__mmask8) -1,
-                                                       __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
-                              const int __C, const int __R)
-{
-  return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
-                                                       (__v4df) __B,
-                                                       __C,
-                                                       (__mmask8) __U,
-                                                       __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
-                         const int __R)
-{
-  return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
-                                                        (__v16hf) __B,
-                                                        __C,
-                                                        (__mmask16) -1,
-                                                        __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
-                              const int __C, const int __R)
-{
-  return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
-                                                        (__v16hf) __B,
-                                                        __C,
-                                                        (__mmask16) __U,
-                                                        __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
-{
-  return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
-                                                       (__v8sf) __B,
-                                                       __C,
-                                                       (__mmask8) -1,
-                                                       __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
-                              const int __C, const int __R)
-{
-  return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
-                                                       (__v8sf) __B,
-                                                       __C,
-                                                       (__mmask8) __U,
-                                                       __R);
-}
-#else
-#define _mm256_add_round_pd(A, B, R) \
-  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
-                                                (__v4df) (B), \
-                                                (__v4df) \
-                                                (_mm256_undefined_pd ()), \
-                                                (__mmask8) (-1), \
-                                                (R)))
-
-#define _mm256_mask_add_round_pd(W, U, A, B, R) \
-  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
-                                                (__v4df) (B), \
-                                                (__v4df) (W), \
-                                                (__mmask8) (U), \
-                                                (R)))
-
-#define _mm256_maskz_add_round_pd(U, A, B, R) \
-  ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
-                                                (__v4df) (B), \
-                                                (__v4df) \
-                                                (_mm256_setzero_pd ()), \
-                                                (__mmask8) (U), \
-                                                (R)))
-
-#define _mm256_add_round_ph(A, B, R) \
-  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
-                                                (__v16hf) (B), \
-                                                (__v16hf) \
-                                                (_mm256_undefined_ph ()), \
-                                                (__mmask16) (-1), \
-                                                (R)))
-
-#define _mm256_mask_add_round_ph(W, U, A, B, R) \
-  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
-                                                (__v16hf) (B), \
-                                                (__v16hf) (W), \
-                                                (__mmask16) (U), \
-                                                (R)))
-
-#define _mm256_maskz_add_round_ph(U, A, B, R) \
-  ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
-                                                (__v16hf) (B), \
-                                                (__v16hf) \
-                                                (_mm256_setzero_ph ()), \
-                                                (__mmask16) (U), \
-                                                (R)))
-
-#define _mm256_add_round_ps(A, B, R) \
-  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
-                                               (__v8sf) (B), \
-                                               (__v8sf) \
-                                               (_mm256_undefined_ps ()), \
-                                               (__mmask8) (-1), \
-                                               (R)))
-
-#define _mm256_mask_add_round_ps(W, U, A, B, R) \
-  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
-                                               (__v8sf) (B), \
-                                               (__v8sf) (W), \
-                                               (__mmask8) (U), \
-                                               (R)))
-
-#define _mm256_maskz_add_round_ps(U, A, B, R)\
-  ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
-                                               (__v8sf) (B), \
-                                               (__v8sf) \
-                                               (_mm256_setzero_ps ()), \
-                                               (__mmask8) (U), \
-                                               (R)))
-
-#define _mm256_cmp_round_pd_mask(A, B, C, R) \
-  ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
-                                                 (__v4df) (B), \
-                                                 (C), \
-                                                 (__mmask8) (-1), \
-                                                 (R)))
-
-#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
-  ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
-                                                 (__v4df) (B), \
-                                                 (C), \
-                                                 (__mmask8) (U), \
-                                                 (R)))
-
-#define _mm256_cmp_round_ph_mask(A, B, C, R) \
-  ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
-                                                  (__v16hf) (B), \
-                                                  (C), \
-                                                  (__mmask16) (-1), \
-                                                  (R)))
-
-#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
-  ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
-                                                  (__v16hf) (B), \
-                                                  (C), \
-                                                  (__mmask16) (U), \
-                                                  (R)))
-
-#define _mm256_cmp_round_ps_mask(A, B, C, R) \
-  ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
-                                                 (__v8sf) (B), \
-                                                 (C), \
-                                                 (__mmask8) (-1), \
-                                                 (R)))
-
-#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
-  ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
-                                                 (__v8sf) (B), \
-                                                 (C), \
-                                                 (__mmask8) (U), \
-                                                 (R)))
-#endif
-
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
-#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
-
-#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
index 1974a35c9ae92211b158e3ee4250022b92309d4c..64bde021d11715316281eb21dc8a1360f5f2b074 100644 (file)
@@ -1416,12 +1416,6 @@ DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
 DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
 
 # AVX10.2 builtins
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
-DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
-DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
 DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI, INT)
 DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI)
 DEF_FUNCTION_TYPE (V16HF, V8SF, V8SF, V16HF, UHI)
index cce2e9568aff94145aba7ddb802b90f8c3e25997..712083b743534253abf0b86d3f2a5657cf693a6e 100644 (file)
@@ -3660,12 +3660,6 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
 
 /* AVX10.2.  */
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvt2ps2phx_v32hf_mask_round, "__builtin_ia32_vcvt2ps2phx512_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask_round, "__builtin_ia32_cvtph2ibs512_mask_round", IX86_BUILTIN_CVTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask_round, "__builtin_ia32_cvtph2iubs512_mask_round", IX86_BUILTIN_CVTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
index e0e357ce53bd1bfdf4c7d5dc6c14f14d217aa470..bceffa05c966bffe4b10bc861a286124507c8435 100644 (file)
@@ -12766,14 +12766,11 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case INT_FTYPE_V4SF_V4SF_INT_INT:
     case INT_FTYPE_V2DF_V2DF_INT_INT:
       return ix86_expand_sse_comi_round (d, exp, target, true);
-    case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
     case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
     case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
     case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
     case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
-    case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
     case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
-    case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
     case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
     case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
     case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12798,11 +12795,8 @@ ix86_expand_round_builtin (const struct builtin_description *d,
       nargs = 5;
       break;
     case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
-    case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
     case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
     case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
-    case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
-    case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
     case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
     case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
     case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
index 6907a2c0b3a1424d256256a23f8cea707a648789..c30a4e036d6507512d31861ab227d1772c44966b 100644 (file)
 
 #include <amxfp16intrin.h>
 
-#include <avx10_2roundingintrin.h>
-
 #include <avx10_2mediaintrin.h>
 
 #include <avx10_2-512mediaintrin.h>
index 4dbe60e3cf0c18dd8a95c87d8bd126d2e47494a3..0ded0d5f9e5edef5e678702e186b81185be7af0d 100644 (file)
        (plusminus:VF_BHSD
          (match_operand:VF_BHSD 1 "<round_nimm_predicate>")
          (match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
 
 (define_insn "*<insn><mode>3<mask_name><round_name>"
          (match_operand:VFH 1 "<bcst_round_nimm_predicate>" "<comm>0,v")
          (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
   "TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
-   && <mask_mode512bit_condition> && <round_mode_condition>"
+   && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    <plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
    v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
        (mult:VF_BHSD
          (match_operand:VF_BHSD 1 "<round_nimm_predicate>")
          (match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
 
 (define_insn "*mul<mode>3<mask_name><round_name>"
          (match_operand:VFH 1 "<bcst_round_nimm_predicate>" "%0,v")
          (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
   "TARGET_SSE && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
-   && <mask_mode512bit_condition> && <round_mode_condition>"
+   && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    mul<ssemodesuffix>\t{%2, %0|%0, %2}
    vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
        (div:VFH
          (match_operand:VFH 1 "register_operand" "0,v")
          (match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    div<ssemodesuffix>\t{%2, %0|%0, %2}
    vdiv<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
 (define_insn "<sse>_sqrt<mode>2<mask_name><round_name>"
   [(set (match_operand:VFH 0 "register_operand" "=x,v")
        (sqrt:VFH (match_operand:VFH 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    sqrt<ssemodesuffix>\t{%1, %0|%0, %1}
    vsqrt<ssemodesuffix>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
          (match_operand:VFH 1 "<round_saeonly_nimm_predicate>")
          (match_operand:VFH 2 "<round_saeonly_nimm_predicate>")))]
   "TARGET_SSE && <mask_mode512bit_condition>
-   && <round_saeonly_mode_condition>"
+   && <round_saeonly_mode512bit_condition>"
 {
   if (!flag_finite_math_only || flag_signed_zeros)
     {
   "TARGET_SSE
    && !(MEM_P (operands[1]) && MEM_P (operands[2]))
    && <mask_mode512bit_condition>
-   && <round_saeonly_mode_condition>"
+   && <round_saeonly_mode512bit_condition>"
   "@
    <maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
    v<maxmin_float><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
          IEEE_MAXMIN))]
   "TARGET_SSE
    && <mask_mode512bit_condition>
-   && <round_saeonly_mode_condition>"
+   && <round_saeonly_mode512bit_condition>"
   "@
    <ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
    v<ieee_maxmin><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
           (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
           (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
          UNSPEC_PCMP))]
-  "TARGET_AVX512F && <round_saeonly_mode_condition>"
+  "TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
   "v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
   [(set_attr "type" "ssecmp")
    (set_attr "length_immediate" "1")
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fmadd_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
          (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v")
          (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
          (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
            (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
          (match_dup 1)
          (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
   "@
    vfmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fmsub_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
          (match_operand:VFH_SF_AVX512VL   2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
          (neg:VFH_SF_AVX512VL
            (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
              (match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0")))
          (match_dup 3)
          (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
   "vfmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fnmadd_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
            (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v"))
          (match_operand:VFH_SF_AVX512VL   2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
          (match_operand:VFH_SF_AVX512VL   3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfnmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfnmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
            (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
          (match_dup 1)
          (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
   "@
    vfnmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfnmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
            (match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0"))
          (match_dup 3)
          (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
   "vfnmadd231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
    (match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskmode> 4 "register_operand")]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fnmsub_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
          (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
          (neg:VFH_SF_AVX512VL
            (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfnmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfnmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
              (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>")))
          (match_dup 1)
          (match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
-  "TARGET_AVX512F && <round_mode_condition>"
+  "TARGET_AVX512F && <round_mode512bit_condition>"
   "@
    vfnmsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
    vfnmsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
           (match_operand:VFH_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>,v,<round_constraint>")
           (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0")]
          UNSPEC_FMADDSUB))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfmaddsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmaddsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
           (neg:VFH_AVX512VL
             (match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0"))]
          UNSPEC_FMADDSUB))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    vfmsubadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
    vfmsubadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx op0, op1, dest;
   if (<round_embedded_complex>)
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fmaddc_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx op0, op1, dest;
   if (<round_embedded_complex>)
    (match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
    (match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
    (match_operand:<avx512fmaskcmode> 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   emit_insn (gen_fma_fcmaddc_<mode>_maskz_1<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
           (match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")
           (match_operand:VHF_AVX512VL 3 "<round_nimm_predicate>" "0")]
           UNSPEC_COMPLEX_F_C_MA))]
-  "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode512bit_condition>"
   "v<complexopname><ssemodesuffix>\t{<round_sdc_mask_op4>%2, %1, %0<sdc_mask_op4>|%0<sdc_mask_op4>, %1, %2<round_sdc_mask_op4>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
          (unspec:<avx512fmaskmode>
            [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
            UNSPEC_COMPLEX_MASK)))]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
   "v<complexopname><ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
   [(set_attr "type" "ssemuladd")
    (set_attr "prefix" "evex")
            [(match_operand:VHF_AVX512VL 1 "<round_nimm_predicate>" "<int_comm>v")
             (match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")]
             UNSPEC_COMPLEX_F_C_MUL))]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   if (TARGET_DEST_FALSE_DEP_FOR_GLC
       && <maskc_dest_false_dep_for_glc_cond>)
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx op0, op1, dest;
 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
     operands[0], operands[1], operands[2], operands[3],
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx op0, op1, dest;
 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx dest, op0, op1;
 
    (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
    (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
    (match_operand:QI 4 "register_operand")]
-  "TARGET_AVX512FP16 && <round_mode_condition>"
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
 {
   rtx dest, op0, op1;
 
   [(set (match_operand:VF1 0 "register_operand" "=x,v")
        (float:VF1
          (match_operand:<sseintvecmode> 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
-  "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode512bit_condition>"
   "@
    cvtdq2ps\t{%1, %0|%0, %1}
    vcvtdq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
        (unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
                     UNSPEC_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode_condition>"
+  "TARGET_AVX512DQ && <round_mode512bit_condition>"
   "vcvtps2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
        (unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
                     UNSPEC_UNSIGNED_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode_condition>"
+  "TARGET_AVX512DQ && <round_mode512bit_condition>"
   "vcvtps2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
   [(set (match_operand:<ssePSmode2> 0 "register_operand" "=v")
         (any_float:<ssePSmode2>
           (match_operand:VI8_256_512 1 "nonimmediate_operand" "<round_constraint>")))]
-  "TARGET_AVX512DQ && <round_mode_condition>"
+  "TARGET_AVX512DQ && <round_mode512bit_condition>"
   "vcvt<floatsuffix>qq2ps<qq2pssuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_VCVTT_U))]
-  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
   "vcvttpd2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
   [(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
        (any_fix:<sseintvecmode>
          (match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
   "vcvttpd2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VF2_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
          UNSPEC_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode_condition>"
+  "TARGET_AVX512DQ && <round_mode512bit_condition>"
   "vcvtpd2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "<round_constraint>")]
          UNSPEC_UNSIGNED_FIX_NOTRUNC))]
-  "TARGET_AVX512DQ && <round_mode_condition>"
+  "TARGET_AVX512DQ && <round_mode512bit_condition>"
   "vcvtpd2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
        (unspec:VI8_256_512
          [(match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_VCVTT_U))]
-  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
   "vcvttps2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
   [(set (match_operand:VI8_256_512 0 "register_operand" "=v")
        (any_fix:VI8_256_512
          (match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
   "vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
   [(set (match_operand:VF2_512_256 0 "register_operand" "=v")
        (float_extend:VF2_512_256
          (match_operand:<sf2dfmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode_condition>"
+  "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
   "vcvtps2pd\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "maybe_vex")
           (match_operand:VF_AVX512VL 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
           (match_operand:SI 3 "const_0_to_15_operand")]
          UNSPEC_RANGE))]
-  "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
+  "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
 {
   if (TARGET_DEST_FALSE_DEP_FOR_GLC
       && <mask4_dest_false_dep_for_glc_cond>
            (match_operand:<ssePSmode> 2 "<round_nimm_predicate>" "<round_constraint>"))
          (float_truncate:<ssehalfvecmode>
            (match_operand:<ssePSmode> 1 "register_operand" "v"))))]
-  "TARGET_AVX10_2_256 && <round_mode_condition>"
+  "TARGET_AVX10_2_256 && <round_mode512bit_condition>"
   "vcvt2ps2phx\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}")
 
 (define_mode_attr ssebvecmode
        (unspec:<sseintvecmode>
          [(match_operand:VHF_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
          UNSPEC_CVT_PH_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_mode512bit_condition>"
  "vcvtph2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VHF_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_CVTT_PH_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_saeonly_mode512bit_condition>"
  "vcvttph2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VF1_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
          UNSPEC_CVT_PS_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_mode512bit_condition>"
  "vcvtps2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:<sseintvecmode>
          [(match_operand:VF1_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_CVTT_PS_IBS_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_saeonly_mode512bit_condition>"
  "vcvttps2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:<VEC_GATHER_IDXSI>
          [(match_operand:VF1_VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_saeonly_mode512bit_condition>"
  "vcvtt<castmode>2<sat_cvt_sign_prefix>dqs<pd2dqssuff>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:<VEC_GATHER_IDXDI>
          [(match_operand:VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_saeonly_mode512bit_condition>"
  "vcvttpd2<sat_cvt_sign_prefix>qqs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
        (unspec:VI8_AVX10_2
          [(match_operand:<vpckfloat_temp_mode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
          UNSPEC_SAT_CVT_DS_SIGN_ITER))]
- "TARGET_AVX10_2_256 && <round_saeonly_mode_condition>"
+ "TARGET_AVX10_2_256 && <round_saeonly_mode512bit_condition>"
  "vcvttps2<sat_cvt_sign_prefix>qqs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
  [(set_attr "type" "ssecvt")
   (set_attr "prefix" "evex")
index 88d16869fccf3682e18c18df0fadf35af2d8c855..c30b274822e949defc16fd28702b34afa6a79676 100644 (file)
 (define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand")
 (define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand")
 (define_subst_attr "round_prefix" "round" "vex" "evex")
-(define_subst_attr "round_mode_condition" "round" "1" "((<MODE>mode == V16SFmode
-                                                              || <MODE>mode == V8DFmode
-                                                              || <MODE>mode == V8DImode
-                                                              || <MODE>mode == V16SImode
-                                                              || <MODE>mode == V32HFmode)
-                                                              || (TARGET_AVX10_2_256
-                                                                  && (<MODE>mode == V8SFmode
-                                                                      || <MODE>mode == V4DFmode
-                                                                      || <MODE>mode == V4DImode
-                                                                      || <MODE>mode == V8SImode
-                                                                      || <MODE>mode == V16HFmode)))")
+(define_subst_attr "round_mode512bit_condition" "round" "1" "(<MODE>mode == V16SFmode
+                                                             || <MODE>mode == V8DFmode
+                                                             || <MODE>mode == V8DImode
+                                                             || <MODE>mode == V16SImode
+                                                             || <MODE>mode == V32HFmode)")
 
 (define_subst_attr "round_modev4sf_condition" "round" "1" "(<MODE>mode == V4SFmode)")
 (define_subst_attr "round_codefor" "round" "*" "")
 (define_subst_attr "round_saeonly_constraint2" "round_saeonly" "m" "v")
 (define_subst_attr "round_saeonly_nimm_predicate" "round_saeonly" "vector_operand" "register_operand")
 (define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimmediate_operand" "register_operand")
-(define_subst_attr "round_saeonly_mode_condition" "round_saeonly" "1" "((<MODE>mode == V16SFmode
-                                                                              || <MODE>mode == V8DFmode
-                                                                              || <MODE>mode == V8DImode
-                                                                              || <MODE>mode == V16SImode
-                                                                              || <MODE>mode == V32HFmode)
-                                                                              || (TARGET_AVX10_2_256
-                                                                                  && (<MODE>mode == V8SFmode
-                                                                                      || <MODE>mode == V4DFmode
-                                                                                      || <MODE>mode == V4DImode
-                                                                                      || <MODE>mode == V8SImode
-                                                                                      || <MODE>mode == V16HFmode)))")
+(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode
+                                                                             || <MODE>mode == V8DFmode
+                                                                             || <MODE>mode == V8DImode
+                                                                             || <MODE>mode == V16SImode
+                                                                             || <MODE>mode == V32HFmode)")
 
 
 (define_subst "round_saeonly"
index b4f290d8230748febd0e9ac7719852d5dff6b964..33ef076a47a6b5e8f49969dbb210b8fdb7d45dec 100644 (file)
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
-/* avx10_2roundingintrin.h */
-#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
-
 /* avx10_2-512mediaintrin.h */
 #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1)
 #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E)
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
deleted file mode 100644 (file)
index f1f143c..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2-256" } */
-/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\\\$3\[^\n\r]*\{sae\}\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\[^\{\n\]*\\\$4\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1  }  } */
-/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1  }  } */
-
-#include <immintrin.h>
-
-volatile __m256 x;
-volatile __m256d xd;
-volatile __m256h xh;
-volatile __mmask8 m8;
-volatile __mmask16 m16;
-volatile __mmask32 m32;
-
-void extern
-avx10_2_test_1 (void)
-{
-  xd = _mm256_add_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
-  xd = _mm256_mask_add_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
-  xd = _mm256_maskz_add_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
-
-  xh = _mm256_add_round_ph (xh, xh, 8);
-  xh = _mm256_mask_add_round_ph (xh, m32, xh, xh, 8);
-  xh = _mm256_maskz_add_round_ph (m32, xh, xh, 11);
-
-  x = _mm256_add_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
-  x = _mm256_mask_add_round_ps (x, m16, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
-  x = _mm256_maskz_add_round_ps (m16, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
-
-  m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-  m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-
-  m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8);
-  m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4);
-
-  m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-  m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-}
-
-void extern
-avx10_2_test_2 (void)
-{
-  m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-  m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-
-  m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8);
-  m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4);
-
-  m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-  m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
-}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 74d9664d8ed4c57bfe5f2d4a28fd424cf4a26138..84a825de38302f5284c04c267569f68433846a2d 100644 (file)
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
-/* avx10_2roundingintrin.h */
-#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
-
 /* avx10_2-512mediaintrin.h */
 #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1)
 #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E)
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 1a285653c36a32a1da0d063ff519e41a787484da..3b1825037b09212784a0a611051958a7c9b3b0d2 100644 (file)
@@ -1020,23 +1020,6 @@ test_2 (_mm512_gf2p8affine_epi64_epi8, __m512i, __m512i, __m512i, 1)
 /* sm3intrin.h */
 test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
 
-/* avx10_2roundingintrin.h */
-test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
-test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
-test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
-test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
-test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
-test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
-test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
-test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
-test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
-test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
-test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
-test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
-test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
-test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
-test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
-
 /* avx10_2-512mediaintrin.h */
 test_2 (_mm512_mpsadbw_epu8, __m512i, __m512i, __m512i, 1)
 test_3 (_mm512_maskz_mpsadbw_epu8, __m512i, __mmask32, __m512i, __m512i, 1)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index cbfbb13d75ac0699c681f59d5bf676873b11316e..c1400366160a438ac22d6444af83b7b12a566ef1 100644 (file)
@@ -1061,23 +1061,6 @@ test_1 ( __bextri_u64, unsigned long long, unsigned long long, 1)
 /* sm3intrin.h */
 test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
 
-/* avx10_2roundingintrin.h */
-test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
-test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
-test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
-test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
-test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
-test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
-test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
-test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
-test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
-test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
-test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
-test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
-test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
-test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
-test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
-
 /* avx10_2-512mediaintrin.h */
 test_2 (_mm512_mpsadbw_epu8, __m512i, __m512i, __m512i, 1)
 test_3 (_mm512_maskz_mpsadbw_epu8, __m512i, __mmask32, __m512i, __m512i, 1)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 8b263a8195f3963d38bc7b379073786df4abed95..e38ee99e2fc3c31e0d1dc114382326ca2c5e6f12 100644 (file)
 /* sm3intrin.h */
 #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
 
-/* avx10_2roundingintrin.h */
-#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
-#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
-#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
-
 /* avx10_2-512mediaintrin.h  */
 #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1)
 #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E)