git.ipfire.org Git - thirdparty/gcc.git/commitdiff
AVX512FP16: Add vfcmaddcsh/vfmaddcsh/vfcmulcsh/vfmulcsh.
author    liuhongt <hongtao.liu@intel.com>
          Mon, 2 Mar 2020 09:58:04 +0000 (17:58 +0800)
committer liuhongt <hongtao.liu@intel.com>
          Wed, 22 Sep 2021 04:56:30 +0000 (12:56 +0800)
gcc/ChangeLog:

* config/i386/avx512fp16intrin.h (_mm_mask_fcmadd_sch):
New intrinsic.
(_mm_mask3_fcmadd_sch): Likewise.
(_mm_maskz_fcmadd_sch): Likewise.
(_mm_fcmadd_sch): Likewise.
(_mm_mask_fmadd_sch): Likewise.
(_mm_mask3_fmadd_sch): Likewise.
(_mm_maskz_fmadd_sch): Likewise.
(_mm_fmadd_sch): Likewise.
(_mm_mask_fcmadd_round_sch): Likewise.
(_mm_mask3_fcmadd_round_sch): Likewise.
(_mm_maskz_fcmadd_round_sch): Likewise.
(_mm_fcmadd_round_sch): Likewise.
(_mm_mask_fmadd_round_sch): Likewise.
(_mm_mask3_fmadd_round_sch): Likewise.
(_mm_maskz_fmadd_round_sch): Likewise.
(_mm_fmadd_round_sch): Likewise.
(_mm_fcmul_sch): Likewise.
(_mm_mask_fcmul_sch): Likewise.
(_mm_maskz_fcmul_sch): Likewise.
(_mm_fmul_sch): Likewise.
(_mm_mask_fmul_sch): Likewise.
(_mm_maskz_fmul_sch): Likewise.
(_mm_fcmul_round_sch): Likewise.
(_mm_mask_fcmul_round_sch): Likewise.
(_mm_maskz_fcmul_round_sch): Likewise.
(_mm_fmul_round_sch): Likewise.
(_mm_mask_fmul_round_sch): Likewise.
(_mm_maskz_fmul_round_sch): Likewise.
* config/i386/i386-builtin.def: Add corresponding new builtins.
* config/i386/sse.md
(avx512fp16_fmaddcsh_v8hf_maskz<round_expand_name>): New expander.
(avx512fp16_fcmaddcsh_v8hf_maskz<round_expand_name>): Ditto.
(avx512fp16_fma_<complexopname>sh_v8hf<mask_scalarcz_name><round_scalarcz_name>):
New define_insn.
(avx512fp16_<complexopname>sh_v8hf_mask<round_name>): Ditto.
(avx512fp16_<complexopname>sh_v8hf<mask_scalarc_name><round_scalarcz_name>):
Ditto.
* config/i386/subst.md (mask_scalarcz_name): New.
(mask_scalarc_name): Ditto.
(mask_scalarc_operand3): Ditto.
(mask_scalarcz_operand4): Ditto.
(round_scalarcz_name): Ditto.
(round_scalarc_mask_operand3): Ditto.
(round_scalarcz_mask_operand4): Ditto.
(round_scalarc_mask_op3): Ditto.
(round_scalarcz_mask_op4): Ditto.
(round_scalarcz_constraint): Ditto.
(round_scalarcz_nimm_predicate): Ditto.
(mask_scalarcz): Ditto.
(mask_scalarc): Ditto.
(round_scalarcz): Ditto.

gcc/testsuite/ChangeLog:

* gcc.target/i386/avx-1.c: Add test for new builtins.
* gcc.target/i386/sse-13.c: Ditto.
* gcc.target/i386/sse-23.c: Ditto.
* gcc.target/i386/sse-14.c: Add test for new intrinsics.
* gcc.target/i386/sse-22.c: Ditto.
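
The new scalar ("sch") intrinsics operate on the low 32 bits of a __m128h, interpreted as one FP16 complex value: element 0 is the real part, element 1 the imaginary part. The c-prefixed forms use the complex conjugate of the second source operand. A usage sketch (not part of the patch; semantics as documented in the Intel intrinsics guide, compiled with -mavx512fp16):

#include <immintrin.h>

/* Chain the new scalar complex FP16 operations; each __m128h carries
   one complex value in its low two _Float16 elements.  */
__m128h
complex_sch_demo (__m128h a, __m128h b, __m128h c, __mmask8 k)
{
  __m128h t = _mm_fmadd_sch (a, b, c);    /* t = a * b + c        */
  t = _mm_fcmadd_sch (a, b, t);           /* t = a * conj(b) + t  */
  t = _mm_fmul_sch (t, b);                /* t = t * b            */
  /* Zero-masking: if bit 0 of k is clear, the low complex lane of
     the result reads as zero.  */
  return _mm_maskz_fcmul_sch (k, t, b);   /* t * conj(b) or 0     */
}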

gcc/config/i386/avx512fp16intrin.h
gcc/config/i386/i386-builtin.def
gcc/config/i386/sse.md
gcc/config/i386/subst.md
gcc/testsuite/gcc.target/i386/avx-1.c
gcc/testsuite/gcc.target/i386/sse-13.c
gcc/testsuite/gcc.target/i386/sse-14.c
gcc/testsuite/gcc.target/i386/sse-22.c
gcc/testsuite/gcc.target/i386/sse-23.c

index e402a59ef57b495d90c4238c3ef7f4489c6927f2..e01cff674d7561a3732254772ec70906967de513 100644 (file)
@@ -6511,6 +6511,481 @@ _mm512_maskz_fmul_round_pch (__mmask16 __A, __m512h __B,
 
 #endif /* __OPTIMIZE__ */
 
+/* Intrinsics vf[,c]maddcsh.  */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+  return (__m128h) __builtin_ia32_movaps128_mask (
+    (__v4sf)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D, __B,
+                                         _MM_FROUND_CUR_DIRECTION),
+    (__v4sf) __A, __B);
+#else
+  return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+    (__v4sf)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D, __B,
+                                         _MM_FROUND_CUR_DIRECTION),
+    (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+  return (__m128h) _mm_move_ss ((__m128) __C,
+    (__m128)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __B,
+                                         (__v8hf) __C, __D,
+                                         _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+                                          (__v8hf) __C,
+                                          (__v8hf) __D,
+                                          __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+                                    (__v8hf) __B,
+                                    (__v8hf) __C,
+                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+  return (__m128h) __builtin_ia32_movaps128_mask (
+    (__v4sf)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __C,
+                                        (__v8hf) __D, __B,
+                                        _MM_FROUND_CUR_DIRECTION),
+    (__v4sf) __A, __B);
+#else
+  return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+    (__v4sf)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __C,
+                                        (__v8hf) __D, __B,
+                                        _MM_FROUND_CUR_DIRECTION),
+    (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+  return (__m128h) _mm_move_ss ((__m128) __C,
+    (__m128)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __B,
+                                        (__v8hf) __C, __D,
+                                        _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D,
+                                         __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+  return (__m128h)
+    __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+                                   (__v8hf) __B,
+                                   (__v8hf) __C,
+                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+                          __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+  return (__m128h) __builtin_ia32_movaps128_mask (
+    (__v4sf)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D,
+                                         __B, __E),
+    (__v4sf) __A, __B);
+#else
+  return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+    (__v4sf)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D,
+                                         __B, __E),
+    (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+                           __mmask8 __D, const int __E)
+{
+  return (__m128h) _mm_move_ss ((__m128) __C,
+    (__m128)
+    __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A,
+                                         (__v8hf) __B,
+                                         (__v8hf) __C,
+                                         __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+                           __m128h __D, const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B,
+                                          (__v8hf) __C,
+                                          (__v8hf) __D,
+                                          __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A,
+                                    (__v8hf) __B,
+                                    (__v8hf) __C,
+                                    __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+                         __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+  return (__m128h) __builtin_ia32_movaps128_mask (
+    (__v4sf)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __C,
+                                        (__v8hf) __D,
+                                        __B, __E),
+    (__v4sf) __A, __B);
+#else
+  return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+    (__v4sf)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __C,
+                                        (__v8hf) __D,
+                                        __B, __E),
+    (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+                          __mmask8 __D, const int __E)
+{
+  return (__m128h) _mm_move_ss ((__m128) __C,
+    (__m128)
+    __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A,
+                                        (__v8hf) __B,
+                                        (__v8hf) __C,
+                                        __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+                          __m128h __D, const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B,
+                                         (__v8hf) __C,
+                                         (__v8hf) __D,
+                                         __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfmaddcsh_round ((__v8hf) __A,
+                                   (__v8hf) __B,
+                                   (__v8hf) __C,
+                                   __D);
+}
+#else
+#ifdef __AVX512VL__
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E)                       \
+    ((__m128h) __builtin_ia32_movaps128_mask (                         \
+     (__v4sf)                                                          \
+     __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A),               \
+                                          (__v8hf) (C),                \
+                                          (__v8hf) (D),                \
+                                          (B), (E)),                   \
+                                          (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E)                       \
+  ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A),                    \
+   (__v4sf)                                                            \
+   __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A),         \
+                                        (__v8hf) (C),          \
+                                        (__v8hf) (D),          \
+                                        (B), (E)),             \
+    (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E)                      \
+  ((__m128h) _mm_move_ss ((__m128) (C),                                        \
+   (__m128)                                                            \
+   __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A),         \
+                                        (__v8hf) (B),          \
+                                        (__v8hf) (C),          \
+                                        (D), (E))))
+
+#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E)              \
+  __builtin_ia32_vfcmaddcsh_maskz_round ((B), (C), (D), (A), (E))
+
+#define _mm_fcmadd_round_sch(A, B, C, D)               \
+  __builtin_ia32_vfcmaddcsh_round ((A), (B), (C), (D))
+
+#ifdef __AVX512VL__
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E)                                \
+    ((__m128h) __builtin_ia32_movaps128_mask (                         \
+     (__v4sf)                                                          \
+     __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A),                \
+                                         (__v8hf) (C),         \
+                                         (__v8hf) (D),         \
+                                         (B), (E)),            \
+                                         (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E)                                \
+  ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A),                    \
+   (__v4sf)                                                            \
+   __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A),          \
+                                       (__v8hf) (C),           \
+                                       (__v8hf) (D),           \
+                                       (B), (E)),              \
+    (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fmadd_round_sch(A, B, C, D, E)                       \
+  ((__m128h) _mm_move_ss ((__m128) (C),                                        \
+   (__m128)                                                            \
+   __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A),          \
+                                       (__v8hf) (B),           \
+                                       (__v8hf) (C),           \
+                                       (D), (E))))
+
+#define _mm_maskz_fmadd_round_sch(A, B, C, D, E)               \
+  __builtin_ia32_vfmaddcsh_maskz_round ((B), (C), (D), (A), (E))
+
+#define _mm_fmadd_round_sch(A, B, C, D)                \
+  __builtin_ia32_vfmaddcsh_round ((A), (B), (C), (D))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vf[,c]mulcsh.  */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_sch (__m128h __A, __m128h __B)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+                                   (__v8hf) __B,
+                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+                                        (__v8hf) __D,
+                                        (__v8hf) __A,
+                                        __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+                                        (__v8hf) __C,
+                                        _mm_setzero_ph (),
+                                        __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_sch (__m128h __A, __m128h __B)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+                                  (__v8hf) __B,
+                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+                                       (__v8hf) __D,
+                                       (__v8hf) __A,
+                                       __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+                                       (__v8hf) __C,
+                                       _mm_setzero_ph (),
+                                       __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,
+                                   (__v8hf) __B,
+                                   __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+                         __m128h __D, const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,
+                                        (__v8hf) __D,
+                                        (__v8hf) __A,
+                                        __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+                          const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,
+                                        (__v8hf) __C,
+                                        _mm_setzero_ph (),
+                                        __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_round ((__v8hf) __A,
+                                  (__v8hf) __B, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+                        __m128h __D, const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,
+                                       (__v8hf) __D,
+                                       (__v8hf) __A,
+                                       __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E)
+{
+  return (__m128h)
+    __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,
+                                       (__v8hf) __C,
+                                       _mm_setzero_ph (),
+                                       __A, __E);
+}
+
+#else
+#define _mm_fcmul_round_sch(__A, __B, __D)                             \
+  (__m128h) __builtin_ia32_vfcmulcsh_round ((__v8hf) __A,              \
+                                           (__v8hf) __B, __D)
+
+#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E)              \
+  (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C,         \
+                                                (__v8hf) __D,          \
+                                                (__v8hf) __A,          \
+                                                __B, __E)
+
+#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E)                  \
+  (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B,         \
+                                                (__v8hf) __C,          \
+                                                _mm_setzero_ph (),     \
+                                                __A, __E)
+
+#define _mm_fmul_round_sch(__A, __B, __D)                              \
+  (__m128h) __builtin_ia32_vfmulcsh_round ((__v8hf) __A,               \
+                                          (__v8hf) __B, __D)
+
+#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E)               \
+  (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C,          \
+                                               (__v8hf) __D,           \
+                                               (__v8hf) __A,           \
+                                               __B, __E)
+
+#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E)                   \
+  (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B,          \
+                                               (__v8hf) __C,           \
+                                               _mm_setzero_ph (),      \
+                                               __A, __E)
+
+#endif /* __OPTIMIZE__ */
+
 #ifdef __DISABLE_AVX512FP16__
 #undef __DISABLE_AVX512FP16__
 #pragma GCC pop_options
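
Two details of the header code above deserve a note. First, when AVX512VL is unavailable, the mask variants shift mask bit 0 into the sign bit of a float ((int) __B << 31) so that __builtin_ia32_blendvps, which selects lanes by sign bit, merges the computed low lane back into __A. Second, the arithmetic itself is an ordinary complex multiply(-accumulate) on the low two _Float16 elements. A plain-C reference model (a sketch, not from the patch; it assumes the conjugate applies to the second source and ignores the hardware's intermediate-rounding details):

typedef struct { _Float16 re, im; } cfp16;

/* Model of the low-lane arithmetic behind vf[c]mulcsh / vf[c]maddcsh:
   real part in element 0, imaginary part in element 1; conj_b selects
   the conjugate ("c") forms.  Pass acc = {0, 0} for the mul forms.  */
static cfp16
cmuladd (cfp16 a, cfp16 b, cfp16 acc, int conj_b)
{
  _Float16 bim = conj_b ? (_Float16) -b.im : b.im;
  cfp16 r;
  r.re = a.re * b.re - a.im * bim + acc.re;
  r.im = a.re * bim + a.im * b.re + acc.im;
  return r;
}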
index 3d9f099df33c9bf77833561c5076da36fed7a475..302e1bc6502f3e3871f8cdbbdcd96224f03b83e0 100644 (file)
@@ -3231,6 +3231,16 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_round, "__
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_mask_round, "__builtin_ia32_vfcmulcph512_mask_round", IX86_BUILTIN_VFCMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_round, "__builtin_ia32_vfmulcph512_round", IX86_BUILTIN_VFMULCPH512_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_mask_round, "__builtin_ia32_vfmulcph512_mask_round", IX86_BUILTIN_VFMULCPH512_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fcmaddcsh_v8hf_round, "__builtin_ia32_vfcmaddcsh_round", IX86_BUILTIN_VFCMADDCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_mask_round, "__builtin_ia32_vfcmaddcsh_mask_round", IX86_BUILTIN_VFCMADDCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfcmaddcsh_maskz_round", IX86_BUILTIN_VFCMADDCSH_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fmaddcsh_v8hf_round, "__builtin_ia32_vfmaddcsh_round", IX86_BUILTIN_VFMADDCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_mask_round, "__builtin_ia32_vfmaddcsh_mask_round", IX86_BUILTIN_VFMADDCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfmaddcsh_maskz_round", IX86_BUILTIN_VFMADDCSH_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_round, "__builtin_ia32_vfcmulcsh_round", IX86_BUILTIN_VFCMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_mask_round, "__builtin_ia32_vfcmulcsh_mask_round", IX86_BUILTIN_VFCMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_round", IX86_BUILTIN_VFMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
index f3d5b408580274ba6d6cdca83a2f3ff627ff001e..b08a9d300736f1e75fc712f87ca7d002b476323b 100644 (file)
   [(set_attr "type" "ssemul")
    (set_attr "mode" "<MODE>")])
 
+(define_expand "avx512fp16_fmaddcsh_v8hf_maskz<round_expand_name>"
+  [(match_operand:V8HF 0 "register_operand")
+   (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+   (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+   (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+   (match_operand:QI 4 "register_operand")]
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+  emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
+    operands[0], operands[1], operands[2], operands[3],
+    CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+  DONE;
+})
+
+(define_expand "avx512fp16_fcmaddcsh_v8hf_maskz<round_expand_name>"
+  [(match_operand:V8HF 0 "register_operand")
+   (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+   (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+   (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+   (match_operand:QI 4 "register_operand")]
+  "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+  emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
+    operands[0], operands[1], operands[2], operands[3],
+    CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+  DONE;
+})
+
+(define_insn "avx512fp16_fma_<complexopname>sh_v8hf<mask_scalarcz_name><round_scalarcz_name>"
+  [(set (match_operand:V8HF 0 "register_operand" "=&v")
+       (vec_merge:V8HF
+         (unspec:V8HF
+           [(match_operand:V8HF 1 "<round_scalarcz_nimm_predicate>" "v")
+            (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")
+            (match_operand:V8HF 3 "<round_scalarcz_nimm_predicate>" "0")]
+            UNSPEC_COMPLEX_F_C_MA)
+         (match_dup 2)
+         (const_int 3)))]
+  "TARGET_AVX512FP16"
+  "v<complexopname>sh\t{<round_scalarcz_mask_op4>%2, %1, %0<mask_scalarcz_operand4>|%0<mask_scalarcz_operand4>, %1, %2<round_scalarcz_maskcz_mask_op4>}"
+  [(set_attr "type" "ssemuladd")
+   (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf_mask<round_name>"
+  [(set (match_operand:V8HF 0 "register_operand" "=&v")
+       (vec_merge:V8HF
+         (vec_merge:V8HF
+           (unspec:V8HF
+             [(match_operand:V8HF 1 "<round_nimm_predicate>" "v")
+              (match_operand:V8HF 2 "<round_nimm_predicate>" "<round_constraint>")
+              (match_operand:V8HF 3 "<round_nimm_predicate>" "0")]
+              UNSPEC_COMPLEX_F_C_MA)
+           (match_dup 1)
+           (unspec:QI [(match_operand:QI 4 "register_operand" "Yk")]
+             UNSPEC_COMPLEX_MASK))
+         (match_dup 2)
+         (const_int 3)))]
+  "TARGET_AVX512FP16"
+  "v<complexopname>sh\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
+  [(set_attr "type" "ssemuladd")
+   (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf<mask_scalarc_name><round_scalarcz_name>"
+  [(set (match_operand:V8HF 0 "register_operand" "=&v")
+         (vec_merge:V8HF
+           (unspec:V8HF
+             [(match_operand:V8HF 1 "nonimmediate_operand" "v")
+              (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")]
+              UNSPEC_COMPLEX_F_C_MUL)
+           (match_dup 1)
+           (const_int 3)))]
+  "TARGET_AVX512FP16"
+  "v<complexopname>sh\t{<round_scalarc_mask_op3>%2, %1, %0<mask_scalarc_operand3>|%0<mask_scalarc_operand3>, %1, %2<round_scalarc_mask_op3>}"
+  [(set_attr "type" "ssemul")
+   (set_attr "mode" "V8HF")])
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;
 ;; Parallel half-precision floating point conversion operations
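
The two _maskz expanders above reuse the masked FMA patterns with CONST0_RTX (V8HFmode) as the merge source, so masked-off lanes read as zero rather than as the old destination. At the intrinsic level that corresponds to roughly the following (a C-level sketch, not part of the patch; it only models the low complex lane):

#include <immintrin.h>

/* Zero-masking model for the scalar complex FMA: when mask bit 0 is
   clear, the low 32-bit complex lane is forced to 0.0.  */
__m128h
maskz_fcmadd_model (__mmask8 k, __m128h a, __m128h b, __m128h c)
{
  __m128h r = _mm_fcmadd_sch (a, b, c);   /* unmasked result */
  if (k & 1)
    return r;
  /* Replace the low float-sized lane (one FP16 complex value).  */
  return (__m128h) _mm_move_ss ((__m128) r, _mm_setzero_ps ());
}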
index 3e085a8e58eef69b230aa88166f1b5009ba76337..11e62c67fcc0db20d6782d269a6252e7db780b93 100644 (file)
     (match_operand:<avx512fmaskmode> 5 "register_operand")])
 
 (define_subst_attr "mask_scalar_name" "mask_scalar" "" "_mask")
+(define_subst_attr "mask_scalarcz_name" "mask_scalarcz" "" "_maskz")
+(define_subst_attr "mask_scalarc_name" "mask_scalarc" "" "_mask")
+(define_subst_attr "mask_scalarc_operand3" "mask_scalarc" "" "%{%4%}%N3")
 (define_subst_attr "mask_scalar_operand3" "mask_scalar" "" "%{%4%}%N3")
 (define_subst_attr "mask_scalar_operand4" "mask_scalar" "" "%{%5%}%N4")
+(define_subst_attr "mask_scalarcz_operand4" "mask_scalarcz" "" "%{%5%}%N4")
 
 (define_subst "mask_scalar"
   [(set (match_operand:SUBST_V 0)
          (match_dup 2)
          (const_int 1)))])
 
+(define_subst "mask_scalarcz"
+  [(set (match_operand:SUBST_CV 0)
+       (vec_merge:SUBST_CV
+         (match_operand:SUBST_CV 1)
+         (match_operand:SUBST_CV 2)
+         (const_int 3)))]
+  "TARGET_AVX512F"
+  [(set (match_dup 0)
+       (vec_merge:SUBST_CV
+         (vec_merge:SUBST_CV
+           (match_dup 1)
+           (match_operand:SUBST_CV 3 "const0_operand" "C")
+           (unspec:<avx512fmaskmode>
+             [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+             UNSPEC_COMPLEX_MASK))
+         (match_dup 2)
+         (const_int 3)))])
+
+(define_subst "mask_scalarc"
+  [(set (match_operand:SUBST_CV 0)
+       (vec_merge:SUBST_CV
+         (match_operand:SUBST_CV 1)
+         (match_operand:SUBST_CV 2)
+         (const_int 3)))]
+  "TARGET_AVX512F"
+  [(set (match_dup 0)
+       (vec_merge:SUBST_CV
+         (vec_merge:SUBST_CV
+           (match_dup 1)
+           (match_operand:SUBST_CV 3 "nonimm_or_0_operand" "0C")
+           (unspec:<avx512fmaskmode>
+             [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+             UNSPEC_COMPLEX_MASK))
+         (match_dup 2)
+         (const_int 3)))])
+
 (define_subst_attr "round_scalar_name" "round_scalar" "" "_round")
+(define_subst_attr "round_scalarcz_name" "round_scalarcz" "" "_round")
 (define_subst_attr "round_scalar_mask_operand3" "mask_scalar" "%R3" "%R5")
+(define_subst_attr "round_scalarc_mask_operand3" "mask_scalarc" "%R3" "%R5")
+(define_subst_attr "round_scalarcz_mask_operand4" "mask_scalarcz" "%R4" "%R6")
 (define_subst_attr "round_scalar_mask_op3" "round_scalar" "" "<round_scalar_mask_operand3>")
+(define_subst_attr "round_scalarc_mask_op3" "round_scalarcz" "" "<round_scalarc_mask_operand3>")
+(define_subst_attr "round_scalarcz_mask_op4" "round_scalarcz" "" "<round_scalarcz_mask_operand4>")
 (define_subst_attr "round_scalar_constraint" "round_scalar" "vm" "v")
+(define_subst_attr "round_scalarcz_constraint" "round_scalarcz" "vm" "v")
 (define_subst_attr "round_scalar_prefix" "round_scalar" "vex" "evex")
 (define_subst_attr "round_scalar_nimm_predicate" "round_scalar" "nonimmediate_operand" "register_operand")
+(define_subst_attr "round_scalarcz_nimm_predicate" "round_scalarcz" "vector_operand" "register_operand")
 
 (define_subst "round_scalar"
   [(set (match_operand:SUBST_V 0)
             (match_operand:SI 3 "const_4_or_8_to_11_operand")]
                UNSPEC_EMBEDDED_ROUNDING))])
 
+(define_subst "round_scalarcz"
+  [(set (match_operand:SUBST_V 0)
+       (vec_merge:SUBST_V
+         (match_operand:SUBST_V 1)
+         (match_operand:SUBST_V 2)
+         (const_int 3)))]
+  "TARGET_AVX512F"
+  [(set (match_dup 0)
+       (unspec:SUBST_V [
+            (vec_merge:SUBST_V
+               (match_dup 1)
+               (match_dup 2)
+               (const_int 3))
+            (match_operand:SI 3 "const_4_or_8_to_11_operand")]
+               UNSPEC_EMBEDDED_ROUNDING))])
+
 (define_subst_attr "round_saeonly_scalar_name" "round_saeonly_scalar" "" "_round")
 (define_subst_attr "round_saeonly_scalar_mask_operand3" "mask_scalar" "%r3" "%r5")
 (define_subst_attr "round_saeonly_scalar_mask_operand4" "mask_scalar" "%r4" "%r6")
index fcd71a2cbcb22c3e7c4f54b3e0dda849af2f3f63..8744aa7df557470904e7bb494003b1822cc81433 100644 (file)
 #define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
 #define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
 #define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
index 5d11813a855e6fb3c5ba072b3e5c62169b14236b..f6d54e3a815d6b0e063ec9308274bda436fc8eba 100644 (file)
 #define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
 #define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
 #define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
index f27c73fd4ccf3c8c44b7ade471498c6dd5a4d8f9..956a9d16f84964f0bb68559540a294bd8d7e1c68 100644 (file)
@@ -774,6 +774,8 @@ test_2 (_mm_cvt_roundi32_sh, __m128h, __m128h, int, 8)
 test_2 (_mm_cvt_roundu32_sh, __m128h, __m128h, unsigned, 8)
 test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
 test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
 test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
 test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
 test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -850,8 +852,12 @@ test_3 (_mm_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
 test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
 test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
 test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
 test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
 test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
 test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
 test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
 test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8)
@@ -920,8 +926,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
 test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
 test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
 test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
 test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
 test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
 test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
 test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
 test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
index ccf8c3a6c03a205a66a5efd787d1fd60cdd3e1d4..31492ef36970839f0e15ef937d4b823b7b22ba13 100644 (file)
@@ -878,6 +878,8 @@ test_2 (_mm_cvt_roundss_sh, __m128h, __m128h, __m128, 8)
 test_2 (_mm_cvt_roundsd_sh, __m128h, __m128h, __m128d, 8)
 test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
 test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
 test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
 test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
 test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -954,6 +956,10 @@ test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
 test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
 test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
 test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
 test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
 test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
 test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
@@ -1022,8 +1028,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
 test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
 test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
 test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
 test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
 test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
 test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
 test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
 test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
index a6090703c2eacf220055560b3fd872bb6dc3e868..98251269a64510b9383222db5c0bfad1055dae27 100644 (file)
 #define __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph512_mask_round(A, C, D, B, 8)
 #define __builtin_ia32_vfcmulcph512_round(A, B, C) __builtin_ia32_vfcmulcph512_round(A, B, 8)
 #define __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph512_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_round(A, B, C, D) __builtin_ia32_vfmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_round(A, B, C) __builtin_ia32_vfmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_round(A, B, C) __builtin_ia32_vfcmulcsh_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_mask_round(A, C, D, B, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
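
The test harness pins the rounding argument of each _round builtin to the literal 8, i.e. _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC. For reference, a sketch of how the round variants are called from user code (assumed usage, not taken from the testsuite):

#include <immintrin.h>

/* The _round variants take an immediate rounding control; 8 is the
   value the harness substitutes above.  */
__m128h
round_sch_demo (__m128h a, __m128h b, __m128h c, __mmask8 k)
{
  __m128h t = _mm_fcmadd_round_sch (a, b, c,
                                    _MM_FROUND_TO_NEAREST_INT
                                    | _MM_FROUND_NO_EXC);
  return _mm_maskz_fmul_round_sch (k, t, b,
                                   _MM_FROUND_TO_NEAREST_INT
                                   | _MM_FROUND_NO_EXC);
}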