This patch adds "s_" after "cvt" in the AVX10.2 convert intrinsic names to represent saturation.
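
For example, a saturating FP16 to BF8 down-conversion is now spelled as
follows (a minimal sketch assuming a compiler and target with AVX10.2
512-bit support; only the intrinsic name changes, the semantics do not):

  #include <immintrin.h>

  __m256i
  convert_sat (__m512h a)
  {
    /* Previously: return _mm512_cvtsph_bf8 (a);  */
    return _mm512_cvts_ph_bf8 (a);
  }
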
gcc/ChangeLog:
* config/i386/avx10_2-512convertintrin.h (_mm512_mask_cvtx2ps_ph): Formatting fixes.
(_mm512_mask_cvtx_round2ps_ph): Ditto.
(_mm512_maskz_cvtx_round2ps_ph): Ditto.
(_mm512_cvtbiassph_bf8): Rename to _mm512_cvts_biasph_bf8.
(_mm512_mask_cvtbiassph_bf8): Rename to _mm512_mask_cvts_biasph_bf8.
(_mm512_maskz_cvtbiassph_bf8): Rename to _mm512_maskz_cvts_biasph_bf8.
(_mm512_cvtbiassph_hf8): Rename to _mm512_cvts_biasph_hf8.
(_mm512_mask_cvtbiassph_hf8): Rename to _mm512_mask_cvts_biasph_hf8.
(_mm512_maskz_cvtbiassph_hf8): Rename to _mm512_maskz_cvts_biasph_hf8.
(_mm512_cvts2ph_bf8): Rename to _mm512_cvts_2ph_bf8.
(_mm512_mask_cvts2ph_bf8): Rename to _mm512_mask_cvts_2ph_bf8.
(_mm512_maskz_cvts2ph_bf8): Rename to _mm512_maskz_cvts_2ph_bf8.
(_mm512_cvts2ph_hf8): Rename to _mm512_cvts_2ph_hf8.
(_mm512_mask_cvts2ph_hf8): Rename to _mm512_mask_cvts_2ph_hf8.
(_mm512_maskz_cvts2ph_hf8): Rename to _mm512_maskz_cvts_2ph_hf8.
(_mm512_cvtsph_bf8): Rename to _mm512_cvts_ph_bf8.
(_mm512_mask_cvtsph_bf8): Rename to _mm512_mask_cvts_ph_bf8.
(_mm512_maskz_cvtsph_bf8): Rename to _mm512_maskz_cvts_ph_bf8.
(_mm512_cvtsph_hf8): Rename to _mm512_cvts_ph_hf8.
(_mm512_mask_cvtsph_hf8): Rename to _mm512_mask_cvts_ph_hf8.
(_mm512_maskz_cvtsph_hf8): Rename to _mm512_maskz_cvts_ph_hf8.
* config/i386/avx10_2convertintrin.h
(_mm_cvtbiassph_bf8): Rename to _mm_cvts_biasph_bf8.
(_mm_mask_cvtbiassph_bf8): Rename to _mm_mask_cvts_biasph_bf8.
(_mm_maskz_cvtbiassph_bf8): Rename to _mm_maskz_cvts_biasph_bf8.
(_mm256_cvtbiassph_bf8): Rename to _mm256_cvts_biasph_bf8.
(_mm256_mask_cvtbiassph_bf8): Rename to _mm256_mask_cvts_biasph_bf8.
(_mm256_maskz_cvtbiassph_bf8): Rename to _mm256_maskz_cvts_biasph_bf8.
(_mm_cvtbiassph_hf8): Rename to _mm_cvts_biasph_hf8.
(_mm_mask_cvtbiassph_hf8): Rename to _mm_mask_cvts_biasph_hf8.
(_mm_maskz_cvtbiassph_hf8): Rename to _mm_maskz_cvts_biasph_hf8.
(_mm256_cvtbiassph_hf8): Rename to _mm256_cvts_biasph_hf8.
(_mm256_mask_cvtbiassph_hf8): Rename to _mm256_mask_cvts_biasph_hf8.
(_mm256_maskz_cvtbiassph_hf8): Rename to _mm256_maskz_cvts_biasph_hf8.
(_mm_cvts2ph_bf8): Rename to _mm_cvts_2ph_bf8.
(_mm_mask_cvts2ph_bf8): Rename to _mm_mask_cvts_2ph_bf8.
(_mm_maskz_cvts2ph_bf8): Rename to _mm_maskz_cvts_2ph_bf8.
(_mm256_cvts2ph_bf8): Rename to _mm256_cvts_2ph_bf8.
(_mm256_mask_cvts2ph_bf8): Rename to _mm256_mask_cvts_2ph_bf8.
(_mm256_maskz_cvts2ph_bf8): Rename to _mm256_maskz_cvts_2ph_bf8.
(_mm_cvts2ph_hf8): Rename to _mm_cvts_2ph_hf8.
(_mm_mask_cvts2ph_hf8): Rename to _mm_mask_cvts_2ph_hf8.
(_mm_maskz_cvts2ph_hf8): Rename to _mm_maskz_cvts_2ph_hf8.
(_mm256_cvts2ph_hf8): Rename to _mm256_cvts_2ph_hf8.
(_mm256_mask_cvts2ph_hf8): Rename to _mm256_mask_cvts_2ph_hf8.
(_mm256_maskz_cvts2ph_hf8): Rename to _mm256_maskz_cvts_2ph_hf8.
(_mm_cvtsph_bf8): Rename to _mm_cvts_ph_bf8.
(_mm_mask_cvtsph_bf8): Rename to _mm_mask_cvts_ph_bf8.
(_mm_maskz_cvtsph_bf8): Rename to _mm_maskz_cvts_ph_bf8.
(_mm256_cvtsph_bf8): Rename to _mm256_cvts_ph_bf8.
(_mm256_mask_cvtsph_bf8): Rename to _mm256_mask_cvts_ph_bf8.
(_mm256_maskz_cvtsph_bf8): Rename to _mm256_maskz_cvts_ph_bf8.
(_mm_cvtsph_hf8): Rename to _mm_cvts_ph_hf8.
(_mm_mask_cvtsph_hf8): Rename to _mm_mask_cvts_ph_hf8.
(_mm_maskz_cvtsph_hf8): Rename to _mm_maskz_cvts_ph_hf8.
(_mm256_cvtsph_hf8): Rename to _mm256_cvts_ph_hf8.
(_mm256_mask_cvtsph_hf8): Rename to _mm256_mask_cvts_ph_hf8.
(_mm256_maskz_cvtsph_hf8): Rename to _mm256_maskz_cvts_ph_hf8.
gcc/testsuite/ChangeLog:
* gcc.target/i386/avx10_2-512-convert-1.c: Update intrinsic names
to follow the new saturation naming convention.
* gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c: Ditto.
* gcc.target/i386/avx10_2-convert-1.c: Ditto.
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtx2ps_ph (__m512h __W, __mmask32 __U, __m512 __A,
- __m512 __B)
+ __m512 __B)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtx_round2ps_ph (__m512h __W, __mmask32 __U, __m512 __A,
- __m512 __B, const int __R)
+ __m512 __B, const int __R)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
extern __inline __m512h
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtx_round2ps_ph (__mmask32 __U, __m512 __A,
- __m512 __B, const int __R)
+ __m512 __B, const int __R)
{
return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round ((__v16sf) __A,
(__v16sf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_bf8 (__m512i __A, __m512h __B)
+_mm512_cvts_biasph_bf8 (__m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_bf8 (__m256i __W, __mmask32 __U,
- __m512i __A, __m512h __B)
+_mm512_mask_cvts_biasph_bf8 (__m256i __W, __mmask32 __U,
+ __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvts_biasph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_hf8 (__m512i __A, __m512h __B)
+_mm512_cvts_biasph_hf8 (__m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_hf8 (__m256i __W, __mmask32 __U,
- __m512i __A, __m512h __B)
+_mm512_mask_cvts_biasph_hf8 (__m256i __W, __mmask32 __U,
+ __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvts_biasph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
{
return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvts2ph_bf8 (__m512h __A, __m512h __B)
+_mm512_cvts_2ph_bf8 (__m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvts2ph_bf8 (__m512i __W, __mmask64 __U,
- __m512h __A, __m512h __B)
+_mm512_mask_cvts_2ph_bf8 (__m512i __W, __mmask64 __U,
+ __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvts2ph_bf8 (__mmask64 __U, __m512h __A, __m512h __B)
+_mm512_maskz_cvts_2ph_bf8 (__mmask64 __U, __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvts2ph_hf8 (__m512h __A, __m512h __B)
+_mm512_cvts_2ph_hf8 (__m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvts2ph_hf8 (__m512i __W, __mmask64 __U,
- __m512h __A, __m512h __B)
+_mm512_mask_cvts_2ph_hf8 (__m512i __W, __mmask64 __U,
+ __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvts2ph_hf8 (__mmask64 __U, __m512h __A, __m512h __B)
+_mm512_maskz_cvts_2ph_hf8 (__mmask64 __U, __m512h __A, __m512h __B)
{
return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
(__v32hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtsph_bf8 (__m512h __A)
+_mm512_cvts_ph_bf8 (__m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtsph_bf8 (__m256i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_cvts_ph_bf8 (__m256i __W, __mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i) __W,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtsph_bf8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_cvts_ph_bf8 (__mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtsph_hf8 (__m512h __A)
+_mm512_cvts_ph_hf8 (__m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtsph_hf8 (__m256i __W, __mmask32 __U, __m512h __A)
+_mm512_mask_cvts_ph_hf8 (__m256i __W, __mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i) __W,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtsph_hf8 (__mmask32 __U, __m512h __A)
+_mm512_maskz_cvts_ph_hf8 (__mmask32 __U, __m512h __A)
{
return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
(__v32qi) (__m256i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
+_mm_cvts_biasph_bf8 (__m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128 ((__v16qi) __A,
(__v8hf) __B);
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_bf8 (__m128i __W, __mmask8 __U,
- __m128i __A, __m128h __B)
+_mm_mask_cvts_biasph_bf8 (__m128i __W, __mmask8 __U,
+ __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvts_biasph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_bf8 (__m256i __A, __m256h __B)
+_mm256_cvts_biasph_bf8 (__m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_bf8 (__m128i __W, __mmask16 __U,
- __m256i __A, __m256h __B)
+_mm256_mask_cvts_biasph_bf8 (__m128i __W, __mmask16 __U,
+ __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvts_biasph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
+_mm_cvts_biasph_hf8 (__m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128 ((__v16qi) __A,
(__v8hf) __B);
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_hf8 (__m128i __W, __mmask8 __U,
- __m128i __A, __m128h __B)
+_mm_mask_cvts_biasph_hf8 (__m128i __W, __mmask8 __U,
+ __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvts_biasph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_hf8 (__m256i __A, __m256h __B)
+_mm256_cvts_biasph_hf8 (__m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_hf8 (__m128i __W, __mmask16 __U,
- __m256i __A, __m256h __B)
+_mm256_mask_cvts_biasph_hf8 (__m128i __W, __mmask16 __U,
+ __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvts_biasph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
{
return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvts2ph_bf8 (__m128h __A, __m128h __B)
+_mm_cvts_2ph_bf8 (__m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvts2ph_bf8 (__m128i __W, __mmask16 __U,
- __m128h __A, __m128h __B)
+_mm_mask_cvts_2ph_bf8 (__m128i __W, __mmask16 __U,
+ __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvts2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
+_mm_maskz_cvts_2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvts2ph_bf8 (__m256h __A, __m256h __B)
+_mm256_cvts_2ph_bf8 (__m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvts2ph_bf8 (__m256i __W, __mmask32 __U,
- __m256h __A, __m256h __B)
+_mm256_mask_cvts_2ph_bf8 (__m256i __W, __mmask32 __U,
+ __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvts2ph_bf8 (__mmask32 __U, __m256h __A, __m256h __B)
+_mm256_maskz_cvts_2ph_bf8 (__mmask32 __U, __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvts2ph_hf8 (__m128h __A, __m128h __B)
+_mm_cvts_2ph_hf8 (__m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvts2ph_hf8 (__m128i __W, __mmask16 __U,
- __m128h __A, __m128h __B)
+_mm_mask_cvts_2ph_hf8 (__m128i __W, __mmask16 __U,
+ __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvts2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
+_mm_maskz_cvts_2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
{
return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
(__v8hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvts2ph_hf8 (__m256h __A, __m256h __B)
+_mm256_cvts_2ph_hf8 (__m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvts2ph_hf8 (__m256i __W, __mmask32 __U,
- __m256h __A, __m256h __B)
+_mm256_mask_cvts_2ph_hf8 (__m256i __W, __mmask32 __U,
+ __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvts2ph_hf8 (__mmask32 __U, __m256h __A, __m256h __B)
+_mm256_maskz_cvts_2ph_hf8 (__mmask32 __U, __m256h __A, __m256h __B)
{
return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
(__v16hf) __B,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsph_bf8 (__m128h __A)
+_mm_cvts_ph_bf8 (__m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_cvts_ph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i) __W,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsph_bf8 (__mmask8 __U, __m128h __A)
+_mm_maskz_cvts_ph_bf8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtsph_bf8 (__m256h __A)
+_mm256_cvts_ph_bf8 (__m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtsph_bf8 (__m128i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_cvts_ph_bf8 (__m128i __W, __mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i) __W,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtsph_bf8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_cvts_ph_bf8 (__mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsph_hf8 (__m128h __A)
+_mm_cvts_ph_hf8 (__m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtsph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
+_mm_mask_cvts_ph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i) __W,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtsph_hf8 (__mmask8 __U, __m128h __A)
+_mm_maskz_cvts_ph_hf8 (__mmask8 __U, __m128h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtsph_hf8 (__m256h __A)
+_mm256_cvts_ph_hf8 (__m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtsph_hf8 (__m128i __W, __mmask16 __U, __m256h __A)
+_mm256_mask_cvts_ph_hf8 (__m128i __W, __mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i) __W,
extern __inline__ __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtsph_hf8 (__mmask16 __U, __m256h __A)
+_mm256_maskz_cvts_ph_hf8 (__mmask16 __U, __m256h __A)
{
return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
(__v16qi)(__m128i)
void extern
avx10_2_vcvtbiasph2bf8s_test (void)
{
- x256i = _mm512_cvtbiassph_bf8 (x512i, x512h);
- x256i = _mm512_mask_cvtbiassph_bf8 (x256i, m32, x512i, x512h);
- x256i = _mm512_maskz_cvtbiassph_bf8 (m32, x512i, x512h);
+ x256i = _mm512_cvts_biasph_bf8 (x512i, x512h);
+ x256i = _mm512_mask_cvts_biasph_bf8 (x256i, m32, x512i, x512h);
+ x256i = _mm512_maskz_cvts_biasph_bf8 (m32, x512i, x512h);
}
void extern
void extern
avx10_2_vcvtbiasph2hf8s_test (void)
{
- x256i = _mm512_cvtbiassph_hf8 (x512i, x512h);
- x256i = _mm512_mask_cvtbiassph_hf8 (x256i, m32, x512i, x512h);
- x256i = _mm512_maskz_cvtbiassph_hf8 (m32, x512i, x512h);
+ x256i = _mm512_cvts_biasph_hf8 (x512i, x512h);
+ x256i = _mm512_mask_cvts_biasph_hf8 (x256i, m32, x512i, x512h);
+ x256i = _mm512_maskz_cvts_biasph_hf8 (m32, x512i, x512h);
}
void extern
void extern
avx10_2_vcvt2ph2bf8s_test (void)
{
- x512i = _mm512_cvts2ph_bf8 (x512h, x512h);
- x512i = _mm512_mask_cvts2ph_bf8 (x512i, m64, x512h, x512h);
- x512i = _mm512_maskz_cvts2ph_bf8 (m64, x512h, x512h);
+ x512i = _mm512_cvts_2ph_bf8 (x512h, x512h);
+ x512i = _mm512_mask_cvts_2ph_bf8 (x512i, m64, x512h, x512h);
+ x512i = _mm512_maskz_cvts_2ph_bf8 (m64, x512h, x512h);
}
void extern
void extern
avx10_2_vcvt2ph2hf8s_test (void)
{
- x512i = _mm512_cvts2ph_hf8 (x512h, x512h);
- x512i = _mm512_mask_cvts2ph_hf8 (x512i, m64, x512h, x512h);
- x512i = _mm512_maskz_cvts2ph_hf8 (m64, x512h, x512h);
+ x512i = _mm512_cvts_2ph_hf8 (x512h, x512h);
+ x512i = _mm512_mask_cvts_2ph_hf8 (x512i, m64, x512h, x512h);
+ x512i = _mm512_maskz_cvts_2ph_hf8 (m64, x512h, x512h);
}
void extern
void extern
avx10_2_vcvtph2bf8s_test (void)
{
- x256i = _mm512_cvtsph_bf8 (x512h);
- x256i = _mm512_mask_cvtsph_bf8 (x256i, m32, x512h);
- x256i = _mm512_maskz_cvtsph_bf8 (m32, x512h);
+ x256i = _mm512_cvts_ph_bf8 (x512h);
+ x256i = _mm512_mask_cvts_ph_bf8 (x256i, m32, x512h);
+ x256i = _mm512_maskz_cvts_ph_bf8 (m32, x512h);
}
void extern
void extern
avx10_2_vcvtph2hf8s_test (void)
{
- x256i = _mm512_cvtsph_hf8 (x512h);
- x256i = _mm512_mask_cvtsph_hf8 (x256i, m32, x512h);
- x256i = _mm512_maskz_cvtsph_hf8 (m32, x512h);
+ x256i = _mm512_cvts_ph_hf8 (x512h);
+ x256i = _mm512_mask_cvts_ph_hf8 (x256i, m32, x512h);
+ x256i = _mm512_maskz_cvts_ph_hf8 (m32, x512h);
}
void extern
CALC(res_ref, src1.a, src2.a);
- res1.x = INTRINSIC (_cvts2ph_bf8) (src1.x, src2.x);
+ res1.x = INTRINSIC (_cvts_2ph_bf8) (src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvts2ph_bf8) (res2.x, mask, src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_cvts_2ph_bf8) (res2.x, mask, src1.x, src2.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvts2ph_bf8) (mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_cvts_2ph_bf8) (mask, src1.x, src2.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
abort ();
CALC(res_ref, src1.a, src2.a);
- res1.x = INTRINSIC (_cvts2ph_hf8) (src1.x, src2.x);
+ res1.x = INTRINSIC (_cvts_2ph_hf8) (src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvts2ph_hf8) (res2.x, mask, src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_cvts_2ph_hf8) (res2.x, mask, src1.x, src2.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvts2ph_hf8) (mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_cvts_2ph_hf8) (mask, src1.x, src2.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
abort ();
CALC (res_ref, src1.a, src2.a);
- res1.x = INTRINSIC (_cvtbiassph_bf8) (src1.x, src2.x);
+ res1.x = INTRINSIC (_cvts_biasph_bf8) (src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvtbiassph_bf8) (res2.x, mask, src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_cvts_biasph_bf8) (res2.x, mask, src1.x, src2.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvtbiassph_bf8) (mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_cvts_biasph_bf8) (mask, src1.x, src2.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
abort ();
CALC (res_ref, src1.a, src2.a);
- res1.x = INTRINSIC (_cvtbiassph_hf8) (src1.x, src2.x);
+ res1.x = INTRINSIC (_cvts_biasph_hf8) (src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvtbiassph_hf8) (res2.x, mask, src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_cvts_biasph_hf8) (res2.x, mask, src1.x, src2.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvtbiassph_hf8) (mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_cvts_biasph_hf8) (mask, src1.x, src2.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
abort ();
CALC(res_ref, src.a);
- res1.x = INTRINSIC (_cvtsph_bf8) (src.x);
+ res1.x = INTRINSIC (_cvts_ph_bf8) (src.x);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvtsph_bf8) (res2.x, mask, src.x);
+ res2.x = INTRINSIC (_mask_cvts_ph_bf8) (res2.x, mask, src.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvtsph_bf8) (mask, src.x);
+ res3.x = INTRINSIC (_maskz_cvts_ph_bf8) (mask, src.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
abort ();
CALC(res_ref, src.a);
- res1.x = INTRINSIC (_cvtsph_hf8) (src.x);
+ res1.x = INTRINSIC (_cvts_ph_hf8) (src.x);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
abort ();
- res2.x = INTRINSIC (_mask_cvtsph_hf8) (res2.x, mask, src.x);
+ res2.x = INTRINSIC (_mask_cvts_ph_hf8) (res2.x, mask, src.x);
MASK_MERGE (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
abort ();
- res3.x = INTRINSIC (_maskz_cvtsph_hf8) (mask, src.x);
+ res3.x = INTRINSIC (_maskz_cvts_ph_hf8) (mask, src.x);
MASK_ZERO (i_b) (res_ref, mask, SIZE);
if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
abort ();
void extern
avx10_2_vcvtbiasph2bf8s_test (void)
{
- x128i = _mm_cvtbiassph_bf8 (x128i, x128h);
- x128i = _mm_mask_cvtbiassph_bf8 (x128i, m8, x128i, x128h);
- x128i = _mm_maskz_cvtbiassph_bf8 (m8, x128i, x128h);
+ x128i = _mm_cvts_biasph_bf8 (x128i, x128h);
+ x128i = _mm_mask_cvts_biasph_bf8 (x128i, m8, x128i, x128h);
+ x128i = _mm_maskz_cvts_biasph_bf8 (m8, x128i, x128h);
- x128i = _mm256_cvtbiassph_bf8 (x256i, x256h);
- x128i = _mm256_mask_cvtbiassph_bf8 (x128i, m16, x256i, x256h);
- x128i = _mm256_maskz_cvtbiassph_bf8 (m16, x256i, x256h);
+ x128i = _mm256_cvts_biasph_bf8 (x256i, x256h);
+ x128i = _mm256_mask_cvts_biasph_bf8 (x128i, m16, x256i, x256h);
+ x128i = _mm256_maskz_cvts_biasph_bf8 (m16, x256i, x256h);
}
void extern
void extern
avx10_2_vcvtbiasph2hf8s_test (void)
{
- x128i = _mm_cvtbiassph_hf8 (x128i, x128h);
- x128i = _mm_mask_cvtbiassph_hf8 (x128i, m8, x128i, x128h);
- x128i = _mm_maskz_cvtbiassph_hf8 (m8, x128i, x128h);
+ x128i = _mm_cvts_biasph_hf8 (x128i, x128h);
+ x128i = _mm_mask_cvts_biasph_hf8 (x128i, m8, x128i, x128h);
+ x128i = _mm_maskz_cvts_biasph_hf8 (m8, x128i, x128h);
- x128i = _mm256_cvtbiassph_hf8 (x256i, x256h);
- x128i = _mm256_mask_cvtbiassph_hf8 (x128i, m16, x256i, x256h);
- x128i = _mm256_maskz_cvtbiassph_hf8 (m16, x256i, x256h);
+ x128i = _mm256_cvts_biasph_hf8 (x256i, x256h);
+ x128i = _mm256_mask_cvts_biasph_hf8 (x128i, m16, x256i, x256h);
+ x128i = _mm256_maskz_cvts_biasph_hf8 (m16, x256i, x256h);
}
void extern
void extern
avx10_2_vcvt2ph2bf8s_test (void)
{
- x128i = _mm_cvts2ph_bf8 (x128h, x128h);
- x128i = _mm_mask_cvts2ph_bf8 (x128i, m16, x128h, x128h);
- x128i = _mm_maskz_cvts2ph_bf8 (m16, x128h, x128h);
- x256i = _mm256_cvts2ph_bf8 (x256h, x256h);
- x256i = _mm256_mask_cvts2ph_bf8 (x256i, m32, x256h, x256h);
- x256i = _mm256_maskz_cvts2ph_bf8 (m32, x256h, x256h);
+ x128i = _mm_cvts_2ph_bf8 (x128h, x128h);
+ x128i = _mm_mask_cvts_2ph_bf8 (x128i, m16, x128h, x128h);
+ x128i = _mm_maskz_cvts_2ph_bf8 (m16, x128h, x128h);
+ x256i = _mm256_cvts_2ph_bf8 (x256h, x256h);
+ x256i = _mm256_mask_cvts_2ph_bf8 (x256i, m32, x256h, x256h);
+ x256i = _mm256_maskz_cvts_2ph_bf8 (m32, x256h, x256h);
}
void extern
void extern
avx10_2_vcvt2ph2hf8s_test (void)
{
- x128i = _mm_cvts2ph_hf8 (x128h, x128h);
- x128i = _mm_mask_cvts2ph_hf8 (x128i, m16, x128h, x128h);
- x128i = _mm_maskz_cvts2ph_hf8 (m16, x128h, x128h);
- x256i = _mm256_cvts2ph_hf8 (x256h, x256h);
- x256i = _mm256_mask_cvts2ph_hf8 (x256i, m32, x256h, x256h);
- x256i = _mm256_maskz_cvts2ph_hf8 (m32, x256h, x256h);
+ x128i = _mm_cvts_2ph_hf8 (x128h, x128h);
+ x128i = _mm_mask_cvts_2ph_hf8 (x128i, m16, x128h, x128h);
+ x128i = _mm_maskz_cvts_2ph_hf8 (m16, x128h, x128h);
+ x256i = _mm256_cvts_2ph_hf8 (x256h, x256h);
+ x256i = _mm256_mask_cvts_2ph_hf8 (x256i, m32, x256h, x256h);
+ x256i = _mm256_maskz_cvts_2ph_hf8 (m32, x256h, x256h);
}
void extern
void extern
avx10_2_vcvtph2bf8s_test (void)
{
- x128i = _mm_cvtsph_bf8 (x128h);
- x128i = _mm_mask_cvtsph_bf8 (x128i, m8, x128h);
- x128i = _mm_maskz_cvtsph_bf8 (m8, x128h);
+ x128i = _mm_cvts_ph_bf8 (x128h);
+ x128i = _mm_mask_cvts_ph_bf8 (x128i, m8, x128h);
+ x128i = _mm_maskz_cvts_ph_bf8 (m8, x128h);
- x128i = _mm256_cvtsph_bf8 (x256h);
- x128i = _mm256_mask_cvtsph_bf8 (x128i, m16, x256h);
- x128i = _mm256_maskz_cvtsph_bf8 (m16, x256h);
+ x128i = _mm256_cvts_ph_bf8 (x256h);
+ x128i = _mm256_mask_cvts_ph_bf8 (x128i, m16, x256h);
+ x128i = _mm256_maskz_cvts_ph_bf8 (m16, x256h);
}
void extern
void extern
avx10_2_vcvtph2hf8s_test (void)
{
- x128i = _mm_cvtsph_hf8 (x128h);
- x128i = _mm_mask_cvtsph_hf8 (x128i, m8, x128h);
- x128i = _mm_maskz_cvtsph_hf8 (m8, x128h);
+ x128i = _mm_cvts_ph_hf8 (x128h);
+ x128i = _mm_mask_cvts_ph_hf8 (x128i, m8, x128h);
+ x128i = _mm_maskz_cvts_ph_hf8 (m8, x128h);
- x128i = _mm256_cvtsph_hf8 (x256h);
- x128i = _mm256_mask_cvtsph_hf8 (x128i, m16, x256h);
- x128i = _mm256_maskz_cvtsph_hf8 (m16, x256h);
+ x128i = _mm256_cvts_ph_hf8 (x256h);
+ x128i = _mm256_mask_cvts_ph_hf8 (x128i, m16, x256h);
+ x128i = _mm256_maskz_cvts_ph_hf8 (m16, x256h);
}
void extern