extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_addne_pbh (__m512bh __A, __m512bh __B)
+_mm512_add_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_addnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_addbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_addne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_add_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_addnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_addne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_add_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_addnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_addbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_subne_pbh (__m512bh __A, __m512bh __B)
+_mm512_sub_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_subnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_subbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_subne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_sub_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_subnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_subne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_sub_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_subnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_subbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mulne_pbh (__m512bh __A, __m512bh __B)
+_mm512_mul_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_mulnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_mulbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_mulne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_mul_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_mulnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_mulne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_mul_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_mulnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_mulbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_divne_pbh (__m512bh __A, __m512bh __B)
+_mm512_div_pbh (__m512bh __A, __m512bh __B)
{
- return (__m512bh) __builtin_ia32_divnepbf16512 (__A, __B);
+ return (__m512bh) __builtin_ia32_divbf16512 (__A, __B);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_divne_pbh (__m512bh __W, __mmask32 __U,
- __m512bh __A, __m512bh __B)
+_mm512_mask_div_pbh (__m512bh __W, __mmask32 __U,
+ __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_divnepbf16512_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16512_mask (__A, __B, __W, __U);
}
extern __inline__ __m512bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_divne_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
+_mm512_maskz_div_pbh (__mmask32 __U, __m512bh __A, __m512bh __B)
{
return (__m512bh)
- __builtin_ia32_divnepbf16512_mask (__A, __B,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_divbf16512_mask (__A, __B,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
_mm512_rcp_pbh (__m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A,
- (__v32bf) _mm512_setzero_si512 (),
- (__mmask32) -1);
+ __builtin_ia32_rcpbf16512_mask (__A,
+ (__v32bf) _mm512_setzero_si512 (),
+ (__mmask32) -1);
}
extern __inline__ __m512bh
_mm512_mask_rcp_pbh (__m512bh __W, __mmask32 __U, __m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16512_mask (__A, __W, __U);
}
extern __inline__ __m512bh
_mm512_maskz_rcp_pbh (__mmask32 __U, __m512bh __A)
{
return (__m512bh)
- __builtin_ia32_rcppbf16512_mask (__A,
- (__v32bf) _mm512_setzero_si512 (),
- __U);
+ __builtin_ia32_rcpbf16512_mask (__A,
+ (__v32bf) _mm512_setzero_si512 (),
+ __U);
}
extern __inline__ __m512bh
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_addne_pbh (__m256bh __A, __m256bh __B)
+_mm256_add_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_addnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_addbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_addne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_add_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_addnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_addne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_add_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_addnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_addbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addne_pbh (__m128bh __A, __m128bh __B)
+_mm_add_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_addnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_addbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_addne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_add_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_addnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_addbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_addne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_add_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_addnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_addbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_subne_pbh (__m256bh __A, __m256bh __B)
+_mm256_sub_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_subnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_subbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_subne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_sub_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_subnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_subne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_sub_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_subnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_subbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subne_pbh (__m128bh __A, __m128bh __B)
+_mm_sub_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_subnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_subbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_subne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_sub_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_subnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_subbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_subne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_sub_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_subnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_subbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mulne_pbh (__m256bh __A, __m256bh __B)
+_mm256_mul_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_mulnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_mulbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_mulne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_mul_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_mulnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_mulne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_mul_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_mulnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_mulbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulne_pbh (__m128bh __A, __m128bh __B)
+_mm_mul_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_mulnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_mulbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_mulne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_mul_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_mulnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_mulbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_mulne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_mul_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_mulnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_mulbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_divne_pbh (__m256bh __A, __m256bh __B)
+_mm256_div_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_divnepbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_divbf16256 (__A, __B);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_divne_pbh (__m256bh __W, __mmask16 __U,
- __m256bh __A, __m256bh __B)
+_mm256_mask_div_pbh (__m256bh __W, __mmask16 __U,
+ __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_divnepbf16256_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16256_mask (__A, __B, __W, __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_divne_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
+_mm256_maskz_div_pbh (__mmask16 __U, __m256bh __A, __m256bh __B)
{
return (__m256bh)
- __builtin_ia32_divnepbf16256_mask (__A, __B,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_divbf16256_mask (__A, __B,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_divne_pbh (__m128bh __A, __m128bh __B)
+_mm_div_pbh (__m128bh __A, __m128bh __B)
{
- return (__m128bh) __builtin_ia32_divnepbf16128 (__A, __B);
+ return (__m128bh) __builtin_ia32_divbf16128 (__A, __B);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_divne_pbh (__m128bh __W, __mmask8 __U,
- __m128bh __A, __m128bh __B)
+_mm_mask_div_pbh (__m128bh __W, __mmask8 __U,
+ __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_divnepbf16128_mask (__A, __B, __W, __U);
+ __builtin_ia32_divbf16128_mask (__A, __B, __W, __U);
}
extern __inline__ __m128bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_divne_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
+_mm_maskz_div_pbh (__mmask8 __U, __m128bh __A, __m128bh __B)
{
return (__m128bh)
- __builtin_ia32_divnepbf16128_mask (__A, __B,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_divbf16128_mask (__A, __B,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_max_pbh (__m256bh __A, __m256bh __B)
{
- return (__m256bh) __builtin_ia32_maxpbf16256 (__A, __B);
+ return (__m256bh) __builtin_ia32_maxbf16256 (__A, __B);
}
extern __inline__ __m256bh
_mm256_rcp_pbh (__m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A,
- (__v16bf) _mm256_setzero_si256 (),
- (__mmask16) -1);
+ __builtin_ia32_rcpbf16256_mask (__A,
+ (__v16bf) _mm256_setzero_si256 (),
+ (__mmask16) -1);
}
extern __inline__ __m256bh
_mm256_mask_rcp_pbh (__m256bh __W, __mmask16 __U, __m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16256_mask (__A, __W, __U);
}
extern __inline__ __m256bh
_mm256_maskz_rcp_pbh (__mmask16 __U, __m256bh __A)
{
return (__m256bh)
- __builtin_ia32_rcppbf16256_mask (__A,
- (__v16bf) _mm256_setzero_si256 (),
- __U);
+ __builtin_ia32_rcpbf16256_mask (__A,
+ (__v16bf) _mm256_setzero_si256 (),
+ __U);
}
extern __inline__ __m128bh
_mm_rcp_pbh (__m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A,
- (__v8bf) _mm_setzero_si128 (),
- (__mmask8) -1);
+ __builtin_ia32_rcpbf16128_mask (__A,
+ (__v8bf) _mm_setzero_si128 (),
+ (__mmask8) -1);
}
extern __inline__ __m128bh
_mm_mask_rcp_pbh (__m128bh __W, __mmask8 __U, __m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A, __W, __U);
+ __builtin_ia32_rcpbf16128_mask (__A, __W, __U);
}
extern __inline__ __m128bh
_mm_maskz_rcp_pbh (__mmask8 __U, __m128bh __A)
{
return (__m128bh)
- __builtin_ia32_rcppbf16128_mask (__A,
- (__v8bf) _mm_setzero_si128 (),
- __U);
+ __builtin_ia32_rcpbf16128_mask (__A,
+ (__v8bf) _mm_setzero_si128 (),
+ __U);
}
extern __inline__ __m256bh
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv8hf_mask, "__builtin_ia32_vcvthf82ph128_mask", IX86_BUILTIN_VCVTHF82PH128_MASK, UNKNOWN, (int) V8HF_FTYPE_V16QI_V8HF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvthf82phv16hf_mask, "__builtin_ia32_vcvthf82ph256_mask", IX86_BUILTIN_VCVTHF82PH256_MASK, UNKNOWN, (int) V16HF_FTYPE_V16QI_V16HF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_vcvthf82phv32hf_mask, "__builtin_ia32_vcvthf82ph512_mask", IX86_BUILTIN_VCVTHF82PH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32QI_V32HF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addnepbf16_v32bf, "__builtin_ia32_addnepbf16512", IX86_BUILTIN_ADDNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addnepbf16_v32bf_mask, "__builtin_ia32_addnepbf16512_mask", IX86_BUILTIN_ADDNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v16bf, "__builtin_ia32_addnepbf16256", IX86_BUILTIN_ADDNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v16bf_mask, "__builtin_ia32_addnepbf16256_mask", IX86_BUILTIN_ADDNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v8bf, "__builtin_ia32_addnepbf16128", IX86_BUILTIN_ADDNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addnepbf16_v8bf_mask, "__builtin_ia32_addnepbf16128_mask", IX86_BUILTIN_ADDNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subnepbf16_v32bf, "__builtin_ia32_subnepbf16512", IX86_BUILTIN_SUBNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subnepbf16_v32bf_mask, "__builtin_ia32_subnepbf16512_mask", IX86_BUILTIN_SUBNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v16bf, "__builtin_ia32_subnepbf16256", IX86_BUILTIN_SUBNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v16bf_mask, "__builtin_ia32_subnepbf16256_mask", IX86_BUILTIN_SUBNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v8bf, "__builtin_ia32_subnepbf16128", IX86_BUILTIN_SUBNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subnepbf16_v8bf_mask, "__builtin_ia32_subnepbf16128_mask", IX86_BUILTIN_SUBNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulnepbf16_v32bf, "__builtin_ia32_mulnepbf16512", IX86_BUILTIN_MULNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulnepbf16_v32bf_mask, "__builtin_ia32_mulnepbf16512_mask", IX86_BUILTIN_MULNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v16bf, "__builtin_ia32_mulnepbf16256", IX86_BUILTIN_MULNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v16bf_mask, "__builtin_ia32_mulnepbf16256_mask", IX86_BUILTIN_MULNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v8bf, "__builtin_ia32_mulnepbf16128", IX86_BUILTIN_MULNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulnepbf16_v8bf_mask, "__builtin_ia32_mulnepbf16128_mask", IX86_BUILTIN_MULNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divnepbf16_v32bf, "__builtin_ia32_divnepbf16512", IX86_BUILTIN_DIVNEPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divnepbf16_v32bf_mask, "__builtin_ia32_divnepbf16512_mask", IX86_BUILTIN_DIVNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v16bf, "__builtin_ia32_divnepbf16256", IX86_BUILTIN_DIVNEPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v16bf_mask, "__builtin_ia32_divnepbf16256_mask", IX86_BUILTIN_DIVNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v8bf, "__builtin_ia32_divnepbf16128", IX86_BUILTIN_DIVNEPBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divnepbf16_v8bf_mask, "__builtin_ia32_divnepbf16128_mask", IX86_BUILTIN_DIVNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf, "__builtin_ia32_addbf16512", IX86_BUILTIN_ADDBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_addbf16_v32bf_mask, "__builtin_ia32_addbf16512_mask", IX86_BUILTIN_ADDBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf, "__builtin_ia32_addbf16256", IX86_BUILTIN_ADDBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v16bf_mask, "__builtin_ia32_addbf16256_mask", IX86_BUILTIN_ADDBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf, "__builtin_ia32_addbf16128", IX86_BUILTIN_ADDBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_addbf16_v8bf_mask, "__builtin_ia32_addbf16128_mask", IX86_BUILTIN_ADDBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf, "__builtin_ia32_subbf16512", IX86_BUILTIN_SUBBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_subbf16_v32bf_mask, "__builtin_ia32_subbf16512_mask", IX86_BUILTIN_SUBBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf, "__builtin_ia32_subbf16256", IX86_BUILTIN_SUBBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v16bf_mask, "__builtin_ia32_subbf16256_mask", IX86_BUILTIN_SUBBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf, "__builtin_ia32_subbf16128", IX86_BUILTIN_SUBBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_subbf16_v8bf_mask, "__builtin_ia32_subbf16128_mask", IX86_BUILTIN_SUBBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf, "__builtin_ia32_mulbf16512", IX86_BUILTIN_MULBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_mulbf16_v32bf_mask, "__builtin_ia32_mulbf16512_mask", IX86_BUILTIN_MULBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf, "__builtin_ia32_mulbf16256", IX86_BUILTIN_MULBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v16bf_mask, "__builtin_ia32_mulbf16256_mask", IX86_BUILTIN_MULBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf, "__builtin_ia32_mulbf16128", IX86_BUILTIN_MULBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_mulbf16_v8bf_mask, "__builtin_ia32_mulbf16128_mask", IX86_BUILTIN_MULBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf, "__builtin_ia32_divbf16512", IX86_BUILTIN_DIVBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_divbf16_v32bf_mask, "__builtin_ia32_divbf16512_mask", IX86_BUILTIN_DIVBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf, "__builtin_ia32_divbf16256", IX86_BUILTIN_DIVBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v16bf_mask, "__builtin_ia32_divbf16256_mask", IX86_BUILTIN_DIVBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf, "__builtin_ia32_divbf16128", IX86_BUILTIN_DIVBF16128, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_divbf16_v8bf_mask, "__builtin_ia32_divbf16128_mask", IX86_BUILTIN_DIVBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxpbf16_v32bf, "__builtin_ia32_maxpbf16512", IX86_BUILTIN_MAXPBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxpbf16_v32bf_mask, "__builtin_ia32_maxpbf16512_mask", IX86_BUILTIN_MAXPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxpbf16_v16bf, "__builtin_ia32_maxpbf16256", IX86_BUILTIN_MAXPBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxbf16_v32bf, "__builtin_ia32_maxbf16512", IX86_BUILTIN_MAXBF16512, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_smaxbf16_v32bf_mask, "__builtin_ia32_maxbf16512_mask", IX86_BUILTIN_MAXBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_smaxbf16_v16bf, "__builtin_ia32_maxbf16256", IX86_BUILTIN_MAXBF16256, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_sqrtnepbf16_v32bf_mask, "__builtin_ia32_sqrtnepbf16512_mask", IX86_BUILTIN_SQRTNEPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtnepbf16_v16bf_mask, "__builtin_ia32_sqrtnepbf16256_mask", IX86_BUILTIN_SQRTNEPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_sqrtnepbf16_v8bf_mask, "__builtin_ia32_sqrtnepbf16128_mask", IX86_BUILTIN_SQRTNEPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rcppbf16_v32bf_mask, "__builtin_ia32_rcppbf16512_mask", IX86_BUILTIN_RCPPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcppbf16_v16bf_mask, "__builtin_ia32_rcppbf16256_mask", IX86_BUILTIN_RCPPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcppbf16_v8bf_mask, "__builtin_ia32_rcppbf16128_mask", IX86_BUILTIN_RCPPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_rcpbf16_v32bf_mask, "__builtin_ia32_rcpbf16512_mask", IX86_BUILTIN_RCPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v16bf_mask, "__builtin_ia32_rcpbf16256_mask", IX86_BUILTIN_RCPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_rcpbf16_v8bf_mask, "__builtin_ia32_rcpbf16128_mask", IX86_BUILTIN_RCPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_getexppbf16_v32bf_mask, "__builtin_ia32_getexppbf16512_mask", IX86_BUILTIN_GETEXPPBF16512_MASK, UNKNOWN, (int) V32BF_FTYPE_V32BF_V32BF_USI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexppbf16_v16bf_mask, "__builtin_ia32_getexppbf16256_mask", IX86_BUILTIN_GETEXPPBF16256_MASK, UNKNOWN, (int) V16BF_FTYPE_V16BF_V16BF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx10_2_getexppbf16_v8bf_mask, "__builtin_ia32_getexppbf16128_mask", IX86_BUILTIN_GETEXPPBF16128_MASK, UNKNOWN, (int) V8BF_FTYPE_V8BF_V8BF_UQI)
{
rtx op = gen_reg_rtx (<MODE>mode);
operands[2] = force_reg (<MODE>mode, operands[2]);
- emit_insn (gen_avx10_2_rcppbf16_<mode> (op, operands[2]));
- emit_insn (gen_avx10_2_mulnepbf16_<mode> (operands[0], operands[1], op));
+ emit_insn (gen_avx10_2_rcpbf16_<mode> (op, operands[2]));
+ emit_insn (gen_avx10_2_mulbf16_<mode> (operands[0], operands[1], op));
DONE;
}
})
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
-(define_insn "avx10_2_<insn>nepbf16_<mode><mask_name>"
+(define_insn "avx10_2_<insn>bf16_<mode><mask_name>"
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(plusminusmultdiv:VBF_AVX10_2
(match_operand:VBF_AVX10_2 1 "register_operand" "v")
(match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
"TARGET_AVX10_2_256"
- "v<insn>nepbf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
+ "v<insn>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
(define_expand "avx10_2_fmaddnepbf16_<mode>_maskz"
"vsqrtnepbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
-(define_insn "avx10_2_rcppbf16_<mode><mask_name>"
+(define_insn "avx10_2_rcpbf16_<mode><mask_name>"
[(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
(unspec:VBF_AVX10_2
[(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP))]
"TARGET_AVX10_2_256"
- "vrcppbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ "vrcpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_getexppbf16_<mode><mask_name>"
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-
-#include <immintrin.h>
-
-typedef __bf16 v32bf __attribute__ ((__vector_size__ (64)));
-
-v32bf
-foo_mul (v32bf a, v32bf b)
-{
- return a * b;
-}
-
-v32bf
-foo_add (v32bf a, v32bf b)
-{
- return a + b;
-}
-
-v32bf
-foo_div (v32bf a, v32bf b)
-{
- return a / b;
-}
-
-v32bf
-foo_sub (v32bf a, v32bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v32bf
-foo_div_fast_math (v32bf a, v32bf b)
-{
- return a / b;
-}
/* { dg-do compile } */
/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
void extern
avx10_2_512_test (void)
{
- res = _mm512_addne_pbh (x1, x2);
- res = _mm512_mask_addne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_addne_pbh (m32, x1, x2);
- res = _mm512_subne_pbh (x1, x2);
- res = _mm512_mask_subne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_subne_pbh (m32, x1, x2);
- res = _mm512_mulne_pbh (x1, x2);
- res = _mm512_mask_mulne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_mulne_pbh (m32, x1, x2);
- res = _mm512_divne_pbh (x1, x2);
- res = _mm512_mask_divne_pbh (res, m32, x1, x2);
- res = _mm512_maskz_divne_pbh (m32, x1, x2);
+ res = _mm512_add_pbh (x1, x2);
+ res = _mm512_mask_add_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_add_pbh (m32, x1, x2);
+ res = _mm512_sub_pbh (x1, x2);
+ res = _mm512_mask_sub_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_sub_pbh (m32, x1, x2);
+ res = _mm512_mul_pbh (x1, x2);
+ res = _mm512_mask_mul_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_mul_pbh (m32, x1, x2);
+ res = _mm512_div_pbh (x1, x2);
+ res = _mm512_mask_div_pbh (res, m32, x1, x2);
+ res = _mm512_maskz_div_pbh (m32, x1, x2);
res = _mm512_max_pbh (x1, x2);
res = _mm512_mask_max_pbh (res, m32, x1, x2);
res = _mm512_maskz_max_pbh (m32, x1, x2);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64-v3 -mavx10.2-512 -O2" } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\r]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+typedef __bf16 v32bf __attribute__ ((__vector_size__ (64)));
+
+v32bf
+foo_mul (v32bf a, v32bf b)
+{
+ return a * b;
+}
+
+v32bf
+foo_add (v32bf a, v32bf b)
+{
+ return a + b;
+}
+
+v32bf
+foo_div (v32bf a, v32bf b)
+{
+ return a / b;
+}
+
+v32bf
+foo_sub (v32bf a, v32bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v32bf
+foo_div_fast_math (v32bf a, v32bf b)
+{
+ return a / b;
+}
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_addne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_addne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_addne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_add_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_add_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_add_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_divne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_divne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_divne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_div_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_div_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_div_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_mulne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_mulne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_mulne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_mul_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_mul_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_mul_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
res_ref[i] = res_ref2[i] = convert_fp32_to_bf16_ne (res);
}
- res1.x = INTRINSIC (_subne_pbh) (src1.x, src2.x);
- res2.x = INTRINSIC (_mask_subne_pbh) (res2.x, mask, src1.x, src2.x);
- res3.x = INTRINSIC (_maskz_subne_pbh) (mask, src1.x, src2.x);
+ res1.x = INTRINSIC (_sub_pbh) (src1.x, src2.x);
+ res2.x = INTRINSIC (_mask_sub_pbh) (res2.x, mask, src1.x, src2.x);
+ res3.x = INTRINSIC (_maskz_sub_pbh) (mask, src1.x, src2.x);
if (UNION_CHECK (AVX512F_LEN, bf16_uw) (res1, res_ref))
abort ();
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-
-#include <immintrin.h>
-
-typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
-typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
-
-v16bf
-foo_mul_256 (v16bf a, v16bf b)
-{
- return a * b;
-}
-
-v16bf
-foo_add_256 (v16bf a, v16bf b)
-{
- return a + b;
-}
-
-v16bf
-foo_div_256 (v16bf a, v16bf b)
-{
- return a / b;
-}
-
-v16bf
-foo_sub_256 (v16bf a, v16bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v16bf
-foo_div_fast_math_256 (v16bf a, v16bf b)
-{
- return a / b;
-}
-
-v8bf
-foo_mul_128 (v8bf a, v8bf b)
-{
- return a * b;
-}
-
-v8bf
-foo_add_128 (v8bf a, v8bf b)
-{
- return a + b;
-}
-
-v8bf
-foo_div_128 (v8bf a, v8bf b)
-{
- return a / b;
-}
-
-v8bf
-foo_sub_128 (v8bf a, v8bf b)
-{
- return a - b;
-}
-
-__attribute__((optimize("fast-math")))
-v8bf
-foo_div_fast_math_128 (v8bf a, v8bf b)
-{
- return a / b;
-}
/* { dg-do compile } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmaxpbf16\[ \\t\]+%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vgetexppbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
void extern
avx10_2_test (void)
{
- res = _mm256_addne_pbh (x1, x2);
- res = _mm256_mask_addne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_addne_pbh (m16, x1, x2);
- res1 = _mm_addne_pbh (x3, x4);
- res1 = _mm_mask_addne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_addne_pbh (m8, x3, x4);
+ res = _mm256_add_pbh (x1, x2);
+ res = _mm256_mask_add_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_add_pbh (m16, x1, x2);
+ res1 = _mm_add_pbh (x3, x4);
+ res1 = _mm_mask_add_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_add_pbh (m8, x3, x4);
- res = _mm256_subne_pbh (x1, x2);
- res = _mm256_mask_subne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_subne_pbh (m16, x1, x2);
- res1 = _mm_subne_pbh (x3, x4);
- res1 = _mm_mask_subne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_subne_pbh (m8, x3, x4);
+ res = _mm256_sub_pbh (x1, x2);
+ res = _mm256_mask_sub_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_sub_pbh (m16, x1, x2);
+ res1 = _mm_sub_pbh (x3, x4);
+ res1 = _mm_mask_sub_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_sub_pbh (m8, x3, x4);
- res = _mm256_mulne_pbh (x1, x2);
- res = _mm256_mask_mulne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_mulne_pbh (m16, x1, x2);
- res1 = _mm_mulne_pbh (x3, x4);
- res1 = _mm_mask_mulne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_mulne_pbh (m8, x3, x4);
+ res = _mm256_mul_pbh (x1, x2);
+ res = _mm256_mask_mul_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_mul_pbh (m16, x1, x2);
+ res1 = _mm_mul_pbh (x3, x4);
+ res1 = _mm_mask_mul_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_mul_pbh (m8, x3, x4);
- res = _mm256_divne_pbh (x1, x2);
- res = _mm256_mask_divne_pbh (res, m16, x1, x2);
- res = _mm256_maskz_divne_pbh (m16, x1, x2);
- res1 = _mm_divne_pbh (x3, x4);
- res1 = _mm_mask_divne_pbh (res1, m8, x3, x4);
- res1 = _mm_maskz_divne_pbh (m8, x3, x4);
+ res = _mm256_div_pbh (x1, x2);
+ res = _mm256_mask_div_pbh (res, m16, x1, x2);
+ res = _mm256_maskz_div_pbh (m16, x1, x2);
+ res1 = _mm_div_pbh (x3, x4);
+ res1 = _mm_mask_div_pbh (res1, m8, x3, x4);
+ res1 = _mm_maskz_div_pbh (m8, x3, x4);
res = _mm256_max_pbh (x1, x2);
res = _mm256_mask_max_pbh (res, m16, x1, x2);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+typedef __bf16 v16bf __attribute__ ((__vector_size__ (32)));
+typedef __bf16 v8bf __attribute__ ((__vector_size__ (16)));
+
+v16bf
+foo_mul_256 (v16bf a, v16bf b)
+{
+ return a * b;
+}
+
+v16bf
+foo_add_256 (v16bf a, v16bf b)
+{
+ return a + b;
+}
+
+v16bf
+foo_div_256 (v16bf a, v16bf b)
+{
+ return a / b;
+}
+
+v16bf
+foo_sub_256 (v16bf a, v16bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v16bf
+foo_div_fast_math_256 (v16bf a, v16bf b)
+{
+ return a / b;
+}
+
+v8bf
+foo_mul_128 (v8bf a, v8bf b)
+{
+ return a * b;
+}
+
+v8bf
+foo_add_128 (v8bf a, v8bf b)
+{
+ return a + b;
+}
+
+v8bf
+foo_div_128 (v8bf a, v8bf b)
+{
+ return a / b;
+}
+
+v8bf
+foo_sub_128 (v8bf a, v8bf b)
+{
+ return a - b;
+}
+
+__attribute__((optimize("fast-math")))
+v8bf
+foo_div_fast_math_128 (v8bf a, v8bf b)
+{
+ return a / b;
+}
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vrcppbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vrcpbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
typedef __bf16 v4bf __attribute__ ((__vector_size__ (8)));
typedef __bf16 v2bf __attribute__ ((__vector_size__ (4)));
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-march=x86-64-v3 -mavx10.2 -O2" } */
-/* { dg-final { scan-assembler-times "vmulnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vaddnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vdivnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
-/* { dg-final { scan-assembler-times "vsubnepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vmulbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vaddbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vdivbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vsubbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
typedef __bf16 v4bf __attribute__ ((__vector_size__ (8)));
typedef __bf16 v2bf __attribute__ ((__vector_size__ (4)));
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vrcppbf16-2.c"
+#include "avx10_2-512-vaddbf16-2.c"
#undef AVX512F_LEN
#undef AVX512F_LEN_HALF
#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vrcppbf16-2.c"
+#include "avx10_2-512-vaddbf16-2.c"
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vaddnepbf16-2.c"
+#include "avx10_2-512-vdivbf16-2.c"
#undef AVX512F_LEN
#undef AVX512F_LEN_HALF
#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vaddnepbf16-2.c"
+#include "avx10_2-512-vdivbf16-2.c"
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vdivnepbf16-2.c"
+#include "avx10_2-512-vmulbf16-2.c"
#undef AVX512F_LEN
#undef AVX512F_LEN_HALF
#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vdivnepbf16-2.c"
+#include "avx10_2-512-vmulbf16-2.c"
#define AVX512VL
#define AVX512F_LEN 256
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vmulnepbf16-2.c"
+#include "avx10_2-512-vrcpbf16-2.c"
#undef AVX512F_LEN
#undef AVX512F_LEN_HALF
#define AVX512F_LEN 128
#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vmulnepbf16-2.c"
+#include "avx10_2-512-vrcpbf16-2.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2" } */
+/* { dg-require-effective-target avx10_2 } */
+
+#define AVX10_2
+#define AVX512VL
+#define AVX512F_LEN 256
+#define AVX512F_LEN_HALF 128
+#include "avx10_2-512-vsubbf16-2.c"
+
+#undef AVX512F_LEN
+#undef AVX512F_LEN_HALF
+
+#define AVX512F_LEN 128
+#define AVX512F_LEN_HALF 128
+#include "avx10_2-512-vsubbf16-2.c"
+++ /dev/null
-/* { dg-do run } */
-/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2" } */
-/* { dg-require-effective-target avx10_2 } */
-
-#define AVX10_2
-#define AVX512VL
-#define AVX512F_LEN 256
-#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vsubnepbf16-2.c"
-
-#undef AVX512F_LEN
-#undef AVX512F_LEN_HALF
-
-#define AVX512F_LEN 128
-#define AVX512F_LEN_HALF 128
-#include "avx10_2-512-vsubnepbf16-2.c"
{
__asm__ volatile ("vdpphps\t%ymm4, %ymm5, %ymm6");
__asm__ volatile ("vcvthf82ph\t%xmm5, %ymm6");
- __asm__ volatile ("vaddnepbf16\t%ymm4, %ymm5, %ymm6");
+ __asm__ volatile ("vaddbf16\t%ymm4, %ymm5, %ymm6");
__asm__ volatile ("vcvtph2ibs\t%ymm5, %ymm6");
__asm__ volatile ("vminmaxpd\t$123, %ymm4, %ymm5, %ymm6");
}
{
__asm__ volatile ("vdpphps\t%zmm4, %zmm5, %zmm6");
__asm__ volatile ("vcvthf82ph\t%ymm5, %zmm6");
- __asm__ volatile ("vaddnepbf16\t%zmm4, %zmm5, %zmm6");
+ __asm__ volatile ("vaddbf16\t%zmm4, %zmm5, %zmm6");
__asm__ volatile ("vcvtph2ibs\t%zmm5, %zmm6");
__asm__ volatile ("vminmaxpd\t$123, %zmm4, %zmm5, %zmm6");
}