#endif /* __OPTIMIZE__ */
+/* Intrinsics vsqrtph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sqrt_ph (__m512h __A)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__A,
+ _mm512_setzero_ph (),
+ (__mmask32) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sqrt_ph (__mmask32 __A, __m512h __B)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__B,
+ _mm512_setzero_ph (),
+ __A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sqrt_round_ph (__m512h __A, const int __B)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__A,
+ _mm512_setzero_ph (),
+ (__mmask32) -1, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sqrt_round_ph (__m512h __A, __mmask32 __B, __m512h __C,
+ const int __D)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__C, __A, __B, __D);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sqrt_round_ph (__mmask32 __A, __m512h __B, const int __C)
+{
+ return __builtin_ia32_vsqrtph_v32hf_mask_round (__B,
+ _mm512_setzero_ph (),
+ __A, __C);
+}
+
+#else
+#define _mm512_sqrt_round_ph(A, B) \
+ (__builtin_ia32_vsqrtph_v32hf_mask_round ((A), \
+ _mm512_setzero_ph (), \
+ (__mmask32)-1, (B)))
+
+#define _mm512_mask_sqrt_round_ph(A, B, C, D) \
+ (__builtin_ia32_vsqrtph_v32hf_mask_round ((C), (A), (B), (D)))
+
+#define _mm512_maskz_sqrt_round_ph(A, B, C) \
+ (__builtin_ia32_vsqrtph_v32hf_mask_round ((B), \
+ _mm512_setzero_ph (), \
+ (A), (C)))
+
+#endif /* __OPTIMIZE__ */
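A minimal usage sketch (illustrative only, not part of the patch) of the three packed-sqrt forms above, assuming -mavx512fp16 and the FP16 add intrinsics from earlier patches in this series:

#include <immintrin.h>

__m512h
sqrt_forms (__m512h src, __mmask32 m, __m512h a)
{
  __m512h all    = _mm512_sqrt_ph (a);              /* all 32 lanes */
  __m512h merged = _mm512_mask_sqrt_ph (src, m, a); /* lane keeps src where mask bit is 0 */
  __m512h zeroed = _mm512_maskz_sqrt_ph (m, a);     /* lane is 0.0 where mask bit is 0 */
  return _mm512_add_ph (all, _mm512_add_ph (merged, zeroed));
}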
+
+/* Intrinsics vrsqrtph. */
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_rsqrt_ph (__m512h __A)
+{
+ return __builtin_ia32_vrsqrtph_v32hf_mask (__A, _mm512_setzero_ph (),
+ (__mmask32) -1);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_rsqrt_ph (__m512h __A, __mmask32 __B, __m512h __C)
+{
+ return __builtin_ia32_vrsqrtph_v32hf_mask (__C, __A, __B);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_rsqrt_ph (__mmask32 __A, __m512h __B)
+{
+ return __builtin_ia32_vrsqrtph_v32hf_mask (__B, _mm512_setzero_ph (),
+ __A);
+}
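vrsqrtph yields an approximate reciprocal square root. A hedged sketch (not part of the patch) of one Newton-Raphson refinement step, assuming the FP16 set1/mul/sub intrinsics from earlier patches in this series:

__m512h
rsqrt_refined (__m512h x)
{
  __m512h r = _mm512_rsqrt_ph (x);
  /* One Newton step: r' = r * (1.5 - 0.5 * x * r * r).  */
  __m512h t = _mm512_mul_ph (_mm512_mul_ph (_mm512_set1_ph ((_Float16) 0.5f), x),
			     _mm512_mul_ph (r, r));
  return _mm512_mul_ph (r, _mm512_sub_ph (_mm512_set1_ph ((_Float16) 1.5f), t));
}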
+
+/* Intrinsics vrsqrtsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vrsqrtsh_v8hf_mask (__B, __A, _mm_setzero_ph (),
+ (__mmask8) -1);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vrsqrtsh_v8hf_mask (__D, __C, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vrsqrtsh_v8hf_mask (__C, __B, _mm_setzero_ph (),
+ __A);
+}
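A sketch (not part of the patch) of the scalar semantics: only element 0 is computed, elements 1..7 are copied from the first operand:

static inline _Float16
rsqrt_low (__m128h v)
{
  __m128h r = _mm_rsqrt_sh (v, v); /* r[0] ~= 1/sqrt(v[0]); r[1..7] = v[1..7] */
  return r[0];                     /* GNU C vector subscripting */
}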
+
+/* Intrinsics vsqrtsh. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_sh (__m128h __A, __m128h __B)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__D, __C, __A, __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__B, __A,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__D, __C, __A, __B,
+ __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vsqrtsh_v8hf_mask_round (__C, __B,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+#else
+#define _mm_sqrt_round_sh(A, B, C) \
+ (__builtin_ia32_vsqrtsh_v8hf_mask_round ((B), (A), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_sqrt_round_sh(A, B, C, D, E) \
+ (__builtin_ia32_vsqrtsh_v8hf_mask_round ((D), (C), (A), (B), (E)))
+
+#define _mm_maskz_sqrt_round_sh(A, B, C, D) \
+ (__builtin_ia32_vsqrtsh_v8hf_mask_round ((C), (B), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#endif /* __OPTIMIZE__ */
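The _round_ forms take a compile-time rounding/SAE immediate; _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC evaluates to 8, which is the value the testsuite hunks below pass. An illustrative sketch (not part of the patch):

__m128h
sqrt_rn_sh (__m128h a, __m128h b)
{
  /* Square root of b[0], rounded to nearest with exceptions
     suppressed; a[1..7] pass through.  */
  return _mm_sqrt_round_sh (a, b,
			    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}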
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512FP16__ */
+/* Intrinsics vsqrtph. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_ph (__m128h __A)
+{
+ return __builtin_ia32_vsqrtph_v8hf_mask (__A, _mm_setzero_ph (),
+ (__mmask8) -1);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_sqrt_ph (__m256h __A)
+{
+ return __builtin_ia32_vsqrtph_v16hf_mask (__A, _mm256_setzero_ph (),
+ (__mmask16) -1);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_ph (__m128h __A, __mmask8 __B, __m128h __C)
+{
+ return __builtin_ia32_vsqrtph_v8hf_mask (__C, __A, __B);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sqrt_ph (__m256h __A, __mmask16 __B, __m256h __C)
+{
+ return __builtin_ia32_vsqrtph_v16hf_mask (__C, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_ph (__mmask8 __A, __m128h __B)
+{
+ return __builtin_ia32_vsqrtph_v8hf_mask (__B, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sqrt_ph (__mmask16 __A, __m256h __B)
+{
+ return __builtin_ia32_vsqrtph_v16hf_mask (__B, _mm256_setzero_ph (),
+ __A);
+}
+
+/* Intrinsics vrsqrtph. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_ph (__m128h __A)
+{
+ return __builtin_ia32_vrsqrtph_v8hf_mask (__A, _mm_setzero_ph (),
+ (__mmask8) -1);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rsqrt_ph (__m256h __A)
+{
+ return __builtin_ia32_vrsqrtph_v16hf_mask (__A, _mm256_setzero_ph (),
+ (__mmask16) -1);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt_ph (__m128h __A, __mmask8 __B, __m128h __C)
+{
+ return __builtin_ia32_vrsqrtph_v8hf_mask (__C, __A, __B);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rsqrt_ph (__m256h __A, __mmask16 __B, __m256h __C)
+{
+ return __builtin_ia32_vrsqrtph_v16hf_mask (__C, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt_ph (__mmask8 __A, __m128h __B)
+{
+ return __builtin_ia32_vrsqrtph_v8hf_mask (__B, _mm_setzero_ph (), __A);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rsqrt_ph (__mmask16 __A, __m256h __B)
+{
+ return __builtin_ia32_vrsqrtph_v16hf_mask (__B, _mm256_setzero_ph (),
+ __A);
+}
+
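With AVX512VL the same operations are available at 128 and 256 bits (the rounding-control forms remain 512-bit only). A usage sketch (not part of the patch), assuming the FP16 mul intrinsic from earlier patches in this series:

__m256h
sqrt_via_rsqrt (__m256h x)
{
  /* x * rsqrt(x) approximates sqrt(x) for positive x.  */
  return _mm256_mul_ph (x, _mm256_rsqrt_ph (x));
}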
#ifdef __DISABLE_AVX512FP16VL__
#undef __DISABLE_AVX512FP16VL__
#pragma GCC pop_options
# FP16 builtins
DEF_FUNCTION_TYPE (V8HF, V8HI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF)
+DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, UQI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT)
DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI)
DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI, INT)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI, INT)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, UHI)
DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI)
+DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, USI)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT)
DEF_FUNCTION_TYPE (USI, V32HF, V32HF, INT, USI)
+DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, USI, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI)
DEF_FUNCTION_TYPE (USI, V32HF, V32HF, INT, USI, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI, INT)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_cmpv8hf3_mask, "__builtin_ia32_vcmpph_v8hf_mask", IX86_BUILTIN_VCMPPH_V8HF_MASK, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_cmpv16hf3_mask, "__builtin_ia32_vcmpph_v16hf_mask", IX86_BUILTIN_VCMPPH_V16HF_MASK, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask, "__builtin_ia32_vcmpph_v32hf_mask", IX86_BUILTIN_VCMPPH_V32HF_MASK, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv8hf2_mask, "__builtin_ia32_vsqrtph_v8hf_mask", IX86_BUILTIN_VSQRTPH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv16hf2_mask, "__builtin_ia32_vsqrtph_v16hf_mask", IX86_BUILTIN_VSQRTPH_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv8hf2_mask, "__builtin_ia32_vrsqrtph_v8hf_mask", IX86_BUILTIN_VRSQRTPH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv16hf2_mask, "__builtin_ia32_vrsqrtph_v16hf_mask", IX86_BUILTIN_VRSQRTPH_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_UHI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_rsqrtv32hf2_mask, "__builtin_ia32_vrsqrtph_v32hf_mask", IX86_BUILTIN_VRSQRTPH_V32HF_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmrsqrtv8hf2_mask, "__builtin_ia32_vrsqrtsh_v8hf_mask", IX86_BUILTIN_VRSQRTSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC_END (ARGS, ROUND_ARGS)
/* Builtins with rounding support. */
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsminv8hf3_mask_round, "__builtin_ia32_vminsh_v8hf_mask_round", IX86_BUILTIN_VMINSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_cmpv32hf3_mask_round, "__builtin_ia32_vcmpph_v32hf_mask_round", IX86_BUILTIN_VCMPPH_V32HF_MASK_ROUND, UNKNOWN, (int) USI_FTYPE_V32HF_V32HF_INT_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmcmpv8hf3_mask_round, "__builtin_ia32_vcmpsh_v8hf_mask_round", IX86_BUILTIN_VCMPSH_V8HF_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8HF_V8HF_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_sqrtv32hf2_mask_round, "__builtin_ia32_vsqrtph_v32hf_mask_round", IX86_BUILTIN_VSQRTPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsqrtv8hf2_mask_round, "__builtin_ia32_vsqrtsh_v8hf_mask_round", IX86_BUILTIN_VSQRTSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
case V16HI_FTYPE_V16SI_V16HI_UHI:
case V16QI_FTYPE_V16SI_V16QI_UHI:
case V16QI_FTYPE_V8DI_V16QI_UQI:
+ case V32HF_FTYPE_V32HF_V32HF_USI:
case V16SF_FTYPE_V16SF_V16SF_UHI:
case V16SF_FTYPE_V4SF_V16SF_UHI:
case V16SI_FTYPE_SI_V16SI_UHI:
case V16HI_FTYPE_HI_V16HI_UHI:
case V8HI_FTYPE_V8HI_V8HI_UQI:
case V8HI_FTYPE_HI_V8HI_UQI:
+ case V16HF_FTYPE_V16HF_V16HF_UHI:
case V8SF_FTYPE_V8HI_V8SF_UQI:
case V4SF_FTYPE_V8HI_V4SF_UQI:
case V8SI_FTYPE_V8SF_V8SI_UQI:
case V4SI_FTYPE_V4SF_V4SI_UQI:
case V4DI_FTYPE_V4SF_V4DI_UQI:
case V2DI_FTYPE_V4SF_V2DI_UQI:
+ case V8HF_FTYPE_V8HF_V8HF_UQI:
case V4SF_FTYPE_V4DI_V4SF_UQI:
case V4SF_FTYPE_V2DI_V4SF_UQI:
case V4DF_FTYPE_V4DI_V4DF_UQI:
case V8DI_FTYPE_V8DF_V8DI_QI_INT:
case V8SF_FTYPE_V8DI_V8SF_QI_INT:
case V8DF_FTYPE_V8DI_V8DF_QI_INT:
+ case V32HF_FTYPE_V32HF_V32HF_USI_INT:
case V16SF_FTYPE_V16SF_V16SF_HI_INT:
case V8DI_FTYPE_V8SF_V8DI_QI_INT:
case V16SF_FTYPE_V16SI_V16SF_HI_INT:
(define_mode_iterator VF_AVX512FP16
[V32HF V16HF V8HF])
+(define_mode_iterator VF_AVX512FP16VL
+ [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])
+
;; All vector integer modes
(define_mode_iterator VI
[(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
(set_attr "mode" "<MODE>")])
(define_expand "sqrt<mode>2"
- [(set (match_operand:VF2 0 "register_operand")
- (sqrt:VF2 (match_operand:VF2 1 "vector_operand")))]
+ [(set (match_operand:VF2H 0 "register_operand")
+ (sqrt:VF2H (match_operand:VF2H 1 "vector_operand")))]
"TARGET_SSE2")
(define_expand "sqrt<mode>2"
})
(define_insn "<sse>_sqrt<mode>2<mask_name><round_name>"
- [(set (match_operand:VF 0 "register_operand" "=x,v")
- (sqrt:VF (match_operand:VF 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
+ [(set (match_operand:VFH 0 "register_operand" "=x,v")
+ (sqrt:VFH (match_operand:VFH 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
"TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
"@
sqrt<ssemodesuffix>\t{%1, %0|%0, %1}
(set_attr "mode" "<MODE>")])
(define_insn "<sse>_vmsqrt<mode>2<mask_scalar_name><round_scalar_name>"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (sqrt:VF_128
- (match_operand:VF_128 1 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
- (match_operand:VF_128 2 "register_operand" "0,v")
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (sqrt:VFH_128
+ (match_operand:VFH_128 1 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
+ (match_operand:VFH_128 2 "register_operand" "0,v")
(const_int 1)))]
"TARGET_SSE"
"@
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
+(define_insn "<sse>_rsqrt<mode>2<mask_name>"
+ [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=v")
+ (unspec:VF_AVX512FP16VL
+ [(match_operand:VF_AVX512FP16VL 1 "vector_operand" "vBm")] UNSPEC_RSQRT))]
+ "TARGET_AVX512FP16"
+ "vrsqrtph\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<MODE>")])
+
(define_insn "<mask_codefor>rsqrt14<mode><mask_name>"
[(set (match_operand:VF_AVX512VL 0 "register_operand" "=v")
(unspec:VF_AVX512VL
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
+(define_insn "avx512fp16_vmrsqrtv8hf2<mask_scalar_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (unspec:V8HF [(match_operand:V8HF 1 "nonimmediate_operand" "vm")]
+ UNSPEC_RSQRT)
+ (match_operand:V8HF 2 "register_operand" "v")
+ (const_int 1)))]
+ "TARGET_AVX512FP16"
+ "vrsqrtsh\t{%1, %2, %0<mask_scalar_operand3>|%0<mask_scalar_operand3>, %2, %w1}"
+ [(set_attr "type" "sse")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "HF")])
+
(define_expand "cond_<code><mode>"
[(set (match_operand:VF 0 "register_operand")
(vec_merge:VF
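With the sqrt<mode>2 expander and <sse>_sqrt<mode>2 insn extended to HF modes above, a plain _Float16 loop can now be vectorized to vsqrtph. A sketch (not part of the patch), assuming GCC's __builtin_sqrtf16 and -O2 -mavx512fp16 (plus -mavx512vl for 128/256-bit vectors):

void
vec_sqrt (_Float16 *restrict dst, const _Float16 *restrict src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = __builtin_sqrtf16 (src[i]);
}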
#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, D) __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, 8)
+#define __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, E) __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, D) __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, 8)
+#define __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, E) __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
test_3 (_mm512_mask_rsqrt28_round_ps, __m512, __m512, __mmask16, __m512, 8)
/* avx512fp16intrin.h */
+test_1 (_mm512_sqrt_round_ph, __m512h, __m512h, 8)
test_2 (_mm512_add_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_sub_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_mul_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm512_cmp_ph_mask, __mmask32, __m512h, __m512h, 1)
test_2 (_mm_comi_sh, int, __m128h, __m128h, 1)
+test_2 (_mm512_maskz_sqrt_round_ph, __m512h, __mmask32, __m512h, 8)
+test_2 (_mm_sqrt_round_sh, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm512_mask_cmp_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1)
+test_3 (_mm512_mask_sqrt_round_ph, __m512h, __m512h, __mmask32, __m512h, 8)
+test_3 (_mm_maskz_sqrt_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_sqrt_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* avx512fp16vlintrin.h */
test_2 (_mm_cmp_ph_mask, __mmask8, __m128h, __m128h, 1)
test_2 (_mm_rsqrt28_round_ss, __m128, __m128, __m128, 8)
/* avx512fp16intrin.h */
+test_1 (_mm512_sqrt_round_ph, __m512h, __m512h, 8)
test_2 (_mm512_add_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_sub_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_mul_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm_min_round_sh, __m128h, __m128h, __m128h, 8)
test_2 (_mm512_cmp_ph_mask, __mmask32, __m512h, __m512h, 1)
test_2 (_mm_comi_sh, int, __m128h, __m128h, 1)
+test_2 (_mm512_maskz_sqrt_round_ph, __m512h, __mmask32, __m512h, 8)
+test_2 (_mm_sqrt_round_sh, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
test_3 (_mm_maskz_max_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm_maskz_min_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3 (_mm512_mask_cmp_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1)
+test_3 (_mm512_mask_sqrt_round_ph, __m512h, __m512h, __mmask32, __m512h, 8)
+test_3 (_mm_maskz_sqrt_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_min_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm_mask_max_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm_mask_min_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_sqrt_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* avx512fp16vlintrin.h */
test_2 (_mm_cmp_ph_mask, __mmask8, __m128h, __m128h, 1)
#define __builtin_ia32_vcmpph_v32hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v32hf_mask(A, B, 1, D)
#define __builtin_ia32_vcmpph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpph_v32hf_mask_round(A, B, 1, D, 8)
#define __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vcmpsh_v8hf_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, D) __builtin_ia32_vsqrtph_v32hf_mask_round(C, A, B, 8)
+#define __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, E) __builtin_ia32_vsqrtsh_v8hf_mask_round(D, C, A, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)