cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
sm3intrin.h sha512intrin.h sm4intrin.h
- usermsrintrin.h"
+ usermsrintrin.h avx10_2roundingintrin.h"
;;
ia64-*-*)
extra_headers=ia64intrin.h
--- /dev/null
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+
+#ifndef __AVX10_2_256__
+#pragma GCC push_options
+#pragma GCC target("avx10.2-256")
+#define __DISABLE_AVX10_2_256__
+#endif /* __AVX10_2_256__ */
+
+#ifdef __OPTIMIZE__
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_undefined_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B, const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+ const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_undefined_ph (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+ __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+ const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_undefined_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
+ const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
+ const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
+ const int __R)
+{
+ return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ __C,
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
+ const int __C, const int __R)
+{
+ return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ __C,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
+ const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+#else
+#define _mm256_add_round_pd(A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_undefined_pd ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_pd(W, U, A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_pd(U, A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_add_round_ph(A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_undefined_ph ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_ph(W, U, A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_ph(U, A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_setzero_ph ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_add_round_ps(A, B, R) \
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_undefined_ps ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_ps(W, U, A, B, R) \
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_ps(U, A, B, R) \
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_setzero_ps ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cmp_round_pd_mask(A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cmp_round_ph_mask(A, B, C, R) \
+ ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (C), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
+ ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (C), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_cmp_round_ps_mask(A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+#endif
+
+#ifdef __DISABLE_AVX10_2_256__
+#undef __DISABLE_AVX10_2_256__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX10_2_256__ */
+
+#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
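
A minimal usage sketch (editor's note, not part of the patch): with a
compiler that accepts -mavx10.2-256, the new 256-bit forms take the same
rounding immediates as their 512-bit counterparts, and the immediate must
be a compile-time constant:

    #include <immintrin.h>

    /* Add packed doubles with round-to-nearest and exceptions suppressed;
       compile with -mavx10.2-256.  */
    __m256d
    add_rn_sae (__m256d a, __m256d b)
    {
      return _mm256_add_round_pd (a, b, _MM_FROUND_TO_NEAREST_INT
					| _MM_FROUND_NO_EXC);
    }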
# USER_MSR builtins
DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
+
+# AVX10.2 builtins
+DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
+DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
+DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
+DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
+DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
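
For orientation (editor's gloss, not patch content): each new signature
spells out a builtin prototype; e.g. V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT is
the type of __builtin_ia32_addpd256_mask_round used in the header above:

    __v4df __builtin_ia32_addpd256_mask_round (__v4df, __v4df, __v4df,
					       __mmask8, int);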
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_round", IX86_BUILTIN_VFMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+/* AVX10.2. */
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
+
BDESC_END (ROUND_ARGS, MULTI_ARG)
/* FMA4 and XOP. */
case INT_FTYPE_V4SF_V4SF_INT_INT:
case INT_FTYPE_V2DF_V2DF_INT_INT:
return ix86_expand_sse_comi_round (d, exp, target);
+ case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
+ case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
+ case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
nargs = 5;
break;
case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
+ case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
+ case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
+ case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
#include <amxfp16intrin.h>
+#include <avx10_2roundingintrin.h>
+
#endif /* _IMMINTRIN_H_INCLUDED */
(plusminus:VFH
(match_operand:VFH 1 "<round_nimm_predicate>")
(match_operand:VFH 2 "<round_nimm_predicate>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
"ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
(define_insn "*<insn><mode>3<mask_name><round_name>"
(match_operand:VFH 1 "<bcst_round_nimm_predicate>" "<comm>0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
"TARGET_SSE && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)
- && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ && <mask_mode512bit_condition> && <round_mode_condition>"
"@
<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
(mult:VFH
(match_operand:VFH 1 "<round_nimm_predicate>")
(match_operand:VFH 2 "<round_nimm_predicate>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
"ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
(define_insn "*mul<mode>3<mask_name><round_name>"
(match_operand:VFH 1 "<bcst_round_nimm_predicate>" "%0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, <MODE>mode, operands)
- && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ && <mask_mode512bit_condition> && <round_mode_condition>"
"@
mul<ssemodesuffix>\t{%2, %0|%0, %2}
vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
(div:VFH
(match_operand:VFH 1 "register_operand" "0,v")
(match_operand:VFH 2 "<bcst_round_nimm_predicate>" "xBm,<bcst_round_constraint>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
"@
div<ssemodesuffix>\t{%2, %0|%0, %2}
vdiv<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
(define_insn "<sse>_sqrt<mode>2<mask_name><round_name>"
[(set (match_operand:VFH 0 "register_operand" "=x,v")
(sqrt:VFH (match_operand:VFH 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
"@
sqrt<ssemodesuffix>\t{%1, %0|%0, %1}
vsqrt<ssemodesuffix>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
(match_operand:VFH 1 "<round_saeonly_nimm_predicate>")
(match_operand:VFH 2 "<round_saeonly_nimm_predicate>")))]
"TARGET_SSE && <mask_mode512bit_condition>
- && <round_saeonly_mode512bit_condition>"
+ && <round_saeonly_mode_condition>"
{
if (!flag_finite_math_only || flag_signed_zeros)
{
"TARGET_SSE
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))
&& <mask_mode512bit_condition>
- && <round_saeonly_mode512bit_condition>"
+ && <round_saeonly_mode_condition>"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
IEEE_MAXMIN))]
"TARGET_SSE
&& <mask_mode512bit_condition>
- && <round_saeonly_mode512bit_condition>"
+ && <round_saeonly_mode_condition>"
"@
<ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
v<ieee_maxmin><ssemodesuffix>\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_saeonly_mask_op3>}"
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
(match_operand:SI 3 "<cmp_imm_predicate>" "n")]
UNSPEC_PCMP))]
- "TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512F && <round_saeonly_mode_condition>"
"v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
{
emit_insn (gen_fma_fmadd_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v")
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
"@
vfmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
{
emit_insn (gen_fma_fmsub_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(match_operand:VFH_AVX512VL 3 "register_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
"vfmsub231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
{
emit_insn (gen_fma_fnmadd_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v"))
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfnmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfnmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
"@
vfnmadd132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfnmadd213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
(match_operand:VFH_AVX512VL 3 "register_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
"vfnmadd231<ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
(match_operand:VFH_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VFH_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
{
emit_insn (gen_fma_fnmsub_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfnmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfnmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <round_mode_condition>"
"@
vfnmsub132<ssemodesuffix>\t{<round_op5>%2, %3, %0%{%4%}|%0%{%4%}, %3, %2<round_op5>}
vfnmsub213<ssemodesuffix>\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
(match_operand:VFH_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>,v,<round_constraint>")
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0")]
UNSPEC_FMADDSUB))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfmaddsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmaddsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(neg:VFH_AVX512VL
(match_operand:VFH_AVX512VL 3 "<round_nimm_predicate>" "v,<round_constraint>,0"))]
UNSPEC_FMADDSUB))]
- "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
"@
vfmsubadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
vfmsubadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx op0, op1, dest;
if (<round_embedded_complex>)
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
emit_insn (gen_fma_fmaddc_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx op0, op1, dest;
if (<round_embedded_complex>)
(match_operand:VHF_AVX512VL 2 "<round_expand_nimm_predicate>")
(match_operand:VHF_AVX512VL 3 "<round_expand_nimm_predicate>")
(match_operand:<avx512fmaskcmode> 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
emit_insn (gen_fma_fcmaddc_<mode>_maskz_1<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")
(match_operand:VHF_AVX512VL 3 "<round_nimm_predicate>" "0")]
UNSPEC_COMPLEX_F_C_MA))]
- "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <sdc_mask_mode512bit_condition> && <round_mode_condition>"
"v<complexopname><ssemodesuffix>\t{<round_sdc_mask_op4>%2, %1, %0<sdc_mask_op4>|%0<sdc_mask_op4>, %1, %2<round_sdc_mask_op4>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
(unspec:<avx512fmaskmode>
[(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
UNSPEC_COMPLEX_MASK)))]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
"v<complexopname><ssemodesuffix>\t{<round_op5>%2, %1, %0%{%4%}|%0%{%4%}, %1, %2<round_op5>}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
[(match_operand:VHF_AVX512VL 1 "<round_nimm_predicate>" "<int_comm>v")
(match_operand:VHF_AVX512VL 2 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_COMPLEX_F_C_MUL))]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
if (TARGET_DEST_FALSE_DEP_FOR_GLC
&& <maskc_dest_false_dep_for_glc_cond>)
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx op0, op1, dest;
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
operands[0], operands[1], operands[2], operands[3],
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx op0, op1, dest;
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx dest, op0, op1;
(match_operand:V8HF 2 "<round_expand_nimm_predicate>")
(match_operand:V8HF 3 "<round_expand_nimm_predicate>")
(match_operand:QI 4 "register_operand")]
- "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+ "TARGET_AVX512FP16 && <round_mode_condition>"
{
rtx dest, op0, op1;
[(set (match_operand:VF1 0 "register_operand" "=x,v")
(float:VF1
(match_operand:<sseintvecmode> 1 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
- "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode512bit_condition>"
+ "TARGET_SSE2 && <mask_mode512bit_condition> && <round_mode_condition>"
"@
cvtdq2ps\t{%1, %0|%0, %1}
vcvtdq2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_mode_condition>"
"vcvtps2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(unspec:VI8_256_512 [(match_operand:<ssePSmode2> 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_mode_condition>"
"vcvtps2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
[(set (match_operand:<ssePSmode2> 0 "register_operand" "=v")
(any_float:<ssePSmode2>
(match_operand:VI8_256_512 1 "nonimmediate_operand" "<round_constraint>")))]
- "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_mode_condition>"
"vcvt<floatsuffix>qq2ps<qq2pssuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
"vcvttpd2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(any_fix:<sseintvecmode>
(match_operand:VF2_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
"vcvttpd2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_mode_condition>"
"vcvtpd2qq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(unspec:<sseintvecmode>
[(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "<round_constraint>")]
UNSPEC_UNSIGNED_FIX_NOTRUNC))]
- "TARGET_AVX512DQ && <round_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_mode_condition>"
"vcvtpd2uqq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(unspec:VI8_256_512
[(match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
"vcvttps2<vcvtt_suffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
[(set (match_operand:VI8_256_512 0 "register_operand" "=v")
(any_fix:VI8_256_512
(match_operand:<ssePSmode2> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
"vcvttps2<fixsuffix>qq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
[(set (match_operand:VF2_512_256 0 "register_operand" "=v")
(float_extend:VF2_512_256
(match_operand:<sf2dfmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX && <mask_mode512bit_condition> && <round_saeonly_mode_condition>"
"vcvtps2pd\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
(match_operand:VF_AVX512VL 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_15_operand")]
UNSPEC_RANGE))]
- "TARGET_AVX512DQ && <round_saeonly_mode512bit_condition>"
+ "TARGET_AVX512DQ && <round_saeonly_mode_condition>"
{
if (TARGET_DEST_FALSE_DEP_FOR_GLC
&& <mask4_dest_false_dep_for_glc_cond>
(define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand")
(define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand")
(define_subst_attr "round_prefix" "round" "vex" "evex")
-(define_subst_attr "round_mode512bit_condition" "round" "1" "(<MODE>mode == V16SFmode
- || <MODE>mode == V8DFmode
- || <MODE>mode == V8DImode
- || <MODE>mode == V16SImode
- || <MODE>mode == V32HFmode)")
+(define_subst_attr "round_mode_condition" "round" "1" "((<MODE>mode == V16SFmode
+ || <MODE>mode == V8DFmode
+ || <MODE>mode == V8DImode
+ || <MODE>mode == V16SImode
+ || <MODE>mode == V32HFmode)
+ || (TARGET_AVX10_2_256
+ && (<MODE>mode == V8SFmode
+ || <MODE>mode == V4DFmode
+ || <MODE>mode == V4DImode
+ || <MODE>mode == V8SImode
+ || <MODE>mode == V16HFmode)))")
(define_subst_attr "round_modev4sf_condition" "round" "1" "(<MODE>mode == V4SFmode)")
(define_subst_attr "round_codefor" "round" "*" "")
(define_subst_attr "round_saeonly_constraint2" "round_saeonly" "m" "v")
(define_subst_attr "round_saeonly_nimm_predicate" "round_saeonly" "vector_operand" "register_operand")
(define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimmediate_operand" "register_operand")
-(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode
- || <MODE>mode == V8DFmode
- || <MODE>mode == V8DImode
- || <MODE>mode == V16SImode
- || <MODE>mode == V32HFmode)")
+(define_subst_attr "round_saeonly_mode_condition" "round_saeonly" "1" "((<MODE>mode == V16SFmode
+ || <MODE>mode == V8DFmode
+ || <MODE>mode == V8DImode
+ || <MODE>mode == V16SImode
+ || <MODE>mode == V32HFmode)
+ || (TARGET_AVX10_2_256
+ && (<MODE>mode == V8SFmode
+ || <MODE>mode == V4DFmode
+ || <MODE>mode == V4DImode
+ || <MODE>mode == V8SImode
+ || <MODE>mode == V16HFmode)))")
(define_subst "round_saeonly"
/* { dg-do compile } */
-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mavx512bw -mavx512fp16 -mavx512vl -mprefetchi" } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mprefetchi -mavx10.2-512" } */
/* { dg-add-options bind_pic_locally } */
#include <mm_malloc.h>
/* sm3intrin.h */
#define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
#include <wmmintrin.h>
#include <immintrin.h>
#include <mm3dnow.h>
/* { dg-do compile } */
-/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul -mavx512bw -mavx512fp16 -mavx512vl" } */
+/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -msse4a -maes -mpclmul -mavx10.2-512" } */
/* { dg-add-options bind_pic_locally } */
#include <mm_malloc.h>
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx10.2" } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\\\$3\[^\n\r]*\{sae\}\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\[^\{\n\]*\\\$4\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+volatile __m256 x;
+volatile __m256d xd;
+volatile __m256h xh;
+volatile __mmask8 m8;
+volatile __mmask16 m16;
+volatile __mmask32 m32;
+
+void extern
+avx10_2_test_1 (void)
+{
+ xd = _mm256_add_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ xd = _mm256_mask_add_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ xd = _mm256_maskz_add_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+ xh = _mm256_add_round_ph (xh, xh, 8);
+  xh = _mm256_mask_add_round_ph (xh, m16, xh, xh, 8);
+  xh = _mm256_maskz_add_round_ph (m16, xh, xh, 11);
+
+ x = _mm256_add_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ x = _mm256_mask_add_round_ps (x, m16, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ x = _mm256_maskz_add_round_ps (m16, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
+
+void extern
+avx10_2_test_2 (void)
+{
+ m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+ m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+
+ m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8);
+ m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4);
+
+ m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+ m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC);
+}
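
Another hedged sketch of the masked compare forms the test exercises,
assuming the same target flags; _CMP_LT_OS and _MM_FROUND_NO_EXC come from
the existing AVX headers:

    #include <immintrin.h>

    /* Ordered less-than compare of packed doubles with exceptions
       suppressed (SAE); lanes cleared in the input mask yield 0 bits.  */
    __mmask8
    lt_masked (__mmask8 m, __m256d a, __m256d b)
    {
      return _mm256_mask_cmp_round_pd_mask (m, a, b, _CMP_LT_OS,
					    _MM_FROUND_NO_EXC);
    }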
/* sm3intrin.h */
#define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
#include <x86intrin.h>
/* sm3intrin.h */
test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
+
+/* avx10_2roundingintrin.h */
+test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
+test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
+test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
+test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
+test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
+test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
+test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
/* sm3intrin.h */
test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1)
+
+/* avx10_2roundingintrin.h */
+test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
+test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
+test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
+test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
+test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9)
+test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
+test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
+test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
+test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
/* sm3intrin.h */
#define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1)
+/* avx10_2roundingintrin.h */
+#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8)
+#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8)
+
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
#include <x86intrin.h>