int amx_usable = 0;
/* Check if KL is usable. */
int has_kl = 0;
- /* Record AVX10 version. */
- int avx10_set = 0;
- int version = 0;
if ((ecx & bit_OSXSAVE))
{
/* Check if XMM, YMM, OPMASK, upper 256 bits of ZMM0-ZMM15 and
{
if (eax & bit_AVX512BF16)
set_feature (FEATURE_AVX512BF16);
- /* AVX10 has the same XSTATE as AVX512. */
- if (edx & bit_AVX10)
- avx10_set = 1;
}
if (amx_usable)
{
}
}
- /* Get Advanced Features at level 0x24 (eax = 0x24). */
- if (avx10_set && max_cpuid_level >= 0x24)
- {
- __cpuid (0x24, eax, ebx, ecx, edx);
- version = ebx & 0xff;
- if (ebx & bit_AVX10_256)
- switch (version)
- {
- case 1:
- set_feature (FEATURE_AVX10_1);
- break;
- default:
- gcc_unreachable ();
- }
- if (ebx & bit_AVX10_512)
- set_feature (FEATURE_AVX10_512BIT);
- }
-
/* Check cpuid level of extended features. */
__cpuid (0x80000000, ext_level, ebx, ecx, edx);
}
}
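For context, the detection being deleted above follows Intel's AVX10 enumeration scheme: the AVX10 feature flag is CPUID.(EAX=7,ECX=1):EDX bit 19, and leaf 0x24 then reports the converged vector ISA version in EBX[7:0] and the supported widths in EBX[17] (256-bit) and EBX[18] (512-bit), matching the bit_AVX10* definitions removed from cpuid.h later in this patch. A minimal standalone sketch of the same query, assuming GCC's <cpuid.h> helpers (__get_cpuid_max, __get_cpuid_count) and deliberately skipping the OSXSAVE/XGETBV state check that the real code performs, could look like this:

#include <cpuid.h>
#include <stdio.h>

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  /* Leaf 7, sub-leaf 1: EDX bit 19 advertises AVX10.  */
  if (__get_cpuid_max (0, 0) < 0x24
      || !__get_cpuid_count (7, 1, &eax, &ebx, &ecx, &edx)
      || !(edx & (1u << 19)))
    {
      puts ("AVX10 not enumerated");
      return 0;
    }

  /* Leaf 0x24, sub-leaf 0: EBX[7:0] is the AVX10 version,
     EBX[17]/EBX[18] flag 256-bit/512-bit vector support.  */
  __get_cpuid_count (0x24, 0, &eax, &ebx, &ecx, &edx);
  printf ("AVX10 version %u, 256-bit %d, 512-bit %d\n",
          ebx & 0xff, !!(ebx & (1u << 17)), !!(ebx & (1u << 18)));
  return 0;
}

The removed GCC code additionally requires OSXSAVE and the AVX-512 XSTATE components to be enabled before trusting these bits, which is why avx10_set is only set inside the XCR0 check above.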
-#define SET_AVX10_512(A,B) \
- if (has_cpu_feature (cpu_model, cpu_features2, FEATURE_AVX10_##A)) \
- { \
- CHECK___builtin_cpu_supports (B); \
- set_cpu_feature (cpu_model, cpu_features2, FEATURE_AVX10_##A##_512); \
- }
-
- if (has_cpu_feature (cpu_model, cpu_features2, FEATURE_AVX10_512BIT))
- SET_AVX10_512 (1, "avx10.1-512");
-
-#undef SET_AVX10_512
-
gcc_assert (cpu_model->__cpu_vendor < VENDOR_MAX);
gcc_assert (cpu_model->__cpu_type < CPU_TYPE_MAX);
gcc_assert (cpu_model->__cpu_subtype < CPU_SUBTYPE_MAX);
#define OPTION_MASK_ISA2_SM3_SET OPTION_MASK_ISA2_SM3
#define OPTION_MASK_ISA2_SHA512_SET OPTION_MASK_ISA2_SHA512
#define OPTION_MASK_ISA2_SM4_SET OPTION_MASK_ISA2_SM4
-#define OPTION_MASK_ISA2_AVX10_512BIT_SET OPTION_MASK_ISA2_AVX10_512BIT
-#define OPTION_MASK_ISA2_AVX10_1_SET OPTION_MASK_ISA2_AVX10_1
/* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
as -msse4.2. */
#define OPTION_MASK_ISA2_AVX2_UNSET \
(OPTION_MASK_ISA2_AVXIFMA_UNSET | OPTION_MASK_ISA2_AVXVNNI_UNSET \
| OPTION_MASK_ISA2_AVXVNNIINT8_UNSET | OPTION_MASK_ISA2_AVXNECONVERT_UNSET \
- | OPTION_MASK_ISA2_AVXVNNIINT16_UNSET | OPTION_MASK_ISA2_AVX512F_UNSET \
- | OPTION_MASK_ISA2_AVX10_1_UNSET)
+ | OPTION_MASK_ISA2_AVXVNNIINT16_UNSET | OPTION_MASK_ISA2_AVX512F_UNSET)
#define OPTION_MASK_ISA_AVX512F_UNSET \
(OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_AVX512CD_UNSET \
| OPTION_MASK_ISA_AVX512PF_UNSET | OPTION_MASK_ISA_AVX512ER_UNSET \
#define OPTION_MASK_ISA2_SM3_UNSET OPTION_MASK_ISA2_SM3
#define OPTION_MASK_ISA2_SHA512_UNSET OPTION_MASK_ISA2_SHA512
#define OPTION_MASK_ISA2_SM4_UNSET OPTION_MASK_ISA2_SM4
-#define OPTION_MASK_ISA2_AVX10_512BIT_UNSET OPTION_MASK_ISA2_AVX10_512BIT
-#define OPTION_MASK_ISA2_AVX10_1_UNSET OPTION_MASK_ISA2_AVX10_1
/* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
as -mno-sse4.1. */
}
return true;
- case OPT_mavx10_max_512bit:
- if (value)
- {
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AVX10_512BIT_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_512BIT_SET;
- }
- else
- {
- opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AVX10_512BIT_UNSET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_512BIT_UNSET;
- }
- return true;
-
- case OPT_mavx10_1:
- if (value)
- {
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX2_SET;
- opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX2_SET;
- }
- else
- {
- opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AVX10_1_UNSET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_1_UNSET;
- }
- return true;
-
- case OPT_mavx10_1_256:
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AVX10_512BIT_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_512BIT_SET;
- opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX2_SET;
- opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX2_SET;
- return true;
-
- case OPT_mavx10_1_512:
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_1_SET;
- opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AVX10_512BIT_SET;
- opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AVX10_512BIT_SET;
- opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX2_SET;
- opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX2_SET;
- return true;
-
case OPT_mfma:
if (value)
{
FEATURE_SM3,
FEATURE_SHA512,
FEATURE_SM4,
- FEATURE_AVX10_512BIT,
- FEATURE_AVX10_1,
- FEATURE_AVX10_1_512,
CPU_FEATURE_MAX
};
ISA_NAMES_TABLE_ENTRY("sm3", FEATURE_SM3, P_NONE, "-msm3")
ISA_NAMES_TABLE_ENTRY("sha512", FEATURE_SHA512, P_NONE, "-msha512")
ISA_NAMES_TABLE_ENTRY("sm4", FEATURE_SM4, P_NONE, "-msm4")
- ISA_NAMES_TABLE_ENTRY("avx10-max-512bit", FEATURE_AVX10_512BIT,
- P_NONE, "-mavx10-max-512bit")
- ISA_NAMES_TABLE_ENTRY("avx10.1", FEATURE_AVX10_1, P_NONE, "-mavx10.1")
- ISA_NAMES_TABLE_ENTRY("avx10.1-256", FEATURE_AVX10_1, P_NONE, NULL)
- ISA_NAMES_TABLE_ENTRY("avx10.1-512", FEATURE_AVX10_1_512, P_NONE, NULL)
ISA_NAMES_TABLE_END
"TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 ? FP_SECOND_REG : NO_REGS"
"Second from top of 80387 floating-point stack (@code{%st(1)}).")
-(define_register_constraint "Yk" "(TARGET_AVX512F || TARGET_AVX10_1) ? MASK_REGS : NO_REGS"
+(define_register_constraint "Yk" "TARGET_AVX512F ? MASK_REGS : NO_REGS"
"@internal Any mask register that can be used as predicate, i.e. k1-k7.")
-(define_register_constraint "k" "(TARGET_AVX512F || TARGET_AVX10_1) ? ALL_MASK_REGS : NO_REGS"
+(define_register_constraint "k" "TARGET_AVX512F ? ALL_MASK_REGS : NO_REGS"
"@internal Any mask register.")
;; Vector registers (also used for plain floating point nowadays).
"@internal Lower SSE register when avoiding REX prefix and all SSE registers otherwise.")
(define_register_constraint "Yv"
- "(TARGET_AVX512VL || TARGET_AVX10_1) ? ALL_SSE_REGS : TARGET_SSE ? SSE_REGS : NO_REGS"
+ "TARGET_AVX512VL ? ALL_SSE_REGS : TARGET_SSE ? SSE_REGS : NO_REGS"
"@internal For AVX512VL, any EVEX encodable SSE register (@code{%xmm0-%xmm31}), otherwise any SSE register.")
(define_register_constraint "Yw"
#define bit_AVXNECONVERT (1 << 5)
#define bit_AVXVNNIINT16 (1 << 10)
#define bit_PREFETCHI (1 << 14)
-#define bit_AVX10 (1 << 19)
/* Extended State Enumeration Sub-leaf (%eax == 0xd, %ecx == 1) */
#define bit_XSAVEOPT (1 << 0)
/* %ebx */
#define bit_PTWRITE (1 << 4)
-/* AVX10 sub leaf (%eax == 0x24) */
-/* %ebx */
-#define bit_AVX10_256 (1 << 17)
-#define bit_AVX10_512 (1 << 18)
-
/* Keylocker leaf (%eax == 0x19) */
/* %ebx */
#define bit_AESKLE ( 1<<0 )
def_or_undef (parse_in, "__SHA512__");
if (isa_flag2 & OPTION_MASK_ISA2_SM4)
def_or_undef (parse_in, "__SM4__");
- if (isa_flag2 & OPTION_MASK_ISA2_AVX10_512BIT)
- def_or_undef (parse_in, "__AVX10_512BIT__");
- if (isa_flag2 & OPTION_MASK_ISA2_AVX10_1)
- def_or_undef (parse_in, "__AVX10_1__");
if (TARGET_IAMCU)
{
def_or_undef (parse_in, "__iamcu");
DEF_PTA(SM3)
DEF_PTA(SHA512)
DEF_PTA(SM4)
-DEF_PTA(AVX10_512BIT)
-DEF_PTA(AVX10_1)
{ "-mavxvnniint16", OPTION_MASK_ISA2_AVXVNNIINT16 },
{ "-msm3", OPTION_MASK_ISA2_SM3 },
{ "-msha512", OPTION_MASK_ISA2_SHA512 },
- { "-msm4", OPTION_MASK_ISA2_SM4 },
- { "-mavx10-max-512bit", OPTION_MASK_ISA2_AVX10_512BIT },
- { "-mavx10.1", OPTION_MASK_ISA2_AVX10_1 }
+ { "-msm4", OPTION_MASK_ISA2_SM4 }
};
static struct ix86_target_opts isa_opts[] =
{
ix86_opt_ix86_no,
ix86_opt_str,
ix86_opt_enum,
- ix86_opt_isa,
+ ix86_opt_isa
};
static const struct
IX86_ATTR_ISA ("sm3", OPT_msm3),
IX86_ATTR_ISA ("sha512", OPT_msha512),
IX86_ATTR_ISA ("sm4", OPT_msm4),
- IX86_ATTR_ISA ("avx10-max-512bit", OPT_mavx10_max_512bit),
- IX86_ATTR_ISA ("avx10.1", OPT_mavx10_1),
- IX86_ATTR_ISA ("avx10.1-256", OPT_mavx10_1_256),
- IX86_ATTR_ISA ("avx10.1-512", OPT_mavx10_1_512),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
&= ~((OPTION_MASK_ISA_BMI | OPTION_MASK_ISA_BMI2 | OPTION_MASK_ISA_TBM)
& ~opts->x_ix86_isa_flags_explicit);
- /* Enable AVX512{F,VL,BW,DQ,CD,BF16,FP16,VBMI,VBMI2,VNNI,IFMA,BITALG,
- VPOPCNTDQ} features for AVX10.1/512. */
- if (TARGET_AVX10_1_P (opts->x_ix86_isa_flags2)
- && TARGET_AVX10_512BIT_P (opts->x_ix86_isa_flags2))
- {
- opts->x_ix86_isa_flags
- |= OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_AVX512CD
- | OPTION_MASK_ISA_AVX512DQ | OPTION_MASK_ISA_AVX512BW
- | OPTION_MASK_ISA_AVX512VL | OPTION_MASK_ISA_AVX512IFMA
- | OPTION_MASK_ISA_AVX512VBMI | OPTION_MASK_ISA_AVX512VBMI2
- | OPTION_MASK_ISA_AVX512VNNI | OPTION_MASK_ISA_AVX512VPOPCNTDQ
- | OPTION_MASK_ISA_AVX512BITALG;
- opts->x_ix86_isa_flags2
- |= OPTION_MASK_ISA2_AVX512FP16 | OPTION_MASK_ISA2_AVX512BF16;
- }
-
/* Validate -mpreferred-stack-boundary= value or default it to
PREFERRED_STACK_BOUNDARY_DEFAULT. */
ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
accessible_reg_set &= ~reg_class_contents[FLOAT_REGS];
- /* If AVX512F and AVX10 is disabled, disable the registers. */
- if (!TARGET_AVX512F && !TARGET_AVX10_1)
+ /* If AVX512F is disabled, disable the registers. */
+ if (! TARGET_AVX512F)
{
for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
CLEAR_HARD_REG_BIT (accessible_reg_set, i);
we can only use zmm register move without memory operand. */
if (evex_reg_p
&& !TARGET_AVX512VL
- && !TARGET_AVX10_1
&& GET_MODE_SIZE (mode) < 64)
{
/* NB: Even though ix86_hard_regno_mode_ok doesn't allow
return ((TARGET_AVX512F && VALID_MASK_REG_MODE (mode))
|| (TARGET_AVX512BW
- && VALID_MASK_AVX512BW_MODE (mode))
- || (TARGET_AVX10_1 && VALID_MASK_AVX10_MODE (mode)));
+ && VALID_MASK_AVX512BW_MODE (mode)));
}
if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
|| VALID_AVX512VL_128_REG_MODE (mode)))
return true;
- /* AVX10_1 allows SSE registers 16+ for 256-bit modes. */
- if (TARGET_AVX10_1
- && (VALID_AVX256_REG_OR_OI_MODE (mode)
- || VALID_AVX512VL_128_REG_MODE (mode)
- || VALID_AVX512F_SCALAR_MODE (mode)))
- return true;
-
/* xmm16-xmm31 are only available for AVX-512. */
if (EXT_REX_SSE_REGNO_P (regno))
return false;
mask = XEXP (x, 2);
/* This is a masked instruction; assume the same cost
as the nonmasked variant. */
- if ((TARGET_AVX512F || TARGET_AVX10_1)
- && register_operand (mask, GET_MODE (mask)))
+ if (TARGET_AVX512F && register_operand (mask, GET_MODE (mask)))
*total = rtx_cost (XEXP (x, 0), mode, outer_code, opno, speed);
else
*total = cost->sse_op;
#define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode)
-#define VALID_MASK_AVX10_MODE(MODE) ((MODE) == SImode || (MODE) == HImode \
- || (MODE) == QImode)
-
#define VALID_FP_MODE_P(MODE) \
((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
|| (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode)
mscatter
Target Alias(mtune-ctrl=, use_scatter, ^use_scatter)
Enable vectorization for scatter instruction.
-
-;; Only for implementation use
-mavx10-max-512bit
-Target Mask(ISA2_AVX10_512BIT) Var(ix86_isa_flags2) Undocumented Save
-Indicates 512 bit vector width support for AVX10.
-
-mavx10.1
-Target Mask(ISA2_AVX10_1) Var(ix86_isa_flags2) Save
-Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-and AVX10.1 built-in functions and code generation.
-
-mavx10.1-256
-Target RejectNegative
-Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-and AVX10.1 built-in functions and code generation.
-
-mavx10.1-512
-Target RejectNegative
-Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX, AVX2,
-and AVX10.1-512 built-in functions and code generation.
@itemx no-sm4
Enable/disable the generation of the SM4 instructions.
-@cindex @code{target("avx10.1")} function attribute, x86
-@item avx10.1
-@itemx no-avx10.1
-Enable/disable the generation of the AVX10.1 instructions.
-
-@cindex @code{target("avx10.1-256")} function attribute, x86
-@item avx10.1-256
-Enable the generation of the AVX10.1 instructions.
-
-@cindex @code{target("avx10.1-512")} function attribute, x86
-@item avx10.1-512
-Enable the generation of the AVX10.1 512 bit instructions.
-
@cindex @code{target("cld")} function attribute, x86
@item cld
@itemx no-cld
-mamx-tile -mamx-int8 -mamx-bf16 -muintr -mhreset -mavxvnni
-mavx512fp16 -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16
-mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4
--mavx10.1 -mavx10.1-256 -mavx10.1-512
-mcldemote -mms-bitfields -mno-align-stringops -minline-all-stringops
-minline-stringops-dynamically -mstringop-strategy=@var{alg}
-mkl -mwidekl
@need 200
@opindex msm4
@itemx -msm4
-@need 200
-@opindex mavx10.1
-@itemx -mavx10.1
-@need 200
-@opindex mavx10.1-256
-@itemx -mavx10.1-256
-@need 200
-@opindex mavx10.1-512
-@itemx -mavx10.1-512
These switches enable the use of instructions in the MMX, SSE,
AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
AES, PCLMUL, CLFLUSHOPT, CLWB, FSGSBASE, PTWRITE, RDRND, F16C, FMA, PCONFIG,
ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AMX-FP16, PREFETCHI, RAOINT,
-AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4, AVX10.1 or CLDEMOTE extended
-instruction sets. Each has a corresponding @option{-mno-} option to disable
-use of these instructions.
+AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4 or CLDEMOTE extended instruction
+sets. Each has a corresponding @option{-mno-} option to disable use of these
+instructions.
These extensions are also available as built-in functions: see
@ref{x86 Built-in Functions}, for details of the functions enabled and
@item avx_runtime
Target supports the execution of @code{avx} instructions.
-@item avx10.1
-Target supports the execution of @code{avx10.1} instructions.
-
-@item avx10.1-256
-Target supports the execution of @code{avx10.1} instructions.
-
-@item avx10.1-512
-Target supports the execution of @code{avx10.1-512} instructions.
-
@item avx2
Target supports compiling @code{avx2} instructions.
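The documentation above covers compile-time enablement via -m switches, target attributes, and effective targets; at run time the same features are typically probed with __builtin_cpu_supports, which is what the deleted multiversioning test below relies on. A minimal C sketch of manual dispatch, using AVX2 rather than the AVX10.1 names removed by this patch (illustrative only, not part of the change), might be:

static int
impl_generic (void)
{
  return 0;
}

static int __attribute__ ((target ("avx2")))
impl_avx2 (void)
{
  return 1;
}

int
pick (void)
{
  /* __builtin_cpu_supports returns nonzero when the running CPU
     advertises the named feature string.  */
  if (__builtin_cpu_supports ("avx2"))
    return impl_avx2 ();
  return impl_generic ();
}

Function multiversioning via the target attribute on multiple definitions, as in the deleted dispatch test that follows, generates an equivalent resolver automatically.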
+++ /dev/null
-// Test that dispatching can choose the right multiversion
-// for avx10.x-512 microarchitecture levels.
-
-// { dg-do run }
-// { dg-require-ifunc "" }
-// { dg-options "-O2" }
-
-#include <assert.h>
-
-int __attribute__ ((target("default")))
-foo ()
-{
- return 0;
-}
-
-int __attribute__ ((target("avx10.1-512"))) foo () {
- return 1;
-}
-
-int main ()
-{
- int val = foo ();
-
- if (__builtin_cpu_supports ("avx10.1-512"))
- assert (val == 1);
- else
- assert (val == 0);
-
- return 0;
-}
+++ /dev/null
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O2 -march=x86-64 -mavx10.1" } */
-
-#include <immintrin.h>
-
-void
-f1 ()
-{
- register __m256d a __asm ("ymm17");
- register __m256d b __asm ("ymm16");
- a = _mm256_add_pd (a, b);
- asm volatile ("" : "+v" (a));
-}
-
-void
-f2 ()
-{
- register __m128d a __asm ("xmm17");
- register __m128d b __asm ("xmm16");
- a = _mm_add_pd (a, b);
- asm volatile ("" : "+v" (a));
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64" } */
-/* { dg-final { scan-assembler "%zmm" } } */
-
-typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
-
-__attribute__ ((target ("avx10.1-512"))) __m512d
-foo ()
-{
- __m512d a, b;
- a = a + b;
- return a;
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64 -mavx10.1-512" } */
-/* { dg-final { scan-assembler "%zmm" } } */
-
-typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
-
-__m512d
-foo ()
-{
- __m512d a, b;
- a = a + b;
- return a;
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-O2 -march=x86-64 -mavx10.1" } */
-
-#include <immintrin.h>
-
-int
-foo (int c)
-{
- register int a __asm ("k7") = c;
- int b = foo (a);
- asm volatile ("" : "+k" (b));
- return b;
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-O2 -march=x86-64 -mavx10.1-512" } */
-
-#include <immintrin.h>
-
-long long
-foo (long long c)
-{
- register long long a __asm ("k7") = c;
- long long b = foo (a);
- asm volatile ("" : "+k" (b));
- return b;
-}
+++ /dev/null
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O0 -march=x86-64 -mavx10.1 -Wno-psabi" } */
-/* { dg-final { scan-assembler-not ".%zmm" } } */
-
-typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
-
-__m512d
-foo ()
-{
- __m512d a, b;
- a = a + b;
- return a;
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-O2 -march=x86-64 -mavx10.1" } */
-
-#include <immintrin.h>
-
-long long
-foo (long long c)
-{
- register long long a __asm ("k7") = c;
- long long b = foo (a);
- asm volatile ("" : "+k" (b)); /* { dg-error "inconsistent operand constraints in an 'asm'" } */
- return b;
-}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64 -Wno-psabi" } */
-/* { dg-final { scan-assembler-not ".%zmm" } } */
-
-typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
-
-__attribute__ ((target ("avx10.1"))) __m512d
-foo ()
-{
- __m512d a, b;
- a = a + b;
- return a;
-}
+++ /dev/null
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O2 -march=x86-64 -mavx10.1-256" } */
-
-#include "avx10_1-1.c"
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-march=x86-64 -Wno-psabi" } */
-/* { dg-final { scan-assembler-not ".%zmm" } } */
-
-typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
-
-__attribute__ ((target ("avx10.1-256"))) __m512d
-foo ()
-{
- __m512d a, b;
- a = a + b;
- return a;
-}