set_feature (FEATURE_IBT);
if (edx & bit_UINTR)
set_feature (FEATURE_UINTR);
+ if (edx & bit_USER_MSR)
+ set_feature (FEATURE_USER_MSR);
if (amx_usable)
{
if (edx & bit_AMX_TILE)
#define OPTION_MASK_ISA2_SM4_SET OPTION_MASK_ISA2_SM4
#define OPTION_MASK_ISA2_APX_F_SET OPTION_MASK_ISA2_APX_F
#define OPTION_MASK_ISA2_EVEX512_SET OPTION_MASK_ISA2_EVEX512
+#define OPTION_MASK_ISA2_USER_MSR_SET OPTION_MASK_ISA2_USER_MSR
/* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
as -msse4.2. */
#define OPTION_MASK_ISA2_SM4_UNSET OPTION_MASK_ISA2_SM4
#define OPTION_MASK_ISA2_APX_F_UNSET OPTION_MASK_ISA2_APX_F
#define OPTION_MASK_ISA2_EVEX512_UNSET OPTION_MASK_ISA2_EVEX512
+#define OPTION_MASK_ISA2_USER_MSR_UNSET OPTION_MASK_ISA2_USER_MSR
/* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
as -mno-sse4.1. */
}
return true;
+ case OPT_musermsr:
+ if (value)
+ {
+ opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_USER_MSR_SET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_USER_MSR_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_USER_MSR_UNSET;
+ opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_USER_MSR_UNSET;
+ }
+ return true;
+
case OPT_mfma:
if (value)
{
FEATURE_SHA512,
FEATURE_SM4,
FEATURE_APX_F,
+ FEATURE_USER_MSR,
CPU_FEATURE_MAX
};
ISA_NAMES_TABLE_ENTRY("sha512", FEATURE_SHA512, P_NONE, "-msha512")
ISA_NAMES_TABLE_ENTRY("sm4", FEATURE_SM4, P_NONE, "-msm4")
ISA_NAMES_TABLE_ENTRY("apxf", FEATURE_APX_F, P_NONE, "-mapxf")
+ ISA_NAMES_TABLE_ENTRY("usermsr", FEATURE_USER_MSR, P_NONE, "-musermsr")
ISA_NAMES_TABLE_END
avxvnniint8intrin.h avxneconvertintrin.h
cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
- sm3intrin.h sha512intrin.h sm4intrin.h"
+ sm3intrin.h sha512intrin.h sm4intrin.h
+ usermsrintrin.h"
;;
ia64-*-*)
extra_headers=ia64intrin.h
#define bit_AVXNECONVERT (1 << 5)
#define bit_AVXVNNIINT16 (1 << 10)
#define bit_PREFETCHI (1 << 14)
+#define bit_USER_MSR (1 << 15)
#define bit_APX_F (1 << 21)
/* Extended State Enumeration Sub-leaf (%eax == 0xd, %ecx == 1) */
# SHA512 builtins
DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
+
+# USER_MSR builtins
+DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
"__builtin_ia32_testui",
UINT8_FTYPE_VOID, IX86_BUILTIN_TESTUI);
+ /* USER_MSR. */
+ def_builtin (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_USER_MSR,
+ "__builtin_ia32_urdmsr", UINT64_FTYPE_UINT64,
+ IX86_BUILTIN_URDMSR);
+ def_builtin (OPTION_MASK_ISA_64BIT, OPTION_MASK_ISA2_USER_MSR,
+ "__builtin_ia32_uwrmsr", VOID_FTYPE_UINT64_UINT64,
+ IX86_BUILTIN_UWRMSR);
+
/* CLDEMOTE. */
def_builtin (0, OPTION_MASK_ISA2_CLDEMOTE, "__builtin_ia32_cldemote",
VOID_FTYPE_PCVOID, IX86_BUILTIN_CLDEMOTE);
IX86_BUILTIN_MWAIT,
IX86_BUILTIN_UMONITOR,
IX86_BUILTIN_UMWAIT,
+ IX86_BUILTIN_URDMSR,
+ IX86_BUILTIN_UWRMSR,
IX86_BUILTIN_TPAUSE,
IX86_BUILTIN_TESTUI,
IX86_BUILTIN_CLZERO,
def_or_undef (parse_in, "__SM4__");
if (isa_flag2 & OPTION_MASK_ISA2_EVEX512)
def_or_undef (parse_in, "__EVEX512__");
+ if (isa_flag2 & OPTION_MASK_ISA2_USER_MSR)
+ def_or_undef (parse_in, "__USER_MSR__");
if (TARGET_IAMCU)
{
def_or_undef (parse_in, "__iamcu");
return 0;
}
+ case IX86_BUILTIN_URDMSR:
+ case IX86_BUILTIN_UWRMSR:
+ {
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+
+ if (CONST_INT_P (op0))
+ {
+ unsigned HOST_WIDE_INT val = UINTVAL (op0);
+ if (val > 0xffffffff)
+ op0 = force_reg (DImode, op0);
+ }
+ else
+ op0 = force_reg (DImode, op0);
+
+ if (fcode == IX86_BUILTIN_UWRMSR)
+ {
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ op1 = expand_normal (arg1);
+ op1 = force_reg (DImode, op1);
+ icode = CODE_FOR_uwrmsr;
+ target = 0;
+ }
+ else
+ {
+ if (target == 0)
+ target = gen_reg_rtx (DImode);
+ icode = CODE_FOR_urdmsr;
+ op1 = op0;
+ op0 = target;
+ }
+ emit_insn (GEN_FCN (icode) (op0, op1));
+ return target;
+ }
+
case IX86_BUILTIN_VEC_INIT_V2SI:
case IX86_BUILTIN_VEC_INIT_V4HI:
case IX86_BUILTIN_VEC_INIT_V8QI:
DEF_PTA(SHA512)
DEF_PTA(SM4)
DEF_PTA(APX_F)
+DEF_PTA(USER_MSR)
{ "-msm3", OPTION_MASK_ISA2_SM3 },
{ "-msha512", OPTION_MASK_ISA2_SHA512 },
{ "-msm4", OPTION_MASK_ISA2_SM4 },
- { "-mevex512", OPTION_MASK_ISA2_EVEX512 }
+ { "-mevex512", OPTION_MASK_ISA2_EVEX512 },
+ { "-musermsr", OPTION_MASK_ISA2_USER_MSR }
};
static struct ix86_target_opts isa_opts[] =
{
IX86_ATTR_ISA ("sm4", OPT_msm4),
IX86_ATTR_ISA ("apxf", OPT_mapxf),
IX86_ATTR_ISA ("evex512", OPT_mevex512),
+ IX86_ATTR_ISA ("usermsr", OPT_musermsr),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
;; For PREFETCHI support
UNSPECV_PREFETCHI
+
+ ;; For USER_MSR support
+ UNSPECV_URDMSR
+ UNSPECV_UWRMSR
])
;; Constants to represent rounding modes in the ROUND instruction
DONE;
})
+(define_insn "urdmsr"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI
+ [(match_operand:DI 1 "x86_64_szext_nonmemory_operand" "reZ")]
+ UNSPECV_URDMSR))]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "urdmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
+(define_insn "uwrmsr"
+ [(unspec_volatile
+ [(match_operand:DI 0 "x86_64_szext_nonmemory_operand" "reZ")
+ (match_operand:DI 1 "register_operand" "r")]
+ UNSPECV_UWRMSR)]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "uwrmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
(include "mmx.md")
(include "sse.md")
(include "sync.md")
mevex512
Target Mask(ISA2_EVEX512) Var(ix86_isa_flags2) Save
Support 512 bit vector built-in functions and code generation.
+
+musermsr
+Target Mask(ISA2_USER_MSR) Var(ix86_isa_flags2) Save
+Support USER_MSR built-in functions and code generation.
--- /dev/null
+/* Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if !defined _X86GPRINTRIN_H_INCLUDED
+#error "Never use <usermsrintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef _USER_MSRINTRIN_H_INCLUDED
+#define _USER_MSRINTRIN_H_INCLUDED
+
+#ifdef __x86_64__
+
+#ifndef __USER_MSR__
+#pragma GCC push_options
+#pragma GCC target("usermsr")
+#define __DISABLE_USER_MSR__
+#endif /* __USER_MSR__ */
+
+extern __inline unsigned long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_urdmsr (unsigned long long __A)
+{
+ return (unsigned long long) __builtin_ia32_urdmsr (__A);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_uwrmsr (unsigned long long __A, unsigned long long __B)
+{
+ __builtin_ia32_uwrmsr (__A, __B);
+}
+
+#ifdef __DISABLE_USER_MSR__
+#undef __DISABLE_USER_MSR__
+#pragma GCC pop_options
+#endif /* __DISABLE_USER_MSR__ */
+
+#endif /* __x86_64__ */
+
+#endif /* _USER_MSRINTRIN_H_INCLUDED */
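A minimal usage sketch of the two intrinsics defined above, assuming a 64-bit target built with -musermsr; the function name and the MSR-index handling are illustrative only:

#include <x86gprintrin.h>

/* Illustrative only: read an MSR, then write a new value back.
   Which MSRs are accessible from user mode is controlled by the OS.  */
unsigned long long
read_then_update (unsigned long long msr_index, unsigned long long new_value)
{
  unsigned long long old = _urdmsr (msr_index);	/* expands to URDMSR */
  _uwrmsr (msr_index, new_value);		/* expands to UWRMSR */
  return old;
}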
#include <hresetintrin.h>
+#include <usermsrintrin.h>
+
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_wbinvd (void)
@itemx no-sm4
Enable/disable the generation of the SM4 instructions.
+@cindex @code{target("usermsr")} function attribute, x86
+@item usermsr
+@itemx no-usermsr
+Enable/disable the generation of the USER_MSR instructions.
+
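A short sketch of the attribute in use, assuming a 64-bit target with <x86gprintrin.h> available; the function name is illustrative only:

#include <x86gprintrin.h>

/* Illustrative only: enable USER_MSR code generation for this function
   without compiling the whole translation unit with -musermsr.  */
__attribute__ ((target ("usermsr")))
unsigned long long
read_one_msr (unsigned long long idx)
{
  return _urdmsr (idx);
}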
@cindex @code{target("cld")} function attribute, x86
@item cld
@itemx no-cld
-mamx-tile -mamx-int8 -mamx-bf16 -muintr -mhreset -mavxvnni
-mavx512fp16 -mavxifma -mavxvnniint8 -mavxneconvert -mcmpccxadd -mamx-fp16
-mprefetchi -mraoint -mamx-complex -mavxvnniint16 -msm3 -msha512 -msm4 -mapxf
+-musermsr
-mcldemote -mms-bitfields -mno-align-stringops -minline-all-stringops
-minline-stringops-dynamically -mstringop-strategy=@var{alg}
-mkl -mwidekl
@need 200
@opindex mapxf
@itemx -mapxf
+@need 200
+@opindex musermsr
+@itemx -musermsr
These switches enable the use of instructions in the MMX, SSE,
AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
AES, PCLMUL, CLFLUSHOPT, CLWB, FSGSBASE, PTWRITE, RDRND, F16C, FMA, PCONFIG,
ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AMX-FP16, PREFETCHI, RAOINT,
-AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4, APX_F or CLDEMOTE extended
-instruction sets. Each has a corresponding @option{-mno-} option to disable
-use of these instructions.
+AMX-COMPLEX, AVXVNNIINT16, SM3, SHA512, SM4, APX_F, USER_MSR or CLDEMOTE
+extended instruction sets. Each has a corresponding @option{-mno-} option
+to disable use of these instructions.
These extensions are also available as built-in functions: see
@ref{x86 Built-in Functions}, for details of the functions enabled and
accepts only @code{EM_SPARC} executables and chokes on @code{EM_SPARC32PLUS}
or @code{EM_SPARCV9} executables.
+@item user_msr
+Target supports the execution of @code{user_msr} instructions.
+
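A sketch of how a runtime test might gate on this keyword:

/* { dg-require-effective-target user_msr } */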
@item vect_cmdline_needed
Target requires a command line argument to enable a SIMD instruction set.
extern void test_sm3 (void) __attribute__((__target__("sm3")));
extern void test_sha512 (void) __attribute__((__target__("sha512")));
extern void test_sm4 (void) __attribute__((__target__("sm4")));
+extern void test_user_msr (void) __attribute__((__target__("usermsr")));
extern void test_no_sgx (void) __attribute__((__target__("no-sgx")));
extern void test_no_avx5124fmaps(void) __attribute__((__target__("no-avx5124fmaps")));
extern void test_no_sm3 (void) __attribute__((__target__("no-sm3")));
extern void test_no_sha512 (void) __attribute__((__target__("no-sha512")));
extern void test_no_sm4 (void) __attribute__((__target__("no-sm4")));
+extern void test_no_user_msr (void) __attribute__((__target__("no-usermsr")));
extern void test_arch_nocona (void) __attribute__((__target__("arch=nocona")));
extern void test_arch_core2 (void) __attribute__((__target__("arch=core2")));
--- /dev/null
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-musermsr -O2" } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\\$121" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\\$121" 1 } } */
+
+#include <x86gprintrin.h>
+
+volatile unsigned long long x;
+volatile unsigned long long y;
+
+void extern
+user_msr_test (void)
+{
+ x = _urdmsr(y);
+ x = _urdmsr(121);
+ _uwrmsr(y, x);
+ _uwrmsr(121, x);
+}
--- /dev/null
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -musermsr" } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
+/* { dg-final { scan-assembler-times "movabsq\[ \\t\]\\\$20018842566655, \\%r\[a-z\]x" 1 } } */
+
+#include <x86gprintrin.h>
+
+volatile unsigned long long x;
+
+void extern
+user_msr_test (void)
+{
+ x = _urdmsr(0x1234ffffffffULL);
+ _uwrmsr(0x1234ffffffffULL, x);
+}
/* Test that <x86gprintrin.h> is usable with -O -std=c89 -pedantic-errors. */
/* { dg-do compile } */
/* { dg-options "-O -std=c89 -pedantic-errors -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
#include <x86gprintrin.h>
/* { dg-do compile } */
/* { dg-options "-O2 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
/* { dg-add-options bind_pic_locally } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
/* Test that the intrinsics in <x86gprintrin.h> compile with optimization.
All of them are defined as inline functions that reference the proper
#define __builtin_ia32_cmpccxadd(A, B, C, D) __builtin_ia32_cmpccxadd(A, B, C, 1)
#define __builtin_ia32_cmpccxadd64(A, B, C, D) __builtin_ia32_cmpccxadd64(A, B, C, 1)
+/* usermsrintrin.h */
+#define __builtin_ia32_urdmsr(A) __builtin_ia32_urdmsr(1)
+#define __builtin_ia32_uwrmsr(A, B) __builtin_ia32_uwrmsr(1, B)
+
#include <x86gprintrin.h>
/* { dg-do compile } */
/* { dg-options "-O0 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mraoint -mno-sse -mno-mmx" } */
/* { dg-add-options bind_pic_locally } */
-/* { dg-additional-options "-mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
+/* { dg-additional-options "-musermsr -mcmpccxadd -mprefetchi -muintr" { target { ! ia32 } } } */
/* Test that the intrinsics in <x86gprintrin.h> compile without optimization.
All of them are defined as inline functions that reference the proper
#define __inline
#include <x86gprintrin.h>
+
+#define _CONCAT(x,y) x ## y
+
+#define test_0(func, type, imm) \
+ type _CONCAT(_0,func) (int const I) \
+ { return func (imm); }
+
+#define test_1(func, type, op1_type) \
+ type _CONCAT(_1,func) (op1_type A) \
+ { return func (A); }
+
+#define test_1r(func, type, op1_type, imm) \
+ type _CONCAT(_1r,func) (op1_type A, int const I) \
+ { return func (imm, A); }
+
+#define test_2(func, type, op1_type, op2_type) \
+ type _CONCAT(_2,func) (op1_type A, op2_type B) \
+ { return func (A, B); }
+
+/* usermsrintrin.h */
+#ifdef __x86_64__
+test_0 (_urdmsr, unsigned long long, 1)
+test_1 (_urdmsr, unsigned long long, unsigned long long)
+test_1r (_uwrmsr, void, unsigned long long, 1)
+test_2 (_uwrmsr, void, unsigned long long, unsigned long long)
+#endif
#define extern
#define __inline
+#define _CONCAT(x,y) x ## y
+
+#define test_0(func, type, imm) \
+ type _CONCAT(_0,func) (int const I) \
+ { return func (imm); }
+
+#define test_1(func, type, op1_type) \
+ type _CONCAT(_1,func) (op1_type A) \
+ { return func (A); }
+
+#define test_1r(func, type, op1_type, imm) \
+ type _CONCAT(_1r,func) (op1_type A, int const I) \
+ { return func (imm, A); }
+
+#define test_2(func, type, op1_type, op2_type) \
+ type _CONCAT(_2,func) (op1_type A, op2_type B) \
+ { return func (A, B); }
+
#ifndef DIFFERENT_PRAGMAS
#ifdef __x86_64__
-#pragma GCC target ("adx,bmi,bmi2,cmpccxadd,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,prefetchi,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,xsaveopt")
+#pragma GCC target ("adx,bmi,bmi2,cmpccxadd,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,prefetchi,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,usermsr,xsaveopt")
#else
#pragma GCC target ("adx,bmi,bmi2,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,raoint,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,xsaveopt")
#endif
/* x86intrin.h (LWP/BMI/BMI2/TBM/LZCNT). */
#ifdef DIFFERENT_PRAGMAS
+#ifdef __x86_64__
+#pragma GCC target ("lwp,bmi,bmi2,tbm,lzcnt,usermsr")
+#else
#pragma GCC target ("lwp,bmi,bmi2,tbm,lzcnt")
#endif
+#endif
#include <x86gprintrin.h>
+
+/* usermsrintrin.h */
+#ifdef __x86_64__
+test_0 (_urdmsr, unsigned long long, 1)
+test_1 (_urdmsr, unsigned long long, unsigned long long)
+test_1r (_uwrmsr, void, unsigned long long, 1)
+test_2 (_uwrmsr, void, unsigned long long, unsigned long long)
+#endif
#define __builtin_ia32_cmpccxadd(A, B, C, D) __builtin_ia32_cmpccxadd(A, B, C, 1)
#define __builtin_ia32_cmpccxadd64(A, B, C, D) __builtin_ia32_cmpccxadd64(A, B, C, 1)
+/* usermsrintrin.h */
+#define __builtin_ia32_urdmsr(A) __builtin_ia32_urdmsr(1)
+#define __builtin_ia32_uwrmsr(A, B) __builtin_ia32_uwrmsr(1, B)
+
#ifdef __x86_64__
-#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,cmpccxadd,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,prefetchi,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,xsavec,xsaveopt,xsaves,wbnoinvd")
+#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,cmpccxadd,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,prefetchi,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,usermsr,xsavec,xsaveopt,xsaves,wbnoinvd")
#else
#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,raoint,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,xsavec,xsaveopt,xsaves,wbnoinvd")
#endif