From: Katy Feng
Date: Tue, 22 Aug 2023 18:11:42 +0000 (-0700)
Subject: Change to common header file not applicable to open-vm-tools.
X-Git-Tag: stable-12.3.0~28
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=972d10eab23dda4d219f919685fbcd2651a57165;p=thirdparty%2Fopen-vm-tools.git

Change to common header file not applicable to open-vm-tools.
---

diff --git a/open-vm-tools/lib/include/vm_atomic.h b/open-vm-tools/lib/include/vm_atomic.h
index 26bd76a69..6a58fd0a8 100644
--- a/open-vm-tools/lib/include/vm_atomic.h
+++ b/open-vm-tools/lib/include/vm_atomic.h
@@ -1,5 +1,5 @@
 /*********************************************************
- * Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
+ * Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU Lesser General Public License as published
@@ -45,6 +45,14 @@
 #pragma warning(pop)
 #endif
 
+#ifdef __wasm__
+#define VM_ATOMIC_USE_C11
+#endif
+
+#ifdef VM_ATOMIC_USE_C11
+#include <stdatomic.h>
+#endif
+
 #include "vm_basic_types.h"
 #include "vm_assert.h"
 
@@ -309,7 +317,9 @@ Atomic_Read8(Atomic_uint8 const *var)  // IN:
 {
    uint8 val;
 
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+   val = atomic_load((const _Atomic uint8 *)&var->value);
+#elif defined __GNUC__ && defined VM_ARM_32
    val = AtomicUndefined(var);
 #elif defined __GNUC__ && defined VM_ARM_64
    val = _VMATOM_X(R, 8, &var->value);
@@ -349,7 +359,9 @@ static INLINE uint8
 Atomic_ReadWrite8(Atomic_uint8 *var,  // IN/OUT:
                   uint8 val)          // IN:
 {
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+   return atomic_exchange((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_32
    return AtomicUndefined(var + val);
 #elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(RW, 8, TRUE, &var->value, val);
@@ -391,7 +403,9 @@ static INLINE void
 Atomic_Write8(Atomic_uint8 *var,  // IN/OUT:
               uint8 val)          // IN:
 {
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+   atomic_store((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_32
    AtomicUndefined(var + val);
 #elif defined __GNUC__ && defined VM_ARM_64
    _VMATOM_X(W, 8, &var->value, val);
@@ -430,7 +444,11 @@ Atomic_ReadIfEqualWrite8(Atomic_uint8 *var,  // IN/OUT:
                          uint8 oldVal,       // IN:
                          uint8 newVal)       // IN:
 {
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+   atomic_compare_exchange_strong(
+      (_Atomic uint8 *)&var->value, &oldVal, newVal);
+   return oldVal;
+#elif defined __GNUC__ && defined VM_ARM_32
    return AtomicUndefined(var + oldVal + newVal);
 #elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(RIFEQW, 8, TRUE, &var->value, oldVal, newVal);
@@ -479,7 +497,9 @@ Atomic_ReadAnd8(Atomic_uint8 *var, // IN/OUT
 {
    uint8 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_and((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, and, val);
 #else
    do {
@@ -541,7 +561,9 @@ Atomic_ReadOr8(Atomic_uint8 *var, // IN/OUT
 {
    uint8 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_or((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, orr, val);
 #else
    do {
@@ -603,7 +625,9 @@ Atomic_ReadXor8(Atomic_uint8 *var, // IN/OUT
 {
    uint8 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_xor((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, eor, val);
 #else
    do {
@@ -665,7 +689,9 @@ Atomic_ReadAdd8(Atomic_uint8 *var, // IN/OUT
 {
    uint8 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_add((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, add, val);
 #else
    do {
@@ -725,7 +751,9 @@ static INLINE void
 Atomic_Sub8(Atomic_uint8 *var,  // IN/OUT
             uint8 val)          // IN
 {
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_sub((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    _VMATOM_X(OP, 8, TRUE, &var->value, sub, val);
 #else
    Atomic_Add8(var, -val);
@@ -850,7 +878,9 @@ Atomic_Read32(Atomic_uint32 const *var) // IN
    ASSERT(((uintptr_t)var % 4) == 0);
 #endif
 
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   value = atomic_load((_Atomic uint32 *)&var->value);
+#elif defined __GNUC__
    /*
     * Use inline assembler to force using a single load instruction to
     * ensure that the compiler doesn't split a transfer operation into multiple
@@ -909,7 +939,9 @@ static INLINE uint32
 Atomic_ReadWrite32(Atomic_uint32 *var, // IN/OUT
                    uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   return atomic_exchange((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 retVal;
    uint32 res;
@@ -976,7 +1008,9 @@ Atomic_Write32(Atomic_uint32 *var, // OUT
    ASSERT(((uintptr_t)var % 4) == 0);
 #endif
 
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_store((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #if defined VM_ARM_64
    _VMATOM_X(W, 32, &var->value, val);
 #elif defined VM_ARM_32
@@ -1055,7 +1089,11 @@ Atomic_ReadIfEqualWrite32(Atomic_uint32 *var, // IN/OUT
                           uint32 oldVal,      // IN
                           uint32 newVal)      // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_compare_exchange_strong(
+      (_Atomic uint32 *)&var->value, &oldVal, newVal);
+   return oldVal;
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 retVal;
    uint32 res;
@@ -1105,7 +1143,7 @@ Atomic_ReadIfEqualWrite32(Atomic_uint32 *var, // IN/OUT
 
 #define Atomic_ReadIfEqualWrite Atomic_ReadIfEqualWrite32
 
-#if defined VM_64BIT || defined VM_ARM_V7
+#if defined VM_64BIT || defined VM_ARM_V7 || defined VM_ATOMIC_USE_C11
 /*
  *-----------------------------------------------------------------------------
  *
@@ -1127,7 +1165,11 @@ Atomic_ReadIfEqualWrite64(Atomic_uint64 *var, // IN/OUT
                           uint64 oldVal,      // IN
                           uint64 newVal)      // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_compare_exchange_strong(
+      (_Atomic uint64 *)&var->value, &oldVal, newVal);
+   return oldVal;
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint64 retVal;
    uint32 res;
@@ -1218,7 +1260,9 @@ static INLINE void
 Atomic_And32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 tmp;
@@ -1278,7 +1322,9 @@ static INLINE void
 Atomic_Or32(Atomic_uint32 *var, // IN/OUT
             uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 tmp;
@@ -1338,7 +1384,9 @@ static INLINE void
 Atomic_Xor32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_xor((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 tmp;
@@ -1399,7 +1447,9 @@ static INLINE void
 Atomic_Xor64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_xor((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__
 #if defined VM_ARM_64
    _VMATOM_X(OP, 64, TRUE, &var->value, eor, val);
 #else /* VM_X86_64 */
@@ -1441,7 +1491,9 @@ static INLINE void
 Atomic_Add32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 tmp;
@@ -1501,7 +1553,9 @@ static INLINE void
 Atomic_Sub32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_sub((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 tmp;
@@ -1574,7 +1628,9 @@ Atomic_Sub32(Atomic_uint32 *var, // IN/OUT
 static INLINE void
 Atomic_Inc32(Atomic_uint32 *var) // IN/OUT
 {
-#ifdef __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   Atomic_Add32(var, 1);
+#elif defined __GNUC__
 #if defined VM_ARM_ANY
    Atomic_Add32(var, 1);
 #else /* VM_X86_ANY */
@@ -1615,7 +1671,9 @@ Atomic_Inc32(Atomic_uint32 *var) // IN/OUT
 static INLINE void
 Atomic_Dec32(Atomic_uint32 *var) // IN/OUT
 {
-#ifdef __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   Atomic_Sub32(var, 1);
+#elif defined __GNUC__
 #if defined VM_ARM_ANY
    Atomic_Sub32(var, 1);
 #else /* VM_X86_ANY */
@@ -1665,7 +1723,9 @@ Atomic_ReadOr32(Atomic_uint32 *var, // IN/OUT
 {
    uint32 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 32, TRUE, &var->value, orr, val);
 #else
    do {
@@ -1699,7 +1759,9 @@ Atomic_ReadAnd32(Atomic_uint32 *var, // IN/OUT
 {
    uint32 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 32, TRUE, &var->value, and, val);
 #else
    do {
@@ -1734,7 +1796,9 @@ Atomic_ReadOr64(Atomic_uint64 *var, // IN/OUT
 {
    uint64 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 64, TRUE, &var->value, orr, val);
 #else
    do {
@@ -1768,7 +1832,9 @@ Atomic_ReadAnd64(Atomic_uint64 *var, // IN/OUT
 {
    uint64 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 64, TRUE, &var->value, and, val);
 #else
    do {
@@ -1805,7 +1871,9 @@ static INLINE uint32
 Atomic_ReadAdd32(Atomic_uint32 *var, // IN/OUT
                  uint32 val)         // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   return atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
    uint32 retVal;
@@ -1916,7 +1984,10 @@ Atomic_CMPXCHG64(Atomic_uint64 *var,  // IN/OUT
                  uint64 oldVal,       // IN
                  uint64 newVal)       // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   return atomic_compare_exchange_strong(
+      (_Atomic uint64 *)&var->value, &oldVal, newVal);
+#elif defined __GNUC__
 #if defined VM_ARM_ANY
    return Atomic_ReadIfEqualWrite64(var, oldVal, newVal) == oldVal;
 #else /* VM_X86_ANY */
@@ -2041,7 +2112,10 @@ Atomic_CMPXCHG32(Atomic_uint32 *var,  // IN/OUT
                  uint32 oldVal,       // IN
                  uint32 newVal)       // IN
 {
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+   return atomic_compare_exchange_strong(
+      (_Atomic uint32 *)&var->value, &oldVal, newVal);
+#elif defined __GNUC__
 #if defined VM_ARM_ANY
    return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
 #else /* VM_X86_ANY */
@@ -2085,6 +2159,9 @@ Atomic_CMPXCHG32(Atomic_uint32 *var,  // IN/OUT
 static INLINE uint64
 Atomic_Read64(Atomic_uint64 const *var) // IN
 {
+#if defined VM_ATOMIC_USE_C11
+   return atomic_load((const _Atomic uint64 *)&var->value);
+#else
 #if defined __GNUC__
    uint64 value;
 #endif
@@ -2152,6 +2229,7 @@ Atomic_Read64(Atomic_uint64 const *var) // IN
 
 #if defined __GNUC__
    return value;
 #endif
+#endif // !defined VM_ATOMIC_USE_C11
 }
 
@@ -2205,7 +2283,9 @@ static INLINE uint64
 Atomic_ReadAdd64(Atomic_uint64 *var, // IN/OUT
                  uint64 val)         // IN
 {
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   return atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
 
 #elif defined __x86_64__
@@ -2259,7 +2339,9 @@ static INLINE uint64
 Atomic_ReadSub64(Atomic_uint64 *var, // IN/OUT
                  uint64 val)         // IN
 {
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   return atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(ROP, 64, TRUE, &var->value, sub, val);
 #else
 # ifdef _MSC_VER
@@ -2351,7 +2433,9 @@ static INLINE void
 Atomic_Add64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
-#if !defined VM_64BIT
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+#elif !defined VM_64BIT
    Atomic_ReadAdd64(var, val); /* Return value is unused. */
 #elif defined __GNUC__
 #if defined VM_ARM_64
@@ -2394,7 +2478,9 @@ static INLINE void
 Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
-#if !defined VM_64BIT
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+#elif !defined VM_64BIT
    Atomic_ReadSub64(var, val); /* Return value is unused. */
 #elif defined __GNUC__
 #if defined VM_ARM_64
@@ -2436,7 +2522,7 @@ Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
 static INLINE void
 Atomic_Inc64(Atomic_uint64 *var) // IN/OUT
 {
-#if defined VM_ARM_64
+#if defined VM_ARM_64 || defined VM_ATOMIC_USE_C11
    Atomic_Add64(var, 1);
 #elif !defined __x86_64__
    Atomic_ReadInc64(var); /* Return value is unused. */
@@ -2476,7 +2562,7 @@ Atomic_Inc64(Atomic_uint64 *var) // IN/OUT
 static INLINE void
 Atomic_Dec64(Atomic_uint64 *var) // IN/OUT
 {
-#if defined VM_ARM_64
+#if defined VM_ARM_64 || defined VM_ATOMIC_USE_C11
    Atomic_Sub64(var, 1);
 #elif !defined __x86_64__
    Atomic_ReadDec64(var); /* Return value is unused. */
@@ -2517,7 +2603,9 @@ static INLINE uint64
 Atomic_ReadWrite64(Atomic_uint64 *var, // IN/OUT
                    uint64 val)         // IN
 {
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+   return atomic_exchange((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
       "xchgq %0, %1"
@@ -2568,7 +2656,9 @@ Atomic_Write64(Atomic_uint64 *var, // OUT
    ASSERT((uintptr_t)var % 8 == 0);
 #endif
 
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+   atomic_store((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
    /*
    * There is no move instruction for 64-bit immediate to memory, so unless
    * the immediate value fits in 32-bit (i.e. can be sign-extended), GCC
@@ -2619,7 +2709,9 @@ static INLINE void
 Atomic_Or64(Atomic_uint64 *var, // IN/OUT
             uint64 val)         // IN
 {
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
      "lock; orq %1, %0"
@@ -2663,7 +2755,9 @@ static INLINE void
 Atomic_And64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
      "lock; andq %1, %0"
@@ -2874,7 +2968,9 @@ Atomic_Read16(Atomic_uint16 const *var) // IN
    ASSERT((uintptr_t)var % 2 == 0);
 #endif
 
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   value = atomic_load((_Atomic uint16 *)&var->value);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "movw %1, %0"
       : "=r" (value)
@@ -2918,7 +3014,9 @@ static INLINE uint16
 Atomic_ReadWrite16(Atomic_uint16 *var,  // IN/OUT:
                    uint16 val)          // IN:
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   return atomic_exchange((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "xchgw %0, %1"
      : "=r" (val),
@@ -2981,7 +3079,9 @@ Atomic_Write16(Atomic_uint16 *var,  // OUT:
    ASSERT((uintptr_t)var % 2 == 0);
 #endif
 
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_store((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "movw %1, %0"
       : "=m" (var->value)
@@ -3022,7 +3122,11 @@ Atomic_ReadIfEqualWrite16(Atomic_uint16 *var,   // IN/OUT
                           uint16 oldVal,        // IN
                           uint16 newVal)        // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_compare_exchange_strong(
+      (_Atomic uint16 *)&var->value, &oldVal, newVal);
+   return oldVal;
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    uint16 val;
 
    __asm__ __volatile__(
@@ -3087,7 +3191,9 @@ Atomic_ReadAnd16(Atomic_uint16 *var, // IN/OUT
 {
    uint16 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 16, TRUE, &var->value, and, val);
 #else
    do {
@@ -3119,7 +3225,9 @@ static INLINE void
 Atomic_And16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; andw %1, %0"
       : "+m" (var->value)
@@ -3174,7 +3282,9 @@ static INLINE void
 Atomic_Or16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; orw %1, %0"
      : "+m" (var->value)
@@ -3229,7 +3339,9 @@ static INLINE void
 Atomic_Xor16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_xor((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; xorw %1, %0"
      : "+m" (var->value)
@@ -3284,7 +3396,9 @@ static INLINE void
 Atomic_Add16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; addw %1, %0"
      : "+m" (var->value)
@@ -3339,7 +3453,9 @@ static INLINE void
 Atomic_Sub16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   atomic_fetch_sub((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; subw %1, %0"
      : "+m" (var->value)
@@ -3460,7 +3576,9 @@ Atomic_ReadOr16(Atomic_uint16 *var, // IN/OUT
 {
    uint16 res;
 
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+   res = atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 16, TRUE, &var->value, orr, val);
 #else
    do {
@@ -3492,7 +3610,9 @@ static INLINE uint16
 Atomic_ReadAdd16(Atomic_uint16 *var, // IN/OUT
                  uint16 val)         // IN:
 {
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   return atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; xaddw %0, %1"
      : "=r" (val),
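
Every C11 branch this patch adds follows the same pattern: the plain integer field inside the Atomic_* wrapper struct is cast to an _Atomic pointer and passed to the matching <stdatomic.h> generic function. The read-if-equal-write functions exploit the fact that atomic_compare_exchange_strong, on failure, writes the value actually observed back through its "expected" pointer, so returning oldVal always returns the value that was read. The standalone sketch below is illustrative only, not part of the patch; the Demo_* names are hypothetical, and the cast from a plain uint32_t to _Atomic uint32_t assumes the two types share size and representation, as the header's approach does for the targeted (wasm) toolchains.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t value; } Demo_Atomic32;  /* hypothetical wrapper */

static uint32_t
Demo_ReadIfEqualWrite32(Demo_Atomic32 *var, uint32_t oldVal, uint32_t newVal)
{
   /*
    * On failure, atomic_compare_exchange_strong overwrites oldVal with the
    * value actually observed, so oldVal always holds the value that was read.
    */
   atomic_compare_exchange_strong((_Atomic uint32_t *)&var->value,
                                  &oldVal, newVal);
   return oldVal;
}

int
main(void)
{
   Demo_Atomic32 v = { 0 };

   atomic_store((_Atomic uint32_t *)&v.value, 41);     /* like Atomic_Write32 */
   atomic_fetch_add((_Atomic uint32_t *)&v.value, 1);  /* like Atomic_Add32   */

   /* Value is 42, so the exchange succeeds: 7 is stored, 42 is returned. */
   printf("%u\n", (unsigned)Demo_ReadIfEqualWrite32(&v, 42, 7));        /* 42 */
   printf("%u\n", (unsigned)atomic_load((_Atomic uint32_t *)&v.value)); /* 7  */
   return 0;
}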