#include "vm_basic_types.h"
#include "vm_assert.h"
-#if defined(__cplusplus)
+#if defined __cplusplus
extern "C" {
#endif
volatile uint64 value;
} Atomic_uint64 ALIGNED(8);
-#if defined(__GNUC__) && defined(VM_64BIT) && \
- (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) || defined(VM_ARM_64))
+#if defined __GNUC__ && defined VM_64BIT && \
+ (defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 || defined VM_ARM_64)
typedef struct Atomic_uint128 {
volatile __int128 value;
} Atomic_uint128 ALIGNED(16);
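/*
* Editor's illustrative sketch (not part of the original header): when the
* guard above is satisfied via __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16, a 16-byte
* compare-and-swap reduces to the __sync builtin shown here; the helper name
* is hypothetical. On VM_ARM_64 the header supplies its own implementation
* further below.
*/
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
static INLINE __int128
AtomicSketch_CAS128(Atomic_uint128 *ptr,  // IN/OUT
                    __int128 oldVal,      // IN
                    __int128 newVal)      // IN
{
   /* Returns the value observed; it equals oldVal iff the swap happened. */
   return __sync_val_compare_and_swap(&ptr->value, oldVal, newVal);
}
#endif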
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
-#if defined(_MSC_VER) && _MSC_VER >= 1310 && !defined(BORA_NO_WIN32_INTRINS)
+#if defined _MSC_VER && _MSC_VER >= 1310 && !defined BORA_NO_WIN32_INTRINS
#ifdef __cplusplus
extern "C" {
#endif
#pragma intrinsic(_InterlockedExchange8, _InterlockedCompareExchange8)
#endif
-#if defined(VM_X86_64)
+#if defined VM_X86_64
long _InterlockedAnd(long volatile*, long);
__int64 _InterlockedAnd64(__int64 volatile*, __int64);
long _InterlockedOr(long volatile*, long);
__int64 _InterlockedDecrement64(__int64 volatile*);
__int64 _InterlockedExchange64(__int64 volatile*, __int64);
__int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
-#if !defined(_WIN64)
+#if !defined _WIN64
#pragma intrinsic(_InterlockedAnd, _InterlockedAnd64)
#pragma intrinsic(_InterlockedOr, _InterlockedOr64)
#pragma intrinsic(_InterlockedXor, _InterlockedXor64)
#endif
#endif /* _MSC_VER */
-#if defined(__arm__)
+#if defined __arm__
/*
* LDREX without STREX or CLREX may cause problems in environments where the
* context switch may not clear the reference monitor - according to the ARM
* manual the monitor should be cleared on a context switch, but paths such as
* the Linux kernel's non-preemptive context switch path may not do so. So use
* of these ARM routines in kernel code may not be safe.
*/
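/*
* Editor's illustrative sketch (not part of the original header): the
* VM_ARM_V7 routines below all follow the usual exclusive load/store retry
* pattern, roughly
*
*    1: ldrex   r1, [r0]        @ load and take the exclusive monitor
*       add     r1, r1, #1      @ compute the new value
*       strex   r2, r1, [r0]    @ store succeeds only while still exclusive
*       cmp     r2, #0
*       bne     1b              @ otherwise retry
*
* so a stray LDREX with no matching STREX or CLREX leaves the monitor set,
* which is exactly the hazard described above.
*/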
-# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__)
+# if defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ || \
+ defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
# define VM_ARM_V7
# ifdef __KERNEL__
# warning LDREX/STREX may not be safe in linux kernel, since it \
* - walken
*/
-#if defined(_MSC_VER) && _MSC_VER < 1600 && defined(__x86_64__)
+#if defined _MSC_VER && _MSC_VER < 1600 && defined __x86_64__
Bool VMWInterlockedExchangeBool(Bool volatile *ptr,
Bool val);
#endif
/* The ARM32 team is expected to provide an implementation real soon now. */
-#if defined(VM_ARM_32)
+#if defined VM_ARM_32
extern Bool AtomicUndefined(void);
#endif
} Atomic_Bool;
/* This should be enforced on all architectures not just ARM... */
-#if defined(VM_ARM_ANY)
+#if defined VM_ARM_ANY
MY_ASSERTS(AtomicBoolSize,
ASSERT_ON_COMPILE(sizeof (Atomic_Bool) == sizeof (uint8));
)
{
uint8 val;
-#if defined(VM_ARM_64)
+#if defined VM_ARM_64
val = _VMATOM_X(RIFEQW, 8, TRUE, ptr, oldVal, newVal);
#else
__asm__ __volatile__("lock; cmpxchgb %b2, %1"
*
*-----------------------------------------------------------------------------
*/
-#if defined(__GNUC__) && defined(VM_64BIT) && \
- (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) || defined(VM_ARM_64))
+#if defined __GNUC__ && defined VM_64BIT && \
+ (defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 || defined VM_ARM_64)
static INLINE __int128
Atomic_ReadIfEqualWrite128(Atomic_uint128 *ptr, // IN/OUT
__int128 oldVal, // IN
{
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
return __sync_val_compare_and_swap(&ptr->value, oldVal, newVal);
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
union {
__int128 raw;
struct {
{
Bool val;
-#if defined(__GNUC__) && defined(VM_ARM_32)
+#if defined __GNUC__ && defined VM_ARM_32
val = AtomicUndefined();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
val = _VMATOM_X(R, 8, &var->value);
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
__asm__ __volatile__(
"movb %1, %0"
: "=q" (val)
: "m" (var->value)
);
-#elif defined(_MSC_VER)
+#elif defined _MSC_VER
val = var->value;
#else
#error No compiler defined for Atomic_ReadBool
Atomic_ReadWriteBool(Atomic_Bool *var, // IN/OUT:
Bool val) // IN:
{
-#if defined(__GNUC__) && defined(VM_ARM_32)
+#if defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RW, 8, TRUE, &var->value, val);
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
__asm__ __volatile__(
"xchgb %0, %1"
: "=q" (val),
: "0" (val)
);
return val;
-#elif defined(_MSC_VER) && _MSC_VER >= 1600
+#elif defined _MSC_VER && _MSC_VER >= 1600
return _InterlockedExchange8(&var->value, val);
-#elif defined(_MSC_VER) && defined(__i386__)
+#elif defined _MSC_VER && defined __i386__
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
__asm xchg [ebx]Atomic_Bool.value, al
}
#pragma warning(pop)
-#elif defined(_MSC_VER) && defined(__x86_64__)
+#elif defined _MSC_VER && defined __x86_64__
return VMWInterlockedExchangeBool(&var->value, val);
#else
#error No compiler defined for Atomic_ReadBool
Atomic_WriteBool(Atomic_Bool *var, // IN/OUT:
Bool val) // IN:
{
-#if defined(__GNUC__) && defined(VM_ARM_32)
+#if defined __GNUC__ && defined VM_ARM_32
AtomicUndefined();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(W, 8, &var->value, val);
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
__asm__ __volatile__(
"movb %1, %0"
: "=m" (var->value)
: "qn" (val)
);
-#elif defined(_MSC_VER)
+#elif defined _MSC_VER
var->value = val;
#else
#error No compiler defined for Atomic_WriteBool
Bool oldVal, // IN:
Bool newVal) // IN:
{
-#if defined(__GNUC__) && defined(VM_ARM_32)
+#if defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RIFEQW, 8, TRUE, &var->value, oldVal, newVal);
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
Bool val;
__asm__ __volatile__(
: "cc"
);
return val;
-#elif defined(_MSC_VER) && _MSC_VER >= 1600
+#elif defined _MSC_VER && _MSC_VER >= 1600
return _InterlockedCompareExchange8(&var->value, newVal, oldVal);
-#elif defined(_MSC_VER) && defined(__i386__)
+#elif defined _MSC_VER && defined __i386__
#pragma warning(push)
#pragma warning(disable : 4035) // disable no-return warning
{
// eax is the return value, this is documented to work - edward
}
#pragma warning(pop)
-#elif defined(_MSC_VER) && defined(__x86_64__)
+#elif defined _MSC_VER && defined __x86_64__
return VMWInterlockedCompareExchangeBool(&var->value, newVal, oldVal);
#else
#error No compiler defined for Atomic_ReadIfEqualWriteBool
{
uint32 value;
-#if defined(VMM) || defined(VM_ARM_64)
+#if defined VMM || defined VM_ARM_64
ASSERT(((uintptr_t)var % 4) == 0);
#endif
-#if defined(__GNUC__)
+#if defined __GNUC__
/*
* Use inline assembler to force using a single load instruction to
* ensure that the compiler doesn't split a transfer operation into multiple
* instructions.
*/
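/*
* Editor's illustration (hypothetical, not part of the original header): a
* compiler is not required to use a single instruction here, e.g. it could in
* principle emit two halfword loads,
*
*    value  = ((volatile uint16 *)&var->value)[0];
*    value |= (uint32)((volatile uint16 *)&var->value)[1] << 16;
*
* which would no longer be an atomic 32-bit read; hence the explicit asm.
*/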
-#if defined(VM_ARM_32)
+#if defined VM_ARM_32
__asm__ __volatile__(
"ldr %0, [%1]"
: "=r" (value)
: "r" (&var->value)
);
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
value = _VMATOM_X(R, 32, &var->value);
#else
__asm__ __volatile__(
: "m" (var->value)
);
#endif
-#elif defined(_MSC_VER)
+#elif defined _MSC_VER
/*
* Microsoft docs guarantee simple reads and writes to properly
* aligned 32-bit variables use only a single instruction.
Atomic_ReadWrite(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 retVal;
register volatile uint32 res;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RW, 32, TRUE, &var->value, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
Atomic_Write(Atomic_uint32 *var, // OUT
uint32 val) // IN
{
-#if defined(VMM) || defined(VM_ARM_64)
+#if defined VMM || defined VM_ARM_64
ASSERT(((uintptr_t)var % 4) == 0);
#endif
-#if defined(__GNUC__)
-#if defined(VM_ARM_64)
+#if defined __GNUC__
+#if defined VM_ARM_64
_VMATOM_X(W, 32, &var->value, val);
-#elif defined(VM_ARM_32)
+#elif defined VM_ARM_32
/*
* Best left this way due to the intricacies of exclusive load/store
* operations on legacy (32-bit) ARM.
: "r" (val)
);
#endif
-#elif defined(_MSC_VER)
+#elif defined _MSC_VER
/*
* Microsoft docs guarantee simple reads and writes to properly
* aligned 32-bit variables use only a single instruction.
uint32 oldVal, // IN
uint32 newVal) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register uint32 retVal;
register uint32 res;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RIFEQW, 32, TRUE, &var->value, oldVal, newVal);
#else /* VM_X86_ANY */
uint32 val;
#define Atomic_ReadIfEqualWrite32 Atomic_ReadIfEqualWrite
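/*
* Editor's illustrative sketch (not part of the original header): a typical
* retry loop built from the plain 32-bit read (assumed here to be named
* Atomic_Read()) and Atomic_ReadIfEqualWrite() above, decrementing only while
* the value is non-zero; the helper name is hypothetical.
*/
static INLINE Bool
AtomicSketch_DecIfPositive(Atomic_uint32 *var)  // IN/OUT
{
   uint32 old = Atomic_Read(var);

   while (old != 0) {
      uint32 seen = Atomic_ReadIfEqualWrite(var, old, old - 1);

      if (seen == old) {
         return TRUE;   /* the decrement was performed */
      }
      old = seen;       /* raced with another writer; retry */
   }
   return FALSE;        /* already zero; nothing to do */
}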
-#if defined(VM_64BIT) || defined(VM_ARM_V7)
+#if defined VM_64BIT || defined VM_ARM_V7
/*
*-----------------------------------------------------------------------------
*
uint64 oldVal, // IN
uint64 newVal) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register uint64 retVal;
register uint32 res;
* Furthermore, using a 32-bit register to store a
* 64-bit variable looks risky.
*/
-#if defined(__APPLE__) && __clang__ == 1 && __clang_major__ >= 5
+#if defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wasm-operand-widths"
#endif
: [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
: "cc"
);
-#if defined(__APPLE__) && __clang__ == 1 && __clang_major__ >= 5
+#if defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5
#pragma clang diagnostic pop
-#endif // defined(__APPLE__) && __clang__ == 1 && __clang_major__ >= 5
+#endif // defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RIFEQW, 64, TRUE, &var->value, oldVal, newVal);
#else /* VM_X86_64 */
uint64 val;
Atomic_And(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 32, TRUE, &var->value, and, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
);
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
-#if defined(__x86_64__) || defined(VM_ARM_32)
+#if defined __x86_64__ || defined VM_ARM_32
_InterlockedAnd((long *)&var->value, (long)val);
#else
__asm mov eax, val
Atomic_Or(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 32, TRUE, &var->value, orr, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
);
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
-#if defined(__x86_64__) || defined(VM_ARM_32)
+#if defined __x86_64__ || defined VM_ARM_32
_InterlockedOr((long *)&var->value, (long)val);
#else
__asm mov eax, val
Atomic_Xor(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 32, TRUE, &var->value, eor, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
);
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
-#if defined(__x86_64__) || defined(VM_ARM_32)
+#if defined __x86_64__ || defined VM_ARM_32
_InterlockedXor((long *)&var->value, (long)val);
#else
__asm mov eax, val
#define Atomic_Xor32 Atomic_Xor
-#if defined(VM_64BIT)
+#if defined VM_64BIT
/*
*-----------------------------------------------------------------------------
*
Atomic_Xor64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined(__GNUC__)
-#if defined(VM_ARM_64)
+#if defined __GNUC__
+#if defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, eor, val);
#else /* VM_X86_64 */
/* Checked against the AMD manual and GCC --hpreg */
Atomic_Add(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 32, TRUE, &var->value, add, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
Atomic_Sub(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 32, TRUE, &var->value, sub, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
Atomic_Inc(Atomic_uint32 *var) // IN/OUT
{
#ifdef __GNUC__
-#if defined(VM_ARM_ANY)
+#if defined VM_ARM_ANY
Atomic_Add(var, 1);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
Atomic_Dec(Atomic_uint32 *var) // IN/OUT
{
#ifdef __GNUC__
-#if defined(VM_ARM_ANY)
+#if defined VM_ARM_ANY
Atomic_Sub(var, 1);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
}
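/*
* Editor's illustrative sketch (not part of the original header): using
* Atomic_Or() and Atomic_And() above to set and clear a bit in a shared
* flags word; the helper names and the flag value are hypothetical.
*/
#define ATOMIC_SKETCH_FLAG_BUSY 0x1

static INLINE void
AtomicSketch_SetBusy(Atomic_uint32 *flags)    // IN/OUT
{
   Atomic_Or(flags, ATOMIC_SKETCH_FLAG_BUSY);
}

static INLINE void
AtomicSketch_ClearBusy(Atomic_uint32 *flags)  // IN/OUT
{
   Atomic_And(flags, ~(uint32)ATOMIC_SKETCH_FLAG_BUSY);
}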
-#if defined(VM_64BIT)
+#if defined VM_64BIT
/*
*-----------------------------------------------------------------------------
*
return res;
}
-#endif /* defined(VM_64BIT) */
+#endif /* defined VM_64BIT */
/*
Atomic_ReadAdd32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
#ifdef VM_ARM_V7
register volatile uint32 res;
register volatile uint32 retVal;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(ROP, 32, TRUE, &var->value, add, val);
#else /* VM_X86_ANY */
/* Checked against the Intel manual and GCC --walken */
*-----------------------------------------------------------------------------
*/
-#if defined(__GNUC__) && __GNUC__ < 3
+#if defined __GNUC__ && __GNUC__ < 3
static Bool
#else
static INLINE Bool
uint64 const *oldVal, // IN
uint64 const *newVal) // IN
{
-#if defined(__GNUC__)
-#if defined(VM_ARM_ANY)
+#if defined __GNUC__
+#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite64(var, *oldVal, *newVal) == *oldVal;
#else /* VM_X86_ANY */
Bool equal;
/* Checked against the Intel manual and GCC --walken */
-#if defined(__x86_64__)
+#if defined __x86_64__
uint64 dummy;
__asm__ __volatile__(
"lock; cmpxchgq %3, %0" "\n\t"
return equal;
#endif //VM_ARM_V7
#elif defined _MSC_VER
-#if defined(__x86_64__) || defined(VM_ARM_32)
+#if defined __x86_64__ || defined VM_ARM_32
return (__int64)*oldVal == _InterlockedCompareExchange64((__int64 *)&var->value,
(__int64)*newVal,
(__int64)*oldVal);
uint32 oldVal, // IN
uint32 newVal) // IN
{
-#if defined(__GNUC__)
-#if defined(VM_ARM_ANY)
+#if defined __GNUC__
+#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal;
#else /* VM_X86_ANY */
Bool equal;
);
return equal;
#endif /* VM_X86_ANY */
-#else // defined(__GNUC__)
+#else // defined __GNUC__
return (Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal);
-#endif // !defined(__GNUC__)
+#endif // !defined __GNUC__
}
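/*
* Editor's illustrative sketch (not part of the original header), assuming
* the Bool-returning 32-bit compare-and-swap defined above is named
* Atomic_CMPXCHG32: exactly one racing caller observes TRUE and gets to run
* one-time initialization; the helper name is hypothetical.
*/
static INLINE Bool
AtomicSketch_TryClaimInit(Atomic_uint32 *state)  // IN/OUT: 0 = uninitialized
{
   return Atomic_CMPXCHG32(state, 0, 1);
}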
static INLINE uint64
Atomic_Read64(Atomic_uint64 const *var) // IN
{
-#if defined(__GNUC__)
+#if defined __GNUC__
uint64 value;
#endif
ASSERT((uintptr_t)var % 8 == 0);
#endif
-#if defined(__GNUC__) && defined(__x86_64__)
+#if defined __GNUC__ && defined __x86_64__
/*
* Use asm to ensure we emit a single load.
*/
: "=r" (value)
: "m" (var->value)
);
-#elif defined(__GNUC__) && defined(__i386__)
+#elif defined __GNUC__ && defined __i386__
/*
* Since cmpxchg8b will replace the contents of EDX:EAX with the
* value in memory if there is no match, we need only execute the
: "m" (*var)
: "cc"
);
-#elif defined (_MSC_VER) && defined(__x86_64__)
+#elif defined _MSC_VER && defined __x86_64__
/*
* Microsoft docs guarantee "Simple reads and writes to properly
* aligned 64-bit variables are atomic on 64-bit Windows."
* XXX Verify that value is properly aligned. Bug 61315.
*/
return var->value;
-#elif defined (_MSC_VER) && defined(VM_ARM_32)
+#elif defined _MSC_VER && defined VM_ARM_32
return _InterlockedAdd64((__int64 *)&var->value, 0);
-#elif defined (_MSC_VER) && defined(__i386__)
+#elif defined _MSC_VER && defined __i386__
# pragma warning(push)
# pragma warning(disable : 4035) // disable no-return warning
{
// edx:eax is the return value; this is documented to work. --mann
}
# pragma warning(pop)
-#elif defined(__GNUC__) && defined (VM_ARM_V7)
+#elif defined __GNUC__ && defined VM_ARM_V7
__asm__ __volatile__(
"ldrexd %[value], %H[value], [%[var]] \n\t"
: [value] "=&r" (value)
: [var] "r" (&var->value)
);
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
value = _VMATOM_X(R, 64, &var->value);
#endif
-#if defined(__GNUC__)
+#if defined __GNUC__
return value;
#endif
}
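/*
* Editor's note (illustrative, not part of the original header): the i386
* branch above exploits the fact that a failing cmpxchg8b still loads the
* current memory contents into EDX:EAX, so a compare-exchange of a value
* with itself amounts to an atomic 64-bit read; in GCC-builtin terms this is
* the classic __sync_val_compare_and_swap(&var->value, 0, 0) idiom.
*/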
*----------------------------------------------------------------------
*/
-#if defined(VM_64BIT)
+#if defined VM_64BIT
static INLINE uint64
Atomic_ReadUnaligned64(Atomic_uint64 const *var) // IN:
{
Atomic_ReadAdd64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined(VM_ARM_64)
+#if defined VM_ARM_64
return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
-#elif defined(__x86_64__)
+#elif defined __x86_64__
-#if defined(__GNUC__)
+#if defined __GNUC__
__asm__ __volatile__(
"lock; xaddq %0, %1"
: "=r" (val),
Atomic_Add64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if !defined(VM_64BIT)
+#if !defined VM_64BIT
Atomic_ReadAdd64(var, val); /* Return value is unused. */
-#elif defined(__GNUC__)
-#if defined(VM_ARM_64)
+#elif defined __GNUC__
+#if defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, add, val);
-#else /* defined(VM_X86_64) */
+#else /* defined VM_X86_64 */
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; addq %1, %0"
Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if !defined(VM_64BIT)
+#if !defined VM_64BIT
Atomic_ReadSub64(var, val); /* Return value is unused. */
-#elif defined(__GNUC__)
-#if defined(VM_ARM_64)
+#elif defined __GNUC__
+#if defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, sub, val);
#else /* VM_X86_64 */
/* Checked against the AMD manual and GCC --hpreg */
static INLINE void
Atomic_Inc64(Atomic_uint64 *var) // IN/OUT
{
-#if defined(VM_ARM_64)
+#if defined VM_ARM_64
Atomic_Add64(var, 1);
-#elif !defined(__x86_64__)
+#elif !defined __x86_64__
Atomic_ReadInc64(var); /* Return value is unused. */
-#elif defined(__GNUC__)
+#elif defined __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; incq %0"
static INLINE void
Atomic_Dec64(Atomic_uint64 *var) // IN/OUT
{
-#if defined(VM_ARM_64)
+#if defined VM_ARM_64
Atomic_Sub64(var, 1);
-#elif !defined(__x86_64__)
+#elif !defined __x86_64__
Atomic_ReadDec64(var); /* Return value is unused. */
-#elif defined(__GNUC__)
+#elif defined __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; decq %0"
Atomic_ReadWrite64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"xchgq %0, %1"
#else
#error No compiler defined for Atomic_ReadWrite64
#endif
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RW, 64, TRUE, &var->value, val);
#else
uint64 oldVal;
Atomic_Write64(Atomic_uint64 *var, // OUT
uint64 val) // IN
{
-#if defined(VMM) || defined(VM_ARM_64)
+#if defined VMM || defined VM_ARM_64
ASSERT((uintptr_t)var % 8 == 0);
#endif
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
/*
* There is no move instruction for 64-bit immediate to memory, so unless
* the immediate value fits in 32 bits (i.e. can be sign-extended), GCC
#else
#error No compiler defined for Atomic_Write64
#endif
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(W, 64, &var->value, val);
#else
(void)Atomic_ReadWrite64(var, val);
Atomic_Or64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; orq %1, %0"
#else
#error No compiler defined for Atomic_Or64
#endif
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, orr, val);
#else // __x86_64__
uint64 oldVal;
Atomic_And64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; andq %1, %0"
#else
#error No compiler defined for Atomic_And64
#endif
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, and, val);
#else // __x86_64__
uint64 oldVal;
Atomic_SetBit64(Atomic_uint64 *var, // IN/OUT
uint64 bit) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
ASSERT(bit <= 63);
__asm__ __volatile__(
"lock; bts %1, %0"
Atomic_ClearBit64(Atomic_uint64 *var, // IN/OUT
uint64 bit) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
ASSERT(bit <= 63);
__asm__ __volatile__(
"lock; btr %1, %0"
Atomic_TestBit64(Atomic_uint64 *var, // IN
uint64 bit) // IN
{
-#if defined(__x86_64__)
-#if defined(__GNUC__)
+#if defined __x86_64__
+#if defined __GNUC__
Bool out = FALSE;
ASSERT(bit <= 63);
__asm__ __volatile__(
}
-#if defined(__GNUC__)
+#if defined __GNUC__
/*
*-----------------------------------------------------------------------------
*
ASSERT((uintptr_t)var % 2 == 0);
#endif
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"movw %1, %0"
: "=r" (value)
: "m" (var->value)
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
NOT_TESTED();
__asm__ __volatile__(
: "=r" (value)
: "r" (&var->value)
);
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
value = _VMATOM_X(R, 16, &var->value);
#else
#error No 16-bits atomics.
Atomic_ReadWrite16(Atomic_uint16 *var, // IN/OUT:
uint16 val) // IN:
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"xchgw %0, %1"
: "=r" (val),
: "0" (val)
);
return val;
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 retVal;
register volatile uint16 res;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RW, 16, TRUE, &var->value, val);
#else
#error No 16-bits atomics.
Atomic_Write16(Atomic_uint16 *var, // OUT:
uint16 val) // IN:
{
-#if defined(VMM) || defined(VM_ARM_64)
+#if defined VMM || defined VM_ARM_64
ASSERT((uintptr_t)var % 2 == 0);
#endif
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"movw %1, %0"
: "=m" (var->value)
: "r" (val)
);
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(W, 16, &var->value, val);
-#elif defined(VM_ARM_32)
+#elif defined VM_ARM_32
/*
* Best left this way due to the intricacies of exclusive load/store
* operations on legacy (32-bit) ARM.
uint16 oldVal, // IN
uint16 newVal) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
uint16 val;
__asm__ __volatile__(
: "cc"
);
return val;
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register uint16 retVal;
register uint16 res;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(RIFEQW, 16, TRUE, &var->value, oldVal, newVal);
#else
#error No 16-bits atomics.
Atomic_And16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; andw %1, %0"
: "+m" (var->value)
: "re" (val)
: "cc"
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 16, TRUE, &var->value, and, val);
#else
#error No 16-bits atomics.
Atomic_Or16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; orw %1, %0"
: "+m" (var->value)
: "re" (val)
: "cc"
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 16, TRUE, &var->value, orr, val);
#else
#error No 16-bits atomics.
Atomic_Xor16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; xorw %1, %0"
: "+m" (var->value)
: "re" (val)
: "cc"
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 16, TRUE, &var->value, eor, val);
#else
#error No 16-bits atomics.
Atomic_Add16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; addw %1, %0"
: "+m" (var->value)
: "re" (val)
: "cc"
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 16, TRUE, &var->value, add, val);
#else
#error No 16-bits atomics.
Atomic_Sub16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; subw %1, %0"
: "+m" (var->value)
: "re" (val)
: "cc"
);
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 tmp;
);
dmb();
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
_VMATOM_X(OP, 16, TRUE, &var->value, sub, val);
#else
#error No 16-bits atomics.
static INLINE void
Atomic_Inc16(Atomic_uint16 *var) // IN/OUT
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; incw %0"
: "+m" (var->value)
:
: "cc"
);
-#elif defined(VM_ARM_ANY)
+#elif defined VM_ARM_ANY
Atomic_Add16(var, 1);
#else
#error No 16-bits atomics.
static INLINE void
Atomic_Dec16(Atomic_uint16 *var) // IN/OUT
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; decw %0"
: "+m" (var->value)
:
: "cc"
);
-#elif defined(VM_ARM_ANY)
+#elif defined VM_ARM_ANY
Atomic_Sub16(var, 1);
#else
#error No 16-bits atomics.
Atomic_ReadAdd16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN:
{
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__i386__)
+#if defined __GNUC__
+#if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"lock; xaddw %0, %1"
: "=r" (val),
: "cc"
);
return val;
-#elif defined(VM_ARM_V7)
+#elif defined VM_ARM_V7
register volatile uint16 res;
register volatile uint16 retVal;
register volatile uint16 tmp;
dmb();
return retVal;
-#elif defined(VM_ARM_64)
+#elif defined VM_ARM_64
return _VMATOM_X(ROP, 16, TRUE, &var->value, add, val);
#else
#error No 16-bits atomics.
* Atomic_ReadIncInt --
* Atomic_ReadDecInt --
*/
-#if defined(VM_64BIT)
+#if defined VM_64BIT
MAKE_ATOMIC_TYPE(Ptr, 64, void const *, void *, uintptr_t)
#else
MAKE_ATOMIC_TYPE(Ptr, 32, void const *, void *, uintptr_t)
/* Prevent the compiler from re-ordering memory references. */
#ifdef __GNUC__
#define ATOMIC_COMPILER_BARRIER() __asm__ __volatile__ ("": : :"memory")
-#elif defined(_MSC_VER)
+#elif defined _MSC_VER
#define ATOMIC_COMPILER_BARRIER() _ReadWriteBarrier()
#else
#error No compiler defined for ATOMIC_COMPILER_BARRIER
# include "vm_atomic_arm64_end.h"
#endif
-#if defined(__cplusplus)
+#if defined __cplusplus
} // extern "C"
#endif
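/*
* Editor's illustrative sketch (not part of the original header), assuming
* MAKE_ATOMIC_TYPE(Ptr, ...) above generates Atomic_Ptr, Atomic_ReadPtr()
* and Atomic_ReadIfEqualWritePtr(): a lock-free stack push; the node type
* and helper name are hypothetical.
*/
typedef struct AtomicSketchNode {
   struct AtomicSketchNode *next;
} AtomicSketchNode;

static INLINE void
AtomicSketch_Push(Atomic_Ptr *head,        // IN/OUT: top of the stack
                  AtomicSketchNode *node)  // IN: node to push
{
   void *old = Atomic_ReadPtr(head);
   void *seen;

   for (;;) {
      node->next = (AtomicSketchNode *)old;
      seen = Atomic_ReadIfEqualWritePtr(head, old, node);
      if (seen == old) {
         break;       /* our node is now the head */
      }
      old = seen;     /* the head changed under us; retry */
   }
}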