uint8 val;
#if defined VM_ATOMIC_USE_C11
- val = atomic_load((const _Atomic uint8 *)&var->value);
+ val = atomic_load((const _Atomic(uint8) *)&var->value);
#elif defined __GNUC__ && defined VM_ARM_32
val = AtomicUndefined(var);
#elif defined __GNUC__ && defined VM_ARM_64
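
Aside on the change being made throughout this patch: C11 spells atomics
two ways. "_Atomic T" is a type qualifier, while "_Atomic(T)" is a type
specifier, and the two parse differently as soon as pointers are involved.
The specifier form is also the one that can be remapped to std::atomic<T>
with a single macro when the header is consumed by a C++ translation unit
(an assumed motivation; the patch itself does not say). Minimal sketch of
the pointer divergence:

#include <stdatomic.h>

_Atomic(char *) ap;   /* atomic pointer to char            */
_Atomic char   *pa;   /* plain pointer to an _Atomic char  */
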
uint8 val) // IN:
{
#if defined VM_ATOMIC_USE_C11
- return atomic_exchange((_Atomic uint8 *)&var->value, val);
+ return atomic_exchange((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined(var + val);
#elif defined __GNUC__ && defined VM_ARM_64
uint8 val) // IN:
{
#if defined VM_ATOMIC_USE_C11
- atomic_store((_Atomic uint8 *)&var->value, val);
+ atomic_store((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_32
AtomicUndefined(var + val);
#elif defined __GNUC__ && defined VM_ARM_64
{
#if defined VM_ATOMIC_USE_C11
atomic_compare_exchange_strong(
- (_Atomic uint8 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint8) *)&var->value, &oldVal, newVal);
return oldVal;
#elif defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined(var + oldVal + newVal);
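
Note on the C11 branch of the compare-and-swap above: on failure,
atomic_compare_exchange_strong writes the value it actually observed into
the "expected" argument, and on success it leaves it untouched, so
returning oldVal always yields the value the variable held when the
operation executed. Standalone sketch of the idiom (hypothetical helper,
standard C11 calls only):

#include <stdatomic.h>
#include <stdint.h>

static uint8_t
ReadIfEqualWrite8(_Atomic(uint8_t) *obj, uint8_t oldVal, uint8_t newVal)
{
   atomic_compare_exchange_strong(obj, &oldVal, newVal);
   return oldVal;   /* old value on success, observed value on failure */
}
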
uint8 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_and((_Atomic uint8 *)&var->value, val);
+ res = atomic_fetch_and((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, and, val);
#else
uint8 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_or((_Atomic uint8 *)&var->value, val);
+ res = atomic_fetch_or((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, orr, val);
#else
uint8 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_xor((_Atomic uint8 *)&var->value, val);
+ res = atomic_fetch_xor((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, eor, val);
#else
uint8 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_add((_Atomic uint8 *)&var->value, val);
+ res = atomic_fetch_add((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, add, val);
#else
uint8 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_sub((_Atomic uint8 *)&var->value, val);
+ atomic_fetch_sub((_Atomic(uint8) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
_VMATOM_X(OP, 8, TRUE, &var->value, sub, val);
#else
#endif
#if defined VM_ATOMIC_USE_C11
- value = atomic_load((_Atomic uint32 *)&var->value);
+ value = atomic_load((_Atomic(uint32) *)&var->value);
#elif defined __GNUC__
/*
* Use inline assembler to force using a single load instruction to
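
(The comment is truncated by the diff context; the idea is that a plain C
dereference may be split or repeated by the compiler, whereas a single
load instruction cannot tear an aligned 32-bit read. An x86 flavor of the
idiom, as a hypothetical standalone sketch:)

#include <stdint.h>

static inline uint32_t
Read32Once(const volatile uint32_t *p)
{
   uint32_t v;

   __asm__ __volatile__("movl %1, %0" : "=r" (v) : "m" (*p));
   return v;
}
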
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_exchange((_Atomic uint32 *)&var->value, val);
+ return atomic_exchange((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 retVal;
#endif
#if defined VM_ATOMIC_USE_C11
- atomic_store((_Atomic uint32 *)&var->value, val);
+ atomic_store((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#if defined VM_ARM_64
_VMATOM_X(W, 32, &var->value, val);
{
#if defined VM_ATOMIC_USE_C11
atomic_compare_exchange_strong(
- (_Atomic uint32 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint32) *)&var->value, &oldVal, newVal);
return oldVal;
#elif defined __GNUC__
#ifdef VM_ARM_V7
{
#if defined VM_ATOMIC_USE_C11
atomic_compare_exchange_strong(
- (_Atomic uint64 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint64) *)&var->value, &oldVal, newVal);
return oldVal;
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+ atomic_fetch_and((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+ atomic_fetch_or((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_xor((_Atomic uint32 *)&var->value, val);
+ atomic_fetch_xor((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_xor((_Atomic uint64 *)&var->value, val);
+ atomic_fetch_xor((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__
#if defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, eor, val);
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+ atomic_fetch_add((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_sub((_Atomic uint32 *)&var->value, val);
+ atomic_fetch_sub((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+ res = atomic_fetch_or((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 32, TRUE, &var->value, orr, val);
#else
uint32 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+ res = atomic_fetch_and((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 32, TRUE, &var->value, and, val);
#else
uint64 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+ res = atomic_fetch_or((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 64, TRUE, &var->value, orr, val);
#else
uint64 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+ res = atomic_fetch_and((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 64, TRUE, &var->value, and, val);
#else
uint32 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+ return atomic_fetch_add((_Atomic(uint32) *)&var->value, val);
#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
{
#if defined VM_ATOMIC_USE_C11
return atomic_compare_exchange_strong(
- (_Atomic uint64 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint64) *)&var->value, &oldVal, newVal);
#elif defined __GNUC__
#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite64(var, oldVal, newVal) == oldVal;
{
#if defined VM_ATOMIC_USE_C11
return atomic_compare_exchange_strong(
- (_Atomic uint32 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint32) *)&var->value, &oldVal, newVal);
#elif defined __GNUC__
#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
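
The boolean flavor above (true if the swap happened) is what retry loops
consume. A standalone C11 sketch of such a loop, exploiting the fact that
a failed compare-exchange refreshes the expected value (AtomicMax32 is a
hypothetical helper, not part of this header):

#include <stdatomic.h>
#include <stdint.h>

static void
AtomicMax32(_Atomic(uint32_t) *var, uint32_t val)
{
   uint32_t old = atomic_load(var);

   while (old < val &&
          !atomic_compare_exchange_strong(var, &old, val)) {
      /* old was refreshed with the observed value; recheck and retry. */
   }
}
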
Atomic_Read64(Atomic_uint64 const *var) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_load((const _Atomic uint64 *)&var->value);
+ return atomic_load((const _Atomic(uint64) *)&var->value);
#else
#if defined __GNUC__
uint64 value;
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+ return atomic_fetch_add((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
#elif defined __x86_64__
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+ return atomic_fetch_sub((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(ROP, 64, TRUE, &var->value, sub, val);
#else
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+ atomic_fetch_add((_Atomic(uint64) *)&var->value, val);
#elif !defined VM_64BIT
Atomic_ReadAdd64(var, val); /* Return value is unused. */
#elif defined __GNUC__
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+ atomic_fetch_sub((_Atomic(uint64) *)&var->value, val);
#elif !defined VM_64BIT
Atomic_ReadSub64(var, val); /* Return value is unused. */
#elif defined __GNUC__
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- return atomic_exchange((_Atomic uint64 *)&var->value, val);
+ return atomic_exchange((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
#endif
#if defined VM_ATOMIC_USE_C11
- atomic_store((_Atomic uint64 *)&var->value, val);
+ atomic_store((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined __x86_64__
/*
* There is no move instruction for 64-bit immediate to memory, so unless
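
(Background for the comment above, which the diff context cuts off: x86-64
MOV to memory encodes at most a sign-extended 32-bit immediate, so a full
64-bit constant must be materialized in a register first:

   movabsq $0x1122334455667788, %rax
   movq    %rax, (%rdi)

A value representable as a sign-extended imm32 can be stored directly.)
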
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+ atomic_fetch_or((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
uint64 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+ atomic_fetch_and((_Atomic(uint64) *)&var->value, val);
#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
#endif
#if defined VM_ATOMIC_USE_C11
- value = atomic_load((_Atomic uint16 *)&var->value);
+ value = atomic_load((_Atomic(uint16) *)&var->value);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"movw %1, %0"
uint16 val) // IN:
{
#if defined VM_ATOMIC_USE_C11
- return atomic_exchange((_Atomic uint16 *)&var->value, val);
+ return atomic_exchange((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"xchgw %0, %1"
#endif
#if defined VM_ATOMIC_USE_C11
- atomic_store((_Atomic uint16 *)&var->value, val);
+ atomic_store((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"movw %1, %0"
{
#if defined VM_ATOMIC_USE_C11
atomic_compare_exchange_strong(
- (_Atomic uint16 *)&var->value, &oldVal, newVal);
+ (_Atomic(uint16) *)&var->value, &oldVal, newVal);
return oldVal;
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
uint16 val;
uint16 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+ res = atomic_fetch_and((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 16, TRUE, &var->value, and, val);
#else
uint16 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+ atomic_fetch_and((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; andw %1, %0"
uint16 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+ atomic_fetch_or((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; orw %1, %0"
uint16 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_xor((_Atomic uint16 *)&var->value, val);
+ atomic_fetch_xor((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; xorw %1, %0"
uint16 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+ atomic_fetch_add((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; addw %1, %0"
uint16 val) // IN
{
#if defined VM_ATOMIC_USE_C11
- atomic_fetch_sub((_Atomic uint16 *)&var->value, val);
+ atomic_fetch_sub((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; subw %1, %0"
uint16 res;
#if defined VM_ATOMIC_USE_C11
- res = atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+ res = atomic_fetch_or((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 16, TRUE, &var->value, orr, val);
#else
uint16 val) // IN:
{
#if defined VM_ATOMIC_USE_C11
- return atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+ return atomic_fetch_add((_Atomic(uint16) *)&var->value, val);
#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; xaddw %0, %1"