/*********************************************************
- * Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
+ * Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
#pragma warning(pop)
#endif
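+
+/*
+ * Wasm has no inline-assembly atomics path in this file, so fall back to
+ * the C11 <stdatomic.h> operations. The C11 generic functions default to
+ * memory_order_seq_cst, which is at least as strong as the barriers the
+ * assembly implementations below provide.
+ */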
+#ifdef __wasm__
+#define VM_ATOMIC_USE_C11
+#endif
+
+#ifdef VM_ATOMIC_USE_C11
+#include <stdatomic.h>
+#endif
+
#include "vm_basic_types.h"
#include "vm_assert.h"
{
uint8 val;
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+ val = atomic_load((const _Atomic uint8 *)&var->value);
+#elif defined __GNUC__ && defined VM_ARM_32
val = AtomicUndefined(var);
#elif defined __GNUC__ && defined VM_ARM_64
val = _VMATOM_X(R, 8, &var->value);
Atomic_ReadWrite8(Atomic_uint8 *var, // IN/OUT:
uint8 val) // IN:
{
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+ return atomic_exchange((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined(var + val);
#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(RW, 8, TRUE, &var->value, val);
Atomic_Write8(Atomic_uint8 *var, // IN/OUT:
uint8 val) // IN:
{
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
+ atomic_store((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_32
AtomicUndefined(var + val);
#elif defined __GNUC__ && defined VM_ARM_64
_VMATOM_X(W, 8, &var->value, val);
uint8 oldVal, // IN:
uint8 newVal) // IN:
{
-#if defined __GNUC__ && defined VM_ARM_32
+#if defined VM_ATOMIC_USE_C11
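+   /*
+    * On failure, atomic_compare_exchange_strong() overwrites oldVal with
+    * the value actually observed, so returning oldVal always yields the
+    * value that was read. The 16/32/64-bit variants use the same pattern.
+    */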
+ atomic_compare_exchange_strong(
+ (_Atomic uint8 *)&var->value, &oldVal, newVal);
+ return oldVal;
+#elif defined __GNUC__ && defined VM_ARM_32
return AtomicUndefined(var + oldVal + newVal);
#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(RIFEQW, 8, TRUE, &var->value, oldVal, newVal);
{
uint8 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_and((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, and, val);
#else
do {
{
uint8 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_or((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, orr, val);
#else
do {
{
uint8 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_xor((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, eor, val);
#else
do {
{
uint8 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_add((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 8, TRUE, &var->value, add, val);
#else
do {
Atomic_Sub8(Atomic_uint8 *var, // IN/OUT
uint8 val) // IN
{
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_sub((_Atomic uint8 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
_VMATOM_X(OP, 8, TRUE, &var->value, sub, val);
#else
Atomic_Add8(var, -val);
ASSERT(((uintptr_t)var % 4) == 0);
#endif
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
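+   /*
+    * A C11 atomic_load() is guaranteed to be a single atomic access, so
+    * the inline-assembly workaround below is not needed on this path.
+    */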
+   value = atomic_load((const _Atomic uint32 *)&var->value);
+#elif defined __GNUC__
/*
* Use inline assembler to force using a single load instruction to
* ensure that the compiler doesn't split a transfer operation into multiple
Atomic_ReadWrite32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ return atomic_exchange((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 retVal;
uint32 res;
ASSERT(((uintptr_t)var % 4) == 0);
#endif
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_store((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#if defined VM_ARM_64
_VMATOM_X(W, 32, &var->value, val);
#elif defined VM_ARM_32
uint32 oldVal, // IN
uint32 newVal) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_compare_exchange_strong(
+ (_Atomic uint32 *)&var->value, &oldVal, newVal);
+ return oldVal;
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 retVal;
uint32 res;
#define Atomic_ReadIfEqualWrite Atomic_ReadIfEqualWrite32
-#if defined VM_64BIT || defined VM_ARM_V7
+#if defined VM_64BIT || defined VM_ARM_V7 || defined VM_ATOMIC_USE_C11
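+
+/*
+ * C11 provides 64-bit atomics on every target (lock-based, or via a
+ * support library, where the hardware has no native support), so these
+ * operations are also available whenever VM_ATOMIC_USE_C11 is defined.
+ */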
/*
*-----------------------------------------------------------------------------
*
uint64 oldVal, // IN
uint64 newVal) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_compare_exchange_strong(
+ (_Atomic uint64 *)&var->value, &oldVal, newVal);
+ return oldVal;
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint64 retVal;
uint32 res;
Atomic_And32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 tmp;
Atomic_Or32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 tmp;
Atomic_Xor32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_xor((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 tmp;
Atomic_Xor64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_xor((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__
#if defined VM_ARM_64
_VMATOM_X(OP, 64, TRUE, &var->value, eor, val);
#else /* VM_X86_64 */
Atomic_Add32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 tmp;
Atomic_Sub32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_sub((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 tmp;
static INLINE void
Atomic_Inc32(Atomic_uint32 *var) // IN/OUT
{
-#ifdef __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ Atomic_Add32(var, 1);
+#elif defined __GNUC__
#if defined VM_ARM_ANY
Atomic_Add32(var, 1);
#else /* VM_X86_ANY */
static INLINE void
Atomic_Dec32(Atomic_uint32 *var) // IN/OUT
{
-#ifdef __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ Atomic_Sub32(var, 1);
+#elif defined __GNUC__
#if defined VM_ARM_ANY
Atomic_Sub32(var, 1);
#else /* VM_X86_ANY */
{
uint32 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 32, TRUE, &var->value, orr, val);
#else
do {
{
uint32 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 32, TRUE, &var->value, and, val);
#else
do {
{
uint64 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 64, TRUE, &var->value, orr, val);
#else
do {
{
uint64 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 64, TRUE, &var->value, and, val);
#else
do {
Atomic_ReadAdd32(Atomic_uint32 *var, // IN/OUT
uint32 val) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ return atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+#elif defined __GNUC__
#ifdef VM_ARM_V7
uint32 res;
uint32 retVal;
uint64 oldVal, // IN
uint64 newVal) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
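+   /*
+    * Unlike Atomic_ReadIfEqualWrite64(), this returns the success flag,
+    * which is exactly what atomic_compare_exchange_strong() yields.
+    */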
+ return atomic_compare_exchange_strong(
+ (_Atomic uint64 *)&var->value, &oldVal, newVal);
+#elif defined __GNUC__
#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite64(var, oldVal, newVal) == oldVal;
#else /* VM_X86_ANY */
uint32 oldVal, // IN
uint32 newVal) // IN
{
-#if defined __GNUC__
+#if defined VM_ATOMIC_USE_C11
+ return atomic_compare_exchange_strong(
+ (_Atomic uint32 *)&var->value, &oldVal, newVal);
+#elif defined __GNUC__
#if defined VM_ARM_ANY
return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
#else /* VM_X86_ANY */
static INLINE uint64
Atomic_Read64(Atomic_uint64 const *var) // IN
{
+#if defined VM_ATOMIC_USE_C11
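+   /*
+    * A C11 atomic load of a 64-bit value is atomic even on 32-bit
+    * targets, so no special load sequence is needed here.
+    */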
+ return atomic_load((const _Atomic uint64 *)&var->value);
+#else
#if defined __GNUC__
uint64 value;
#endif
#if defined __GNUC__
return value;
#endif
+#endif // !defined VM_ATOMIC_USE_C11
}
Atomic_ReadAdd64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ return atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
#elif defined __x86_64__
Atomic_ReadSub64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ return atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
return _VMATOM_X(ROP, 64, TRUE, &var->value, sub, val);
#else
# ifdef _MSC_VER
Atomic_Add64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if !defined VM_64BIT
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+#elif !defined VM_64BIT
Atomic_ReadAdd64(var, val); /* Return value is unused. */
#elif defined __GNUC__
#if defined VM_ARM_64
Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if !defined VM_64BIT
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+#elif !defined VM_64BIT
Atomic_ReadSub64(var, val); /* Return value is unused. */
#elif defined __GNUC__
#if defined VM_ARM_64
static INLINE void
Atomic_Inc64(Atomic_uint64 *var) // IN/OUT
{
-#if defined VM_ARM_64
+#if defined VM_ARM_64 || defined VM_ATOMIC_USE_C11
Atomic_Add64(var, 1);
#elif !defined __x86_64__
Atomic_ReadInc64(var); /* Return value is unused. */
static INLINE void
Atomic_Dec64(Atomic_uint64 *var) // IN/OUT
{
-#if defined VM_ARM_64
+#if defined VM_ARM_64 || defined VM_ATOMIC_USE_C11
Atomic_Sub64(var, 1);
#elif !defined __x86_64__
Atomic_ReadDec64(var); /* Return value is unused. */
Atomic_ReadWrite64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+ return atomic_exchange((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"xchgq %0, %1"
ASSERT((uintptr_t)var % 8 == 0);
#endif
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+ atomic_store((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
/*
* There is no move instruction for 64-bit immediate to memory, so unless
* the immediate value fits in 32-bit (i.e. can be sign-extended), GCC
Atomic_Or64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; orq %1, %0"
Atomic_And64(Atomic_uint64 *var, // IN/OUT
uint64 val) // IN
{
-#if defined __GNUC__ && defined __x86_64__
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+#elif defined __GNUC__ && defined __x86_64__
/* Checked against the AMD manual and GCC --hpreg */
__asm__ __volatile__(
"lock; andq %1, %0"
ASSERT((uintptr_t)var % 2 == 0);
#endif
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+   value = atomic_load((const _Atomic uint16 *)&var->value);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"movw %1, %0"
: "=r" (value)
Atomic_ReadWrite16(Atomic_uint16 *var, // IN/OUT:
uint16 val) // IN:
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ return atomic_exchange((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"xchgw %0, %1"
: "=r" (val),
ASSERT((uintptr_t)var % 2 == 0);
#endif
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_store((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"movw %1, %0"
: "=m" (var->value)
uint16 oldVal, // IN
uint16 newVal) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_compare_exchange_strong(
+ (_Atomic uint16 *)&var->value, &oldVal, newVal);
+ return oldVal;
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
uint16 val;
__asm__ __volatile__(
{
uint16 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 16, TRUE, &var->value, and, val);
#else
do {
Atomic_And16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; andw %1, %0"
: "+m" (var->value)
Atomic_Or16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; orw %1, %0"
: "+m" (var->value)
Atomic_Xor16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_xor((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; xorw %1, %0"
: "+m" (var->value)
Atomic_Add16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; addw %1, %0"
: "+m" (var->value)
Atomic_Sub16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ atomic_fetch_sub((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; subw %1, %0"
: "+m" (var->value)
{
uint16 res;
-#if defined __GNUC__ && defined VM_ARM_64
+#if defined VM_ATOMIC_USE_C11
+ res = atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && defined VM_ARM_64
res = _VMATOM_X(ROP, 16, TRUE, &var->value, orr, val);
#else
do {
Atomic_ReadAdd16(Atomic_uint16 *var, // IN/OUT
uint16 val) // IN:
{
-#if defined __GNUC__ && (defined __x86_64__ || defined __i386__)
+#if defined VM_ATOMIC_USE_C11
+ return atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+#elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
__asm__ __volatile__(
"lock; xaddw %0, %1"
: "=r" (val),