git.ipfire.org Git - thirdparty/open-vm-tools.git/commitdiff
Change to common header file not applicable to open-vm-tools.
author: Katy Feng <fkaty@vmware.com>
Thu, 5 Oct 2023 17:35:28 +0000 (10:35 -0700)
committer: Katy Feng <fkaty@vmware.com>
Thu, 5 Oct 2023 17:35:28 +0000 (10:35 -0700)
open-vm-tools/lib/include/vm_atomic.h

index 6a58fd0a8276887b882ac05765ecc02b4aaf4286..6337e49d9a94bca172e612a54b027f6c5965f90d 100644 (file)
@@ -318,7 +318,7 @@ Atomic_Read8(Atomic_uint8 const *var)  // IN:
    uint8 val;
 
 #if defined VM_ATOMIC_USE_C11
-   val = atomic_load((const _Atomic uint8 *)&var->value);
+   val = atomic_load((const _Atomic(uint8) *)&var->value);
 #elif defined __GNUC__ && defined VM_ARM_32
    val = AtomicUndefined(var);
 #elif defined __GNUC__ && defined VM_ARM_64
@@ -360,7 +360,7 @@ Atomic_ReadWrite8(Atomic_uint8 *var,  // IN/OUT:
                   uint8 val)          // IN:
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_exchange((_Atomic uint8 *)&var->value, val);
+   return atomic_exchange((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_32
    return AtomicUndefined(var + val);
 #elif defined __GNUC__ && defined VM_ARM_64
@@ -404,7 +404,7 @@ Atomic_Write8(Atomic_uint8 *var,  // IN/OUT:
               uint8 val)          // IN:
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_store((_Atomic uint8 *)&var->value, val);
+   atomic_store((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_32
    AtomicUndefined(var + val);
 #elif defined __GNUC__ && defined VM_ARM_64
@@ -446,7 +446,7 @@ Atomic_ReadIfEqualWrite8(Atomic_uint8 *var,  // IN/OUT:
 {
 #if defined VM_ATOMIC_USE_C11
    atomic_compare_exchange_strong(
-      (_Atomic uint8 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint8) *)&var->value, &oldVal, newVal);
    return oldVal;
 #elif defined __GNUC__ && defined VM_ARM_32
    return AtomicUndefined(var + oldVal + newVal);
@@ -498,7 +498,7 @@ Atomic_ReadAnd8(Atomic_uint8 *var, // IN/OUT
    uint8 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_and((_Atomic uint8 *)&var->value, val);
+   res = atomic_fetch_and((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, and, val);
 #else
@@ -562,7 +562,7 @@ Atomic_ReadOr8(Atomic_uint8 *var, // IN/OUT
    uint8 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_or((_Atomic uint8 *)&var->value, val);
+   res = atomic_fetch_or((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, orr, val);
 #else
@@ -626,7 +626,7 @@ Atomic_ReadXor8(Atomic_uint8 *var, // IN/OUT
    uint8 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_xor((_Atomic uint8 *)&var->value, val);
+   res = atomic_fetch_xor((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, eor, val);
 #else
@@ -690,7 +690,7 @@ Atomic_ReadAdd8(Atomic_uint8 *var, // IN/OUT
    uint8 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_add((_Atomic uint8 *)&var->value, val);
+   res = atomic_fetch_add((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 8, TRUE, &var->value, add, val);
 #else
@@ -752,7 +752,7 @@ Atomic_Sub8(Atomic_uint8 *var, // IN/OUT
             uint8 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_sub((_Atomic uint8 *)&var->value, val);
+   atomic_fetch_sub((_Atomic(uint8) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    _VMATOM_X(OP, 8, TRUE, &var->value, sub, val);
 #else
@@ -879,7 +879,7 @@ Atomic_Read32(Atomic_uint32 const *var) // IN
 #endif
 
 #if defined VM_ATOMIC_USE_C11
-   value = atomic_load((_Atomic uint32 *)&var->value);
+   value = atomic_load((_Atomic(uint32) *)&var->value);
 #elif defined __GNUC__
    /*
     * Use inline assembler to force using a single load instruction to
@@ -940,7 +940,7 @@ Atomic_ReadWrite32(Atomic_uint32 *var, // IN/OUT
                    uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_exchange((_Atomic uint32 *)&var->value, val);
+   return atomic_exchange((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 retVal;
@@ -1009,7 +1009,7 @@ Atomic_Write32(Atomic_uint32 *var, // OUT
 #endif
 
 #if defined VM_ATOMIC_USE_C11
-   atomic_store((_Atomic uint32 *)&var->value, val);
+   atomic_store((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #if defined VM_ARM_64
    _VMATOM_X(W, 32, &var->value, val);
@@ -1091,7 +1091,7 @@ Atomic_ReadIfEqualWrite32(Atomic_uint32 *var, // IN/OUT
 {
 #if defined VM_ATOMIC_USE_C11
    atomic_compare_exchange_strong(
-      (_Atomic uint32 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint32) *)&var->value, &oldVal, newVal);
    return oldVal;
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
@@ -1167,7 +1167,7 @@ Atomic_ReadIfEqualWrite64(Atomic_uint64 *var, // IN/OUT
 {
 #if defined VM_ATOMIC_USE_C11
    atomic_compare_exchange_strong(
-      (_Atomic uint64 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint64) *)&var->value, &oldVal, newVal);
    return oldVal;
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
@@ -1261,7 +1261,7 @@ Atomic_And32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+   atomic_fetch_and((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1323,7 +1323,7 @@ Atomic_Or32(Atomic_uint32 *var, // IN/OUT
             uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+   atomic_fetch_or((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1385,7 +1385,7 @@ Atomic_Xor32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_xor((_Atomic uint32 *)&var->value, val);
+   atomic_fetch_xor((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1448,7 +1448,7 @@ Atomic_Xor64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_xor((_Atomic uint64 *)&var->value, val);
+   atomic_fetch_xor((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__
 #if defined VM_ARM_64
    _VMATOM_X(OP, 64, TRUE, &var->value, eor, val);
@@ -1492,7 +1492,7 @@ Atomic_Add32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+   atomic_fetch_add((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1554,7 +1554,7 @@ Atomic_Sub32(Atomic_uint32 *var, // IN/OUT
              uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_sub((_Atomic uint32 *)&var->value, val);
+   atomic_fetch_sub((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1724,7 +1724,7 @@ Atomic_ReadOr32(Atomic_uint32 *var, // IN/OUT
    uint32 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_or((_Atomic uint32 *)&var->value, val);
+   res = atomic_fetch_or((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 32, TRUE, &var->value, orr, val);
 #else
@@ -1760,7 +1760,7 @@ Atomic_ReadAnd32(Atomic_uint32 *var, // IN/OUT
    uint32 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_and((_Atomic uint32 *)&var->value, val);
+   res = atomic_fetch_and((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 32, TRUE, &var->value, and, val);
 #else
@@ -1797,7 +1797,7 @@ Atomic_ReadOr64(Atomic_uint64 *var, // IN/OUT
    uint64 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+   res = atomic_fetch_or((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 64, TRUE, &var->value, orr, val);
 #else
@@ -1833,7 +1833,7 @@ Atomic_ReadAnd64(Atomic_uint64 *var, // IN/OUT
    uint64 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+   res = atomic_fetch_and((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 64, TRUE, &var->value, and, val);
 #else
@@ -1872,7 +1872,7 @@ Atomic_ReadAdd32(Atomic_uint32 *var, // IN/OUT
                  uint32 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_fetch_add((_Atomic uint32 *)&var->value, val);
+   return atomic_fetch_add((_Atomic(uint32) *)&var->value, val);
 #elif defined __GNUC__
 #ifdef VM_ARM_V7
    uint32 res;
@@ -1986,7 +1986,7 @@ Atomic_CMPXCHG64(Atomic_uint64 *var,   // IN/OUT
 {
 #if defined VM_ATOMIC_USE_C11
    return atomic_compare_exchange_strong(
-      (_Atomic uint64 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint64) *)&var->value, &oldVal, newVal);
 #elif defined __GNUC__
 #if defined VM_ARM_ANY
    return Atomic_ReadIfEqualWrite64(var, oldVal, newVal) == oldVal;
@@ -2114,7 +2114,7 @@ Atomic_CMPXCHG32(Atomic_uint32 *var,   // IN/OUT
 {
 #if defined VM_ATOMIC_USE_C11
    return atomic_compare_exchange_strong(
-      (_Atomic uint32 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint32) *)&var->value, &oldVal, newVal);
 #elif defined __GNUC__
 #if defined VM_ARM_ANY
    return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
@@ -2160,7 +2160,7 @@ static INLINE uint64
 Atomic_Read64(Atomic_uint64 const *var) // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_load((const _Atomic uint64 *)&var->value);
+   return atomic_load((const _Atomic(uint64) *)&var->value);
 #else
 #if defined __GNUC__
    uint64 value;
@@ -2284,7 +2284,7 @@ Atomic_ReadAdd64(Atomic_uint64 *var, // IN/OUT
                  uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+   return atomic_fetch_add((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
 #elif defined __x86_64__
@@ -2340,7 +2340,7 @@ Atomic_ReadSub64(Atomic_uint64 *var, // IN/OUT
                  uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+   return atomic_fetch_sub((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    return _VMATOM_X(ROP, 64, TRUE, &var->value, sub, val);
 #else
@@ -2434,7 +2434,7 @@ Atomic_Add64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_add((_Atomic uint64 *)&var->value, val);
+   atomic_fetch_add((_Atomic(uint64) *)&var->value, val);
 #elif !defined VM_64BIT
    Atomic_ReadAdd64(var, val); /* Return value is unused. */
 #elif defined __GNUC__
@@ -2479,7 +2479,7 @@ Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_sub((_Atomic uint64 *)&var->value, val);
+   atomic_fetch_sub((_Atomic(uint64) *)&var->value, val);
 #elif !defined VM_64BIT
    Atomic_ReadSub64(var, val); /* Return value is unused. */
 #elif defined __GNUC__
@@ -2604,7 +2604,7 @@ Atomic_ReadWrite64(Atomic_uint64 *var, // IN/OUT
                    uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_exchange((_Atomic uint64 *)&var->value, val);
+   return atomic_exchange((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
@@ -2657,7 +2657,7 @@ Atomic_Write64(Atomic_uint64 *var, // OUT
 #endif
 
 #if defined VM_ATOMIC_USE_C11
-   atomic_store((_Atomic uint64 *)&var->value, val);
+   atomic_store((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined __x86_64__
    /*
     * There is no move instruction for 64-bit immediate to memory, so unless
@@ -2710,7 +2710,7 @@ Atomic_Or64(Atomic_uint64 *var, // IN/OUT
             uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_or((_Atomic uint64 *)&var->value, val);
+   atomic_fetch_or((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
@@ -2756,7 +2756,7 @@ Atomic_And64(Atomic_uint64 *var, // IN/OUT
              uint64 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_and((_Atomic uint64 *)&var->value, val);
+   atomic_fetch_and((_Atomic(uint64) *)&var->value, val);
 #elif defined __GNUC__ && defined __x86_64__
    /* Checked against the AMD manual and GCC --hpreg */
    __asm__ __volatile__(
@@ -2969,7 +2969,7 @@ Atomic_Read16(Atomic_uint16 const *var) // IN
 #endif
 
 #if defined VM_ATOMIC_USE_C11
-   value = atomic_load((_Atomic uint16 *)&var->value);
+   value = atomic_load((_Atomic(uint16) *)&var->value);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "movw %1, %0"
@@ -3015,7 +3015,7 @@ Atomic_ReadWrite16(Atomic_uint16 *var,  // IN/OUT:
                    uint16 val)          // IN:
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_exchange((_Atomic uint16 *)&var->value, val);
+   return atomic_exchange((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "xchgw %0, %1"
@@ -3080,7 +3080,7 @@ Atomic_Write16(Atomic_uint16 *var,  // OUT:
 #endif
 
 #if defined VM_ATOMIC_USE_C11
-   atomic_store((_Atomic uint16 *)&var->value, val);
+   atomic_store((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "movw %1, %0"
@@ -3124,7 +3124,7 @@ Atomic_ReadIfEqualWrite16(Atomic_uint16 *var,   // IN/OUT
 {
 #if defined VM_ATOMIC_USE_C11
    atomic_compare_exchange_strong(
-      (_Atomic uint16 *)&var->value, &oldVal, newVal);
+      (_Atomic(uint16) *)&var->value, &oldVal, newVal);
    return oldVal;
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    uint16 val;
@@ -3192,7 +3192,7 @@ Atomic_ReadAnd16(Atomic_uint16 *var, // IN/OUT
    uint16 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+   res = atomic_fetch_and((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 16, TRUE, &var->value, and, val);
 #else
@@ -3226,7 +3226,7 @@ Atomic_And16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_and((_Atomic uint16 *)&var->value, val);
+   atomic_fetch_and((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; andw %1, %0"
@@ -3283,7 +3283,7 @@ Atomic_Or16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+   atomic_fetch_or((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; orw %1, %0"
@@ -3340,7 +3340,7 @@ Atomic_Xor16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_xor((_Atomic uint16 *)&var->value, val);
+   atomic_fetch_xor((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; xorw %1, %0"
@@ -3397,7 +3397,7 @@ Atomic_Add16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+   atomic_fetch_add((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; addw %1, %0"
@@ -3454,7 +3454,7 @@ Atomic_Sub16(Atomic_uint16 *var, // IN/OUT
              uint16 val)         // IN
 {
 #if defined VM_ATOMIC_USE_C11
-   atomic_fetch_sub((_Atomic uint16 *)&var->value, val);
+   atomic_fetch_sub((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; subw %1, %0"
@@ -3577,7 +3577,7 @@ Atomic_ReadOr16(Atomic_uint16 *var, // IN/OUT
    uint16 res;
 
 #if defined VM_ATOMIC_USE_C11
-   res = atomic_fetch_or((_Atomic uint16 *)&var->value, val);
+   res = atomic_fetch_or((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && defined VM_ARM_64
    res = _VMATOM_X(ROP, 16, TRUE, &var->value, orr, val);
 #else
@@ -3611,7 +3611,7 @@ Atomic_ReadAdd16(Atomic_uint16 *var,  // IN/OUT
                  uint16 val)          // IN:
 {
 #if defined VM_ATOMIC_USE_C11
-   return atomic_fetch_add((_Atomic uint16 *)&var->value, val);
+   return atomic_fetch_add((_Atomic(uint16) *)&var->value, val);
 #elif defined __GNUC__ && (defined __x86_64__ || defined __i386__)
    __asm__ __volatile__(
       "lock; xaddw %0, %1"