atomic_fetch_add_explicit((o), (v), memory_order_relaxed)
#define atomic_fetch_sub_relaxed(o, v) \
atomic_fetch_sub_explicit((o), (v), memory_order_relaxed)
+/* Relaxed-order convenience wrappers for atomic fetch-OR / fetch-AND. */
+#define atomic_fetch_or_relaxed(o, v) \
+ atomic_fetch_or_explicit((o), (v), memory_order_relaxed)
+#define atomic_fetch_and_relaxed(o, v) \
+ atomic_fetch_and_explicit((o), (v), memory_order_relaxed)
#define atomic_exchange_relaxed(o, v) \
atomic_exchange_explicit((o), (v), memory_order_relaxed)
#define atomic_compare_exchange_weak_relaxed(o, e, d) \
uint64_t v;
} atomic_uint_fast64_t;
-
typedef struct atomic_bool_s {
isc_mutex_t m;
bool v;
#define atomic_init(obj, desired) \
{ \
isc_mutex_init(&(obj)->m); \
- REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
(obj)->v = desired; \
- REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
}
#define atomic_load_explicit(obj, order) \
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
- ___v= (obj)->v; \
+ ___v = (obj)->v; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v; \
})
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
- ___v= (obj)->v; \
+ ___v = (obj)->v; \
(obj)->v += arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
#define atomic_fetch_sub_explicit(obj, arg, order) \
({ typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
- ___v= (obj)->v; \
+ ___v = (obj)->v; \
(obj)->v -= arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
})
+/*
+ * Mutex-based emulation of C11 atomic fetch-AND: ANDs 'arg' into the
+ * stored value and yields the previous value.  'order' is ignored;
+ * the mutex already provides full ordering.
+ */
+#define atomic_fetch_and_explicit(obj, arg, order) \
+ ({ \
+ typeof((obj)->v) ___v; \
+ REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
+ ___v = (obj)->v; \
+ (obj)->v &= arg; \
+ REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
+ ___v;\
+ })
+/*
+ * Mutex-based emulation of C11 atomic fetch-OR: ORs 'arg' into the
+ * stored value and yields the previous value.  'order' is ignored;
+ * the mutex already provides full ordering.
+ */
+#define atomic_fetch_or_explicit(obj, arg, order) \
+ ({ \
+ typeof((obj)->v) ___v; \
+ REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
+ ___v = (obj)->v; \
+ (obj)->v |= arg; \
+ REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
+ ___v;\
+ })
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, \
succ, fail) \
({ \
atomic_fetch_add_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
+/* Sequentially-consistent convenience wrappers. */
+#define atomic_fetch_and(obj, arg) \
+ atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
+#define atomic_fetch_or(obj, arg) \
+ atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
#define atomic_compare_exchange_strong(obj, expected, desired) \
atomic_compare_exchange_strong_explicit(obj, expected, desired, \
memory_order_seq_cst, \
__c11_atomic_fetch_add(obj, arg, order)
#define atomic_fetch_sub_explicit(obj, arg, order) \
__c11_atomic_fetch_sub(obj, arg, order)
+/* Map fetch-AND/fetch-OR onto clang's __c11_atomic builtins. */
+#define atomic_fetch_and_explicit(obj, arg, order) \
+ __c11_atomic_fetch_and(obj, arg, order)
+#define atomic_fetch_or_explicit(obj, arg, order) \
+ __c11_atomic_fetch_or(obj, arg, order)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
__c11_atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail) \
__atomic_fetch_add(obj, arg, order)
#define atomic_fetch_sub_explicit(obj, arg, order) \
__atomic_fetch_sub(obj, arg, order)
+/* Map fetch-AND/fetch-OR onto GCC's __atomic builtins. */
+#define atomic_fetch_and_explicit(obj, arg, order) \
+ __atomic_fetch_and(obj, arg, order)
+#define atomic_fetch_or_explicit(obj, arg, order) \
+ __atomic_fetch_or(obj, arg, order)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
__atomic_compare_exchange_n(obj, expected, desired, 0, succ, fail)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail) \
*obj = desired; \
__sync_synchronize(); \
} while (0);
-#define atomic_fetch_add_explicit(obj, arg, order) \
+#define atomic_fetch_add_explicit(obj, arg, order) \
__sync_fetch_and_add(obj, arg)
-#define atomic_fetch_sub_explicit(obj, arg, order) \
+#define atomic_fetch_sub_explicit(obj, arg, order) \
__sync_fetch_and_sub(obj, arg, order)
+/*
+ * Legacy __sync builtins are always full barriers; the extra 'order'
+ * argument is accepted but ignored by the builtin (their prototypes
+ * are variadic), matching the fetch_sub mapping above.
+ */
+#define atomic_fetch_and_explicit(obj, arg, order) \
+ __sync_fetch_and_and(obj, arg, order)
+#define atomic_fetch_or_explicit(obj, arg, order) \
+ __sync_fetch_and_or(obj, arg, order)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
({ \
__typeof__(obj) __v; \
atomic_fetch_add_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
+/* Sequentially-consistent convenience wrappers. */
+#define atomic_fetch_and(obj, arg) \
+ atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
+#define atomic_fetch_or(obj, arg) \
+ atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
#define atomic_compare_exchange_strong(obj, expected, desired) \
atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst)
#define atomic_compare_exchange_weak(obj, expected, desired) \
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
+/*
+ * 8-bit atomic fetch-AND.  InterlockedAnd8() takes a 'char volatile *'
+ * destination, so the cast must be to a pointer type (the previous
+ * value-type cast converted the pointer to an integer).  There are no
+ * ordering variants of the 8-bit Interlocked operations; 'order' is
+ * ignored (full barrier).
+ */
+#define atomic_fetch_and_explicit8(obj, arg, order) \
+ InterlockedAnd8((char volatile *)obj, arg)
+
+/*
+ * 32-bit atomic fetch-AND: select the Interlocked variant matching the
+ * requested memory order; anything else gets the full-barrier form.
+ */
+#define atomic_fetch_and_explicit32(obj, arg, order) \
+ (order == memory_order_relaxed \
+ ? InterlockedAndNoFence((atomic_int_fast32_t *)obj, arg) \
+ : (order == memory_order_acquire \
+ ? InterlockedAndAcquire((atomic_int_fast32_t *)obj, arg) \
+ : (order == memory_order_release \
+ ? InterlockedAndRelease((atomic_int_fast32_t *)obj, arg) \
+ : InterlockedAnd((atomic_int_fast32_t *)obj, arg))))
+
+/*
+ * 64-bit atomic fetch-AND.  The ordering variants are only available
+ * on 64-bit Windows; on 32-bit targets always use the full-barrier
+ * InterlockedAnd64().
+ */
+#ifdef _WIN64
+#define atomic_fetch_and_explicit64(obj, arg, order) \
+ (order == memory_order_relaxed \
+ ? InterlockedAnd64NoFence((atomic_int_fast64_t *)obj, arg) \
+ : (order == memory_order_acquire \
+ ? InterlockedAnd64Acquire((atomic_int_fast64_t *)obj, arg) \
+ : (order == memory_order_release \
+ ? InterlockedAnd64Release((atomic_int_fast64_t *)obj, arg) \
+ : InterlockedAnd64((atomic_int_fast64_t *)obj, arg))))
+#else
+#define atomic_fetch_and_explicit64(obj, arg, order) \
+ InterlockedAnd64((atomic_int_fast64_t *)obj, arg)
+#endif
+
+/*
+ * Runtime trap for unsupported operand sizes in the size-dispatching
+ * atomic_fetch_and_explicit(); never returns.  '(void)' gives a proper
+ * prototype instead of an old-style empty parameter list.
+ */
+static inline
+int8_t
+atomic_and_abort(void) {
+ INSIST(0);
+ ISC_UNREACHABLE();
+}
+
+/*
+ * Dispatch fetch-AND by operand size (8/32/64 bits); any other size
+ * aborts at runtime via atomic_and_abort().
+ */
+#define atomic_fetch_and_explicit(obj, arg, order) \
+ (sizeof(*(obj)) == 8 \
+ ? atomic_fetch_and_explicit64(obj, arg, order) \
+ : (sizeof(*(obj)) == 4 \
+ ? atomic_fetch_and_explicit32(obj, arg, order) \
+ : (sizeof(*(obj)) == 1 \
+ ? atomic_fetch_and_explicit8(obj, arg, order) \
+ : atomic_and_abort())))
+
+#define atomic_fetch_and(obj, arg) \
+ atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
+
+/*
+ * 8-bit atomic fetch-OR.  InterlockedOr8() takes a 'char volatile *'
+ * destination, so the cast must be to a pointer type (the previous
+ * value-type cast converted the pointer to an integer).  There are no
+ * ordering variants of the 8-bit Interlocked operations; 'order' is
+ * ignored (full barrier).
+ */
+#define atomic_fetch_or_explicit8(obj, arg, order) \
+ InterlockedOr8((char volatile *)obj, arg)
+
+/*
+ * 32-bit atomic fetch-OR: select the Interlocked variant matching the
+ * requested memory order; anything else gets the full-barrier form.
+ */
+#define atomic_fetch_or_explicit32(obj, arg, order) \
+ (order == memory_order_relaxed \
+ ? InterlockedOrNoFence((atomic_int_fast32_t *)obj, arg) \
+ : (order == memory_order_acquire \
+ ? InterlockedOrAcquire((atomic_int_fast32_t *)obj, arg) \
+ : (order == memory_order_release \
+ ? InterlockedOrRelease((atomic_int_fast32_t *)obj, arg) \
+ : InterlockedOr((atomic_int_fast32_t *)obj, arg))))
+
+/*
+ * 64-bit atomic fetch-OR.  The ordering variants are only available
+ * on 64-bit Windows; on 32-bit targets always use the full-barrier
+ * InterlockedOr64().
+ */
+#ifdef _WIN64
+#define atomic_fetch_or_explicit64(obj, arg, order) \
+ (order == memory_order_relaxed \
+ ? InterlockedOr64NoFence((atomic_int_fast64_t *)obj, arg) \
+ : (order == memory_order_acquire \
+ ? InterlockedOr64Acquire((atomic_int_fast64_t *)obj, arg) \
+ : (order == memory_order_release \
+ ? InterlockedOr64Release((atomic_int_fast64_t *)obj, arg) \
+ : InterlockedOr64((atomic_int_fast64_t *)obj, arg))))
+#else
+#define atomic_fetch_or_explicit64(obj, arg, order) \
+ InterlockedOr64((atomic_int_fast64_t *)obj, arg)
+#endif
+
+/*
+ * Runtime trap for unsupported operand sizes in the size-dispatching
+ * atomic_fetch_or_explicit(); never returns.  '(void)' gives a proper
+ * prototype instead of an old-style empty parameter list.
+ */
+static inline
+int8_t
+atomic_or_abort(void) {
+ INSIST(0);
+ ISC_UNREACHABLE();
+}
+
+/*
+ * Dispatch fetch-OR by operand size (8/32/64 bits); any other size
+ * aborts at runtime via atomic_or_abort().
+ */
+#define atomic_fetch_or_explicit(obj, arg, order) \
+ (sizeof(*(obj)) == 8 \
+ ? atomic_fetch_or_explicit64(obj, arg, order) \
+ : (sizeof(*(obj)) == 4 \
+ ? atomic_fetch_or_explicit32(obj, arg, order) \
+ : (sizeof(*(obj)) == 1 \
+ ? atomic_fetch_or_explicit8(obj, arg, order) \
+ : atomic_or_abort())))
+
+#define atomic_fetch_or(obj, arg) \
+ atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
+
static inline bool
atomic_compare_exchange_strong_explicit8(atomic_int_fast8_t *obj,
int8_t *expected,