__ATOMIC_RELEASE);
/* if the idx hasn't changed, we're good, else try again */
- if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE))
+ if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
+ __ATOMIC_ACQUIRE))
break;
/*
 * we have to use __ATOMIC_RELEASE here
 * to ensure that all preceding read instructions complete
 * before the decrement is visible to ossl_synchronize_rcu
 */
data->thread_qps[i].depth--;
if (data->thread_qps[i].depth == 0) {
- ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
- __ATOMIC_RELEASE);
+ ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
+ VAL_READER, __ATOMIC_RELEASE);
OPENSSL_assert(ret != UINT64_MAX);
data->thread_qps[i].qp = NULL;
data->thread_qps[i].lock = NULL;
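/*
 * A minimal, self-contained sketch of the reader-side acquire/retry
 * pattern shown in the hunks above, using C11 stdatomic in place of
 * OpenSSL's internal ATOMIC_* macros.  The demo_* names are
 * hypothetical and not part of OpenSSL.
 */
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_VAL_READER ((uint64_t)1)

struct demo_qp {
    _Atomic uint64_t users;
};

struct demo_lock {
    _Atomic uint32_t reader_idx;
    struct demo_qp qp_group[2];
};

static struct demo_qp *demo_reader_enter(struct demo_lock *lock)
{
    uint32_t qp_idx;

    for (;;) {
        /* load the currently active qp index */
        qp_idx = atomic_load_explicit(&lock->reader_idx,
                                      memory_order_acquire);

        /* register ourselves as a reader on that qp */
        atomic_fetch_add_explicit(&lock->qp_group[qp_idx].users,
                                  DEMO_VAL_READER, memory_order_release);

        /* if the idx hasn't changed, we're good, else back out and retry */
        if (qp_idx == atomic_load_explicit(&lock->reader_idx,
                                           memory_order_acquire))
            break;

        atomic_fetch_sub_explicit(&lock->qp_group[qp_idx].users,
                                  DEMO_VAL_READER, memory_order_release);
    }
    return &lock->qp_group[qp_idx];
}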
CRYPTO_RCU_LOCK *lock;
};
-#define MAX_QPS 10
+# define MAX_QPS 10
/*
* This is the per thread tracking data
* that is assigned to each thread participating
* in an rcu qp
*/
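/*
 * A minimal sketch of how the per-thread table described above is
 * searched on the read-lock/unlock paths, assuming the struct thread_qp
 * layout shown earlier; demo_find_slot and demo_thr_data are
 * hypothetical helpers, not part of OpenSSL.
 */
struct demo_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

static struct thread_qp *demo_find_slot(struct demo_thr_data *data,
                                        CRYPTO_RCU_LOCK *lock)
{
    int i;

    /* each thread tracks at most MAX_QPS concurrently held rcu locks */
    for (i = 0; i < MAX_QPS; i++)
        if (data->thread_qps[i].lock == lock)
            return &data->thread_qps[i];
    return NULL;
}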
struct rcu_cb_item *cb_items, *tmpcb;
/* before we do anything else, lets grab the cb list */
- cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items, NULL);
+ cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items,
+ NULL);
qp = update_qp(lock);
new->data = data;
new->fn = cb;
- new->next = InterlockedExchangePointer((void * volatile *)&lock->cb_items, new);
+ new->next = InterlockedExchangePointer((void * volatile *)&lock->cb_items,
+ new);
return 1;
}
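/*
 * A compilable sketch of the exchange-based callback list used above:
 * producers push with one atomic pointer swap, and the reclaim path
 * drains the whole list with a single swap to NULL.  The demo_* names
 * are hypothetical; only InterlockedExchangePointer is real Win32 API.
 */
#include <windows.h>

struct demo_cb_item {
    void (*fn)(void *data);
    void *data;
    struct demo_cb_item *next;
};

static void *volatile demo_cb_head;

static void demo_cb_push(struct demo_cb_item *new)
{
    /* atomically splice ourselves in as the new list head */
    new->next = InterlockedExchangePointer(&demo_cb_head, new);
}

static void demo_cb_run_all(void)
{
    struct demo_cb_item *cur, *tmp;

    /* take ownership of the entire list in one atomic swap */
    cur = InterlockedExchangePointer(&demo_cb_head, NULL);
    while (cur != NULL) {
        tmp = cur->next;
        cur->fn(cur->data);
        cur = tmp;
    }
}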
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
- *ret = (int)InterlockedExchangeAdd((long volatile *)val, (long)amount) + amount;
+ *ret = (int)InterlockedExchangeAdd((long volatile *)val, (long)amount)
+ + amount;
return 1;
}
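/*
 * Example use of the public CRYPTO_atomic_add() API shown above:
 * atomically increment a reference count.  demo_upref is a hypothetical
 * caller; on this Windows path the lock argument is not used, but
 * portable code should pass a real lock for the fallback builds.
 */
#include <openssl/crypto.h>

static int demo_upref(int *refcount, CRYPTO_RWLOCK *lock)
{
    int new_count;

    if (!CRYPTO_atomic_add(refcount, 1, &new_count, lock))
        return 0;
    /* new_count now holds the post-increment value */
    return new_count > 1;
}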
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
return 0;
*val += op;
*ret = *val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
*ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
return 1;
-#endif
+# endif
}
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
return 0;
*val &= op;
*ret = *val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
*ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op) & op;
return 1;
-#endif
+# endif
}
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
return 0;
*val |= op;
*ret = *val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
*ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
return 1;
-#endif
+# endif
}
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
return 0;
*ret = *val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
*ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
return 1;
-#endif
+# endif
}
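/*
 * The OR-with-zero idiom above is the usual way to get a full-barrier
 * 64-bit atomic load from the Interlocked API: InterlockedOr64(p, 0)
 * writes nothing new but returns the previous value of *p.
 * demo_atomic_load64 is a hypothetical standalone helper.
 */
#include <windows.h>
#include <stdint.h>

static uint64_t demo_atomic_load64(uint64_t volatile *p)
{
    /* OR with 0 leaves *p unchanged and returns its value atomically */
    return (uint64_t)InterlockedOr64((LONG64 volatile *)p, 0);
}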
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
return 0;
*dst = val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
InterlockedExchange64((LONG64 volatile *)dst, (LONG64)val);
return 1;
-#endif
+# endif
}
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
-#if (defined(NO_INTERLOCKEDOR64))
+# if (defined(NO_INTERLOCKEDOR64))
if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
return 0;
*ret = *val;
if (!CRYPTO_THREAD_unlock(lock))
return 0;
return 1;
-#else
+# else
/* On Windows, LONG (but not long) is always the same size as int. */
*ret = (int)InterlockedOr((LONG volatile *)val, 0);
return 1;
-#endif
+# endif
}
int openssl_init_fork_handlers(void)