#define HA_ATOMIC_OR(val, flags) ({*(val) |= (flags);})
#define HA_ATOMIC_ADD(val, i) ({*(val) += (i);})
#define HA_ATOMIC_SUB(val, i) ({*(val) -= (i);})
+
+#define HA_ATOMIC_AND_FETCH(val, flags) ({ *(val) &= (flags); })
+#define HA_ATOMIC_OR_FETCH(val, flags) ({ *(val) |= (flags); })
+#define HA_ATOMIC_ADD_FETCH(val, i) ({ *(val) += (i); })
+#define HA_ATOMIC_SUB_FETCH(val, i) ({ *(val) -= (i); })
+
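These single-threaded fallbacks rely on GCC statement expressions, which evaluate to their last expression, so the *_FETCH macros return the updated value even with no atomic builtin behind them. A minimal standalone sketch of that property (the ADD_FETCH name is hypothetical, not part of the patch):

#include <stdio.h>

/* hypothetical stand-in for the non-threaded HA_ATOMIC_ADD_FETCH above */
#define ADD_FETCH(val, i) ({ *(val) += (i); })

int main(void)
{
	int x = 40;
	/* the ({ ... }) block evaluates to *val after the addition */
	int ret = ADD_FETCH(&x, 2);
	printf("x=%d ret=%d\n", x, ret); /* prints x=42 ret=42 */
	return 0;
}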
#define HA_ATOMIC_XADD(val, i) \
	({ \
		typeof((val)) __p_xadd = (val); \
#define HA_ATOMIC_OR(val, flags) __sync_or_and_fetch(val, flags)
#define HA_ATOMIC_ADD(val, i) __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB(val, i) __sync_sub_and_fetch(val, i)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
+#define HA_ATOMIC_OR_FETCH(val, flags) __sync_or_and_fetch(val, flags)
+#define HA_ATOMIC_ADD_FETCH(val, i) __sync_add_and_fetch(val, i)
+#define HA_ATOMIC_SUB_FETCH(val, i) __sync_sub_and_fetch(val, i)
+
#define HA_ATOMIC_XADD(val, i) __sync_fetch_and_add(val, i)
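The legacy __sync builtins already encode the return-value split that this patch makes explicit: __sync_add_and_fetch() yields the value after the operation (hence the *_FETCH macros), while __sync_fetch_and_add() yields the value before it (used by HA_ATOMIC_XADD). A small sketch, assuming a standalone test program:

#include <stdio.h>

int main(void)
{
	int a = 10, b = 10;
	int newv = __sync_add_and_fetch(&a, 5); /* value after the add: 15 */
	int oldv = __sync_fetch_and_add(&b, 5); /* value before the add: 10 */
	printf("newv=%d oldv=%d\n", newv, oldv);
	return 0;
}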
#define HA_ATOMIC_BTS(val, bit) \
#define HA_ATOMIC_OR(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_ADD(val, i) __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
#define HA_ATOMIC_SUB(val, i) __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_OR_FETCH(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_ADD_FETCH(val, i) __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_SUB_FETCH(val, i) __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
+
#define HA_ATOMIC_XADD(val, i) __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
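The C11-style __atomic builtins take an explicit memory-order argument; __ATOMIC_SEQ_CST requests the strongest ordering, matching what the __sync builtins imply, and the new/old return split is the same. A sketch under those assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int ctr = 0;
	/* add-and-fetch: returns the counter after the increment */
	unsigned int newv = __atomic_add_fetch(&ctr, 1, __ATOMIC_SEQ_CST);
	/* fetch-and-add: returns the counter before the increment */
	unsigned int oldv = __atomic_fetch_add(&ctr, 1, __ATOMIC_SEQ_CST);
	printf("newv=%u oldv=%u ctr=%u\n", newv, oldv, ctr); /* 1 1 2 */
	return 0;
}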
#define HA_ATOMIC_BTS(val, bit) \
#define _HA_ATOMIC_OR(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_ADD(val, i) __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_SUB(val, i) __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+
+#define _HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_OR_FETCH(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_ADD_FETCH(val, i) __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_SUB_FETCH(val, i) __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+
#define _HA_ATOMIC_XADD(val, i) __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
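The underscore-prefixed variants use __ATOMIC_RELAXED: the update itself stays atomic, but it imposes no ordering on surrounding accesses, so callers pair these with explicit barriers where ordering matters. A minimal sketch of that pairing (the fence placement is illustrative, not taken from the patch):

#include <stdio.h>

static unsigned int counter;

int main(void)
{
	/* relaxed: atomic, but unordered relative to neighbouring accesses */
	unsigned int n = __atomic_add_fetch(&counter, 1, __ATOMIC_RELAXED);

	/* an explicit fence restores ordering where the caller needs it */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	printf("%u\n", n); /* 1 */
	return 0;
}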
/* warning, n is a pointer to the double value for dwcas */
#define _HA_ATOMIC_ADD HA_ATOMIC_ADD
#endif /* !_HA_ATOMIC_ADD */
+#ifndef _HA_ATOMIC_ADD_FETCH
+#define _HA_ATOMIC_ADD_FETCH HA_ATOMIC_ADD_FETCH
+#endif /* !_HA_ATOMIC_ADD_FETCH */
+
#ifndef _HA_ATOMIC_XADD
#define _HA_ATOMIC_XADD HA_ATOMIC_XADD
#endif /* !_HA_ATOMIC_XADD */

#ifndef _HA_ATOMIC_SUB
#define _HA_ATOMIC_SUB HA_ATOMIC_SUB
#endif /* !_HA_ATOMIC_SUB */
+#ifndef _HA_ATOMIC_SUB_FETCH
+#define _HA_ATOMIC_SUB_FETCH HA_ATOMIC_SUB_FETCH
+#endif /* !_HA_ATOMIC_SUB_FETCH */
+
#ifndef _HA_ATOMIC_AND
#define _HA_ATOMIC_AND HA_ATOMIC_AND
#endif /* !_HA_ATOMIC_AND */
+#ifndef _HA_ATOMIC_AND_FETCH
+#define _HA_ATOMIC_AND_FETCH HA_ATOMIC_AND_FETCH
+#endif /* !_HA_ATOMIC_AND_FETCH */
+
#ifndef _HA_ATOMIC_OR
#define _HA_ATOMIC_OR HA_ATOMIC_OR
#endif /* !_HA_ATOMIC_OR */
+#ifndef _HA_ATOMIC_OR_FETCH
+#define _HA_ATOMIC_OR_FETCH HA_ATOMIC_OR_FETCH
+#endif /* !_HA_ATOMIC_OR_FETCH */
+
#ifndef _HA_ATOMIC_XCHG
#define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
#endif /* !_HA_ATOMIC_XCHG */
{
unsigned long update_mask;
- update_mask = _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ update_mask = _HA_ATOMIC_AND_FETCH(&fdtab[fd].update_mask, ~tid_bit);
while ((update_mask & all_threads_mask) == 0) {
/* If we were the last one that had to update that entry, remove it from the list */
fd_rm_from_fd_list(&update_list, fd, offsetof(struct fdtab, update));
*/
static inline long fd_clr_running(int fd)
{
- return _HA_ATOMIC_AND(&fdtab[fd].running_mask, ~tid_bit);
+ return _HA_ATOMIC_AND_FETCH(&fdtab[fd].running_mask, ~tid_bit);
}
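Both call sites above consume the returned mask: after clearing the calling thread's bit, whatever remains tells the thread whether it was the last one involved. A fetch-and-AND (old value) would make that test off by one thread, which is exactly what the explicit *_FETCH naming guards against. A simplified sketch of the idiom, with hypothetical mask and my_bit values:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x1UL;   /* hypothetical: only our bit is set */
	unsigned long my_bit = 0x1UL;

	/* AND-and-fetch: returns the mask after our bit is removed */
	unsigned long remaining = __atomic_and_fetch(&mask, ~my_bit,
						     __ATOMIC_RELAXED);
	if (remaining == 0)
		printf("we were the last thread, safe to clean up\n");
	return 0;
}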
/* Update events seen for FD <fd> and its state if needed. This should be
do {
now_tmp = global_now >> 32;
if (curr_sec == (now_tmp & 0x7fffffff))
- return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+ return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
/* remove the bit, used for the lock */
curr_sec &= 0x7fffffff;
/* release the lock and update the time in case of rotate. */
_HA_ATOMIC_STORE(&ctr->curr_sec, curr_sec & 0x7fffffff);
- return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+ return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
}
/* Update a frequency counter by <inc> incremental units. It is automatically
do {
now_ms_tmp = global_now_ms;
if (now_ms_tmp - curr_tick < period)
- return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+ return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
/* remove the bit, used for the lock */
curr_tick &= ~1;
/* release the lock and update the time in case of rotate. */
_HA_ATOMIC_STORE(&ctr->curr_tick, curr_tick);
- return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+ return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
}
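In both frequency-counter paths the caller receives the counter value after the increment, so the returned rate sample is up to date; renaming to _HA_ATOMIC_ADD_FETCH preserves that contract. A reduced sketch of the fast path (simplified; the real code also handles rotation and a lock bit):

#include <stdio.h>

struct ctr { unsigned int curr_ctr; };

/* simplified fast path: bump the counter and hand back the new total */
static unsigned int ctr_add(struct ctr *c, unsigned int inc)
{
	return __atomic_add_fetch(&c->curr_ctr, inc, __ATOMIC_RELAXED);
}

int main(void)
{
	struct ctr c = { 0 };
	printf("%u\n", ctr_add(&c, 3)); /* 3 */
	printf("%u\n", ctr_add(&c, 2)); /* 5 */
	return 0;
}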
/* Read a frequency counter taking history into account for missing time in
*/
static inline unsigned int pat_ref_newgen(struct pat_ref *ref)
{
- return HA_ATOMIC_ADD(&ref->next_gen, 1);
+ return HA_ATOMIC_ADD_FETCH(&ref->next_gen, 1);
}
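Here the returned generation number must be the post-increment value so that two concurrent callers can never observe the same generation. The same idiom as a standalone sketch:

#include <stdio.h>

static unsigned int next_gen;

/* each caller gets a distinct, freshly incremented generation number */
static unsigned int newgen(void)
{
	return __atomic_add_fetch(&next_gen, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned int g1 = newgen(), g2 = newgen();
	printf("%u %u\n", g1, g2); /* 1 2 */
	return 0;
}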
/* Give up a previously assigned generation number. By doing this the caller
{
unsigned int curr;
- curr = _HA_ATOMIC_ADD(&srv->curr_used_conns, 1);
+ curr = _HA_ATOMIC_ADD_FETCH(&srv->curr_used_conns, 1);
/* It's ok not to do that atomically, we don't need an
* exact max.
!conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
int retadd;
- retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
+ retadd = _HA_ATOMIC_ADD_FETCH(&srv->curr_idle_conns, 1);
if (retadd > srv->max_idle_conns) {
_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
return 0;
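The idle-connection path shows a common lock-free pattern: optimistically increment, compare the returned new value against the limit, and undo the increment on overshoot. A standalone sketch of that pattern (names and the limit are illustrative):

#include <stdio.h>

static unsigned int curr_idle;
static const unsigned int max_idle = 2;

/* try to account one more idle connection; roll back on overshoot */
static int add_idle(void)
{
	unsigned int n = __atomic_add_fetch(&curr_idle, 1, __ATOMIC_RELAXED);

	if (n > max_idle) {
		__atomic_sub_fetch(&curr_idle, 1, __ATOMIC_RELAXED);
		return 0; /* refused */
	}
	return 1; /* accepted */
}

int main(void)
{
	int a = add_idle(), b = add_idle(), c = add_idle();
	printf("%d %d %d\n", a, b, c); /* 1 1 0 */
	return 0;
}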
{
unsigned int state;
- state = _HA_ATOMIC_OR(&t->state, f);
+ state = _HA_ATOMIC_OR_FETCH(&t->state, f);
while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
#ifdef DEBUG_TASK
int count;
s->flags |= SF_CURR_SESS;
- count = _HA_ATOMIC_ADD(&srv->cur_sess, 1);
+ count = _HA_ATOMIC_ADD_FETCH(&srv->cur_sess, 1);
HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
if (s->be->lbprm.server_take_conn)
s->be->lbprm.server_take_conn(srv, 0);
if (!de)
return;
- if (HA_ATOMIC_SUB(&de->refcount, 1) != 0)
+ if (HA_ATOMIC_SUB_FETCH(&de->refcount, 1) != 0)
return;
HA_RWLOCK_WRLOCK(DICT_LOCK, &d->rwlock);
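The dictionary release relies on the decremented value: only the caller that drops the refcount to zero goes on to take the write lock and free the entry. A minimal sketch of that release pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned int refcount; };

/* the last reference out frees the entry; everyone else just returns */
static void entry_put(struct entry *e)
{
	if (__atomic_sub_fetch(&e->refcount, 1, __ATOMIC_SEQ_CST) != 0)
		return;
	free(e);
	printf("freed\n");
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));
	e->refcount = 2;
	entry_put(e); /* refcount drops to 1, nothing happens */
	entry_put(e); /* refcount drops to 0, entry is freed */
	return 0;
}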
int ret = -1;
#ifndef HA_HAVE_CAS_DW
- if (_HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit) == tid_bit) {
+ if (_HA_ATOMIC_OR_FETCH(&fdtab[fd].running_mask, tid_bit) == tid_bit) {
HA_RWLOCK_WRLOCK(OTHER_LOCK, &fd_mig_lock);
if (fdtab[fd].owner == expected_owner) {
fdtab[fd].thread_mask = tid_bit;
new_masks[0] = new_masks[1] = tid_bit;
- old_masks[0] = _HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit);
+ old_masks[0] = _HA_ATOMIC_OR_FETCH(&fdtab[fd].running_mask, tid_bit);
old_masks[1] = fdtab[fd].thread_mask;
/* protect ourselves against a delete then an insert for the same fd,
int i;
if (stopping) {
- if (_HA_ATOMIC_OR(&stopping_thread_mask, tid_bit) == tid_bit) {
+ if (_HA_ATOMIC_OR_FETCH(&stopping_thread_mask, tid_bit) == tid_bit) {
/* notify all threads that stopping was just set */
for (i = 0; i < global.nbthread; i++)
if (((all_threads_mask & ~stopping_thread_mask) >> i) & 1)
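Both hunks above test _HA_ATOMIC_OR_FETCH(...) == tid_bit: after ORing our bit in, a result equal to our bit alone means no other thread's bit was set, i.e. we are the first (or only) one there. A reduced sketch of the idiom with a hypothetical bit mask:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0;
	unsigned long my_bit = 0x2UL; /* hypothetical: we are thread 1 */

	/* OR-and-fetch: a result equal to our bit alone means we were first */
	if (__atomic_or_fetch(&mask, my_bit, __ATOMIC_RELAXED) == my_bit)
		printf("first to set the flag, do the one-time work\n");
	return 0;
}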
s->be = be;
HA_ATOMIC_UPDATE_MAX(&be->be_counters.conn_max,
- HA_ATOMIC_ADD(&be->beconn, 1));
+ HA_ATOMIC_ADD_FETCH(&be->beconn, 1));
proxy_inc_be_ctr(be);
/* assign new parameters to the stream from the new backend */
if (srv) {
unsigned int old_max, new_max;
- new_max = _HA_ATOMIC_ADD(&srv->nbpend, 1);
+ new_max = _HA_ATOMIC_ADD_FETCH(&srv->nbpend, 1);
old_max = srv->counters.nbpend_max;
while (new_max > old_max) {
if (likely(_HA_ATOMIC_CAS(&srv->counters.nbpend_max, &old_max, new_max)))
else {
unsigned int old_max, new_max;
- new_max = _HA_ATOMIC_ADD(&px->nbpend, 1);
+ new_max = _HA_ATOMIC_ADD_FETCH(&px->nbpend, 1);
old_max = px->be_counters.nbpend_max;
while (new_max > old_max) {
if (likely(_HA_ATOMIC_CAS(&px->be_counters.nbpend_max, &old_max, new_max)))
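Both pending-queue hunks feed the freshly returned counter into a CAS retry loop that raises the recorded maximum; on a failed CAS, old_max is refreshed with the observed value and the comparison is redone. A standalone reduction of that loop (variable names are illustrative):

#include <stdio.h>

static unsigned int nbpend, nbpend_max;

/* bump the counter, then propagate a new maximum with a CAS retry loop */
static void pend_inc(void)
{
	unsigned int new_max = __atomic_add_fetch(&nbpend, 1, __ATOMIC_RELAXED);
	unsigned int old_max = nbpend_max;

	while (new_max > old_max) {
		/* on failure, old_max is refreshed and the test is redone */
		if (__atomic_compare_exchange_n(&nbpend_max, &old_max, new_max, 0,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			break;
	}
}

int main(void)
{
	pend_inc();
	pend_inc();
	printf("nbpend=%u max=%u\n", nbpend, nbpend_max); /* 2 and 2 */
	return 0;
}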
* number */
if (X509_set_version(newcrt, 2L) != 1)
goto mkcert_error;
- ASN1_INTEGER_set(X509_get_serialNumber(newcrt), _HA_ATOMIC_ADD(&ssl_ctx_serial, 1));
+ ASN1_INTEGER_set(X509_get_serialNumber(newcrt), _HA_ATOMIC_ADD_FETCH(&ssl_ctx_serial, 1));
/* Set duration for the certificate */
if (!X509_gmtime_adj(X509_getm_notBefore(newcrt), (long)-60*60*24) ||
HA_ATOMIC_ADD(&profile_entry->cpu_time, cpu);
}
- state = _HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
+ state = _HA_ATOMIC_AND_FETCH(&t->state, ~TASK_RUNNING);
if (unlikely(state & TASK_KILLED)) {
task_unlink_wq(t);
__task_free(t);
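This last hunk clears TASK_RUNNING and inspects the remaining flags in the returned state to catch a task that was killed while it ran. A reduced sketch of flag-clearing with AND-and-fetch, using hypothetical flag values:

#include <stdio.h>

#define RUNNING 0x1u /* hypothetical flag values */
#define KILLED  0x2u

int main(void)
{
	unsigned int state = RUNNING | KILLED;

	/* clear RUNNING; the returned value shows the flags still set */
	if (__atomic_and_fetch(&state, ~RUNNING, __ATOMIC_RELAXED) & KILLED)
		printf("task was killed while it ran, free it now\n");
	return 0;
}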