CLEANUP: atomic: add an explicit _FETCH variant for add/sub/and/or
author     Willy Tarreau <w@1wt.eu>
           Tue, 6 Apr 2021 09:44:07 +0000 (11:44 +0200)
committer  Willy Tarreau <w@1wt.eu>
           Wed, 7 Apr 2021 16:18:37 +0000 (18:18 +0200)
Currently our atomic ops return a value, but it is never clear whether
the fetch is done before or after the operation, which causes some
confusion each time the returned value is needed. Let's create explicit
variants of these operations suffixed with _FETCH to state that the
fetch occurs after the operation, and use them at the few call sites
that rely on the returned value.
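
For reference, the distinction this commit makes explicit is the one between
fetch-and-op (returning the value seen before the operation, as HA_ATOMIC_XADD
does) and op-and-fetch (returning the value after the operation, which the new
*_FETCH macros guarantee). The following minimal, standalone sketch is not part
of the patch; it simply reuses the gcc/clang __atomic-based definitions added to
include/haproxy/atomic.h below to show the difference:

    /* Standalone sketch (not part of the patch) showing fetch-before vs
     * fetch-after semantics. The two macros mirror the __atomic-based
     * definitions added to include/haproxy/atomic.h by this commit.
     */
    #include <stdio.h>

    #define HA_ATOMIC_XADD(val, i)      __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST) /* old value */
    #define HA_ATOMIC_ADD_FETCH(val, i) __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST) /* new value */

    int main(void)
    {
            unsigned int counter = 5;

            /* fetch-and-add: returns the value observed before the increment */
            unsigned int before = HA_ATOMIC_XADD(&counter, 1);     /* before == 5, counter == 6 */

            /* add-and-fetch: returns the value after the increment, which is
             * what call sites such as srv_use_conn() below rely on
             */
            unsigned int after = HA_ATOMIC_ADD_FETCH(&counter, 1); /* after == 7, counter == 7 */

            printf("before=%u after=%u counter=%u\n", before, after, counter);
            return 0;
    }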

14 files changed:
include/haproxy/atomic.h
include/haproxy/fd.h
include/haproxy/freq_ctr.h
include/haproxy/pattern.h
include/haproxy/server.h
include/haproxy/task.h
src/backend.c
src/dict.c
src/fd.c
src/haproxy.c
src/proxy.c
src/queue.c
src/ssl_sock.c
src/task.c

index 8608b12f68f614201f08da03d30ac6d33383afec..7a357dae2f3dd2b032847ac1f5167863beae969a 100644 (file)
 #define HA_ATOMIC_OR(val, flags)     ({*(val) |= (flags);})
 #define HA_ATOMIC_ADD(val, i)        ({*(val) += (i);})
 #define HA_ATOMIC_SUB(val, i)        ({*(val) -= (i);})
+
+#define HA_ATOMIC_AND_FETCH(val, flags) ({ *(val) &= (flags); })
+#define HA_ATOMIC_OR_FETCH(val, flags)  ({ *(val) |= (flags); })
+#define HA_ATOMIC_ADD_FETCH(val, i)     ({ *(val) += (i); })
+#define HA_ATOMIC_SUB_FETCH(val, i)     ({ *(val) -= (i); })
+
 #define HA_ATOMIC_XADD(val, i)                                         \
        ({                                                              \
                typeof((val)) __p_xadd = (val);                         \
 #define HA_ATOMIC_OR(val, flags)     __sync_or_and_fetch(val,  flags)
 #define HA_ATOMIC_ADD(val, i)        __sync_add_and_fetch(val, i)
 #define HA_ATOMIC_SUB(val, i)        __sync_sub_and_fetch(val, i)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
+#define HA_ATOMIC_OR_FETCH(val, flags)  __sync_or_and_fetch(val,  flags)
+#define HA_ATOMIC_ADD_FETCH(val, i)     __sync_add_and_fetch(val, i)
+#define HA_ATOMIC_SUB_FETCH(val, i)     __sync_sub_and_fetch(val, i)
+
 #define HA_ATOMIC_XADD(val, i)       __sync_fetch_and_add(val, i)
 
 #define HA_ATOMIC_BTS(val, bit)                                                \
 #define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_SEQ_CST)
 #define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
 #define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_OR_FETCH(val, flags)  __atomic_or_fetch(val,  flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_ADD_FETCH(val, i)     __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_SUB_FETCH(val, i)     __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
+
 #define HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
 
 #define HA_ATOMIC_BTS(val, bit)                                                \
 #define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_RELAXED)
 #define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
 #define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+
+#define _HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_OR_FETCH(val, flags)  __atomic_or_fetch(val,  flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_ADD_FETCH(val, i)     __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_SUB_FETCH(val, i)     __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+
 #define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
 #define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
 /* warning, n is a pointer to the double value for dwcas */
@@ -634,6 +658,10 @@ static inline void __ha_compiler_barrier(void)
 #define _HA_ATOMIC_ADD HA_ATOMIC_ADD
 #endif /* !_HA_ATOMIC_ADD */
 
+#ifndef _HA_ATOMIC_ADD_FETCH
+#define _HA_ATOMIC_ADD_FETCH HA_ATOMIC_ADD_FETCH
+#endif /* !_HA_ATOMIC_ADD_FETCH */
+
 #ifndef _HA_ATOMIC_XADD
 #define _HA_ATOMIC_XADD HA_ATOMIC_XADD
 #endif /* !_HA_ATOMIC_SUB */
@@ -642,14 +670,26 @@ static inline void __ha_compiler_barrier(void)
 #define _HA_ATOMIC_SUB HA_ATOMIC_SUB
 #endif /* !_HA_ATOMIC_SUB */
 
+#ifndef _HA_ATOMIC_SUB_FETCH
+#define _HA_ATOMIC_SUB_FETCH HA_ATOMIC_SUB_FETCH
+#endif /* !_HA_ATOMIC_SUB_FETCH */
+
 #ifndef _HA_ATOMIC_AND
 #define _HA_ATOMIC_AND HA_ATOMIC_AND
 #endif /* !_HA_ATOMIC_AND */
 
+#ifndef _HA_ATOMIC_AND_FETCH
+#define _HA_ATOMIC_AND_FETCH HA_ATOMIC_AND_FETCH
+#endif /* !_HA_ATOMIC_AND_FETCH */
+
 #ifndef _HA_ATOMIC_OR
 #define _HA_ATOMIC_OR HA_ATOMIC_OR
 #endif /* !_HA_ATOMIC_OR */
 
+#ifndef _HA_ATOMIC_OR_FETCH
+#define _HA_ATOMIC_OR_FETCH HA_ATOMIC_OR_FETCH
+#endif /* !_HA_ATOMIC_OR_FETCH */
+
 #ifndef _HA_ATOMIC_XCHG
 #define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
 #endif /* !_HA_ATOMIC_XCHG */
index 37c9ac536200488804cd71446ef2b7d84813ee24..cb49e2585647760993e8843b5dd95a5e6b8a4a82 100644 (file)
@@ -126,7 +126,7 @@ static inline void done_update_polling(int fd)
 {
        unsigned long update_mask;
 
-       update_mask = _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+       update_mask = _HA_ATOMIC_AND_FETCH(&fdtab[fd].update_mask, ~tid_bit);
        while ((update_mask & all_threads_mask)== 0) {
                /* If we were the last one that had to update that entry, remove it from the list */
                fd_rm_from_fd_list(&update_list, fd, offsetof(struct fdtab, update));
@@ -346,7 +346,7 @@ static inline int fd_set_running(int fd)
  */
 static inline long fd_clr_running(int fd)
 {
-       return _HA_ATOMIC_AND(&fdtab[fd].running_mask, ~tid_bit);
+       return _HA_ATOMIC_AND_FETCH(&fdtab[fd].running_mask, ~tid_bit);
 }
 
 /* Update events seen for FD <fd> and its state if needed. This should be
index b1db43ef664e1704273632dca4e620c7e39b4554..87926253969b9b2f6c7d04ee48e9fd291041b3b5 100644 (file)
@@ -51,7 +51,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in
        do {
                now_tmp = global_now >> 32;
                if (curr_sec == (now_tmp & 0x7fffffff))
-                       return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+                       return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 
                /* remove the bit, used for the lock */
                curr_sec &= 0x7fffffff;
@@ -72,7 +72,7 @@ static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int in
        /* release the lock and update the time in case of rotate. */
        _HA_ATOMIC_STORE(&ctr->curr_sec, curr_sec & 0x7fffffff);
 
-       return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+       return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 }
 
 /* Update a frequency counter by <inc> incremental units. It is automatically
@@ -90,7 +90,7 @@ static inline unsigned int update_freq_ctr_period(struct freq_ctr_period *ctr,
        do {
                now_ms_tmp = global_now_ms;
                if (now_ms_tmp - curr_tick < period)
-                       return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+                       return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 
                /* remove the bit, used for the lock */
                curr_tick &= ~1;
@@ -112,7 +112,7 @@ static inline unsigned int update_freq_ctr_period(struct freq_ctr_period *ctr,
        /* release the lock and update the time in case of rotate. */
        _HA_ATOMIC_STORE(&ctr->curr_tick, curr_tick);
 
-       return _HA_ATOMIC_ADD(&ctr->curr_ctr, inc);
+       return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 }
 
 /* Read a frequency counter taking history into account for missing time in
index 3f489a948cffb07c4da9ba99aa6cbca2cca5b4ab..5b64c0d29460a81d68b3cbd1278e364f7de3ca0b 100644 (file)
@@ -204,7 +204,7 @@ void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace);
  */
 static inline unsigned int pat_ref_newgen(struct pat_ref *ref)
 {
-       return HA_ATOMIC_ADD(&ref->next_gen, 1);
+       return HA_ATOMIC_ADD_FETCH(&ref->next_gen, 1);
 }
 
 /* Give up a previously assigned generation number. By doing this the caller
index d3dc1d6442a5f7078f80ddc908272b82fd16e029..e4450a8f1a4b7021f412db0f572fd151b6afc3ce 100644 (file)
@@ -252,7 +252,7 @@ static inline void srv_use_conn(struct server *srv, struct connection *conn)
 {
        unsigned int curr;
 
-       curr = _HA_ATOMIC_ADD(&srv->curr_used_conns, 1);
+       curr = _HA_ATOMIC_ADD_FETCH(&srv->curr_used_conns, 1);
 
        /* It's ok not to do that atomically, we don't need an
         * exact max.
@@ -318,7 +318,7 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
            !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
                int retadd;
 
-               retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
+               retadd = _HA_ATOMIC_ADD_FETCH(&srv->curr_idle_conns, 1);
                if (retadd > srv->max_idle_conns) {
                        _HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
                        return 0;
index f692a0afad9b88c3e91347cfb47006887548954e..3276c7e01c99cb76e1a2acfe5f4341f6c7773bf7 100644 (file)
@@ -207,7 +207,7 @@ static inline void _task_wakeup(struct task *t, unsigned int f, const char *file
 {
        unsigned int state;
 
-       state = _HA_ATOMIC_OR(&t->state, f);
+       state = _HA_ATOMIC_OR_FETCH(&t->state, f);
        while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
                if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
 #ifdef DEBUG_TASK
index 62be510e77feb439b3e659dfe8d0ac8e50303d5a..1b9c704c162feff2de87e985d13273ddedb29fd2 100644 (file)
@@ -1703,7 +1703,7 @@ skip_reuse:
                int count;
 
                s->flags |= SF_CURR_SESS;
-               count = _HA_ATOMIC_ADD(&srv->cur_sess, 1);
+               count = _HA_ATOMIC_ADD_FETCH(&srv->cur_sess, 1);
                HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
                if (s->be->lbprm.server_take_conn)
                        s->be->lbprm.server_take_conn(srv, 0);
index f3c2a731477e78c23ba466d2e63b5ab428474205..ba076d0582f3e830f8d5699bcc9229daf99341be 100644 (file)
@@ -116,7 +116,7 @@ void dict_entry_unref(struct dict *d, struct dict_entry *de)
        if (!de)
                return;
 
-       if (HA_ATOMIC_SUB(&de->refcount, 1) != 0)
+       if (HA_ATOMIC_SUB_FETCH(&de->refcount, 1) != 0)
                return;
 
        HA_RWLOCK_WRLOCK(DICT_LOCK, &d->rwlock);
index 8d671bdc35de4cf6109f78123d99eb39216da3e3..588271f8e4b93ac2f388415722c655970d8fd811 100644 (file)
--- a/src/fd.c
+++ b/src/fd.c
@@ -374,7 +374,7 @@ int fd_takeover(int fd, void *expected_owner)
        int ret = -1;
 
 #ifndef HA_HAVE_CAS_DW
-       if (_HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit) == tid_bit) {
+       if (_HA_ATOMIC_OR_FETCH(&fdtab[fd].running_mask, tid_bit) == tid_bit) {
                HA_RWLOCK_WRLOCK(OTHER_LOCK, &fd_mig_lock);
                if (fdtab[fd].owner == expected_owner) {
                        fdtab[fd].thread_mask = tid_bit;
@@ -388,7 +388,7 @@ int fd_takeover(int fd, void *expected_owner)
 
        new_masks[0] = new_masks[1] = tid_bit;
 
-       old_masks[0] = _HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit);
+       old_masks[0] = _HA_ATOMIC_OR_FETCH(&fdtab[fd].running_mask, tid_bit);
        old_masks[1] = fdtab[fd].thread_mask;
 
        /* protect ourself against a delete then an insert for the same fd,
index e7d30b01feca023d7b79ce3e7a78da1448eef264..a8f2274b26921863e3c49e9b3dca10ff7813c624 100644 (file)
@@ -2385,7 +2385,7 @@ void run_poll_loop()
                        int i;
 
                        if (stopping) {
-                               if (_HA_ATOMIC_OR(&stopping_thread_mask, tid_bit) == tid_bit) {
+                               if (_HA_ATOMIC_OR_FETCH(&stopping_thread_mask, tid_bit) == tid_bit) {
                                        /* notify all threads that stopping was just set */
                                        for (i = 0; i < global.nbthread; i++)
                                                if (((all_threads_mask & ~stopping_thread_mask) >> i) & 1)
index fb60bf4a4564eb2b0451dabbd6a3202763dc4b32..1b2cd2e477e8430b6f6c16984cfb1bc98b7b157e 100644 (file)
@@ -2126,7 +2126,7 @@ int stream_set_backend(struct stream *s, struct proxy *be)
 
        s->be = be;
        HA_ATOMIC_UPDATE_MAX(&be->be_counters.conn_max,
-                            HA_ATOMIC_ADD(&be->beconn, 1));
+                            HA_ATOMIC_ADD_FETCH(&be->beconn, 1));
        proxy_inc_be_ctr(be);
 
        /* assign new parameters to the stream from the new backend */
index e5ab5db4578a442e7a314b5b8eaaadb75772f5cf..58f98752a486c6ae2c64b2825fc2387bf462ac03 100644 (file)
@@ -400,7 +400,7 @@ struct pendconn *pendconn_add(struct stream *strm)
        if (srv) {
                unsigned int old_max, new_max;
 
-               new_max = _HA_ATOMIC_ADD(&srv->nbpend, 1);
+               new_max = _HA_ATOMIC_ADD_FETCH(&srv->nbpend, 1);
                old_max = srv->counters.nbpend_max;
                while (new_max > old_max) {
                        if (likely(_HA_ATOMIC_CAS(&srv->counters.nbpend_max, &old_max, new_max)))
@@ -416,7 +416,7 @@ struct pendconn *pendconn_add(struct stream *strm)
        else {
                unsigned int old_max, new_max;
 
-               new_max = _HA_ATOMIC_ADD(&px->nbpend, 1);
+               new_max = _HA_ATOMIC_ADD_FETCH(&px->nbpend, 1);
                old_max = px->be_counters.nbpend_max;
                while (new_max > old_max) {
                        if (likely(_HA_ATOMIC_CAS(&px->be_counters.nbpend_max, &old_max, new_max)))
index b06c2ae9682b9bba0f10f9e17f8401ccc4283da0..525d02f8168d7844241bda0aaaf9586f6e317d70 100644 (file)
@@ -1943,7 +1943,7 @@ ssl_sock_do_create_cert(const char *servername, struct bind_conf *bind_conf, SSL
         * number */
        if (X509_set_version(newcrt, 2L) != 1)
                goto mkcert_error;
-       ASN1_INTEGER_set(X509_get_serialNumber(newcrt), _HA_ATOMIC_ADD(&ssl_ctx_serial, 1));
+       ASN1_INTEGER_set(X509_get_serialNumber(newcrt), _HA_ATOMIC_ADD_FETCH(&ssl_ctx_serial, 1));
 
        /* Set duration for the certificate */
        if (!X509_gmtime_adj(X509_getm_notBefore(newcrt), (long)-60*60*24) ||
index be3262af35dbae35d4af0bcbe1f9c99866ef658f..59f6ff713e0fa1aa7b504cf5b87a5f47df910e2c 100644 (file)
@@ -585,7 +585,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
                                HA_ATOMIC_ADD(&profile_entry->cpu_time, cpu);
                        }
 
-                       state = _HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
+                       state = _HA_ATOMIC_AND_FETCH(&t->state, ~TASK_RUNNING);
                        if (unlikely(state & TASK_KILLED)) {
                                task_unlink_wq(t);
                                __task_free(t);