git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: lists: rename some MT_LIST operations to clarify them
author Willy Tarreau <w@1wt.eu>
Fri, 10 Jul 2020 06:10:29 +0000 (08:10 +0200)
committer Willy Tarreau <w@1wt.eu>
Fri, 10 Jul 2020 06:50:41 +0000 (08:50 +0200)
Initially when mt_lists were added, their purpose was to be used with
the scheduler, where anyone may concurrently add the same tasklet, so
it sounded natural to implement a check in MT_LIST_ADD{,Q}. Later their
usage was extended and MT_LIST_ADD{,Q} started to be used on situations
where the element to be added was exclusively owned by the one performing
the operation so a conflict was impossible. This became more obvious with
the idle connections and the new macro was called MT_LIST_ADDQ_NOCHECK.

But this remains confusing and at many places it's not expected that
an MT_LIST_ADD could possibly fail, and worse, at some places we start
by initializing it before adding (and the test is superfluous) so let's
rename them to something more conventional to denote the presence of the
check or not:

   MT_LIST_ADD{,Q}    : unconditional operation, the caller owns the
                        element, and doesn't care about the element's
                        current state (exactly like LIST_ADD)
   MT_LIST_TRY_ADD{,Q}: only perform the operation if the element is not
                        already added or in the process of being added.

This means that the previously "safe" MT_LIST_ADD{,Q} are not "safe"
anymore. This also means that in case of backport mistakes in the
future causing this to be overlooked, the slower and safer functions
will still be used by default.

Note that the missing unchecked MT_LIST_ADD macro was added.

The rest of the code will have to be reviewed so that a number of
callers of MT_LIST_TRY_ADDQ are changed to MT_LIST_ADDQ to remove
the unneeded test.

15 files changed:
include/haproxy/channel.h
include/haproxy/list.h
include/haproxy/server.h
include/haproxy/task.h
src/backend.c
src/flt_spoe.c
src/listener.c
src/mux_fcgi.c
src/mux_h1.c
src/mux_h2.c
src/server.c
src/ssl_sock.c
src/stream.c
src/task.c
tests/test-list.c

index 3cd3bb3b1f277ab3b0da65b485c21bacdd340178..004fc0374b9bebf082a961cf3b08fa277686b6cd 100644 (file)
@@ -850,7 +850,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
                return 1;
 
        if (!MT_LIST_ADDED(&wait->list))
-               MT_LIST_ADDQ(&buffer_wq, &wait->list);
+               MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list);
 
        return 0;
 }
index 9d376494ea56854964d97391bcba906b739d7438..dae5ca2c39dc454913e175d8d12f1e0d56dd849c 100644 (file)
  * Returns 1 if we added the item, 0 otherwise (because it was already in a
  * list).
  */
-#define MT_LIST_ADD(_lh, _el)                                              \
+#define MT_LIST_TRY_ADD(_lh, _el)                                              \
      ({                                                                    \
         int _ret = 0;                                                      \
        struct mt_list *lh = (_lh), *el = (_el);                           \
  * Returns 1 if we added the item, 0 otherwise (because it was already in a
  * list).
  */
-#define MT_LIST_ADDQ(_lh, _el)                                             \
+#define MT_LIST_TRY_ADDQ(_lh, _el)                                             \
     ({                                                                     \
        int _ret = 0;                                                      \
        struct mt_list *lh = (_lh), *el = (_el);                           \
        (_ret);                                                            \
     })
 
+/*
+ * Add an item at the beginning of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked.
+ */
+#define MT_LIST_ADD(_lh, _el)                                              \
+     ({                                                                    \
+        int _ret = 0;                                                      \
+       struct mt_list *lh = (_lh), *el = (_el);                           \
+       while (1) {                                                        \
+               struct mt_list *n;                                         \
+               struct mt_list *p;                                         \
+               n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);            \
+               if (n == MT_LIST_BUSY)                                     \
+                       continue;                                          \
+               p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);               \
+               if (p == MT_LIST_BUSY) {                                   \
+                       (lh)->next = n;                                    \
+                       __ha_barrier_store();                              \
+                       continue;                                          \
+               }                                                          \
+               (el)->next = n;                                            \
+               (el)->prev = p;                                            \
+               __ha_barrier_store();                                      \
+               n->prev = (el);                                            \
+               __ha_barrier_store();                                      \
+               p->next = (el);                                            \
+               __ha_barrier_store();                                      \
+               _ret = 1;                                                  \
+               break;                                                     \
+       }                                                                  \
+       (_ret);                                                            \
+     })
+
 /*
  * Add an item at the end of a list.
  * It is assumed the element can't already be in a list, so it isn't checked
  */
-#define MT_LIST_ADDQ_NOCHECK(_lh, _el)                                     \
+#define MT_LIST_ADDQ(_lh, _el)                                     \
     ({                                                                     \
        int _ret = 0;                                                      \
        struct mt_list *lh = (_lh), *el = (_el);                           \
 /*
  * Detach a list from its head. A pointer to the first element is returned
  * and the list is closed. If the list was empty, NULL is returned. This may
- * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
+ * exclusively be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ. This
  * is incompatible with MT_LIST_DEL run concurrently.
  * If there's at least one element, the next of the last element will always
  * be NULL.
index 3ad60d0cde12fcebe321fe5ac64da041cd60ee3c..9a400fdb76dc6e7fc372c9f1951569fac7cca3a8 100644 (file)
@@ -273,11 +273,11 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
                conn->idle_time = now_ms;
                if (is_safe) {
                        conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
-                       MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
                        _HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
                } else {
                        conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
-                       MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
                        _HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
                }
                _HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
index db919913b5be70665ecccd61053fbd5a8fefd715..080240a752de93aa8cdd449d1d9a167e36ded1f5 100644 (file)
@@ -352,7 +352,7 @@ static inline void tasklet_wakeup_on(struct tasklet *tl, int thr)
                }
        } else {
                /* this tasklet runs on a specific thread */
-               if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
+               if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
                        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
                        if (sleeping_thread_mask & (1UL << thr)) {
                                _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@@ -652,7 +652,7 @@ static inline int notification_registered(struct list *wake)
 /* adds list item <item> to work list <work> and wake up the associated task */
 static inline void work_list_add(struct work_list *work, struct mt_list *item)
 {
-       MT_LIST_ADDQ(&work->head, item);
+       MT_LIST_TRY_ADDQ(&work->head, item);
        task_wakeup(work->task, TASK_WOKEN_OTHER);
 }
 
index 324d7565a4f2836f7bc0012c561e04855b59102b..4a162a43080218028a67ea71ec0c563583581e3c 100644 (file)
@@ -1345,7 +1345,7 @@ int connect_server(struct stream *s)
                                if (tokill_conn) {
                                        /* We got one, put it into the concerned thread's to kill list, and wake it's kill task */
 
-                                       MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
+                                       MT_LIST_TRY_ADDQ(&idle_conns[i].toremove_conns,
                                            (struct mt_list *)&tokill_conn->list);
                                        task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
                                        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
index 62e535ef118834a545e0795841b8b1b29b26abcf..b6359c63d2ed574e2364ca8e291c90f70eb79fa7 100644 (file)
@@ -2807,7 +2807,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
        if (b_alloc_margin(buf, global.tune.reserved_bufs))
                return 1;
 
-       MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+       MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list);
        return 0;
 }
 
index cc49c705eaa09bd3e5565713505b6dfdac822e97..cad0e0cc8474ac215f99cf3cf5aabb3e8dc3a575 100644 (file)
@@ -424,7 +424,7 @@ static void limit_listener(struct listener *l, struct mt_list *list)
 {
        HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
        if (l->state == LI_READY) {
-               MT_LIST_ADDQ(list, &l->wait_queue);
+               MT_LIST_TRY_ADDQ(list, &l->wait_queue);
                fd_stop_recv(l->fd);
                l->state = LI_LIMITED;
        }
index a0509e66a6a4c1ccfb409b9a7fff28e942b9d014..05723e8ce3d88cd1bac4805f455b62468975d7c9 100644 (file)
@@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
            unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
                fconn->buf_wait.target = fconn;
                fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-               MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+               MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list);
        }
        return buf;
 }
@@ -2956,9 +2956,9 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
                struct server *srv = objt_server(conn->target);
 
                if (conn_in_list == CO_FL_SAFE_LIST)
-                       MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
                else
-                       MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
        }
        return NULL;
 }
index 80a7b7db51f78021e42c4554da41032283fe4eb7..0626a7dc2d57a7d80e1c275707e6e876ad9c92d6 100644 (file)
@@ -416,7 +416,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
            unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
                h1c->buf_wait.target = h1c;
                h1c->buf_wait.wakeup_cb = h1_buf_available;
-               MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+               MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list);
        }
        return buf;
 }
@@ -2265,9 +2265,9 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
                struct server *srv = objt_server(conn->target);
 
                if (conn_in_list == CO_FL_SAFE_LIST)
-                       MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
                else
-                       MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
        }
        return NULL;
 }
index 6ec8d27cabe9a0b7d926f28e63d2af91c2401c37..ce26c927e87d8a37a54d7b1a4b99b7ea52db03ae 100644 (file)
@@ -683,7 +683,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
            unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
                h2c->buf_wait.target = h2c;
                h2c->buf_wait.wakeup_cb = h2_buf_available;
-               MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+               MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list);
        }
        return buf;
 }
@@ -3565,9 +3565,9 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
                struct server *srv = objt_server(conn->target);
 
                if (conn_in_list == CO_FL_SAFE_LIST)
-                       MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
                else
-                       MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
        }
 
 leave:
index b0c061e88fa057766150424e11c7c2972d7fb8d3..05b19d4e107961f7aad19052428502fa92997816 100644 (file)
@@ -5207,7 +5207,7 @@ static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list
                if (toremove_nb != -1 && i >= toremove_nb)
                        break;
                MT_LIST_DEL_SAFE_NOINIT(elt1);
-               MT_LIST_ADDQ_NOCHECK(toremove_list, &conn->list);
+               MT_LIST_ADDQ(toremove_list, &conn->list);
                i++;
        }
        return i;
index 69f6835c4973eeb97cc9a3b181bd77d0f6604459..54da3a0acd4220e610fe8f9d29380ca9daddc561 100644 (file)
@@ -5676,9 +5676,9 @@ leave:
                struct server *srv = objt_server(conn->target);
 
                if (conn_in_list == CO_FL_SAFE_LIST)
-                       MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
                else
-                       MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+                       MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
        }
        return NULL;
 }
index 287f40647c1faa256ab96e46d6bdb966fec7b4c4..f3253c6852195d630009dc107c94cba0f948d121 100644 (file)
@@ -728,7 +728,7 @@ static int stream_alloc_work_buffer(struct stream *s)
        if (b_alloc_margin(&s->res.buf, 0))
                return 1;
 
-       MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+       MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list);
        return 0;
 }
 
index 72f09de30ddb4d9501a5cd4436d8cef35d831ac2..102462eb8fefdf0508ad918b837da26b68da972e 100644 (file)
@@ -97,7 +97,7 @@ void task_kill(struct task *t)
 
                        /* Beware: tasks that have never run don't have their ->list empty yet! */
                        LIST_INIT(&((struct tasklet *)t)->list);
-                       MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
+                       MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list,
                                     (struct mt_list *)&((struct tasklet *)t)->list);
                        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
                        _HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
@@ -582,7 +582,7 @@ void process_runnable_tasks()
         * 100% due to rounding, this is not a problem. Note that while in
         * theory the sum cannot be NULL as we cannot get there without tasklets
         * to process, in practice it seldom happens when multiple writers
-        * conflict and rollback on MT_LIST_ADDQ(shared_tasklet_list), causing
+        * conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing
         * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
         * one above to finally fail. This is extremely rare and not a problem.
         */
index 5e54f6034f6acc004c6fd28c1dd8ca28113495d8..e2e07f9777b7e3f1ab0bb2956493b60ae13ac87a 100644 (file)
@@ -33,12 +33,12 @@ void *thread(void *pouet)
                case 0:
                        lol = malloc(sizeof(*lol));
                        MT_LIST_INIT(&lol->list_elt);
-                       MT_LIST_ADD(&pouet_list, &lol->list_elt);
+                       MT_LIST_TRY_ADD(&pouet_list, &lol->list_elt);
                        break;
                case 1:
                        lol = malloc(sizeof(*lol));
                        MT_LIST_INIT(&lol->list_elt);
-                       MT_LIST_ADDQ(&pouet_list, &lol->list_elt);
+                       MT_LIST_TRY_ADDQ(&pouet_list, &lol->list_elt);
                        break;
 
                case 2: