REORG: thread/sched: move the last dynamic thread_info to thread_ctx
author    Willy Tarreau <w@1wt.eu>
          Thu, 30 Sep 2021 17:02:18 +0000 (19:02 +0200)
committer Willy Tarreau <w@1wt.eu>
          Fri, 8 Oct 2021 15:22:26 +0000 (17:22 +0200)
The last 3 fields were per-thread list heads:
  - the pool's LRU head
  - the buffer_wq
  - the streams list head

Moving them into thread_ctx completes the removal of dynamic elements
from struct thread_info: all of a thread's dynamic elements are now
packed together in a single place, as sketched below.
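
As a rough sketch of the resulting split (simplified for illustration:
the real definitions live in include/haproxy/tinfo-t.h, and the
MAX_THREADS value, the tid field and the init_thread_lists()/thread_bind()
helpers are made up here), the pattern looks like:

    /* Minimal sketch of the thread_info / thread_ctx split. Not the real
     * HAProxy definitions -- see include/haproxy/tinfo-t.h for those.
     */
    #define MAX_THREADS 64

    struct list { struct list *n, *p; };    /* circular doubly-linked list */

    /* read-mostly thread identity: no runtime-written field left in it */
    struct thread_info {
            unsigned int tid;               /* set once at boot */
    } __attribute__((aligned(64)));

    /* dynamic per-thread state, written only by its owner thread */
    struct thread_ctx {
            struct list pool_lru_head;      /* oldest cached pool objects */
            struct list buffer_wq;          /* buffer waiters */
            struct list streams;            /* streams attached to this thread */
    } __attribute__((aligned(64)));         /* own cache lines per thread */

    struct thread_info ha_thread_info[MAX_THREADS];
    struct thread_ctx  ha_thread_ctx[MAX_THREADS];

    static __thread struct thread_info *ti;     /* this thread's info */
    static __thread struct thread_ctx  *th_ctx; /* this thread's context */

    static inline void list_init(struct list *l)
    {
            l->n = l->p = l;
    }

    /* boot-time init: what init_buffer()/init_pools()/init_stream() now do
     * on ha_thread_ctx[] rather than on ha_thread_info[]
     */
    static void init_thread_lists(void)
    {
            int thr;

            for (thr = 0; thr < MAX_THREADS; thr++) {
                    list_init(&ha_thread_ctx[thr].pool_lru_head);
                    list_init(&ha_thread_ctx[thr].buffer_wq);
                    list_init(&ha_thread_ctx[thr].streams);
            }
    }

    /* each thread binds its pointers at startup, then uses th_ctx->... */
    static void thread_bind(int thr)
    {
            ti     = &ha_thread_info[thr];
            th_ctx = &ha_thread_ctx[thr];
    }

Since thread_ctx is written only by its owner thread, packing every
mutated field there keeps the thread_info cache lines effectively
read-only and cheap to share across CPUs; the hunks below are then the
mechanical ti -> th_ctx renames.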

14 files changed:
include/haproxy/channel.h
include/haproxy/dynbuf.h
include/haproxy/tinfo-t.h
src/check.c
src/dynbuf.c
src/flt_spoe.c
src/h3.c
src/mux_fcgi.c
src/mux_h1.c
src/mux_h2.c
src/mux_quic.c
src/pool.c
src/proxy.c
src/stream.c

index 0f6e9e0fd696811cd8d6cb8666887a1e378dd433..abc1de15d205a231c05b561f0e0cacc2bd020aad 100644
--- a/include/haproxy/channel.h
+++ b/include/haproxy/channel.h
@@ -846,7 +846,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
                return 1;
 
        if (!LIST_INLIST(&wait->list))
-               LIST_APPEND(&ti->buffer_wq, &wait->list);
+               LIST_APPEND(&th_ctx->buffer_wq, &wait->list);
 
        return 0;
 }
index c38b9c7e127434fd89ef256491c0612590387c2d..5e8ece2f7697bc080dada3d1dac6d95ad71ee667 100644
--- a/include/haproxy/dynbuf.h
+++ b/include/haproxy/dynbuf.h
@@ -112,7 +112,7 @@ void __offer_buffers(void *from, unsigned int count);
 
 static inline void offer_buffers(void *from, unsigned int count)
 {
-       if (!LIST_ISEMPTY(&ti->buffer_wq))
+       if (!LIST_ISEMPTY(&th_ctx->buffer_wq))
                __offer_buffers(from, count);
 }
 
index dc6268e21f3ff545c488fb8618608916e4177579..737431f7a39166905eea4eca6339752d42cb383c 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -42,12 +42,6 @@ enum {
  * disabled, it contains the same info for the single running thread.
  */
 struct thread_info {
-#ifdef CONFIG_HAP_POOLS
-       struct list pool_lru_head;                         /* oldest objects   */
-#endif
-       struct list buffer_wq;     /* buffer waiters */
-       struct list streams;       /* list of streams attached to this thread */
-
        /* pad to cache line (64B) */
        char __pad[0];            /* unused except to check remaining room */
        char __end[0] __attribute__((aligned(64)));
@@ -70,6 +64,12 @@ struct thread_ctx {
        uint8_t tl_class_mask;              /* bit mask of non-empty tasklets classes */
 
        // 7 bytes hole here
+#ifdef CONFIG_HAP_POOLS
+       struct list pool_lru_head;          /* oldest objects   */
+#endif
+       struct list buffer_wq;              /* buffer waiters */
+       struct list streams;                /* list of streams attached to this thread */
+
        ALWAYS_ALIGN(2*sizeof(void*));
        struct list tasklets[TL_CLASSES];   /* tasklets (and/or tasks) to run, by class */
 
index 9ac66a54592cedb1892b6034147472daaa35be2f..ad6df9c6039985e66ad553c9cf2db919ffa5ef26 100644
--- a/src/check.c
+++ b/src/check.c
@@ -1296,7 +1296,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
            unlikely((buf = b_alloc(bptr)) == NULL)) {
                check->buf_wait.target = check;
                check->buf_wait.wakeup_cb = check_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &check->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
        }
        return buf;
 }
index a200237923f74f7f326a46e49c7b9dda6d473823..0b12c7505411176f12bafc3202a16b4ef088cba4 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -34,7 +34,7 @@ int init_buffer()
                return 0;
 
        for (thr = 0; thr < MAX_THREADS; thr++)
-               LIST_INIT(&ha_thread_info[thr].buffer_wq);
+               LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
 
 
        /* The reserved buffer is what we leave behind us. Thus we always need
@@ -109,7 +109,7 @@ void __offer_buffers(void *from, unsigned int count)
         * other tasks, but that's a rough estimate. Similarly, for each cached
         * event we'll need 1 buffer.
         */
-       list_for_each_entry_safe(wait, wait_back, &ti->buffer_wq, list) {
+       list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
                if (!count)
                        break;
 
index 3262fd0cf5bceb7a16b89126e0058c4ce729e4bf..3f17bcb615cb2ad99881130cf44c1d1ec48b7eba 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2867,7 +2867,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
        if (b_alloc(buf))
                return 1;
 
-       LIST_APPEND(&ti->buffer_wq, &buffer_wait->list);
+       LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
        return 0;
 }
 
index 45406803475b35ed20c59631f887fbb53637211b..4acd95afe64c3e875ac229a2397ab1c0cd340f85 100644
--- a/src/h3.c
+++ b/src/h3.c
@@ -99,7 +99,7 @@ static struct buffer *h3_uqs_get_buf(struct h3_uqs *h3_uqs)
            unlikely((buf = b_alloc(&h3_uqs->qcs->tx.buf)) == NULL)) {
                h3->buf_wait.target = h3_uqs;
                h3->buf_wait.wakeup_cb = qcs_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &h3->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &h3->buf_wait.list);
        }
 
        return buf;
index 3c030210223601f4e3278d8f4d0f4d10af41a98c..5fa75020433428c1c6c1a642e7ccc8794da99170 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -612,7 +612,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
            unlikely((buf = b_alloc(bptr)) == NULL)) {
                fconn->buf_wait.target = fconn;
                fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &fconn->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
        }
        return buf;
 }
index 3e2e094fcb780ada218990af282b1264c12ef03a..814b3fbfcb0e18c2c61aca9eed5e5db2b5e32237 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -449,7 +449,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
            unlikely((buf = b_alloc(bptr)) == NULL)) {
                h1c->buf_wait.target = h1c;
                h1c->buf_wait.wakeup_cb = h1_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &h1c->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
        }
        return buf;
 }
index 150a559067fc8eaba0fc79270ea7ab4a9fc10788..f32766d0bf903dd1fa19520935c70680d575d7e7 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -817,7 +817,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
            unlikely((buf = b_alloc(bptr)) == NULL)) {
                h2c->buf_wait.target = h2c;
                h2c->buf_wait.wakeup_cb = h2_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &h2c->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
        }
        return buf;
 }
index c89490f67e53351a7e8d3978eaca06ca6f94e5ec..cb40e0b753eeb1a8d470bcfab1de2b0e38b3b9f4 100644
--- a/src/mux_quic.c
+++ b/src/mux_quic.c
@@ -442,7 +442,7 @@ struct buffer *qc_get_buf(struct qcc *qcc, struct buffer *bptr)
            unlikely((buf = b_alloc(bptr)) == NULL)) {
                qcc->buf_wait.target = qcc;
                qcc->buf_wait.wakeup_cb = qc_buf_available;
-               LIST_APPEND(&ti->buffer_wq, &qcc->buf_wait.list);
+               LIST_APPEND(&th_ctx->buffer_wq, &qcc->buf_wait.list);
        }
 
        return buf;
index cb0179bd6c33df52e0de29712e0e1b5b5e643de6..eb1484ba102f051dcbdb872eae2fca2378d6c2a4 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -289,7 +289,7 @@ void pool_evict_from_local_caches()
        struct pool_head *pool;
 
        do {
-               item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
+               item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
                /* note: by definition we remove oldest objects so they also are the
                 * oldest in their own pools, thus their next is the pool's head.
                 */
@@ -315,7 +315,7 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr)
        struct pool_cache_head *ph = &pool->cache[tid];
 
        LIST_INSERT(&ph->list, &item->by_pool);
-       LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
+       LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
        ph->count++;
        pool_cache_count++;
        pool_cache_bytes += pool->size;
@@ -640,7 +640,7 @@ static void init_pools()
        int thr;
 
        for (thr = 0; thr < MAX_THREADS; thr++) {
-               LIST_INIT(&ha_thread_info[thr].pool_lru_head);
+               LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
        }
 #endif
        detect_allocator();
index db876e60d5d72f7527b64378478faba157ac53e4..6868eff44d45e3d62345d6d43981b87d33e51021 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -2017,7 +2017,7 @@ struct task *hard_stop(struct task *t, void *context, unsigned int state)
        thread_isolate();
 
        for (thr = 0; thr < global.nbthread; thr++) {
-               list_for_each_entry(s, &ha_thread_info[thr].streams, list) {
+               list_for_each_entry(s, &ha_thread_ctx[thr].streams, list) {
                        stream_shutdown(s, SF_ERR_KILLED);
                }
        }
index e4d5ac9649bb4b6e2f9fc082561dab8482aca28f..18d4f122e77af6e2f5855aad5cc701dade0de291 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -548,7 +548,7 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin, struct bu
 
        s->tunnel_timeout = TICK_ETERNITY;
 
-       LIST_APPEND(&ti->streams, &s->list);
+       LIST_APPEND(&th_ctx->streams, &s->list);
 
        if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
                goto out_fail_accept;
@@ -720,7 +720,7 @@ static void stream_free(struct stream *s)
                 * only touch their node under thread isolation.
                 */
                LIST_DEL_INIT(&bref->users);
-               if (s->list.n != &ti->streams)
+               if (s->list.n != &th_ctx->streams)
                        LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
                bref->ref = s->list.n;
                __ha_barrier_store();
@@ -778,7 +778,7 @@ static int stream_alloc_work_buffer(struct stream *s)
        if (b_alloc(&s->res.buf))
                return 1;
 
-       LIST_APPEND(&ti->buffer_wq, &s->buffer_wait.list);
+       LIST_APPEND(&th_ctx->buffer_wq, &s->buffer_wait.list);
        return 0;
 }
 
@@ -2818,7 +2818,7 @@ static void init_stream()
        int thr;
 
        for (thr = 0; thr < MAX_THREADS; thr++)
-               LIST_INIT(&ha_thread_info[thr].streams);
+               LIST_INIT(&ha_thread_ctx[thr].streams);
 }
 INITCALL0(STG_INIT, init_stream);
 
@@ -3495,7 +3495,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
                 * pointer points back to the head of the streams list.
                 */
                LIST_INIT(&appctx->ctx.sess.bref.users);
-               appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+               appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
                appctx->st2 = STAT_ST_LIST;
                /* fall through */
 
@@ -3512,7 +3512,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
                        struct stream *curr_strm;
                        int done = 0;
 
-                       if (appctx->ctx.sess.bref.ref == &ha_thread_info[appctx->ctx.sess.thr].streams)
+                       if (appctx->ctx.sess.bref.ref == &ha_thread_ctx[appctx->ctx.sess.thr].streams)
                                done = 1;
                        else {
                                /* check if we've found a stream created after issuing the "show sess" */
@@ -3525,7 +3525,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
                                appctx->ctx.sess.thr++;
                                if (appctx->ctx.sess.thr >= global.nbthread)
                                        break;
-                               appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+                               appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
                                continue;
                        }
 
@@ -3732,7 +3732,7 @@ static int cli_parse_shutdown_session(char **args, char *payload, struct appctx
 
        /* first, look for the requested stream in the stream table */
        for (thr = 0; !strm && thr < global.nbthread; thr++) {
-               list_for_each_entry(strm, &ha_thread_info[thr].streams, list) {
+               list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
                        if (strm == ptr) {
                                stream_shutdown(strm, SF_ERR_KILLED);
                                break;