From 5fe4677231cd8adc85d5715bbed306769dd16b70 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Thu, 6 Nov 2025 13:12:04 +0100
Subject: [PATCH] MINOR: server: move the lock inside srv_add_idle()

Almost all callers of _srv_add_idle() lock the list then call the
function. This is not the most efficient approach, and it requires some
care from the caller to handle that lock. Let's change this a little
bit by having srv_add_idle() take the lock and call _srv_add_idle(),
which is now inlined. This way callers don't have to handle the lock
themselves anymore, and the lock is only taken around the sensitive
parts, not the function call+return.

Interestingly, perf tests show a small perf increase from 2.28-2.32M
RPS to 2.32-2.37M RPS on a 128-thread system.
---
 include/haproxy/server.h |  2 +-
 src/connection.c         |  4 +---
 src/mux_fcgi.c           |  4 +---
 src/mux_h1.c             |  4 +---
 src/mux_h2.c             |  4 +---
 src/mux_quic.c           |  4 +---
 src/mux_spop.c           |  4 +---
 src/server.c             | 18 +++++++++++++++++-
 src/ssl_sock.c           |  4 +---
 9 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/include/haproxy/server.h b/include/haproxy/server.h
index 39f71e05e..e8e00a5e7 100644
--- a/include/haproxy/server.h
+++ b/include/haproxy/server.h
@@ -99,7 +99,7 @@ void srv_release_conn(struct server *srv, struct connection *conn);
 struct connection *srv_lookup_conn(struct ceb_root **tree, uint64_t hash);
 struct connection *srv_lookup_conn_next(struct ceb_root **tree, struct connection *conn);
 
-void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe);
+void srv_add_idle(struct server *srv, struct connection *conn, int is_safe);
 int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe);
 void srv_add_to_avail_list(struct server *srv, struct connection *conn);
 struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state);
diff --git a/src/connection.c b/src/connection.c
index b4d54cca6..201cd3497 100644
--- a/src/connection.c
+++ b/src/connection.c
@@ -257,9 +257,7 @@ int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 }
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 0b97b4843..ff9521bc5 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -3107,9 +3107,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {
diff --git a/src/mux_h1.c b/src/mux_h1.c
index fd77700c0..6163d42c2 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -4374,9 +4374,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {
diff --git a/src/mux_h2.c b/src/mux_h2.c
index df59f749e..54d3c212b 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -5027,9 +5027,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {
diff --git a/src/mux_quic.c b/src/mux_quic.c
index bdabfb082..135c7d8ec 100644
--- a/src/mux_quic.c
+++ b/src/mux_quic.c
@@ -3448,9 +3448,7 @@ struct task *qcc_io_cb(struct task *t, void *ctx, unsigned int state)
 			}
 		}
 		else {
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 
 		/* Do not access conn without protection as soon as it is reinserted in idle list. */
diff --git a/src/mux_spop.c b/src/mux_spop.c
index b3a43f03a..00878b669 100644
--- a/src/mux_spop.c
+++ b/src/mux_spop.c
@@ -2603,9 +2603,7 @@ static struct task *spop_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {
diff --git a/src/server.c b/src/server.c
index ed49dd6e0..2487e13ee 100644
--- a/src/server.c
+++ b/src/server.c
@@ -7318,7 +7318,7 @@ struct connection *srv_lookup_conn_next(struct ceb_root **tree, struct connectio
  *
  * Must be called with idle_conns_lock.
  */
-void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
+static inline void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
 {
 	struct ceb_root **tree = is_safe ? &srv->per_thr[tid].safe_conns :
 	                                   &srv->per_thr[tid].idle_conns;
@@ -7330,6 +7330,22 @@ void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
 	LIST_APPEND(&srv->per_thr[tid].idle_conn_list, &conn->idle_list);
 }
 
+/* Add in idle trees. Set <is_safe> if connection is deemed safe
+ * for reuse.
+ *
+ * This function is a simple wrapper for tree insert. It should only be used
+ * for internal usage or when removing briefly the connection to avoid takeover
+ * on it before reinserting it with this function. In other context, prefer to
+ * use the full feature srv_add_to_idle_list(). This function takes the idle
+ * conns lock for the current thread (thus the owner must not already have it).
+ */
+void srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
+{
+	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+	_srv_add_idle(srv, conn, is_safe);
+	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+}
+
 /* This adds an idle connection to the server's list if the connection is
  * reusable, not held by any owner anymore, but still has available streams.
  */
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 4c9f10581..f0cfb1458 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -6878,9 +6878,7 @@ leave:
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
 			TRACE_DEVEL("adding conn back to idle list", SSL_EV_CONN_IO_CB, conn);
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {
-- 
2.47.3
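
For readers outside the HAProxy tree, the locking pattern this patch describes
(move the lock into a public wrapper so callers no longer lock around the
call+return, while the internal helper stays lock-free and inlinable) can be
summarized with a minimal standalone C sketch. All names below (struct pool,
_pool_push, pool_push) are invented for illustration only; this is not HAProxy
code or any of its APIs.

#include <pthread.h>

struct pool {
	pthread_mutex_t lock;   /* protects <count> */
	int count;
};

/* internal helper: the caller must already hold <p>->lock */
static inline void _pool_push(struct pool *p)
{
	p->count++;
}

/* public wrapper: takes the lock only around the sensitive part,
 * so every caller is relieved from handling the lock itself.
 */
void pool_push(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	_pool_push(p);
	pthread_mutex_unlock(&p->lock);
}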