From: Willy Tarreau
Date: Thu, 7 Jul 2022 07:12:45 +0000 (+0200)
Subject: MEDIUM: conn: make conn_backend_get always scan the same group
X-Git-Tag: v2.7-dev2~28
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=15c5500b6e7cb503f1feb23c38f48103444d06d4;p=thirdparty%2Fhaproxy.git

MEDIUM: conn: make conn_backend_get always scan the same group

We don't want to pick idle connections from another thread group,
this would be very slow by forcing to share undesirable data. This
patch makes sure that we start seeking from the current thread
group's threads only and loops over that range exclusively.

It's worth noting that the next_takeover pointer remains per-server
and will bounce when multiple groups use it at the same time. But we
preserve the perturbation by applying a modulo when retrieving it,
so that when groups are of the same size (most common case), the
index will not even change. At this time it doesn't seem worth
storing one index per group in servers, but that might be an option
if any contention is detected later.
---

diff --git a/src/backend.c b/src/backend.c
index 2909b87efe..ce170af9f7 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1204,12 +1204,13 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
 		goto done;
 
 	/* Lookup all other threads for an idle connection, starting from last
-	 * unvisited thread.
+	 * unvisited thread, but always staying in the same group.
 	 */
 	stop = srv->next_takeover;
-	if (stop >= global.nbthread)
-		stop = 0;
+	if (stop >= tg->count)
+		stop %= tg->count;
+	stop += tg->base;
 
 	i = stop;
 	do {
 		if (!srv->curr_idle_thr[i] || i == tid)
@@ -1244,13 +1245,13 @@
 			}
 		}
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
-	} while (!found && (i = (i + 1 == global.nbthread) ? 0 : i + 1) != stop);
+	} while (!found && (i = (i + 1 == tg->base + tg->count) ? tg->base : i + 1) != stop);
 
 	if (!found)
 		conn = NULL;
 done:
 	if (conn) {
-		_HA_ATOMIC_STORE(&srv->next_takeover, (i + 1 == global.nbthread) ? 0 : i + 1);
+		_HA_ATOMIC_STORE(&srv->next_takeover, (i + 1 == tg->base + tg->count) ? tg->base : i + 1);
 		srv_use_conn(srv, conn);