From: Willy Tarreau
Date: Sat, 17 Oct 2020 16:55:18 +0000 (+0200)
Subject: MINOR: lb/map: use seek lock and read locks where appropriate
X-Git-Tag: v2.3-dev8~50
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ae99aeb135ac18b1442ddf9eeccb16cb35d2d04d;p=thirdparty%2Fhaproxy.git

MINOR: lb/map: use seek lock and read locks where appropriate

- map_get_server_hash() doesn't need a write lock since it only reads
  the array, let's only use a read lock here.

- map_get_server_rr() only needs exclusivity to adjust the rr_idx while
  looking for its entry. Since this one is not used by
  map_get_server_hash(), let's turn this lock to a seek lock that
  doesn't block reads.

With 8 threads, no significant performance difference was noticed given
that lookups are usually instant with this LB algo so the lock
contention is rare.
---

diff --git a/src/lb_map.c b/src/lb_map.c
index 1432913316..b735678a8c 100644
--- a/src/lb_map.c
+++ b/src/lb_map.c
@@ -216,7 +216,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
 	int newidx, avoididx;
 	struct server *srv, *avoided;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight == 0) {
 		avoided = NULL;
 		goto out;
@@ -248,7 +248,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
 		px->lbprm.map.rr_idx = avoididx;
 
  out:
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	/* return NULL or srvtoavoid if found */
 	return avoided;
 }
@@ -265,10 +265,10 @@ struct server *map_get_server_hash(struct proxy *px, unsigned int hash)
 {
 	struct server *srv = NULL;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight)
 		srv = px->lbprm.map.srv[hash % px->lbprm.tot_weight];
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	return srv;
 }