From: Olivier Houchard
Date: Mon, 29 Jun 2020 18:15:59 +0000 (+0200)
Subject: CLEANUP: connections: rename the toremove_lock to takeover_lock
X-Git-Tag: v2.2-dev12~24
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f8f4c2ef6039a7607e82df9c6d5e5916ff694173;p=thirdparty%2Fhaproxy.git

CLEANUP: connections: rename the toremove_lock to takeover_lock

This lock was misnamed and a bit confusing. It's only used for
takeover so let's call it takeover_lock.
---

diff --git a/include/haproxy/connection-t.h b/include/haproxy/connection-t.h
index 1ff1c93818..8b48c9815d 100644
--- a/include/haproxy/connection-t.h
+++ b/include/haproxy/connection-t.h
@@ -606,7 +606,7 @@ struct tlv_ssl {
  */
 struct idle_conns {
 	struct mt_list toremove_conns;
-	__decl_thread(HA_SPINLOCK_T toremove_lock);
+	__decl_thread(HA_SPINLOCK_T takeover_lock);
 	struct task *cleanup_task;
 } THREAD_ALIGNED(64);
 
diff --git a/include/haproxy/connection.h b/include/haproxy/connection.h
index a7b942cebc..6e8862b07d 100644
--- a/include/haproxy/connection.h
+++ b/include/haproxy/connection.h
@@ -494,9 +494,9 @@ static inline void conn_free(struct connection *conn)
 	}
 
 	conn_force_unsubscribe(conn);
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	MT_LIST_DEL((struct mt_list *)&conn->list);
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	pool_free(pool_head_connection, conn);
 }
 
diff --git a/src/backend.c b/src/backend.c
index 97c3231d51..0b4cb4c938 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1086,7 +1086,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
 	 * to end up with two threads using the same connection.
 	 */
 	i = tid;
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);
 
 	/* If we failed to pick a connection from the idle list, let's try again with
@@ -1099,7 +1099,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
 			mt_list = srv->safe_conns;
 		}
 	}
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 	/* If we found a connection in our own list, and we don't have to
 	 * steal one from another thread, then we're done.
@@ -1129,7 +1129,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
 		if (!srv->curr_idle_thr[i] || i == tid)
 			continue;
 
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 		mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
 			if (conn->mux->takeover && conn->mux->takeover(conn) == 0) {
 				MT_LIST_DEL_SAFE(elt1);
@@ -1151,7 +1151,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
 				}
 			}
 		}
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 	}
 
 	if (!found)
@@ -1328,7 +1328,7 @@ int connect_server(struct stream *s)
 					// see it possibly larger.
 					ALREADY_CHECKED(i);
 
-					HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+					HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 					tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
 					    struct connection *, list);
 					if (!tokill_conn)
@@ -1340,10 +1340,10 @@ int connect_server(struct stream *s)
 						MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
 						    (struct mt_list *)&tokill_conn->list);
 						task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
-						HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+						HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 						break;
 					}
-					HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+					HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 				}
 			}
 
diff --git a/src/cfgparse.c b/src/cfgparse.c
index e3f3d9fae6..443c40674d 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -3571,7 +3571,7 @@ out_uri_auth_compat:
 				goto err;
 			idle_conns[i].cleanup_task->process = srv_cleanup_toremove_connections;
 			idle_conns[i].cleanup_task->context = NULL;
-			HA_SPIN_INIT(&idle_conns[i].toremove_lock);
+			HA_SPIN_INIT(&idle_conns[i].takeover_lock);
 			MT_LIST_INIT(&idle_conns[i].toremove_conns);
 		}
 	}
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index bd27a63f00..1cf8a33b41 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -2918,13 +2918,13 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
 	int ret = 0;
 
 
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	if (tl->context == NULL) {
 		/* The connection has been taken over by another thread,
 		 * we're no longer responsible for it, so just free the
 		 * tasklet, and do nothing.
 		 */
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 		tasklet_free(tl);
 
 		return NULL;
@@ -2938,7 +2938,7 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
 	if (conn_in_list)
 		MT_LIST_DEL(&conn->list);
 
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 	if (!(fconn->wait_event.events & SUB_RETRY_SEND))
 		ret = fcgi_send(fconn);
@@ -3093,7 +3093,7 @@ static struct task *fcgi_timeout_task(struct task *t, void *context, unsigned sh
 		/* We're about to destroy the connection, so make sure nobody attempts
 		 * to steal it from us.
 		 */
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 		if (fconn->conn->flags & CO_FL_LIST_MASK)
 			MT_LIST_DEL(&fconn->conn->list);
@@ -3104,7 +3104,7 @@ static struct task *fcgi_timeout_task(struct task *t, void *context, unsigned sh
 		if (!t->context)
 			fconn = NULL;
 
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	}
 
 	task_destroy(t);
diff --git a/src/mux_h1.c b/src/mux_h1.c
index ace04cb68f..a294c6542c 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -2219,13 +2219,13 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
 	int ret = 0;
 
 
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	if (tl->context == NULL) {
 		/* The connection has been taken over by another thread,
 		 * we're no longer responsible for it, so just free the
 		 * tasklet, and do nothing.
 		 */
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 		tasklet_free(tl);
 		return NULL;
 	}
@@ -2241,7 +2241,7 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
 	if (conn_in_list)
 		MT_LIST_DEL(&conn->list);
 
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 	if (!(h1c->wait_event.events & SUB_RETRY_SEND))
 		ret = h1_send(h1c);
@@ -2309,7 +2309,7 @@ static struct task *h1_timeout_task(struct task *t, void *context, unsigned shor
 		/* We're about to destroy the connection, so make sure nobody attempts
 		 * to steal it from us.
 		 */
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 		if (h1c->conn->flags & CO_FL_LIST_MASK)
 			MT_LIST_DEL(&h1c->conn->list);
@@ -2320,7 +2320,7 @@ static struct task *h1_timeout_task(struct task *t, void *context, unsigned shor
 		if (!t->context)
 			h1c = NULL;
 
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	}
 
 	task_destroy(t);
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 0ad066bd29..adcc6c270d 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -3524,13 +3524,13 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
 	int ret = 0;
 
 
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	if (t->context == NULL) {
 		/* The connection has been taken over by another thread,
 		 * we're no longer responsible for it, so just free the
 		 * tasklet, and do nothing.
 		 */
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 		tasklet_free(tl);
 		goto leave;
 	}
@@ -3547,7 +3547,7 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
 	if (conn_in_list)
 		MT_LIST_DEL(&conn->list);
 
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
 		ret = h2_send(h2c);
@@ -3643,15 +3643,15 @@ static int h2_process(struct h2c *h2c)
 		}
 
 		/* connections in error must be removed from the idle lists */
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 		MT_LIST_DEL((struct mt_list *)&conn->list);
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	}
 	else if (h2c->st0 == H2_CS_ERROR) {
 		/* connections in error must be removed from the idle lists */
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 		MT_LIST_DEL((struct mt_list *)&conn->list);
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	}
 
 	if (!b_data(&h2c->dbuf))
@@ -3722,7 +3722,7 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
 		/* We're about to destroy the connection, so make sure nobody attempts
 		 * to steal it from us.
 		 */
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 		if (h2c->conn->flags & CO_FL_LIST_MASK)
 			MT_LIST_DEL(&h2c->conn->list);
@@ -3733,7 +3733,7 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
 		if (!t->context)
 			h2c = NULL;
 
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	}
 
 	task_destroy(t);
@@ -3780,9 +3780,9 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
 	}
 
 	/* in any case this connection must not be considered idle anymore */
-	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 	MT_LIST_DEL((struct mt_list *)&h2c->conn->list);
-	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
 
 	/* either we can release everything now or it will be done later once
 	 * the last stream closes.
diff --git a/src/server.c b/src/server.c
index 4c2eb77473..a6824f7574 100644
--- a/src/server.c
+++ b/src/server.c
@@ -5207,7 +5207,7 @@ static void srv_cleanup_connections(struct server *srv)
 	HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
 	for (i = tid;;) {
 		did_remove = 0;
-		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+		HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 		for (j = 0; j < srv->curr_idle_conns; j++) {
 			conn = MT_LIST_POP(&srv->idle_conns[i], struct connection *, list);
 			if (!conn)
@@ -5218,7 +5218,7 @@ static void srv_cleanup_connections(struct server *srv)
 			did_remove = 1;
 			MT_LIST_ADDQ(&idle_conns[i].toremove_conns, (struct mt_list *)&conn->list);
 		}
-		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+		HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 		if (did_remove)
 			task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
 
@@ -5287,7 +5287,7 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
 
 			max_conn = (exceed_conns * srv->curr_idle_thr[i]) / curr_idle + 1;
 
-			HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+			HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 			for (j = 0; j < max_conn; j++) {
 				struct connection *conn = MT_LIST_POP(&srv->idle_conns[i], struct connection *, list);
 				if (!conn)
@@ -5298,7 +5298,7 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
 				did_remove = 1;
 				MT_LIST_ADDQ(&idle_conns[i].toremove_conns, (struct mt_list *)&conn->list);
 			}
-			HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
+			HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
 			if (did_remove && max_conn < srv->curr_idle_thr[i])
 				srv_is_empty = 0;
 			if (did_remove)