From: Amaury Denoyelle
Date: Fri, 15 Mar 2024 14:36:33 +0000 (+0100)
Subject: MINOR: connection: extend takeover with release option
X-Git-Tag: v3.0-dev6~69
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=f3862a9bc722fe1de705c1f0b2cacb959f3617a9;p=thirdparty%2Fhaproxy.git

MINOR: connection: extend takeover with release option

Extend the takeover API, both for MUX and XPRT, with a new boolean
argument <release>. Its purpose is to signal whether the connection will
be freed immediately after the takeover, rendering new resource
allocation unnecessary.

For the moment, the <release> argument is always false. However, it will
be set to true by the "delete server" CLI handler to proactively close
server idle connections.
---

diff --git a/include/haproxy/connection-t.h b/include/haproxy/connection-t.h
index 3c65013bfb..00639dd6bc 100644
--- a/include/haproxy/connection-t.h
+++ b/include/haproxy/connection-t.h
@@ -393,7 +393,7 @@ struct xprt_ops {
 	int (*prepare_srv)(struct server *srv); /* prepare a server context */
 	void (*destroy_srv)(struct server *srv); /* destroy a server context */
 	int (*get_alpn)(const struct connection *conn, void *xprt_ctx, const char **str, int *len); /* get application layer name */
-	int (*takeover)(struct connection *conn, void *xprt_ctx, int orig_tid); /* Let the xprt know the fd have been taken over */
+	int (*takeover)(struct connection *conn, void *xprt_ctx, int orig_tid, int release); /* Let the xprt know the fd have been taken over */
 	void (*set_idle)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection becomes idle. implies set_used. */
 	void (*set_used)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection leaves idle. implies set_idle. */
 	char name[8]; /* transport layer name, zero-terminated */
@@ -438,7 +438,12 @@ struct mux_ops {
 	int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
 	void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
 	int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
-	int (*takeover)(struct connection *conn, int orig_tid); /* Attempts to migrate the connection to the current thread */
+
+	/* Attempts to migrate <conn> from <orig_tid> to the current thread. If
+	 * <release> is true, it will be destroyed immediately after by caller.
+	 */
+	int (*takeover)(struct connection *conn, int orig_tid, int release);
+
 	unsigned int flags; /* some flags characterizing the mux's capabilities (MX_FL_*) */
 	char name[8]; /* mux layer name, zero-terminated */
 };
diff --git a/src/backend.c b/src/backend.c
index 7dd8ceb767..18df7ea0f4 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1232,7 +1232,7 @@ struct connection *conn_backend_get(struct stream *s, struct server *srv, int is
 			continue;
 		conn = srv_lookup_conn(is_safe ? &srv->per_thr[i].safe_conns : &srv->per_thr[i].idle_conns, hash);
 		while (conn) {
-			if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
+			if (conn->mux->takeover && conn->mux->takeover(conn, i, 0) == 0) {
 				conn_delete_from_tree(conn);
 				_HA_ATOMIC_INC(&activity[tid].fd_takeover);
 				found = 1;
@@ -1245,7 +1245,7 @@ struct connection *conn_backend_get(struct stream *s, struct server *srv, int is
 		if (!found && !is_safe && srv->curr_safe_nb > 0) {
 			conn = srv_lookup_conn(&srv->per_thr[i].safe_conns, hash);
 			while (conn) {
-				if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
+				if (conn->mux->takeover && conn->mux->takeover(conn, i, 0) == 0) {
 					conn_delete_from_tree(conn);
 					_HA_ATOMIC_INC(&activity[tid].fd_takeover);
 					found = 1;
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index d192cf9d1f..ebd3032cdd 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -4154,25 +4154,27 @@ static int fcgi_show_fd(struct buffer *msg, struct connection *conn)
  * Return 0 if successful, non-zero otherwise.
  * Expected to be called with the old thread lock held.
  */
-static int fcgi_takeover(struct connection *conn, int orig_tid)
+static int fcgi_takeover(struct connection *conn, int orig_tid, int release)
 {
 	struct fcgi_conn *fcgi = conn->ctx;
 	struct task *task;
-	struct task *new_task;
-	struct tasklet *new_tasklet;
+	struct task *new_task = NULL;
+	struct tasklet *new_tasklet = NULL;
 
 	/* Pre-allocate tasks so that we don't have to roll back after the xprt
 	 * has been migrated.
 	 */
-	new_task = task_new_here();
-	new_tasklet = tasklet_new();
-	if (!new_task || !new_tasklet)
-		goto fail;
+	if (!release) {
+		new_task = task_new_here();
+		new_tasklet = tasklet_new();
+		if (!new_task || !new_tasklet)
+			goto fail;
+	}
 
 	if (fd_takeover(conn->handle.fd, conn) != 0)
 		goto fail;
 
-	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
 		/* We failed to takeover the xprt, even if the connection may
 		 * still be valid, flag it as error'd, as we have already
 		 * taken over the fd, and wake the tasklet, so that it will
@@ -4199,8 +4201,10 @@ static int fcgi_takeover(struct connection *conn, int orig_tid)
 
 		fcgi->task = new_task;
 		new_task = NULL;
-		fcgi->task->process = fcgi_timeout_task;
-		fcgi->task->context = fcgi;
+		if (!release) {
+			fcgi->task->process = fcgi_timeout_task;
+			fcgi->task->context = fcgi;
+		}
 	}
 
 	/* To let the tasklet know it should free itself, and do nothing else,
@@ -4210,10 +4214,12 @@ static int fcgi_takeover(struct connection *conn, int orig_tid)
 	tasklet_wakeup_on(fcgi->wait_event.tasklet, orig_tid);
 
 	fcgi->wait_event.tasklet = new_tasklet;
-	fcgi->wait_event.tasklet->process = fcgi_io_cb;
-	fcgi->wait_event.tasklet->context = fcgi;
-	fcgi->conn->xprt->subscribe(fcgi->conn, fcgi->conn->xprt_ctx,
-	                            SUB_RETRY_RECV, &fcgi->wait_event);
+	if (!release) {
+		fcgi->wait_event.tasklet->process = fcgi_io_cb;
+		fcgi->wait_event.tasklet->context = fcgi;
+		fcgi->conn->xprt->subscribe(fcgi->conn, fcgi->conn->xprt_ctx,
+		                            SUB_RETRY_RECV, &fcgi->wait_event);
+	}
 
 	if (new_task)
 		__task_free(new_task);
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 343c23e086..8e9ec78fd1 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -5217,25 +5217,27 @@ static int add_hdr_case_adjust(const char *from, const char *to, char **err)
  * Return 0 if successful, non-zero otherwise.
  * Expected to be called with the old thread lock held.
  */
-static int h1_takeover(struct connection *conn, int orig_tid)
+static int h1_takeover(struct connection *conn, int orig_tid, int release)
 {
 	struct h1c *h1c = conn->ctx;
 	struct task *task;
-	struct task *new_task;
-	struct tasklet *new_tasklet;
+	struct task *new_task = NULL;
+	struct tasklet *new_tasklet = NULL;
 
 	/* Pre-allocate tasks so that we don't have to roll back after the xprt
 	 * has been migrated.
 	 */
-	new_task = task_new_here();
-	new_tasklet = tasklet_new();
-	if (!new_task || !new_tasklet)
-		goto fail;
+	if (!release) {
+		new_task = task_new_here();
+		new_tasklet = tasklet_new();
+		if (!new_task || !new_tasklet)
+			goto fail;
+	}
 
 	if (fd_takeover(conn->handle.fd, conn) != 0)
 		goto fail;
 
-	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
 		/* We failed to takeover the xprt, even if the connection may
 		 * still be valid, flag it as error'd, as we have already
 		 * taken over the fd, and wake the tasklet, so that it will
@@ -5262,8 +5264,10 @@ static int h1_takeover(struct connection *conn, int orig_tid)
 
 		h1c->task = new_task;
 		new_task = NULL;
-		h1c->task->process = h1_timeout_task;
-		h1c->task->context = h1c;
+		if (!release) {
+			h1c->task->process = h1_timeout_task;
+			h1c->task->context = h1c;
+		}
 	}
 
 	/* To let the tasklet know it should free itself, and do nothing else,
@@ -5273,10 +5277,12 @@ static int h1_takeover(struct connection *conn, int orig_tid)
 	tasklet_wakeup_on(h1c->wait_event.tasklet, orig_tid);
 
 	h1c->wait_event.tasklet = new_tasklet;
-	h1c->wait_event.tasklet->process = h1_io_cb;
-	h1c->wait_event.tasklet->context = h1c;
-	h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx,
-	                           SUB_RETRY_RECV, &h1c->wait_event);
+	if (!release) {
+		h1c->wait_event.tasklet->process = h1_io_cb;
+		h1c->wait_event.tasklet->context = h1c;
+		h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx,
+		                           SUB_RETRY_RECV, &h1c->wait_event);
+	}
 
 	if (new_task)
 		__task_free(new_task);
diff --git a/src/mux_h2.c b/src/mux_h2.c
index d72fe63442..e829c49478 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -7500,25 +7500,27 @@ static int h2_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
  * Return 0 if successful, non-zero otherwise.
  * Expected to be called with the old thread lock held.
  */
-static int h2_takeover(struct connection *conn, int orig_tid)
+static int h2_takeover(struct connection *conn, int orig_tid, int release)
 {
 	struct h2c *h2c = conn->ctx;
 	struct task *task;
-	struct task *new_task;
-	struct tasklet *new_tasklet;
+	struct task *new_task = NULL;
+	struct tasklet *new_tasklet = NULL;
 
 	/* Pre-allocate tasks so that we don't have to roll back after the xprt
 	 * has been migrated.
 	 */
-	new_task = task_new_here();
-	new_tasklet = tasklet_new();
-	if (!new_task || !new_tasklet)
-		goto fail;
+	if (!release) {
+		new_task = task_new_here();
+		new_tasklet = tasklet_new();
+		if (!new_task || !new_tasklet)
+			goto fail;
+	}
 
 	if (fd_takeover(conn->handle.fd, conn) != 0)
 		goto fail;
 
-	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
 		/* We failed to takeover the xprt, even if the connection may
 		 * still be valid, flag it as error'd, as we have already
 		 * taken over the fd, and wake the tasklet, so that it will
@@ -7545,8 +7547,10 @@ static int h2_takeover(struct connection *conn, int orig_tid)
 
 		h2c->task = new_task;
 		new_task = NULL;
-		h2c->task->process = h2_timeout_task;
-		h2c->task->context = h2c;
+		if (!release) {
+			h2c->task->process = h2_timeout_task;
+			h2c->task->context = h2c;
+		}
 	}
 
 	/* To let the tasklet know it should free itself, and do nothing else,
@@ -7556,10 +7560,12 @@ static int h2_takeover(struct connection *conn, int orig_tid)
 	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
 
 	h2c->wait_event.tasklet = new_tasklet;
-	h2c->wait_event.tasklet->process = h2_io_cb;
-	h2c->wait_event.tasklet->context = h2c;
-	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
-	                           SUB_RETRY_RECV, &h2c->wait_event);
+	if (!release) {
+		h2c->wait_event.tasklet->process = h2_io_cb;
+		h2c->wait_event.tasklet->context = h2c;
+		h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
+		                           SUB_RETRY_RECV, &h2c->wait_event);
+	}
 
 	if (new_task)
 		__task_free(new_task);
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index f1ee083650..f5382492ee 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -6151,19 +6151,26 @@ static int ssl_unsubscribe(struct connection *conn, void *xprt_ctx, int event_ty
  * It should be called with the takeover lock for the old thread held.
  * Returns 0 on success, and -1 on failure
  */
-static int ssl_takeover(struct connection *conn, void *xprt_ctx, int orig_tid)
+static int ssl_takeover(struct connection *conn, void *xprt_ctx, int orig_tid, int release)
 {
 	struct ssl_sock_ctx *ctx = xprt_ctx;
-	struct tasklet *tl = tasklet_new();
+	struct tasklet *tl = NULL;
 
-	if (!tl)
-		return -1;
+	if (!release) {
+		tl = tasklet_new();
+		if (!tl)
+			return -1;
+	}
 
 	ctx->wait_event.tasklet->context = NULL;
 	tasklet_wakeup_on(ctx->wait_event.tasklet, orig_tid);
+
 	ctx->wait_event.tasklet = tl;
-	ctx->wait_event.tasklet->process = ssl_sock_io_cb;
-	ctx->wait_event.tasklet->context = ctx;
+	if (!release) {
+		ctx->wait_event.tasklet->process = ssl_sock_io_cb;
+		ctx->wait_event.tasklet->context = ctx;
+	}
+
 	return 0;
 }
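
A minimal caller-side sketch of how the new <release> flag is meant to be used once a cleanup path such as the future "delete server" handler exists. This is not part of the patch: the helper name and the final destroy() call are assumptions, while the three-argument takeover(), conn_delete_from_tree() and the mux destroy() callback all appear in the diff above.

#include <haproxy/connection.h>

/* Hypothetical helper: take over one idle connection currently owned by
 * thread <i> and tear it down at once. Passing <release> = 1 lets the mux
 * and xprt skip allocating a replacement task/tasklet on the local thread,
 * since the connection is about to be freed rather than reused. The caller
 * is assumed to hold the idle-conns lock of thread <i>, as the takeover
 * implementations above require ("old thread lock held").
 */
static void conn_takeover_and_release(struct connection *conn, int i)
{
	if (!conn->mux->takeover || conn->mux->takeover(conn, i, 1) != 0)
		return;

	/* the connection now belongs to the calling thread */
	conn_delete_from_tree(conn);

	/* let the mux release the connection and its context immediately */
	conn->mux->destroy(conn->ctx);
}

Regular idle-connection reuse keeps passing 0, as conn_backend_get() does above, so that the migrated connection gets a fresh task and tasklet on its new thread.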