int (*prepare_srv)(struct server *srv); /* prepare a server context */
void (*destroy_srv)(struct server *srv); /* destroy a server context */
int (*get_alpn)(const struct connection *conn, void *xprt_ctx, const char **str, int *len); /* get application layer name */
- int (*takeover)(struct connection *conn, void *xprt_ctx, int orig_tid); /* Let the xprt know the fd have been taken over */
+ int (*takeover)(struct connection *conn, void *xprt_ctx, int orig_tid, int release); /* Let the xprt know the fd has been taken over */
void (*set_idle)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection becomes idle. implies set_used. */
void (*set_used)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection leaves idle. implies set_idle. */
char name[8]; /* transport layer name, zero-terminated */
int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
- int (*takeover)(struct connection *conn, int orig_tid); /* Attempts to migrate the connection to the current thread */
+
+ /* Attempts to migrate <conn> from <orig_tid> to the current thread. If
+ * <release> is true, the connection will be destroyed immediately
+ * afterwards by the caller.
+ */
+ int (*takeover)(struct connection *conn, int orig_tid, int release);
+
unsigned int flags; /* some flags characterizing the mux's capabilities (MX_FL_*) */
char name[8]; /* mux layer name, zero-terminated */
};
continue;
conn = srv_lookup_conn(is_safe ? &srv->per_thr[i].safe_conns : &srv->per_thr[i].idle_conns, hash);
while (conn) {
- if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
+ if (conn->mux->takeover && conn->mux->takeover(conn, i, 0) == 0) {
conn_delete_from_tree(conn);
_HA_ATOMIC_INC(&activity[tid].fd_takeover);
found = 1;
if (!found && !is_safe && srv->curr_safe_nb > 0) {
conn = srv_lookup_conn(&srv->per_thr[i].safe_conns, hash);
while (conn) {
- if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
+ if (conn->mux->takeover && conn->mux->takeover(conn, i, 0) == 0) {
conn_delete_from_tree(conn);
_HA_ATOMIC_INC(&activity[tid].fd_takeover);
found = 1;
* Return 0 if successful, non-zero otherwise.
* Expected to be called with the old thread lock held.
*/
-static int fcgi_takeover(struct connection *conn, int orig_tid)
+static int fcgi_takeover(struct connection *conn, int orig_tid, int release)
{
struct fcgi_conn *fcgi = conn->ctx;
struct task *task;
- struct task *new_task;
- struct tasklet *new_tasklet;
+ struct task *new_task = NULL;
+ struct tasklet *new_tasklet = NULL;
/* Pre-allocate tasks so that we don't have to roll back after the xprt
* has been migrated.
*/
- new_task = task_new_here();
- new_tasklet = tasklet_new();
- if (!new_task || !new_tasklet)
- goto fail;
+ if (!release) {
+ new_task = task_new_here();
+ new_tasklet = tasklet_new();
+ if (!new_task || !new_tasklet)
+ goto fail;
+ }
if (fd_takeover(conn->handle.fd, conn) != 0)
goto fail;
- if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+ if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
/* We failed to takeover the xprt, even if the connection may
* still be valid, flag it as error'd, as we have already
* taken over the fd, and wake the tasklet, so that it will
fcgi->task = new_task;
new_task = NULL;
- fcgi->task->process = fcgi_timeout_task;
- fcgi->task->context = fcgi;
+ if (!release) {
+ fcgi->task->process = fcgi_timeout_task;
+ fcgi->task->context = fcgi;
+ }
}
/* To let the tasklet know it should free itself, and do nothing else,
tasklet_wakeup_on(fcgi->wait_event.tasklet, orig_tid);
fcgi->wait_event.tasklet = new_tasklet;
- fcgi->wait_event.tasklet->process = fcgi_io_cb;
- fcgi->wait_event.tasklet->context = fcgi;
- fcgi->conn->xprt->subscribe(fcgi->conn, fcgi->conn->xprt_ctx,
- SUB_RETRY_RECV, &fcgi->wait_event);
+ if (!release) {
+ fcgi->wait_event.tasklet->process = fcgi_io_cb;
+ fcgi->wait_event.tasklet->context = fcgi;
+ fcgi->conn->xprt->subscribe(fcgi->conn, fcgi->conn->xprt_ctx,
+ SUB_RETRY_RECV, &fcgi->wait_event);
+ }
if (new_task)
__task_free(new_task);
* Return 0 if successful, non-zero otherwise.
* Expected to be called with the old thread lock held.
*/
-static int h1_takeover(struct connection *conn, int orig_tid)
+static int h1_takeover(struct connection *conn, int orig_tid, int release)
{
struct h1c *h1c = conn->ctx;
struct task *task;
- struct task *new_task;
- struct tasklet *new_tasklet;
+ struct task *new_task = NULL;
+ struct tasklet *new_tasklet = NULL;
/* Pre-allocate tasks so that we don't have to roll back after the xprt
* has been migrated.
*/
- new_task = task_new_here();
- new_tasklet = tasklet_new();
- if (!new_task || !new_tasklet)
- goto fail;
+ if (!release) {
+ new_task = task_new_here();
+ new_tasklet = tasklet_new();
+ if (!new_task || !new_tasklet)
+ goto fail;
+ }
if (fd_takeover(conn->handle.fd, conn) != 0)
goto fail;
- if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+ if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
/* We failed to takeover the xprt, even if the connection may
* still be valid, flag it as error'd, as we have already
* taken over the fd, and wake the tasklet, so that it will
h1c->task = new_task;
new_task = NULL;
- h1c->task->process = h1_timeout_task;
- h1c->task->context = h1c;
+ if (!release) {
+ h1c->task->process = h1_timeout_task;
+ h1c->task->context = h1c;
+ }
}
/* To let the tasklet know it should free itself, and do nothing else,
tasklet_wakeup_on(h1c->wait_event.tasklet, orig_tid);
h1c->wait_event.tasklet = new_tasklet;
- h1c->wait_event.tasklet->process = h1_io_cb;
- h1c->wait_event.tasklet->context = h1c;
- h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx,
- SUB_RETRY_RECV, &h1c->wait_event);
+ if (!release) {
+ h1c->wait_event.tasklet->process = h1_io_cb;
+ h1c->wait_event.tasklet->context = h1c;
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx,
+ SUB_RETRY_RECV, &h1c->wait_event);
+ }
if (new_task)
__task_free(new_task);
* Return 0 if successful, non-zero otherwise.
* Expected to be called with the old thread lock held.
*/
-static int h2_takeover(struct connection *conn, int orig_tid)
+static int h2_takeover(struct connection *conn, int orig_tid, int release)
{
struct h2c *h2c = conn->ctx;
struct task *task;
- struct task *new_task;
- struct tasklet *new_tasklet;
+ struct task *new_task = NULL;
+ struct tasklet *new_tasklet = NULL;
/* Pre-allocate tasks so that we don't have to roll back after the xprt
* has been migrated.
*/
- new_task = task_new_here();
- new_tasklet = tasklet_new();
- if (!new_task || !new_tasklet)
- goto fail;
+ if (!release) {
+ new_task = task_new_here();
+ new_tasklet = tasklet_new();
+ if (!new_task || !new_tasklet)
+ goto fail;
+ }
if (fd_takeover(conn->handle.fd, conn) != 0)
goto fail;
- if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+ if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid, release) != 0) {
/* We failed to takeover the xprt, even if the connection may
* still be valid, flag it as error'd, as we have already
* taken over the fd, and wake the tasklet, so that it will
h2c->task = new_task;
new_task = NULL;
- h2c->task->process = h2_timeout_task;
- h2c->task->context = h2c;
+ if (!release) {
+ h2c->task->process = h2_timeout_task;
+ h2c->task->context = h2c;
+ }
}
/* To let the tasklet know it should free itself, and do nothing else,
tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
h2c->wait_event.tasklet = new_tasklet;
- h2c->wait_event.tasklet->process = h2_io_cb;
- h2c->wait_event.tasklet->context = h2c;
- h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
- SUB_RETRY_RECV, &h2c->wait_event);
+ if (!release) {
+ h2c->wait_event.tasklet->process = h2_io_cb;
+ h2c->wait_event.tasklet->context = h2c;
+ h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
+ SUB_RETRY_RECV, &h2c->wait_event);
+ }
if (new_task)
__task_free(new_task);
* It should be called with the takeover lock for the old thread held.
* Returns 0 on success, and -1 on failure
*/
-static int ssl_takeover(struct connection *conn, void *xprt_ctx, int orig_tid)
+static int ssl_takeover(struct connection *conn, void *xprt_ctx, int orig_tid, int release)
{
struct ssl_sock_ctx *ctx = xprt_ctx;
- struct tasklet *tl = tasklet_new();
+ struct tasklet *tl = NULL;
- if (!tl)
- return -1;
+ if (!release) {
+ tl = tasklet_new();
+ if (!tl)
+ return -1;
+ }
ctx->wait_event.tasklet->context = NULL;
tasklet_wakeup_on(ctx->wait_event.tasklet, orig_tid);
+
ctx->wait_event.tasklet = tl;
- ctx->wait_event.tasklet->process = ssl_sock_io_cb;
- ctx->wait_event.tasklet->context = ctx;
+ if (!release) {
+ ctx->wait_event.tasklet->process = ssl_sock_io_cb;
+ ctx->wait_event.tasklet->context = ctx;
+ }
+
return 0;
}