static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned short), void *ctx)
{
- if (!wl->task->process) {
- wl->task->process = cb;
- wl->task->context = ctx;
+ if (!wl->tasklet->process) {
+ wl->tasklet->process = cb;
+ wl->tasklet->context = ctx;
}
return wl;
}
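wl_set_waitcb() only installs the callback and context when the tasklet has none yet, and returns the wait_event so it can be handed straight to a subscribe call. A minimal usage sketch, not part of the patch: my_io_cb and my_ctx are hypothetical, and the subscribe call is assumed to mirror the unsubscribe signature visible later in this patch.
static struct task *my_io_cb(struct task *t, void *ctx, unsigned short state);

static void my_wait_for_recv(struct connection *conn, struct wait_event *we, void *my_ctx)
{
	/* install my_io_cb/my_ctx on the tasklet (only if nothing is set yet)
	 * and ask the transport layer to wake it once receiving is possible again
	 */
	conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV,
	                      wl_set_waitcb(we, my_io_cb, my_ctx));
}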
si->end = NULL;
si->state = si->prev_state = SI_ST_INI;
si->ops = &si_embedded_ops;
- si->wait_event.task = tasklet_new();
- if (!si->wait_event.task)
+ si->wait_event.tasklet = tasklet_new();
+ if (!si->wait_event.tasklet)
return -1;
- si->wait_event.task->process = si_cs_io_cb;
- si->wait_event.task->context = si;
+ si->wait_event.tasklet->process = si_cs_io_cb;
+ si->wait_event.tasklet->context = si;
si->wait_event.events = 0;
return 0;
}
};
struct wait_event {
- struct tasklet *task;
+ struct tasklet *tasklet;
int events; /* set of enum sub_event_type above */
};
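The rename makes explicit that a wait_event owns a tasklet rather than a full task. The lifecycle repeated throughout the init, wake-up and release hunks below (checks, h1, h2, pass-through mux, SSL, handshake xprt) is always the same; here is a condensed sketch of that pattern, with my_io_cb and my_ctx as hypothetical stand-ins for the owner's callback and private context.
static struct task *my_io_cb(struct task *t, void *ctx, unsigned short state);

static int my_wait_event_init(struct wait_event *we, void *my_ctx)
{
	we->tasklet = tasklet_new();        /* allocation may fail */
	if (!we->tasklet)
		return -1;
	we->tasklet->process = my_io_cb;    /* run when the tasklet is scheduled */
	we->tasklet->context = my_ctx;
	we->events = 0;                     /* no SUB_RETRY_RECV/SEND pending yet */
	tasklet_wakeup(we->tasklet);        /* schedule a first I/O attempt */
	return 0;
}

static void my_wait_event_release(struct wait_event *we)
{
	/* mirrors the *_release()/fail paths shown in the hunks below */
	if (we->tasklet)
		tasklet_free(we->tasklet);
}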
* I/O handler expects to have a cs, so remove
* the tasklet
*/
- task_remove_from_tasklet_list((struct task *)check->wait_list.task);
+ task_remove_from_tasklet_list((struct task *)check->wait_list.tasklet);
cs_destroy(cs);
cs = check->cs = NULL;
conn = NULL;
* I/O handler expects to have a cs, so remove
* the tasklet
*/
- task_remove_from_tasklet_list((struct task *)check->wait_list.task);
+ task_remove_from_tasklet_list((struct task *)check->wait_list.tasklet);
cs_destroy(cs);
cs = check->cs = NULL;
conn = NULL;
* I/O handler expects to have a cs, so remove
* the tasklet
*/
- task_remove_from_tasklet_list((struct task *)check->wait_list.task);
+ task_remove_from_tasklet_list((struct task *)check->wait_list.tasklet);
cs_destroy(check->cs);
if (!check->bi.area || !check->bo.area)
return "out of memory while allocating check buffer";
- check->wait_list.task = tasklet_new();
- if (!check->wait_list.task)
+ check->wait_list.tasklet = tasklet_new();
+ if (!check->wait_list.tasklet)
return "out of memroy while allocating check tasklet";
check->wait_list.events = 0;
- check->wait_list.task->process = event_srv_chk_io;
- check->wait_list.task->context = check;
+ check->wait_list.tasklet->process = event_srv_chk_io;
+ check->wait_list.tasklet->context = check;
return NULL;
}
flags = 0;
if (conn->send_wait != NULL) {
conn->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(conn->send_wait->task);
+ tasklet_wakeup(conn->send_wait->tasklet);
conn->send_wait = NULL;
} else
io_available = 1;
flags = 0;
if (conn->recv_wait) {
conn->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(conn->recv_wait->task);
+ tasklet_wakeup(conn->recv_wait->tasklet);
conn->recv_wait = NULL;
} else
io_available = 1;
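Both branches above follow the same wake protocol: clear the event bit, wake the subscriber's tasklet, then drop the registration so a stale wait_event is never woken twice. The counterpart is the subscriber storing its wait_event in conn->recv_wait or conn->send_wait with the matching bit set; a rough sketch of that registration half, under the assumption that this is how the connection-level subscription works (illustrative only, not the actual connection.c code):
static void my_register_recv(struct connection *conn, struct wait_event *we)
{
	we->events |= SUB_RETRY_RECV;   /* remember what we are waiting for */
	conn->recv_wait = we;           /* the fd handler will clear the bit,
	                                 * wake we->tasklet and reset this pointer */
}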
task_destroy(s->check.task);
task_destroy(s->agent.task);
- if (s->check.wait_list.task)
- tasklet_free(s->check.wait_list.task);
- if (s->agent.wait_list.task)
- tasklet_free(s->agent.wait_list.task);
+ if (s->check.wait_list.tasklet)
+ tasklet_free(s->check.wait_list.tasklet);
+ if (s->agent.wait_list.tasklet)
+ tasklet_free(s->agent.wait_list.tasklet);
task_destroy(s->warmup);
if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
h1c->flags &= ~H1C_F_IN_ALLOC;
if (h1_recv_allowed(h1c))
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
return 1;
}
if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
h1c->flags &= ~H1C_F_OUT_ALLOC;
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
return 1;
}
h1c->task = NULL;
LIST_INIT(&h1c->buf_wait.list);
- h1c->wait_event.task = tasklet_new();
- if (!h1c->wait_event.task)
+ h1c->wait_event.tasklet = tasklet_new();
+ if (!h1c->wait_event.tasklet)
goto fail;
- h1c->wait_event.task->process = h1_io_cb;
- h1c->wait_event.task->context = h1c;
+ h1c->wait_event.tasklet->process = h1_io_cb;
+ h1c->wait_event.tasklet->context = h1c;
h1c->wait_event.events = 0;
if (conn_is_back(conn)) {
task_queue(t);
/* Try to read, if nothing is available yet we'll just subscribe */
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
/* mux->wake will be called soon to complete the operation */
return 0;
fail:
task_destroy(t);
- if (h1c->wait_event.task)
- tasklet_free(h1c->wait_event.task);
+ if (h1c->wait_event.tasklet)
+ tasklet_free(h1c->wait_event.tasklet);
pool_free(pool_head_h1c, h1c);
fail_h1c:
return -1;
h1c->task = NULL;
}
- if (h1c->wait_event.task)
- tasklet_free(h1c->wait_event.task);
+ if (h1c->wait_event.tasklet)
+ tasklet_free(h1c->wait_event.tasklet);
h1s_destroy(h1c->h1s);
if (conn && h1c->wait_event.events != 0)
h1s->req.state = H1_MSG_TUNNEL;
if (h1s->h1c->flags & H1C_F_IN_BUSY) {
h1s->h1c->flags &= ~H1C_F_IN_BUSY;
- tasklet_wakeup(h1s->h1c->wait_event.task);
+ tasklet_wakeup(h1s->h1c->wait_event.tasklet);
}
}
}
ret = htx->data - data;
if (h1c->flags & H1C_F_IN_FULL && buf_room_for_htx_data(&h1c->ibuf)) {
h1c->flags &= ~H1C_F_IN_FULL;
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
}
h1s->cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
{
if (h1s && h1s->recv_wait) {
h1s->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(h1s->recv_wait->task);
+ tasklet_wakeup(h1s->recv_wait->tasklet);
h1s->recv_wait = NULL;
}
}
{
if (h1s && h1s->send_wait) {
h1s->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(h1s->send_wait->task);
+ tasklet_wakeup(h1s->send_wait->tasklet);
h1s->send_wait = NULL;
}
}
/* The server doesn't want it, let's kill the connection right away */
h1c->conn->mux->destroy(h1c->conn);
else
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
return;
}
/* The connection was added to the server list,
* wake the task so we can subscribe to events
*/
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
return;
}
}
(h1c->conn->flags & CO_FL_ERROR) || !h1c->conn->owner)
h1_release(h1c);
else {
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
if (h1c->task) {
h1c->task->expire = TICK_ETERNITY;
if (b_data(&h1c->obuf)) {
else if (ret > 0 || (h1s->flags & H1S_F_SPLICED_DATA)) {
h1s->flags &= ~H1S_F_SPLICED_DATA;
if (!(h1c->wait_event.events & SUB_RETRY_RECV))
- tasklet_wakeup(h1c->wait_event.task);
+ tasklet_wakeup(h1c->wait_event.tasklet);
}
return ret;
}
if ((!consider_buffer || !b_data(&h2c->dbuf))
&& (h2c->wait_event.events & SUB_RETRY_RECV))
return;
- tasklet_wakeup(h2c->wait_event.task);
+ tasklet_wakeup(h2c->wait_event.tasklet);
}
t->expire = tick_add(now_ms, h2c->timeout);
}
- h2c->wait_event.task = tasklet_new();
- if (!h2c->wait_event.task)
+ h2c->wait_event.tasklet = tasklet_new();
+ if (!h2c->wait_event.tasklet)
goto fail;
- h2c->wait_event.task->process = h2_io_cb;
- h2c->wait_event.task->context = h2c;
+ h2c->wait_event.tasklet->process = h2_io_cb;
+ h2c->wait_event.tasklet->context = h2c;
h2c->wait_event.events = 0;
h2c->ddht = hpack_dht_alloc(h2_settings_header_table_size);
hpack_dht_free(h2c->ddht);
fail:
task_destroy(t);
- if (h2c->wait_event.task)
- tasklet_free(h2c->wait_event.task);
+ if (h2c->wait_event.tasklet)
+ tasklet_free(h2c->wait_event.tasklet);
pool_free(pool_head_h2c, h2c);
fail_no_h2c:
return -1;
task_wakeup(h2c->task, TASK_WOKEN_OTHER);
h2c->task = NULL;
}
- if (h2c->wait_event.task)
- tasklet_free(h2c->wait_event.task);
+ if (h2c->wait_event.tasklet)
+ tasklet_free(h2c->wait_event.tasklet);
if (h2c->wait_event.events != 0)
conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
&h2c->wait_event);
if (h2s->recv_wait) {
sw = h2s->recv_wait;
sw->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(sw->task);
+ tasklet_wakeup(sw->tasklet);
h2s->recv_wait = NULL;
}
}
sw = h2s->send_wait;
sw->events &= ~SUB_RETRY_SEND;
LIST_ADDQ(&h2s->h2c->sending_list, &h2s->sending_list);
- tasklet_wakeup(sw->task);
+ tasklet_wakeup(sw->tasklet);
}
}
*/
LIST_DEL_INIT(&h2s->list);
if (LIST_ADDED(&h2s->sending_list)) {
- task_remove_from_tasklet_list((struct task *)h2s->send_wait->task);
+ task_remove_from_tasklet_list((struct task *)h2s->send_wait->tasklet);
LIST_DEL_INIT(&h2s->sending_list);
}
- tasklet_free(h2s->wait_event.task);
+ tasklet_free(h2s->wait_event.tasklet);
pool_free(pool_head_h2s, h2s);
}
if (!h2s)
goto out;
- h2s->wait_event.task = tasklet_new();
- if (!h2s->wait_event.task) {
+ h2s->wait_event.tasklet = tasklet_new();
+ if (!h2s->wait_event.tasklet) {
pool_free(pool_head_h2s, h2s);
goto out;
}
h2s->send_wait = NULL;
h2s->recv_wait = NULL;
- h2s->wait_event.task->process = h2_deferred_shut;
- h2s->wait_event.task->context = h2s;
+ h2s->wait_event.tasklet->process = h2_deferred_shut;
+ h2s->wait_event.tasklet->context = h2s;
h2s->wait_event.events = 0;
LIST_INIT(&h2s->list);
LIST_INIT(&h2s->sending_list);
}
h2s->send_wait->events &= ~SUB_RETRY_SEND;
LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
- tasklet_wakeup(h2s->send_wait->task);
+ tasklet_wakeup(h2s->send_wait->tasklet);
}
list_for_each_entry_safe(h2s, h2s_back, &h2c->send_list, list) {
h2s->flags &= ~H2_SF_BLK_ANY;
h2s->send_wait->events &= ~SUB_RETRY_SEND;
LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
- tasklet_wakeup(h2s->send_wait->task);
+ tasklet_wakeup(h2s->send_wait->tasklet);
}
fail:
}
h2s->flags &= ~H2_SF_BLK_ANY;
h2s->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(h2s->send_wait->task);
+ tasklet_wakeup(h2s->send_wait->tasklet);
LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
}
}
return sent;
}
-/* this is the tasklet referenced in h2c->wait_event.task */
+/* this is the tasklet referenced in h2c->wait_event.tasklet */
static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
{
struct h2c *h2c = ctx;
/* The stream is about to die, so no need to attempt to run its task */
if (LIST_ADDED(&h2s->sending_list) &&
h2s->send_wait != &h2s->wait_event) {
- task_remove_from_tasklet_list((struct task *)h2s->send_wait->task);
+ task_remove_from_tasklet_list((struct task *)h2s->send_wait->tasklet);
LIST_DEL_INIT(&h2s->sending_list);
/*
* At this point, the stream_interface is supposed to have called
goto add_to_list;
if (!(h2c->wait_event.events & SUB_RETRY_SEND))
- tasklet_wakeup(h2c->wait_event.task);
+ tasklet_wakeup(h2c->wait_event.tasklet);
h2s_close(h2s);
done:
h2s->flags &= ~H2_SF_WANT_SHUTR;
}
if (!(h2c->wait_event.events & SUB_RETRY_SEND))
- tasklet_wakeup(h2c->wait_event.task);
+ tasklet_wakeup(h2c->wait_event.tasklet);
done:
h2s->flags &= ~H2_SF_WANT_SHUTW;
return;
return;
}
-/* This is the tasklet referenced in h2s->wait_event.task, it is used for
+/* This is the tasklet referenced in h2s->wait_event.tasklet, it is used for
* deferred shutdowns when the h2_detach() was done but the mux buffer was full
* and prevented the last frame from being emitted.
*/
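The callback body itself is not part of this excerpt; the following is only a rough sketch of the shape such a deferred-shutdown tasklet takes, not the actual h2_deferred_shut. When the tasklet finally runs, it retries whichever half of the shutdown was still pending (the same WANT_SHUTR/WANT_SHUTW idea as the flags cleared in the hunks above). All my_*/MY_* names are hypothetical.
static struct task *my_deferred_shut(struct task *t, void *ctx, unsigned short status)
{
	struct my_stream *s = ctx;

	/* retry the shutdown halves that could not be emitted earlier because
	 * the mux buffer was full; each helper may re-arm the tasklet again
	 * if it still cannot make progress
	 */
	if (s->flags & MY_SF_WANT_SHUTW)
		my_do_shutw(s);
	if (s->flags & MY_SF_WANT_SHUTR)
		my_do_shutr(s);
	return NULL;
}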
/* We were about to send, make sure it does not happen */
if (LIST_ADDED(&h2s->sending_list) &&
h2s->send_wait != &h2s->wait_event) {
- task_remove_from_tasklet_list((struct task *)h2s->send_wait->task);
+ task_remove_from_tasklet_list((struct task *)h2s->send_wait->tasklet);
LIST_DEL_INIT(&h2s->sending_list);
}
h2s->send_wait = NULL;
list_for_each_entry_safe(h2s, h2s_back, &h2c->sending_list, sending_list) {
LIST_DEL_INIT(&h2s->sending_list);
- task_remove_from_tasklet_list((struct task *)h2s->send_wait->task);
+ task_remove_from_tasklet_list((struct task *)h2s->send_wait->tasklet);
h2s->send_wait->events |= SUB_RETRY_SEND;
}
}
if (total > 0) {
if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND))
- tasklet_wakeup(h2s->h2c->wait_event.task);
+ tasklet_wakeup(h2s->h2c->wait_event.tasklet);
}
/* If we're waiting for flow control, and we got a shutr on the
conn_stop_tracking(conn);
conn_full_close(conn);
- tasklet_free(ctx->wait_event.task);
+ tasklet_free(ctx->wait_event.tasklet);
conn->mux = NULL;
conn->ctx = NULL;
if (conn->destroy_cb)
if (!ctx)
goto fail;
- ctx->wait_event.task = tasklet_new();
- if (!ctx->wait_event.task)
+ ctx->wait_event.tasklet = tasklet_new();
+ if (!ctx->wait_event.tasklet)
goto fail_free_ctx;
- ctx->wait_event.task->context = ctx;
- ctx->wait_event.task->process = mux_pt_io_cb;
+ ctx->wait_event.tasklet->context = ctx;
+ ctx->wait_event.tasklet->process = mux_pt_io_cb;
ctx->wait_event.events = 0;
ctx->conn = conn;
fail_free:
cs_free(cs);
fail_free_ctx:
- if (ctx->wait_event.task)
- tasklet_free(ctx->wait_event.task);
+ if (ctx->wait_event.tasklet)
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(pool_head_pt_ctx, ctx);
fail:
return -1;
conn->err_code = CO_ER_SSL_NO_MEM;
return -1;
}
- ctx->wait_event.task = tasklet_new();
- if (!ctx->wait_event.task) {
+ ctx->wait_event.tasklet = tasklet_new();
+ if (!ctx->wait_event.tasklet) {
conn->err_code = CO_ER_SSL_NO_MEM;
pool_free(ssl_sock_ctx_pool, ctx);
return -1;
}
- ctx->wait_event.task->process = ssl_sock_io_cb;
- ctx->wait_event.task->context = ctx;
+ ctx->wait_event.tasklet->process = ssl_sock_io_cb;
+ ctx->wait_event.tasklet->context = ctx;
ctx->wait_event.events = 0;
ctx->sent_early_data = 0;
ctx->tmp_early_data = -1;
_HA_ATOMIC_ADD(&totalsslconns, 1);
*xprt_ctx = ctx;
/* Start the handshake */
- tasklet_wakeup(ctx->wait_event.task);
+ tasklet_wakeup(ctx->wait_event.tasklet);
if (conn->flags & CO_FL_ERROR)
goto err;
return 0;
_HA_ATOMIC_ADD(&totalsslconns, 1);
*xprt_ctx = ctx;
/* Start the handshake */
- tasklet_wakeup(ctx->wait_event.task);
+ tasklet_wakeup(ctx->wait_event.tasklet);
if (conn->flags & CO_FL_ERROR)
goto err;
return 0;
/* don't know how to handle such a target */
conn->err_code = CO_ER_SSL_NO_TARGET;
err:
- if (ctx && ctx->wait_event.task)
- tasklet_free(ctx->wait_event.task);
+ if (ctx && ctx->wait_event.tasklet)
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(ssl_sock_ctx_pool, ctx);
return -1;
}
/* On error, wake any waiter */
if (ctx->recv_wait) {
ctx->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(ctx->recv_wait->task);
+ tasklet_wakeup(ctx->recv_wait->tasklet);
ctx->recv_wait = NULL;
woke = 1;
}
if (ctx->send_wait) {
ctx->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(ctx->send_wait->task);
+ tasklet_wakeup(ctx->send_wait->tasklet);
ctx->send_wait = NULL;
woke = 1;
}
&ctx->wait_event);
if (ctx->send_wait) {
ctx->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(ctx->send_wait->task);
+ tasklet_wakeup(ctx->send_wait->tasklet);
}
if (ctx->recv_wait) {
ctx->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(ctx->recv_wait->task);
+ tasklet_wakeup(ctx->recv_wait->tasklet);
}
if (ctx->xprt->close)
ctx->xprt->close(conn, ctx->xprt_ctx);
*/
fd_cant_recv(afd);
}
- tasklet_free(ctx->wait_event.task);
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(ssl_sock_ctx_pool, ctx);
_HA_ATOMIC_ADD(&jobs, 1);
return;
}
#endif
SSL_free(ctx->ssl);
- tasklet_free(ctx->wait_event.task);
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(ssl_sock_ctx_pool, ctx);
_HA_ATOMIC_SUB(&sslconns, 1);
}
out_fail_accept:
flt_stream_release(s, 0);
task_destroy(t);
- tasklet_free(s->si[1].wait_event.task);
+ tasklet_free(s->si[1].wait_event.tasklet);
LIST_DEL(&s->list);
out_fail_alloc_si1:
- tasklet_free(s->si[0].wait_event.task);
+ tasklet_free(s->si[0].wait_event.tasklet);
out_fail_alloc:
pool_free(pool_head_stream, s);
return NULL;
si_release_endpoint(&s->si[1]);
si_release_endpoint(&s->si[0]);
- tasklet_free(s->si[0].wait_event.task);
- tasklet_free(s->si[1].wait_event.task);
+ tasklet_free(s->si[0].wait_event.tasklet);
+ tasklet_free(s->si[1].wait_event.tasklet);
b_free(&s->si[1].l7_buffer);
if (must_free_sess) {
}
else {
/* (re)start reading */
- tasklet_wakeup(si->wait_event.task);
+ tasklet_wakeup(si->wait_event.tasklet);
if (!(si->flags & SI_FL_DONT_WAKE))
task_wakeup(si_task(si), TASK_WOKEN_IO);
}
{
/* (re)start reading */
if (si_state_in(si->state, SI_SB_CON|SI_SB_RDY|SI_SB_EST))
- tasklet_wakeup(si->wait_event.task);
+ tasklet_wakeup(si->wait_event.tasklet);
}
/* On error, wake any waiter */
if (ctx->recv_wait) {
ctx->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(ctx->recv_wait->task);
+ tasklet_wakeup(ctx->recv_wait->tasklet);
woke = 1;
ctx->recv_wait = NULL;
}
if (ctx->send_wait) {
ctx->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(ctx->send_wait->task);
+ tasklet_wakeup(ctx->send_wait->tasklet);
woke = 1;
ctx->send_wait = NULL;
}
if (ret >= 0 && !woke && ctx->conn->mux && ctx->conn->mux->wake)
ret = ctx->conn->mux->wake(ctx->conn);
}
- tasklet_free(ctx->wait_event.task);
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(xprt_handshake_ctx_pool, ctx);
}
return NULL;
return -1;
}
ctx->conn = conn;
- ctx->wait_event.task = tasklet_new();
- if (!ctx->wait_event.task) {
+ ctx->wait_event.tasklet = tasklet_new();
+ if (!ctx->wait_event.tasklet) {
conn->err_code = CO_ER_SSL_NO_MEM;
pool_free(xprt_handshake_ctx_pool, ctx);
return -1;
}
- ctx->wait_event.task->process = xprt_handshake_io_cb;
- ctx->wait_event.task->context = ctx;
+ ctx->wait_event.tasklet->process = xprt_handshake_io_cb;
+ ctx->wait_event.tasklet->context = ctx;
ctx->wait_event.events = 0;
/* This XPRT expects the underlying XPRT to be provided later,
* with an add_xprt() call, so we start trying to do the handshake
&ctx->wait_event);
if (ctx->send_wait) {
ctx->send_wait->events &= ~SUB_RETRY_SEND;
- tasklet_wakeup(ctx->send_wait->task);
+ tasklet_wakeup(ctx->send_wait->tasklet);
}
if (ctx->recv_wait) {
ctx->recv_wait->events &= ~SUB_RETRY_RECV;
- tasklet_wakeup(ctx->recv_wait->task);
+ tasklet_wakeup(ctx->recv_wait->tasklet);
}
if (ctx->xprt && ctx->xprt->close)
conn->flags &= ~CO_FL_HANDSHAKE_NOSSL;
if (conn->xprt == xprt_get(XPRT_HANDSHAKE))
conn->xprt = xprt_get(XPRT_RAW);
- tasklet_free(ctx->wait_event.task);
+ tasklet_free(ctx->wait_event.tasklet);
pool_free(xprt_handshake_ctx_pool, ctx);
}
}
ctx->xprt = toadd_ops;
ctx->xprt_ctx = toadd_ctx;
/* Ok, we now have an xprt, so let's try to do the handshake */
- tasklet_wakeup(ctx->wait_event.task);
+ tasklet_wakeup(ctx->wait_event.tasklet);
return 0;
}