*/
static inline void __appctx_free(struct appctx *appctx)
{
- task_delete(appctx->t);
- task_free(appctx->t);
+ task_destroy(appctx->t);
if (!LIST_ISEMPTY(&appctx->buffer_wait.list)) {
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&appctx->buffer_wait.list);
LIST_INIT(&appctx->buffer_wait.list);
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
}
-/*
- * Unlinks the task and adjusts run queue stats.
- * A pointer to the task itself is returned.
- */
-static inline struct task *task_delete(struct task *t)
-{
- task_unlink_wq(t);
- task_unlink_rq(t);
- return t;
-}
-
/*
* Initialize a new task. The bare minimum is performed (queue pointers and
* state). The task is returned. This function should not be used outside of
_HA_ATOMIC_SUB(&nb_tasks, 1);
}
-static inline void task_free(struct task *t)
+static inline void task_destroy(struct task *t)
{
+ task_unlink_wq(t);
+ /* We don't have to explicitly remove the task from the run queue.
+ * If it is in the run queue, the test below will set t->process
+ * to NULL, and the task will be freed when its turn to run comes.
+ */
+
/* There's no need to protect t->state with a lock, as the task
* has to run on the current thread.
*/
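
The hunk above is cut before the test those comments refer to. As a rough, self-contained model of the deferred-free idea (illustration only, not HAProxy's actual types or API: demo_task, demo_task_destroy and demo_run_one are invented names), a destroy call frees the task at once when it is neither queued nor running, and otherwise only clears its callback so the scheduler frees it the next time it picks it up:

/* Self-contained model of the deferred-free scheme described above
 * (illustration only, not HAProxy's real task API).
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_TASK_QUEUED  0x01
#define DEMO_TASK_RUNNING 0x02

struct demo_task {
	unsigned int state;                                 /* DEMO_TASK_* flags */
	struct demo_task *(*process)(struct demo_task *t);  /* NULL means "free me" */
};

static void demo_task_destroy(struct demo_task *t)
{
	if (!(t->state & (DEMO_TASK_QUEUED | DEMO_TASK_RUNNING))) {
		free(t);             /* nobody else holds the task: free it now */
		return;
	}
	t->process = NULL;           /* defer: the scheduler frees it on its turn */
}

/* one scheduler pass over a single queued task */
static void demo_run_one(struct demo_task *t)
{
	t->state &= ~DEMO_TASK_QUEUED;
	if (!t->process) {           /* destroyed while it was still queued */
		free(t);
		return;
	}
	t->state |= DEMO_TASK_RUNNING;
	t->process(t);
	t->state &= ~DEMO_TASK_RUNNING;
}

static struct demo_task *say_hello(struct demo_task *t)
{
	puts("task ran");
	return t;
}

int main(void)
{
	/* not queued: destroyed and freed immediately */
	struct demo_task *a = calloc(1, sizeof(*a));
	a->process = say_hello;
	demo_task_destroy(a);

	/* queued: destroy only clears ->process, the scheduler frees it */
	struct demo_task *b = calloc(1, sizeof(*b));
	b->process = say_hello;
	b->state = DEMO_TASK_QUEUED;
	demo_task_destroy(b);
	demo_run_one(b);
	return 0;
}

In the actual scheduler this corresponds to checking t->process once the task is dequeued and freeing it instead of running it; the sketch only makes the two code paths described by the comments concrete.
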
struct check *check = &q->check;
if (check->task) {
- task_delete(check->task);
- task_free(check->task);
+ task_destroy(check->task);
check->task = NULL;
}
free_check(check);
free(resolvers->id);
free((char *)resolvers->conf.file);
- task_delete(resolvers->t);
- task_free(resolvers->t);
+ task_destroy(resolvers->t);
LIST_DEL(&resolvers->list);
free(resolvers);
}
/* Destroy the task attached to this applet */
if (spoe_appctx->task) {
- task_delete(spoe_appctx->task);
- task_free(spoe_appctx->task);
+ task_destroy(spoe_appctx->task);
}
/* Notify all waiting streams */
out_free_sess:
session_free(sess);
out_free_spoe:
- task_free(SPOE_APPCTX(appctx)->task);
+ task_destroy(SPOE_APPCTX(appctx)->task);
out_free_spoe_appctx:
pool_free(pool_head_spoe_appctx, SPOE_APPCTX(appctx));
out_free_appctx:
while (s) {
s_next = s->next;
- if (s->check.task) {
- task_delete(s->check.task);
- task_free(s->check.task);
- }
- if (s->agent.task) {
- task_delete(s->agent.task);
- task_free(s->agent.task);
- }
+ if (s->check.task)
+ task_destroy(s->check.task);
+ if (s->agent.task)
+ task_destroy(s->agent.task);
- if (s->warmup) {
- task_delete(s->warmup);
- task_free(s->warmup);
- }
+ if (s->warmup)
+ task_destroy(s->warmup);
free(s->id);
free(s->cookie);
free_http_req_rules(&p->http_req_rules);
free_http_res_rules(&p->http_res_rules);
- task_free(p->task);
+ task_destroy(p->task);
pool_destroy(p->req_cap_pool);
pool_destroy(p->rsp_cap_pool);
free(global.node); global.node = NULL;
free(global.desc); global.desc = NULL;
free(oldpids); oldpids = NULL;
- task_free(global_listener_queue_task); global_listener_queue_task = NULL;
- task_free(idle_conn_task);
+ task_destroy(global_listener_queue_task); global_listener_queue_task = NULL;
+ task_destroy(idle_conn_task);
idle_conn_task = NULL;
list_for_each_entry_safe(log, logb, &global.logsrvs, list) {
stop_proxy(curpeers->peers_fe);
/* disable this peer section so that it kills itself */
signal_unregister_handler(curpeers->sighandler);
- task_delete(curpeers->sync_task);
- task_free(curpeers->sync_task);
+ task_destroy(curpeers->sync_task);
curpeers->sync_task = NULL;
- task_free(curpeers->peers_fe->task);
+ task_destroy(curpeers->peers_fe->task);
curpeers->peers_fe->task = NULL;
curpeers->peers_fe = NULL;
}
/* finished or yield */
case HLUA_E_OK:
hlua_ctx_destroy(hlua);
- task_delete(task);
- task_free(task);
+ task_destroy(task);
task = NULL;
break;
case HLUA_E_ERRMSG:
SEND_ERR(NULL, "Lua task: %s.\n", lua_tostring(hlua->T, -1));
hlua_ctx_destroy(hlua);
- task_delete(task);
- task_free(task);
+ task_destroy(task);
task = NULL;
break;
default:
SEND_ERR(NULL, "Lua task: unknown error.\n");
hlua_ctx_destroy(hlua);
- task_delete(task);
- task_free(task);
+ task_destroy(task);
task = NULL;
break;
}
static void hlua_applet_tcp_release(struct appctx *ctx)
{
- task_delete(ctx->ctx.hlua_apptcp.task);
- task_free(ctx->ctx.hlua_apptcp.task);
+ task_destroy(ctx->ctx.hlua_apptcp.task);
ctx->ctx.hlua_apptcp.task = NULL;
hlua_ctx_destroy(ctx->ctx.hlua_apptcp.hlua);
ctx->ctx.hlua_apptcp.hlua = NULL;
static void hlua_applet_http_release(struct appctx *ctx)
{
- task_delete(ctx->ctx.hlua_apphttp.task);
- task_free(ctx->ctx.hlua_apphttp.task);
+ task_destroy(ctx->ctx.hlua_apphttp.task);
ctx->ctx.hlua_apphttp.task = NULL;
hlua_ctx_destroy(ctx->ctx.hlua_apphttp.hlua);
ctx->ctx.hlua_apphttp.hlua = NULL;
fail:
if (t)
- task_free(t);
+ task_destroy(t);
if (h1c->wait_event.task)
tasklet_free(h1c->wait_event.task);
pool_free(pool_head_h1c, h1c);
if (!expired && h1c)
return t;
- task_delete(t);
- task_free(t);
+ task_destroy(t);
if (!h1c) {
/* resources were already deleted */
hpack_dht_free(h2c->ddht);
fail:
if (t)
- task_free(t);
+ task_destroy(t);
if (h2c->wait_event.task)
tasklet_free(h2c->wait_event.task);
pool_free(pool_head_h2c, h2c);
if (!expired && h2c)
return t;
- task_delete(t);
- task_free(t);
+ task_destroy(t);
if (!h2c) {
/* resources were already deleted */
stop_proxy(curpeers->peers_fe);
/* disable this peer section so that it kills itself */
signal_unregister_handler(curpeers->sighandler);
- task_delete(curpeers->sync_task);
- task_free(curpeers->sync_task);
+ task_destroy(curpeers->sync_task);
curpeers->sync_task = NULL;
- task_free(curpeers->peers_fe->task);
+ task_destroy(curpeers->peers_fe->task);
curpeers->peers_fe->task = NULL;
curpeers->peers_fe = NULL;
}
if (!peers->peers_fe) {
/* this one was never started, kill it */
signal_unregister_handler(peers->sighandler);
- task_delete(peers->sync_task);
- task_free(peers->sync_task);
+ task_destroy(peers->sync_task);
peers->sync_task = NULL;
return NULL;
}
conn_free(conn);
sess->origin = NULL;
- task_delete(task);
- task_free(task);
+ task_destroy(task);
session_free(sess);
}
/* the embryonic session's task is not needed anymore */
if (sess->task) {
- task_delete(sess->task);
- task_free(sess->task);
+ task_destroy(sess->task);
sess->task = NULL;
}
/* Error unrolling */
out_fail_accept:
flt_stream_release(s, 0);
- task_free(t);
+ task_destroy(t);
tasklet_free(s->si[1].wait_event.task);
LIST_DEL(&s->list);
out_fail_alloc_si1:
/* the task MUST not be in the run queue anymore */
stream_free(s);
- task_delete(t);
- task_free(t);
+ task_destroy(t);
return NULL;
}
while (tmp_rq) {
t = eb32sc_entry(tmp_rq, struct task, rq);
tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
- task_delete(t);
- task_free(t);
+ task_destroy(t);
}
/* cleanup the timers queue */
tmp_wq = eb32_first(&timers);
while (tmp_wq) {
t = eb32_entry(tmp_wq, struct task, wq);
tmp_wq = eb32_next(tmp_wq);
- task_delete(t);
- task_free(t);
+ task_destroy(t);
}
#endif
/* clean the per thread run queue */
while (tmp_rq) {
t = eb32sc_entry(tmp_rq, struct task, rq);
tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
- task_delete(t);
- task_free(t);
+ task_destroy(t);
}
/* cleanup the per thread timers queue */
tmp_wq = eb32_first(&task_per_thread[i].timers);
while (tmp_wq) {
t = eb32_entry(tmp_wq, struct task, wq);
tmp_wq = eb32_next(tmp_wq);
- task_delete(t);
- task_free(t);
+ task_destroy(t);
}
}
}