if (l->state == LI_READY)
goto end;
- /* the listener might have been stopped in parallel */
- if (l->state < LI_PAUSED)
- goto end;
-
if (l->rx.proto->resume)
ret = l->rx.proto->resume(l);
}
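For readers following the rename, here is a minimal sketch of what relax_listener() is assumed to look like, inferred from the call sites in this patch: a wrapper that takes the listener lock unless the caller says it already holds it (lli), ignores listeners that are not LI_FULL or LI_LIMITED, and otherwise forwards to resume_listener() with the caller's proxy-lock hint (lpx). The header names and lock macros below are assumptions, not taken from the patch.

#include <haproxy/api.h>
#include <haproxy/listener.h>
#include <haproxy/thread.h>

/* Sketch only: relax a listener that went LI_FULL or LI_LIMITED, leaving any
 * other state untouched. <lpx> means the caller already holds the proxy lock,
 * <lli> means the caller already holds the listener lock.
 */
int relax_listener(struct listener *l, int lpx, int lli)
{
	int ret = 1;

	if (!lli)
		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);

	if (l->state != LI_FULL && l->state != LI_LIMITED)
		goto end; /* nothing to relax: suspended, stopped or already ready */

	ret = resume_listener(l, lpx, 1); /* listener lock is held at this point */

 end:
	if (!lli)
		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
	return ret;
}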
/* Marks a ready listener as full so that the stream code tries to re-enable
- * it upon next close() using resume_listener().
+ * it upon next close() using relax_listener().
*/
static void listener_full(struct listener *l)
{
/* This cannot fail because the listeners are by definition in
* the LI_LIMITED state.
*/
- resume_listener(listener, 0, 0);
+ relax_listener(listener, 0, 0);
}
}
/* This cannot fail because the listeners are by definition in
* the LI_LIMITED state.
*/
- resume_listener(listener, 0, 0);
+ relax_listener(listener, 0, 0);
}
}
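The two relax_listener(listener, 0, 0) calls above carry the comment that they cannot fail because the listener is LI_LIMITED by definition; they appear to sit inside the queue-draining helpers. A sketch of the assumed surrounding loop follows; the MT_LIST API usage and the global_listener_queue symbol are assumptions based on how these helpers are normally written, not part of the patch.

#include <haproxy/list.h>
#include <haproxy/listener.h>

/* Sketch only: drain the global queue of listeners that were limited for lack
 * of a resource. Each popped listener was queued while LI_LIMITED, which is
 * why the relax call above is described as unable to fail.
 */
void dequeue_all_listeners(void)
{
	struct listener *listener;

	while ((listener = MT_LIST_POP(&global_listener_queue, struct listener *, wait_queue))) {
		/* queued listeners are LI_LIMITED by definition */
		relax_listener(listener, 0, 0);
	}
}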
(!tick_isset(global_listener_queue_task->expire) ||
tick_is_expired(global_listener_queue_task->expire, now_ms))))) {
/* at least one thread has to do this when quitting */
- resume_listener(l, 0, 0);
+ relax_listener(l, 0, 0);
/* Dequeues all of the listeners waiting for a resource */
dequeue_all_listeners();
_HA_ATOMIC_DEC(&l->thr_conn[tid]);
if (l->state == LI_FULL || l->state == LI_LIMITED)
- resume_listener(l, 0, 0);
+ relax_listener(l, 0, 0);
/* Dequeues all of the listeners waiting for a resource */
dequeue_all_listeners();
px->maxconn = v;
list_for_each_entry(l, &px->conf.listeners, by_fe) {
if (l->state == LI_FULL)
- resume_listener(l, 1, 0);
+ relax_listener(l, 1, 0);
}
if (px->maxconn > px->feconn)
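Note the differing second argument across the call sites: the accept and release paths pass (l, 0, 0), while the maxconn handler above passes (l, 1, 0). Assuming the flags keep resume_listener()'s meaning (lpx = proxy lock already held, lli = listener lock already held), a hypothetical caller that already owns the proxy lock would look like the sketch below; the surrounding function and the PROXY_LOCK label are illustrative assumptions, not part of the patch.

#include <haproxy/listener.h>
#include <haproxy/proxy.h>
#include <haproxy/thread.h>

/* Hypothetical caller (not from the patch): raise a frontend's maxconn while
 * holding the proxy lock, then relax every listener that had gone LI_FULL.
 */
static void raise_frontend_maxconn(struct proxy *px, int v)
{
	struct listener *l;

	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
	px->maxconn = v;
	list_for_each_entry(l, &px->conf.listeners, by_fe) {
		if (l->state == LI_FULL)
			relax_listener(l, 1, 0); /* lpx=1: proxy lock already held */
	}
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
}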