/* unused 0x00000008 */
#define TASK_SELF_WAKING 0x00000010 /* task/tasklet found waking itself */
#define TASK_KILLED 0x00000020 /* task/tasklet killed, may now be freed */
-#define TASK_IN_LIST 0x00000040 /* tasklet is in a tasklet list */
#define TASK_HEAVY 0x00000080 /* this task/tasklet is extremely heavy */
#define TASK_WOKEN_INIT 0x00000100 /* woken up for initialisation purposes */
_(0);
/* flags */
_(TASK_RUNNING, _(TASK_QUEUED, _(TASK_SELF_WAKING,
- _(TASK_KILLED, _(TASK_IN_LIST, _(TASK_HEAVY, _(TASK_WOKEN_INIT,
+ _(TASK_KILLED, _(TASK_HEAVY, _(TASK_WOKEN_INIT,
_(TASK_WOKEN_TIMER, _(TASK_WOKEN_IO, _(TASK_WOKEN_SIGNAL,
_(TASK_WOKEN_MSG, _(TASK_WOKEN_RES, _(TASK_WOKEN_OTHER,
- _(TASK_F_TASKLET, _(TASK_F_USR1)))))))))))))));
+ _(TASK_F_TASKLET, _(TASK_F_USR1))))))))))))));
/* epilogue */
_(~0U);
return buf;
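
/*
 * Aside: a minimal, self-contained sketch of the _() dump pattern above. In
 * the real file, _() expands to the __APPEND_FLAG() helper and the calls are
 * nested so each expansion carries the rest of the flag list; the simplified
 * macro below only keeps the core idea (append the stringified name of each
 * bit that is set, then clear it). All names here (show_flags_sketch, FLAG_A,
 * FLAG_B) are illustrative, not HAProxy APIs.
 */
#include <stdio.h>

#define FLAG_A 0x00000001
#define FLAG_B 0x00000002

static char *show_flags_sketch(char *buf, size_t len, unsigned int flg)
{
	size_t pos = 0;

#define _(f) do { \
		if (flg & (f)) { \
			pos += snprintf(buf + pos, len - pos, "%s%s", pos ? "|" : "", #f); \
			flg &= ~(f); \
		} \
	} while (0)
	_(FLAG_A);
	_(FLAG_B);
#undef _
	if (!pos)
		snprintf(buf, len, "0");
	return buf;
}
/* e.g. show_flags_sketch(b, sizeof(b), FLAG_A | FLAG_B) yields "FLAG_A|FLAG_B" */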
do {
/* do nothing if someone else already added it */
- if (state & TASK_IN_LIST)
+ if (state & TASK_QUEUED)
return;
- } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_IN_LIST));
+ } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_QUEUED));
/* at this point we're the first ones to add this task to the list */
if (likely(caller)) {
do {
/* do nothing if someone else already added it */
- if (state & TASK_IN_LIST)
+ if (state & TASK_QUEUED)
return head;
- } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_IN_LIST));
+ } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_QUEUED));
/* at this point we're the first ones to add this task to the list */
if (likely(caller)) {
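
/*
 * Aside: both wakeup paths above use the same "claim one bit with CAS" idiom:
 * re-read the state after each failed CAS, bail out if another thread already
 * set the bit, otherwise try to set it; exactly one thread wins and becomes
 * responsible for the enqueue. A standalone sketch with C11 atomics, where
 * atomic_compare_exchange_weak() plays the role of _HA_ATOMIC_CAS() (both
 * refresh the expected value on failure); SK_QUEUED and claim_queued_bit()
 * are illustrative names, not HAProxy APIs.
 */
#include <stdatomic.h>

#define SK_QUEUED 0x00000002u

/* returns 1 if we won the right (and the duty) to enqueue, 0 otherwise */
static int claim_queued_bit(_Atomic unsigned int *state)
{
	unsigned int old = atomic_load(state);

	do {
		if (old & SK_QUEUED)
			return 0; /* someone else already queued it */
		/* on CAS failure, <old> is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(state, &old, old | SK_QUEUED));

	return 1; /* we set the bit first: we alone may enqueue */
}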
/* Try to remove a tasklet from the list. This call is inherently racy and may
* only be performed on the thread that was supposed to dequeue this tasklet.
* This way it is safe to call MT_LIST_DELETE without first removing the
- * TASK_IN_LIST bit, which must absolutely be removed afterwards in case
+ * TASK_QUEUED bit, which must absolutely be removed afterwards in case
* another thread would want to wake this tasklet up in parallel.
*/
static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
{
if (MT_LIST_DELETE(list_to_mt_list(&t->list))) {
- _HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
+ _HA_ATOMIC_AND(&t->state, ~TASK_QUEUED);
_HA_ATOMIC_DEC(&ha_thread_ctx[t->tid >= 0 ? t->tid : tid].rq_total);
}
}
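
/*
 * Aside: the ordering in tasklet_remove_from_tasklet_list() matters. The
 * QUEUED bit is cleared only *after* MT_LIST_DELETE() has unlinked the
 * element; if it were cleared first, a concurrent wakeup could win the CAS,
 * see the tasklet as not queued, and append it while it is still linked,
 * corrupting the list. Continuing the C11 sketch above, with unlink_sketch()
 * standing in for MT_LIST_DELETE() (it returns non-zero only when it really
 * removed the element):
 */
static int unlink_sketch(void *item)
{
	(void)item;
	return 1; /* trivial stand-in for MT_LIST_DELETE() */
}

static void dequeue_sketch(_Atomic unsigned int *state, void *item)
{
	if (unlink_sketch(item)) {
		/* only now may a concurrent claim_queued_bit() re-queue it */
		atomic_fetch_and(state, ~SK_QUEUED);
	}
}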
BUG_ON(state & TASK_KILLED);
while (1) {
- while (state & (TASK_IN_LIST)) {
+ while (state & (TASK_QUEUED)) {
/* Tasklet already in the list, ready to be executed. Add
 * the killed flag and wait for the process loop to
 * detect it.
 */
/* Mark the tasklet as killed and wake the thread to process it
* as soon as possible.
*/
- if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_IN_LIST | TASK_KILLED)) {
+ if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
thr = t->tid >= 0 ? t->tid : tid;
MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
list_to_mt_list(&t->list));
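
/*
 * Aside: putting the two kill branches together in the same C11 sketch model.
 * If the tasklet is already queued we only add the KILLED bit and let the
 * process loop reap it; otherwise we claim QUEUED|KILLED in one CAS and do
 * the enqueue + wake ourselves. SK_KILLED and enqueue_and_wake_sketch() are
 * illustrative; the latter stands in for the MT_LIST_APPEND() + wake_thread()
 * sequence on the owner thread.
 */
#define SK_KILLED 0x00000020u

static void enqueue_and_wake_sketch(void *item)
{
	(void)item; /* stand-in for MT_LIST_APPEND() + wake_thread() */
}

static void kill_sketch(_Atomic unsigned int *state, void *item)
{
	unsigned int old = atomic_load(state);

	while (1) {
		if (old & SK_QUEUED) {
			/* already queued: just mark it killed, the run loop reaps it */
			if (atomic_compare_exchange_weak(state, &old, old | SK_KILLED))
				return;
		}
		else if (atomic_compare_exchange_weak(state, &old,
						      old | SK_QUEUED | SK_KILLED)) {
			/* not queued: we own it, enqueue it and wake the owner */
			enqueue_and_wake_sketch(item);
			return;
		}
	}
}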
/* Do not call this one directly; use tasklet_wakeup_on() instead, as this one is
* the slow path of tasklet_wakeup_on() which performs some preliminary checks
- * and sets TASK_IN_LIST before calling this one. A negative <thr> designates
+ * and sets TASK_QUEUED before calling this one. A negative <thr> designates
* the current thread.
*/
void __tasklet_wakeup_on(struct tasklet *tl, int thr)
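
/*
 * Aside: the comment above describes the fast-path/slow-path split. In the
 * C11 sketch model, the inline wrapper does the cheap CAS dance (as in the
 * claim_queued_bit() sketch) and only the winner calls the out-of-line slow
 * path that performs the actual enqueue. wakeup_sketch() and
 * wakeup_slow_sketch() are illustrative names.
 */
static void wakeup_slow_sketch(void *item)
{
	(void)item; /* out-of-line: the actual list insertion + wake */
}

static inline void wakeup_sketch(_Atomic unsigned int *state, void *item)
{
	if (!claim_queued_bit(state))
		return;               /* already queued: nothing to do */
	wakeup_slow_sketch(item);     /* we own the QUEUED bit: enqueue */
}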
/* Do not call this one directly; use tasklet_wakeup_after() instead, as this one is
* the slow path of tasklet_wakeup_after() which performs some preliminary checks
- * and sets TASK_IN_LIST before calling this one.
+ * and sets TASK_QUEUED before calling this one.
*/
struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl)
{