@@ ... @@
 static void task_unlink_from_dsq(struct task_struct *p,
 				 struct scx_dispatch_q *dsq)
 {
+	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
+
 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
 		RB_CLEAR_NODE(&p->scx.dsq_priq);
 	}
 
 	list_del_init(&p->scx.dsq_list.node);
+	dsq_mod_nr(dsq, -1);
 }
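
Folding the sanity check and the dsq->nr decrement into task_unlink_from_dsq() gives every caller both for free: the count can no longer drift out of sync with the list because the unlink and the decrement now live in one place. A minimal userspace sketch of that invariant, with simplified stand-in types and an invented task_unlink() helper rather than the kernel's real structures:

  #include <assert.h>

  struct list_head { struct list_head *prev, *next; };

  struct dsq {
  	struct list_head list;	/* queued tasks */
  	unsigned int nr;	/* must always equal the list length */
  };

  struct task { struct list_head node; };

  static void list_del_init(struct list_head *n)
  {
  	n->prev->next = n->next;
  	n->next->prev = n->prev;
  	n->prev = n->next = n;	/* a detached node points at itself */
  }

  /* the one place that both unlinks and fixes up the count */
  static void task_unlink(struct dsq *dsq, struct task *t)
  {
  	assert(t->node.next != &t->node);	/* ~WARN_ON_ONCE(list_empty()) */
  	list_del_init(&t->node);
  	dsq->nr--;
  }

The call sites below shrink accordingly; each only has to manage its destination side, if any.
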
@@ ... @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	 */
 	if (p->scx.holding_cpu < 0) {
 		/* @p must still be on @dsq, dequeue */
-		WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
 		task_unlink_from_dsq(p, dsq);
-		dsq_mod_nr(dsq, -1);
 	} else {
 		/*
 		 * We're racing against dispatch_to_local_dsq() which already
@@ ... @@
 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
 	task_unlink_from_dsq(p, dsq);
 	list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list);
-	dsq_mod_nr(dsq, -1);
 	dsq_mod_nr(&rq->scx.local_dsq, 1);
 	p->scx.dsq = &rq->scx.local_dsq;
 	raw_spin_unlock(&dsq->lock);
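
With the decrement owned by the helper, the local-consume path above reduces to unlink + append + repoint: only the destination count is touched explicitly, since the source-side dsq_mod_nr(dsq, -1) now rides along inside task_unlink_from_dsq(). Continuing the stand-in types from the sketch above (move_to_local() is an invented name for what this hunk does inline):

  static void list_add_tail(struct list_head *n, struct list_head *head)
  {
  	n->prev = head->prev;
  	n->next = head;
  	head->prev->next = n;
  	head->prev = n;
  }

  static void move_to_local(struct dsq *src, struct dsq *local, struct task *t)
  {
  	task_unlink(src, t);		/* src->nr-- happens in here now */
  	list_add_tail(&t->node, &local->list);
  	local->nr++;			/* ~dsq_mod_nr(&rq->scx.local_dsq, 1) */
  }
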
@@ ... @@
 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
 	task_unlink_from_dsq(p, dsq);
-	dsq_mod_nr(dsq, -1);
 	p->scx.holding_cpu = cpu;
 	raw_spin_unlock(&dsq->lock);
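
The final hunk is the remote half of the holding_cpu handshake that dispatch_dequeue() races against above: the task is unlinked and claimed by stamping the claiming CPU into holding_cpu before dsq->lock is dropped, and the pull completes later, under the task rq's lock, only if the claim survived. A standalone sketch of that two-step protocol, with invented names (claim_task(), finish_pull()) and the real locking reduced to comments:

  #include <stdbool.h>

  struct task {
  	int holding_cpu;	/* -1: unclaimed; >= 0: CPU that claimed it */
  };

  /* step 1, under dsq->lock: unlink @p and stamp the claiming CPU */
  static void claim_task(struct task *p, int cpu)
  {
  	/* task_unlink_from_dsq(p, dsq) runs here */
  	p->holding_cpu = cpu;
  	/* raw_spin_unlock(&dsq->lock): @p is in limbo, owned by @cpu */
  }

  /* step 2, under the task rq's lock: did the claim survive? */
  static bool finish_pull(struct task *p, int cpu)
  {
  	if (p->holding_cpu != cpu)
  		return false;	/* a racing dequeue revoked the claim */
  	p->holding_cpu = -1;
  	/* splice @p onto this CPU's local DSQ here */
  	return true;
  }

The else branch of dispatch_dequeue() above is the revoke side of the same handshake: finding holding_cpu >= 0 means a claimer already unlinked @p, so the dequeue only has to signal that the claimer lost.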