{
int sticky_cpu = p->scx.sticky_cpu;
+ if (enq_flags & ENQUEUE_WAKEUP)
+ rq->scx.flags |= SCX_RQ_IN_WAKEUP;
+
enq_flags |= rq->scx.extra_enq_flags;
if (sticky_cpu >= 0)
	p->scx.sticky_cpu = -1;
if (p->scx.flags & SCX_TASK_QUEUED) {
WARN_ON_ONCE(!task_runnable(p));
- return;
+ goto out;
}
set_task_runnable(rq, p);
touch_core_sched(rq, p);
do_enqueue_task(rq, p, enq_flags, sticky_cpu);
+out:
+ rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
}
static void ops_dequeue(struct task_struct *p, u64 deq_flags)
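This hunk brackets the wakeup path: SCX_RQ_IN_WAKEUP is set on entry when
ENQUEUE_WAKEUP is passed, and the early return for already-queued tasks becomes
a goto so that every exit funnels through the new out: label and clears the
flag. Anything running under the rq lock inside that window, such as the
ops.enqueue() callback reached via do_enqueue_task(), can now tell it is being
invoked from a wakeup. A minimal sketch of such a consumer, with a hypothetical
helper name (the patch itself only adds the flag):

static inline bool scx_rq_in_wakeup(struct rq *rq)
{
	/* The flag is only stable while the rq lock is held. */
	lockdep_assert_rq_held(rq);

	return !!(rq->scx.flags & SCX_RQ_IN_WAKEUP);
}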
bool has_tasks = false;
lockdep_assert_rq_held(rq);
- rq->scx.flags |= SCX_RQ_BALANCING;
+ rq->scx.flags |= SCX_RQ_IN_BALANCE;
if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
unlikely(rq->scx.cpu_released)) {
has_tasks:
has_tasks = true;
out:
- rq->scx.flags &= ~SCX_RQ_BALANCING;
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
return has_tasks;
}
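The balance-side hunks follow the same pattern, while also renaming
SCX_RQ_BALANCING to SCX_RQ_IN_BALANCE so the two transient flags read
consistently. Reduced to a template (the function name and the early-exit
condition below are hypothetical stand-ins, not from the patch), the shape is:
set the flag first, make every bail-out jump to a single label, and clear the
flag there so it cannot leak past the function:

static bool balance_sketch(struct rq *rq)
{
	bool has_tasks = false;

	rq->scx.flags |= SCX_RQ_IN_BALANCE;

	/* hypothetical early-exit condition standing in for the real ones */
	if (!rq->scx.nr_running)
		goto out;

	has_tasks = true;
out:
	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
	return has_tasks;
}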
* The race window is small, and we don't (and can't) guarantee that @rq is
* kicked only while idle anyway. Skip the kick only when sure.
*/
- return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_BALANCING);
+ return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
}
static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
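The function whose tail is shown above is the reader of the renamed flag: an
idle kick may be skipped only when @rq is definitely not idle and definitely
not mid-balance, since a CPU that is still inside its balance pass may end up
going idle right afterwards. On the caller side, the check is consulted under
the target's rq lock before a resched is issued; roughly (a simplified sketch
with an assumed function name, using the standard cpu_rq()/resched_curr()
primitives):

static void kick_cpu_if_idle_sketch(s32 cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);

	/* Only send the kick when we cannot prove it is unnecessary. */
	if (!can_skip_idle_kick(rq))
		resched_curr(rq);

	raw_spin_rq_unlock_irqrestore(rq, flags);
}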
* only while the BPF scheduler considers the CPU to be online.
*/
SCX_RQ_ONLINE = 1 << 0,
- SCX_RQ_BALANCING = 1 << 1,
- SCX_RQ_CAN_STOP_TICK = 1 << 2,
+ SCX_RQ_CAN_STOP_TICK = 1 << 1,
+
+ SCX_RQ_IN_WAKEUP = 1 << 16,
+ SCX_RQ_IN_BALANCE = 1 << 17,
};
struct scx_rq {
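Finally, the flag definitions are regrouped: SCX_RQ_CAN_STOP_TICK moves down to
bit 1 to fill the hole left by the deleted SCX_RQ_BALANCING, and the two new
transient "currently inside X" flags start at bit 16, keeping them numerically
apart from the longer-lived rq state bits. The values are kernel-internal and
only accessed under the rq lock, so renumbering is safe. Hypothetical masks
(not in the patch) that make the split explicit:

#define SCX_RQ_STATE_MASK	GENMASK(15, 0)	/* SCX_RQ_ONLINE, ... */
#define SCX_RQ_IN_MASK		GENMASK(31, 16)	/* SCX_RQ_IN_WAKEUP/BALANCE */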