SCX_TASK_READY = 2 << SCX_TASK_STATE_SHIFT,
SCX_TASK_ENABLED = 3 << SCX_TASK_STATE_SHIFT,
+ /*
+ * Bits 12 and 13 carry the reenqueue reason. In addition to the
+ * %SCX_ENQ_REENQ flag, ops.enqueue() can test these bits against
+ * %SCX_TASK_REENQ_REASON_NONE to distinguish the different kinds of
+ * reenqueue.
+ *
+ * NONE not being reenqueued
+ * KFUNC reenqueued by scx_bpf_dsq_reenq() and friends
+ */
+ SCX_TASK_REENQ_REASON_SHIFT = 12,
+ SCX_TASK_REENQ_REASON_BITS = 2,
+ SCX_TASK_REENQ_REASON_MASK = ((1 << SCX_TASK_REENQ_REASON_BITS) - 1) << SCX_TASK_REENQ_REASON_SHIFT,
+
+ SCX_TASK_REENQ_NONE = 0 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_KFUNC = 1 << SCX_TASK_REENQ_REASON_SHIFT,
+
/* iteration cursor, not a task */
SCX_TASK_CURSOR = 1 << 31,
};
}
}
-static bool task_should_reenq(struct task_struct *p, u64 reenq_flags)
+static bool task_should_reenq(struct task_struct *p, u64 reenq_flags, u32 *reason)
{
+ *reason = SCX_TASK_REENQ_KFUNC;
+
if (reenq_flags & SCX_REENQ_ANY)
return true;
return false;
list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
scx.dsq_list.node) {
struct scx_sched *task_sch = scx_task_sched(p);
+ u32 reason;
/*
* If @p is being migrated, @p's current CPU may not agree with
if (!scx_is_descendant(task_sch, sch))
continue;
- if (!task_should_reenq(p, reenq_flags))
+ if (!task_should_reenq(p, reenq_flags, &reason))
continue;
dispatch_dequeue(rq, p);
+
+ if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
+ p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
+ p->scx.flags |= reason;
+
list_add_tail(&p->scx.dsq_list.node, &tasks);
}
list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
list_del_init(&p->scx.dsq_list.node);
+
do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
+
+ p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
nr_enqueued++;
}
while (likely(!READ_ONCE(sch->bypass_depth))) {
struct rq *task_rq;
+ u32 reason;
p = nldsq_cursor_next_task(&cursor, dsq);
if (!p)
break;
- if (!task_should_reenq(p, reenq_flags))
+ if (!task_should_reenq(p, reenq_flags, &reason))
continue;
task_rq = task_rq(p);
/* @p is on @dsq, its rq and @dsq are locked */
dispatch_dequeue_locked(p, dsq);
raw_spin_unlock(&dsq->lock);
+
+ if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
+ p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
+ p->scx.flags |= reason;
+
do_enqueue_task(task_rq, p, SCX_ENQ_REENQ, -1);
+ p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
+
if (!(++nr_enqueued % SCX_TASK_ITER_BATCH)) {
raw_spin_rq_unlock(locked_rq);
locked_rq = NULL;
SCX_ENQ_PREEMPT = 1LLU << 32,
/*
- * The task being enqueued was previously enqueued on the current CPU's
- * %SCX_DSQ_LOCAL, but was removed from it in a call to the
- * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
- * invoked in a ->cpu_release() callback, and the task is again
- * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the
- * task will not be scheduled on the CPU until at least the next invocation
- * of the ->cpu_acquire() callback.
+ * The task being enqueued was previously enqueued on a DSQ, but was
+ * removed and is being reenqueued. See the %SCX_TASK_REENQ_* flags to
+ * find out why a given task is being reenqueued.
*/
SCX_ENQ_REENQ = 1LLU << 40,