From: Andrea Righi
Date: Thu, 15 May 2025 19:11:43 +0000 (+0200)
Subject: sched_ext: idle: Validate locking correctness in scx_bpf_select_cpu_and()
X-Git-Tag: v6.16-rc1~153^2~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=686d1337237161833684d8554c6e3ea2d692bd41;p=thirdparty%2Flinux.git

sched_ext: idle: Validate locking correctness in scx_bpf_select_cpu_and()

Validate locking correctness when accessing p->nr_cpus_allowed and
p->cpus_ptr inside scx_bpf_select_cpu_and(): if the rq lock is held,
access is safe; otherwise, require that p->pi_lock is held.

This makes it possible to catch potentially unsafe calls to
scx_bpf_select_cpu_and().

Signed-off-by: Andrea Righi
Signed-off-by: Tejun Heo
---

diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index f0ebf8b5b908e..716863f1f8cee 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -935,6 +935,7 @@ prev_cpu:
 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 				       const struct cpumask *cpus_allowed, u64 flags)
 {
+	struct rq *rq;
 	s32 cpu;

 	if (!kf_cpu_valid(prev_cpu, NULL))
@@ -946,6 +947,15 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64
 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
 		return -EPERM;

+	/*
+	 * Validate locking correctness to access p->cpus_ptr and
+	 * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
+	 * otherwise, assert that p->pi_lock is held.
+	 */
+	rq = scx_locked_rq();
+	if (!rq)
+		lockdep_assert_held(&p->pi_lock);
+
 #ifdef CONFIG_SMP
 	/*
 	 * This may also be called from ops.enqueue(), so we need to handle
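
Usage note: below is a minimal sketch (not part of this commit) of how a BPF
scheduler might call scx_bpf_select_cpu_and() from its ops.select_cpu()
callback. In that context the core scheduler already holds p->pi_lock, so the
assertion added above is satisfied even though no rq lock is held. The
callback name and fallback policy are illustrative only, and the helpers
(BPF_STRUCT_OPS, scx_bpf_dsq_insert, SCX_DSQ_LOCAL, SCX_SLICE_DFL) are assumed
to come from the in-tree scx common BPF headers:

#include <scx/common.bpf.h>

/*
 * Illustrative ops.select_cpu() callback for a hypothetical scx scheduler.
 * Here the core scheduler holds p->pi_lock, so the lockdep assertion in
 * scx_bpf_select_cpu_and() passes even though scx_locked_rq() is NULL.
 */
s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	s32 cpu;

	/* Look for an idle CPU within the task's own affinity mask. */
	cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, p->cpus_ptr, 0);
	if (cpu >= 0) {
		/* Idle CPU found: dispatch directly to its local DSQ. */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
		return cpu;
	}

	/* No idle CPU found: keep the task on its previous CPU. */
	return prev_cpu;
}

From ops.enqueue(), by contrast, the rq lock is held, so scx_locked_rq()
returns a non-NULL rq and the pi_lock assertion is skipped, per the commit
message above.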