From: Christian Loehle
Date: Wed, 3 Sep 2025 21:23:10 +0000 (+0100)
Subject: sched_ext: Introduce scx_bpf_cpu_curr()
X-Git-Tag: v6.18-rc1~200^2~21
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=20b158094a1adc9bbfdcc41780059b5cd8866ad8;p=thirdparty%2Flinux.git

sched_ext: Introduce scx_bpf_cpu_curr()

Provide scx_bpf_cpu_curr() as a way for scx schedulers to check the
curr task of a remote rq without assuming its lock is held.

Many scx schedulers make use of scx_bpf_cpu_rq() to check a remote curr
(e.g. to see if it should be preempted). This is problematic because
scx_bpf_cpu_rq() provides access to all fields of struct rq, most of
which aren't safe to use without holding the associated rq lock.

Signed-off-by: Christian Loehle
Acked-by: Andrea Righi
Signed-off-by: Tejun Heo
---

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index a319ea5bb25a3..4bd9b491b376a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -6379,6 +6379,19 @@ __bpf_kfunc struct rq *scx_bpf_locked_rq(void)
 	return rq;
 }
 
+/**
+ * scx_bpf_cpu_curr - Return remote CPU's curr task
+ * @cpu: CPU of interest
+ *
+ * Callers must hold RCU read lock (KF_RCU).
+ */
+__bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
+{
+	if (!kf_cpu_valid(cpu, NULL))
+		return NULL;
+	return rcu_dereference(cpu_rq(cpu)->curr);
+}
+
 /**
  * scx_bpf_task_cgroup - Return the sched cgroup of a task
  * @p: task of interest
@@ -6544,6 +6557,7 @@ BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL)
+BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU)
 #ifdef CONFIG_CGROUP_SCHED
 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
 #endif
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index e2a57b7458625..342c7c48df5ab 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -104,6 +104,7 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym;
 s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
 struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
 struct rq *scx_bpf_locked_rq(void) __ksym;
+struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym;
 struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
 u64 scx_bpf_now(void) __ksym __weak;
 void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
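
For reference, a minimal sketch of how a BPF scx scheduler might use the
new kfunc. This is not part of the patch: the helper name
try_preempt_cpu() and the slice-exhaustion check are made-up examples of
a preemption policy; only scx_bpf_cpu_curr(), scx_bpf_kick_cpu(),
SCX_KICK_PREEMPT, and bpf_rcu_read_lock()/bpf_rcu_read_unlock() are
existing interfaces.

/*
 * Hypothetical usage example: inspect the task currently running on a
 * remote CPU and kick that CPU if the task's slice is exhausted.
 * bpf_rcu_read_lock()/bpf_rcu_read_unlock() satisfy the KF_RCU
 * requirement; the curr pointer is only dereferenced inside the RCU
 * section.
 */
static bool try_preempt_cpu(s32 cpu)
{
	struct task_struct *curr;
	bool preempt = false;

	bpf_rcu_read_lock();
	curr = scx_bpf_cpu_curr(cpu);	/* may be NULL (KF_RET_NULL) */
	if (curr && curr->scx.slice == 0)
		preempt = true;
	bpf_rcu_read_unlock();

	if (preempt)
		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	return preempt;
}

Unlike scx_bpf_cpu_rq(), this only exposes the curr task under RCU
rather than the whole struct rq, so no rq lock is assumed.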