From: Tejun Heo
Date: Sat, 25 Oct 2025 00:18:48 +0000 (-1000)
Subject: sched_ext: Factor out reenq_local() from scx_bpf_reenqueue_local()
X-Git-Tag: v6.19-rc1~177^2~25
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8803e6a7fb687795ab4326f3e96e9f666605d883;p=thirdparty%2Fkernel%2Flinux.git

sched_ext: Factor out reenq_local() from scx_bpf_reenqueue_local()

Factor out the core re-enqueue logic from scx_bpf_reenqueue_local() into a
new reenq_local() helper function. scx_bpf_reenqueue_local() now handles the
BPF kfunc checks and calls reenq_local() to perform the actual work. This is
a prep patch to allow reenq_local() to be called from other contexts.

Reviewed-by: Emil Tsalapatis
Signed-off-by: Tejun Heo
---

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 93ee196841d80..d13ce92c3f018 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5879,32 +5879,12 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
 	.set = &scx_kfunc_ids_dispatch,
 };
 
-__bpf_kfunc_start_defs();
-
-/**
- * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
- *
- * Iterate over all of the tasks currently enqueued on the local DSQ of the
- * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
- * processed tasks. Can only be called from ops.cpu_release().
- */
-__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+static u32 reenq_local(struct rq *rq)
 {
-	struct scx_sched *sch;
 	LIST_HEAD(tasks);
 	u32 nr_enqueued = 0;
-	struct rq *rq;
 	struct task_struct *p, *n;
 
-	guard(rcu)();
-	sch = rcu_dereference(scx_root);
-	if (unlikely(!sch))
-		return 0;
-
-	if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
-		return 0;
-
-	rq = cpu_rq(smp_processor_id());
 	lockdep_assert_rq_held(rq);
 
 	/*
@@ -5941,6 +5921,34 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
 	return nr_enqueued;
 }
 
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
+ *
+ * Iterate over all of the tasks currently enqueued on the local DSQ of the
+ * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
+ * processed tasks. Can only be called from ops.cpu_release().
+ */
+__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+{
+	struct scx_sched *sch;
+	struct rq *rq;
+
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return 0;
+
+	if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
+		return 0;
+
+	rq = cpu_rq(smp_processor_id());
+	lockdep_assert_rq_held(rq);
+
+	return reenq_local(rq);
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
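
reenq_local() stays static to kernel/sched/ext.c and assumes its caller
already holds the rq lock (that is what the lockdep_assert_rq_held(rq) call
in the helper enforces), while the kfunc wrapper keeps the RCU guard and the
SCX_KF_CPU_RELEASE check. As a minimal sketch of what a future in-kernel
caller might look like, assuming a hypothetical example_reenq_on_cpu()
helper that is not part of this patch:

/*
 * Hypothetical illustration only, not part of this patch. reenq_local()
 * must be called with the rq lock held, so take it around the call.
 */
static u32 example_reenq_on_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;
	u32 nr;

	rq_lock_irqsave(rq, &rf);
	nr = reenq_local(rq);
	rq_unlock_irqrestore(rq, &rf);

	return nr;
}

Whether re-enqueueing from such a context is actually valid is left to the
follow-up patches; the sketch only shows the locking contract. The BPF-facing
entry point remains restricted to ops.cpu_release() by the
SCX_KF_CPU_RELEASE check in the wrapper.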