git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Relocate scx_bpf_task_cgroup() and its BTF_ID to the end of kfunc section
author: Tejun Heo <tj@kernel.org>
Sat, 7 Mar 2026 15:29:49 +0000 (05:29 -1000)
committer: Tejun Heo <tj@kernel.org>
Sat, 7 Mar 2026 15:29:49 +0000 (05:29 -1000)
Move scx_bpf_task_cgroup() kfunc definition and its BTF_ID entry to the end
of the kfunc section before __bpf_kfunc_end_defs() for cleaner code
organization.

No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index 47f65041b363d3d42614a40136460ae88148d2f3..fa8968164fd2815d02f895a95303c710ec285cfc 100644 (file)
@@ -8627,43 +8627,6 @@ __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu, const struct bpf_prog_
        return rcu_dereference(cpu_rq(cpu)->curr);
 }
 
-/**
- * scx_bpf_task_cgroup - Return the sched cgroup of a task
- * @p: task of interest
- * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
- *
- * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
- * from the scheduler's POV. SCX operations should use this function to
- * determine @p's current cgroup as, unlike following @p->cgroups,
- * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
- * rq-locked operations. Can be called on the parameter tasks of rq-locked
- * operations. The restriction guarantees that @p's rq is locked by the caller.
- */
-#ifdef CONFIG_CGROUP_SCHED
-__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p,
-                                              const struct bpf_prog_aux *aux)
-{
-       struct task_group *tg = p->sched_task_group;
-       struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
-       struct scx_sched *sch;
-
-       guard(rcu)();
-
-       sch = scx_prog_sched(aux);
-       if (unlikely(!sch))
-               goto out;
-
-       if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p))
-               goto out;
-
-       cgrp = tg_cgrp(tg);
-
-out:
-       cgroup_get(cgrp);
-       return cgrp;
-}
-#endif
-
 /**
  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
  * clock for the current CPU. The clock returned is in nanoseconds.
@@ -8778,6 +8741,43 @@ __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
        memcpy(events, &e_sys, events__sz);
 }
 
+#ifdef CONFIG_CGROUP_SCHED
+/**
+ * scx_bpf_task_cgroup - Return the sched cgroup of a task
+ * @p: task of interest
+ * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
+ *
+ * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
+ * from the scheduler's POV. SCX operations should use this function to
+ * determine @p's current cgroup as, unlike following @p->cgroups,
+ * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
+ * rq-locked operations. Can be called on the parameter tasks of rq-locked
+ * operations. The restriction guarantees that @p's rq is locked by the caller.
+ */
+__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p,
+                                              const struct bpf_prog_aux *aux)
+{
+       struct task_group *tg = p->sched_task_group;
+       struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
+       struct scx_sched *sch;
+
+       guard(rcu)();
+
+       sch = scx_prog_sched(aux);
+       if (unlikely(!sch))
+               goto out;
+
+       if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p))
+               goto out;
+
+       cgrp = tg_cgrp(tg);
+
+out:
+       cgroup_get(cgrp);
+       return cgrp;
+}
+#endif /* CONFIG_CGROUP_SCHED */
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(scx_kfunc_ids_any)
@@ -8807,11 +8807,11 @@ BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_cpu_rq, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_IMPLICIT_ARGS | KF_RET_NULL)
 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_IMPLICIT_ARGS | KF_RET_NULL | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, scx_bpf_now)
+BTF_ID_FLAGS(func, scx_bpf_events)
 #ifdef CONFIG_CGROUP_SCHED
 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_IMPLICIT_ARGS | KF_RCU | KF_ACQUIRE)
 #endif
-BTF_ID_FLAGS(func, scx_bpf_now)
-BTF_ID_FLAGS(func, scx_bpf_events)
 BTF_KFUNCS_END(scx_kfunc_ids_any)
 
 static const struct btf_kfunc_id_set scx_kfunc_set_any = {