git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
sched_ext: Fix scx_kick_pseqs corruption on concurrent scheduler loads
author    Andrea Righi <arighi@nvidia.com>
Mon, 13 Oct 2025 20:36:34 +0000 (22:36 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Dec 2025 10:46:08 +0000 (11:46 +0100)
commit 05e63305c85c88141500f0a2fb02afcfba9396e1 upstream.

If we load a BPF scheduler while another scheduler is already running,
alloc_kick_pseqs() is called again, overwriting the previously
allocated arrays.

Fix by moving the alloc_kick_pseqs() call after the scx_enable_state()
check, ensuring that the arrays are only allocated when a scheduler can
actually be loaded.

Fixes: 14c1da3895a11 ("sched_ext: Allocate scx_kick_cpus_pnt_seqs lazily using kvzalloc()")
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/sched/ext.c

index d6d2eea9d1483e37964e370e481135fc743e1592..a1261ebf4e2a6626438a0d70c9aaac2f87294116 100644
@@ -4632,15 +4632,15 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
        mutex_lock(&scx_enable_mutex);
 
-       ret = alloc_kick_pseqs();
-       if (ret)
-               goto err_unlock;
-
        if (scx_enable_state() != SCX_DISABLED) {
                ret = -EBUSY;
-               goto err_free_pseqs;
+               goto err_unlock;
        }
 
+       ret = alloc_kick_pseqs();
+       if (ret)
+               goto err_unlock;
+
        sch = scx_alloc_and_add_sched(ops);
        if (IS_ERR(sch)) {
                ret = PTR_ERR(sch);
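
For illustration, the following is a minimal userspace sketch of the ordering
problem and the fix, not the kernel code itself: pseqs, enable_state and
do_enable() are hypothetical stand-ins for scx_kick_cpus_pnt_seqs,
scx_enable_state() and scx_enable(), and locking plus the real error labels
are omitted.

/*
 * Userspace sketch of the allocation ordering fixed above (illustrative
 * only; names and control flow are simplified stand-ins).
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

static unsigned long *pseqs;    /* per-CPU sequence array, allocated on load */
static int enable_state;        /* 0 == disabled, 1 == a scheduler is loaded */

static int do_enable(void)
{
        /*
         * Check first, allocate second. With the old ordering the
         * allocation ran before this check, so a second load attempt
         * replaced the array the running scheduler was still using.
         */
        if (enable_state != 0)
                return -1;                      /* -EBUSY in the kernel */

        pseqs = calloc(NR_CPUS, sizeof(*pseqs));
        if (!pseqs)
                return -1;                      /* allocation failure */

        enable_state = 1;
        return 0;
}

int main(void)
{
        unsigned long *first;

        do_enable();                            /* first scheduler load */
        first = pseqs;

        do_enable();                            /* second load: rejected */
        printf("array preserved: %s\n", pseqs == first ? "yes" : "no");

        free(pseqs);
        return 0;
}

With the check-before-allocate ordering, a rejected second load leaves the
running scheduler's array untouched, which is what the hunk above achieves by
moving alloc_kick_pseqs() below the SCX_DISABLED check.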