]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Use kobject_put() for kobject_init_and_add() failure in scx_alloc_and_add_sched()
authorTejun Heo <tj@kernel.org>
Mon, 16 Mar 2026 05:43:28 +0000 (19:43 -1000)
committerTejun Heo <tj@kernel.org>
Mon, 16 Mar 2026 09:27:04 +0000 (23:27 -1000)
kobject_init_and_add() failure requires kobject_put() for proper cleanup, but
the error paths were using kfree(sch), possibly leaking the kobject name. The
kset_create_and_add() failure path was already using kobject_put() correctly.

Switch the kobject_init_and_add() error paths to use kobject_put(). As the
release path puts the cgroup ref, make scx_alloc_and_add_sched() always
consume @cgrp via a new err_put_cgrp label at the bottom of the error chain,
and update scx_root_enable_workfn() and scx_sub_enable_workfn() accordingly.

Fixes: 17108735b47d ("sched_ext: Use dynamic allocation for scx_sched")
Reported-by: David Carlier <devnexen@gmail.com>
Link: https://lore.kernel.org/r/20260314134457.46216-1-devnexen@gmail.com
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index 2f70effcc4a656cb2d84bd15697a20f5ffc93356..b942918fa3641db3e85d0e6d12e6886597c39bf0 100644 (file)
@@ -6353,6 +6353,10 @@ static struct scx_sched_pnode *alloc_pnode(struct scx_sched *sch, int node)
        return pnode;
 }
 
+/*
+ * Allocate and initialize a new scx_sched. @cgrp's reference is always
+ * consumed whether the function succeeds or fails.
+ */
 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
                                                 struct cgroup *cgrp,
                                                 struct scx_sched *parent)
@@ -6362,8 +6366,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
        s32 node, cpu, ret, bypass_fail_cpu = nr_cpu_ids;
 
        sch = kzalloc_flex(*sch, ancestors, level);
-       if (!sch)
-               return ERR_PTR(-ENOMEM);
+       if (!sch) {
+               ret = -ENOMEM;
+               goto err_put_cgrp;
+       }
 
        sch->exit_info = alloc_exit_info(ops->exit_dump_len);
        if (!sch->exit_info) {
@@ -6468,8 +6474,8 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
                ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
 
        if (ret < 0) {
-               kfree(sch->cgrp_path);
-               goto err_stop_helper;
+               kobject_put(&sch->kobj);
+               return ERR_PTR(ret);
        }
 
        if (ops->sub_attach) {
@@ -6479,11 +6485,12 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
                        return ERR_PTR(-ENOMEM);
                }
        }
-
 #else  /* CONFIG_EXT_SUB_SCHED */
        ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
-       if (ret < 0)
-               goto err_stop_helper;
+       if (ret < 0) {
+               kobject_put(&sch->kobj);
+               return ERR_PTR(ret);
+       }
 #endif /* CONFIG_EXT_SUB_SCHED */
        return sch;
 
@@ -6506,6 +6513,8 @@ err_free_ei:
        free_exit_info(sch->exit_info);
 err_free_sch:
        kfree(sch);
+err_put_cgrp:
+       cgroup_put(cgrp);
        return ERR_PTR(ret);
 }
 
@@ -6577,6 +6586,7 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 {
        struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
        struct sched_ext_ops *ops = cmd->ops;
+       struct cgroup *cgrp = root_cgroup();
        struct scx_sched *sch;
        struct scx_task_iter sti;
        struct task_struct *p;
@@ -6593,7 +6603,8 @@ static void scx_root_enable_workfn(struct kthread_work *work)
        if (ret)
                goto err_unlock;
 
-       sch = scx_alloc_and_add_sched(ops, root_cgroup(), NULL);
+       cgroup_get(cgrp);
+       sch = scx_alloc_and_add_sched(ops, cgrp, NULL);
        if (IS_ERR(sch)) {
                ret = PTR_ERR(sch);
                goto err_free_ksyncs;
@@ -6887,11 +6898,12 @@ static void scx_sub_enable_workfn(struct kthread_work *work)
        kobject_get(&parent->kobj);
        raw_spin_unlock_irq(&scx_sched_lock);
 
+       /* scx_alloc_and_add_sched() consumes @cgrp whether it succeeds or not */
        sch = scx_alloc_and_add_sched(ops, cgrp, parent);
        kobject_put(&parent->kobj);
        if (IS_ERR(sch)) {
                ret = PTR_ERR(sch);
-               goto out_put_cgrp;
+               goto out_unlock;
        }
 
        ret = scx_link_sched(sch);