cpuset: add cpuset1_online_css helper for v1-specific operations
Author:     Chen Ridong <chenridong@huawei.com>
AuthorDate: Thu, 18 Dec 2025 09:31:37 +0000 (09:31 +0000)
Commit:     Tejun Heo <tj@kernel.org>
CommitDate: Thu, 18 Dec 2025 18:36:08 +0000 (08:36 -1000)
This commit introduces the cpuset1_online_css helper to centralize
v1-specific handling during cpuset online. The helper inherits the parent's
CS_SPREAD_PAGE and CS_SPREAD_SLAB flags and performs the configuration
cloning requested by CGRP_CPUSET_CLONE_CHILDREN, all of which are unique
to the cpuset v1 control group interface.

The helper is now placed in cpuset-v1.c to maintain clear separation
between v1 and v2 logic.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/cgroup/cpuset-internal.h
kernel/cgroup/cpuset-v1.c
kernel/cgroup/cpuset.c
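
For orientation, the sketch below condenses what the three hunks add up to:
cpuset-internal.h gains a declaration of cpuset1_online_css() plus a no-op
stub for builds without CONFIG_CPUSETS_V1, and cpuset_css_online() in
cpuset.c calls the helper unconditionally in place of the open-coded v1
logic. It is a reading aid assembled from the diff below, not the literal
kernel source; the v2 setup between the lock calls is elided.

/*
 * kernel/cgroup/cpuset-internal.h (condensed): v1 hook, stubbed out
 * when cpuset v1 support is not built in.
 */
#ifdef CONFIG_CPUSETS_V1
void cpuset1_online_css(struct cgroup_subsys_state *css);
#else
static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
#endif

/*
 * kernel/cgroup/cpuset.c (condensed): the online callback keeps the
 * v2-relevant setup and delegates the v1-only work; on v2-only builds
 * the call compiles to a no-op via the stub above.
 */
static int cpuset_css_online(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);
	struct cpuset *parent = parent_cs(cs);

	if (!parent)
		return 0;

	cpuset_full_lock();
	/* ... v2 flag and effective cpus/mems setup, elided ... */
	cpuset1_online_css(css);	/* spread flags + CLONE_CHILDREN cloning */
	cpuset_full_unlock();
	return 0;
}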

diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
index 01976c8e7d496410bb87c8ccf6982e16c4ad4b3c..6c03cad02302320ed243d3dde53b04e4aa529c16 100644
@@ -293,6 +293,7 @@ void cpuset1_hotplug_update_tasks(struct cpuset *cs,
                            struct cpumask *new_cpus, nodemask_t *new_mems,
                            bool cpus_updated, bool mems_updated);
 int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
+void cpuset1_online_css(struct cgroup_subsys_state *css);
 #else
 static inline void fmeter_init(struct fmeter *fmp) {}
 static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
@@ -303,6 +304,7 @@ static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
                            bool cpus_updated, bool mems_updated) {}
 static inline int cpuset1_validate_change(struct cpuset *cur,
                                struct cpuset *trial) { return 0; }
+static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
 #endif /* CONFIG_CPUSETS_V1 */
 
 #endif /* __CPUSET_INTERNAL_H */
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
index 12e76774c75b0e5056769f755d4c644a7a350a20..c296ce47616a83657c0123ec0350d9d6b6ffc1c3 100644
@@ -499,6 +499,54 @@ out_unlock:
        return retval;
 }
 
+void cpuset1_online_css(struct cgroup_subsys_state *css)
+{
+       struct cpuset *tmp_cs;
+       struct cgroup_subsys_state *pos_css;
+       struct cpuset *cs = css_cs(css);
+       struct cpuset *parent = parent_cs(cs);
+
+       lockdep_assert_cpus_held();
+       lockdep_assert_cpuset_lock_held();
+
+       if (is_spread_page(parent))
+               set_bit(CS_SPREAD_PAGE, &cs->flags);
+       if (is_spread_slab(parent))
+               set_bit(CS_SPREAD_SLAB, &cs->flags);
+
+       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+               return;
+
+       /*
+        * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
+        * set.  This flag handling is implemented in cgroup core for
+        * historical reasons - the flag may be specified during mount.
+        *
+        * Currently, if any sibling cpusets have exclusive cpus or mem, we
+        * refuse to clone the configuration - thereby refusing the task to
+        * be entered, and as a result refusing the sys_unshare() or
+        * clone() which initiated it.  If this becomes a problem for some
+        * users who wish to allow that scenario, then this could be
+        * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
+        * (and likewise for mems) to the new cgroup.
+        */
+       rcu_read_lock();
+       cpuset_for_each_child(tmp_cs, pos_css, parent) {
+               if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
+                       rcu_read_unlock();
+                       return;
+               }
+       }
+       rcu_read_unlock();
+
+       cpuset_callback_lock_irq();
+       cs->mems_allowed = parent->mems_allowed;
+       cs->effective_mems = parent->mems_allowed;
+       cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+       cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+       cpuset_callback_unlock_irq();
+}
+
 /*
  * for the common functions, 'private' gives the type of file
  */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4fa3080da3bee9c2d2ef104bc575c85a89d8c8ef..5d9dbd1aeed38d24ef66955f5955d46d7f5197d2 100644
@@ -3616,17 +3616,11 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 {
        struct cpuset *cs = css_cs(css);
        struct cpuset *parent = parent_cs(cs);
-       struct cpuset *tmp_cs;
-       struct cgroup_subsys_state *pos_css;
 
        if (!parent)
                return 0;
 
        cpuset_full_lock();
-       if (is_spread_page(parent))
-               set_bit(CS_SPREAD_PAGE, &cs->flags);
-       if (is_spread_slab(parent))
-               set_bit(CS_SPREAD_SLAB, &cs->flags);
        /*
         * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
         */
@@ -3641,39 +3635,8 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
                cs->effective_mems = parent->effective_mems;
        }
        spin_unlock_irq(&callback_lock);
+       cpuset1_online_css(css);
 
-       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
-               goto out_unlock;
-
-       /*
-        * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
-        * set.  This flag handling is implemented in cgroup core for
-        * historical reasons - the flag may be specified during mount.
-        *
-        * Currently, if any sibling cpusets have exclusive cpus or mem, we
-        * refuse to clone the configuration - thereby refusing the task to
-        * be entered, and as a result refusing the sys_unshare() or
-        * clone() which initiated it.  If this becomes a problem for some
-        * users who wish to allow that scenario, then this could be
-        * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
-        * (and likewise for mems) to the new cgroup.
-        */
-       rcu_read_lock();
-       cpuset_for_each_child(tmp_cs, pos_css, parent) {
-               if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
-                       rcu_read_unlock();
-                       goto out_unlock;
-               }
-       }
-       rcu_read_unlock();
-
-       spin_lock_irq(&callback_lock);
-       cs->mems_allowed = parent->mems_allowed;
-       cs->effective_mems = parent->mems_allowed;
-       cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
-       cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-       spin_unlock_irq(&callback_lock);
-out_unlock:
        cpuset_full_unlock();
        return 0;
 }