git.ipfire.org Git - thirdparty/linux.git/commitdiff
cgroup: adjust criteria for rstat subsystem cpu lock access
author: JP Kobryn <inwardvessel@gmail.com>
Wed, 28 May 2025 23:51:30 +0000 (16:51 -0700)
committer: Tejun Heo <tj@kernel.org>
Sat, 31 May 2025 00:36:13 +0000 (14:36 -1000)
Previously it was found that on uniprocessor machines the size of
raw_spinlock_t could be zero so a pre-processor conditional was used to
avoid the allocation of ss->rstat_ss_cpu_lock. The conditional did not take
into account cases where lock debugging features were enabled. Cover these
cases along with the original non-smp case by explicitly using the size of
the lock type as the criterion for allocation/access where applicable.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Fixes: 748922dcfabd ("cgroup: use subsystem-specific rstat locks to avoid contention")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202505281034.7ae1668d-lkp@intel.com
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/cgroup/rstat.c

index ce4752ab9e09b0a1b799ba25d8b79746b26d5313..cbeaa499a96af3d11f5d31e8259c7858b7c65ef5 100644 (file)
@@ -47,8 +47,20 @@ static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
 
 static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
 {
-       if (ss)
+       if (ss) {
+               /*
+                * Depending on config, the subsystem per-cpu lock type may be an
+                * empty struct. In environments where this is the case, allocation
+                * of this field is not performed in ss_rstat_init(). Avoid a
+                * cpu-based offset relative to NULL by returning early. When the
+                * lock type is zero in size, the corresponding lock functions are
+                * no-ops so passing them NULL is acceptable.
+                */
+               if (sizeof(*ss->rstat_ss_cpu_lock) == 0)
+                       return NULL;
+
                return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
+       }
 
        return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
 }
@@ -510,20 +522,15 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
        int cpu;
 
-#ifdef CONFIG_SMP
        /*
-        * On uniprocessor machines, arch_spinlock_t is defined as an empty
-        * struct. Avoid allocating a size of zero by having this block
-        * excluded in this case. It's acceptable to leave the subsystem locks
-        * unitialized since the associated lock functions are no-ops in the
-        * non-smp case.
+        * Depending on config, the subsystem per-cpu lock type may be an empty
+        * struct. Avoid allocating a size of zero in this case.
         */
-       if (ss) {
+       if (ss && sizeof(*ss->rstat_ss_cpu_lock)) {
                ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
                if (!ss->rstat_ss_cpu_lock)
                        return -ENOMEM;
        }
-#endif
 
        spin_lock_init(ss_rstat_lock(ss));
        for_each_possible_cpu(cpu)