cgroup: avoid per-cpu allocation of size zero rstat cpu locks
author JP Kobryn <inwardvessel@gmail.com>
Thu, 22 May 2025 01:32:02 +0000 (18:32 -0700)
committer Tejun Heo <tj@kernel.org>
Thu, 22 May 2025 01:54:58 +0000 (15:54 -1000)
Subsystem rstat locks are dynamically allocated per-cpu. It was discovered
that a panic can occur during this allocation when the lock size is zero.
This is the case on non-SMP systems, since arch_spinlock_t is defined as an
empty struct. Prevent this allocation when !CONFIG_SMP by adding a
preprocessor conditional around the affected block.
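
For context (an illustration, not part of the patch): the zero size comes
from the uniprocessor lock definitions. With !CONFIG_SMP and spinlock
debugging disabled, arch_spinlock_t in include/linux/spinlock_types_up.h is
an empty struct, so raw_spinlock_t collapses to size zero and alloc_percpu()
is asked for zero bytes. A standalone userspace sketch that mirrors those
definitions:

	/*
	 * Illustration only: mimics the UP (!CONFIG_SMP) definitions from
	 * include/linux/spinlock_types_up.h to show the size that reaches
	 * alloc_percpu() before this fix. Builds as plain userspace C
	 * with gcc (empty structs are a GNU extension with size zero).
	 */
	#include <stdio.h>

	typedef struct { } arch_spinlock_t;	/* !CONFIG_SMP, !CONFIG_DEBUG_SPINLOCK */

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;	/* debug fields compiled out */
	} raw_spinlock_t;

	int main(void)
	{
		/* alloc_percpu(raw_spinlock_t) requests sizeof(raw_spinlock_t)
		 * bytes per cpu; here that size is zero. */
		printf("sizeof(raw_spinlock_t) = %zu\n", sizeof(raw_spinlock_t));
		return 0;
	}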

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Reported-by: Klara Modin <klarasmodin@gmail.com>
Fixes: 748922dcfabd ("cgroup: use subsystem-specific rstat locks to avoid contention")
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/cgroup/rstat.c

index 7dd396ae3c68213c36840dc6d17f810ed372dffa..ce4752ab9e09b0a1b799ba25d8b79746b26d5313 100644 (file)
@@ -510,11 +510,20 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
        int cpu;
 
+#ifdef CONFIG_SMP
+       /*
+        * On uniprocessor machines, arch_spinlock_t is defined as an empty
+        * struct. Avoid a zero-size allocation by excluding this block in
+        * that case. It's acceptable to leave the subsystem locks
+        * uninitialized since the associated lock functions are no-ops in
+        * the non-SMP case.
+        */
        if (ss) {
                ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
                if (!ss->rstat_ss_cpu_lock)
                        return -ENOMEM;
        }
+#endif
 
        spin_lock_init(ss_rstat_lock(ss));
        for_each_possible_cpu(cpu)
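
A note on why leaving the locks uninitialized is safe, as the comment above
states: on uniprocessor kernels the raw spinlock ops in
include/linux/spinlock_api_up.h (via the ___LOCK() helper) only evaluate the
lock pointer, for type checking and to silence unused warnings, and never
dereference it. A simplified userspace model of that behavior, with
preemption and irq handling omitted:

	/*
	 * Simplified model (not kernel code), loosely following ___LOCK()
	 * from include/linux/spinlock_api_up.h: the lock pointer is
	 * evaluated but never dereferenced, so a lock that was never
	 * allocated is never touched.
	 */
	#include <stdio.h>

	typedef struct { } raw_spinlock_t;	/* empty on UP without debug */

	#define raw_spin_lock(lock)	do { (void)(lock); } while (0)
	#define raw_spin_unlock(lock)	do { (void)(lock); } while (0)

	int main(void)
	{
		raw_spinlock_t *cpu_lock = NULL;	/* allocation skipped on UP */

		raw_spin_lock(cpu_lock);	/* safe: pointer never dereferenced */
		raw_spin_unlock(cpu_lock);
		puts("UP lock ops never read or wrote the lock storage");
		return 0;
	}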