git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: vmstat: Prepare to protect against concurrent isolated cpuset change
author: Frederic Weisbecker <frederic@kernel.org>
Thu, 15 May 2025 15:25:07 +0000 (17:25 +0200)
committer: Frederic Weisbecker <frederic@kernel.org>
Tue, 3 Feb 2026 14:21:12 +0000 (15:21 +0100)
The HK_TYPE_DOMAIN housekeeping cpumask will soon be made modifiable at
runtime. In order to synchronize against vmstat workqueue to make sure
that no asynchronous vmstat work is pending or executing on a newly made
isolated CPU, target and queue a vmstat work under the same RCU read
side critical section.

Whenever housekeeping will update the HK_TYPE_DOMAIN cpumask, a vmstat
workqueue flush will also be issued in a further change to make sure
that no work remains pending after a CPU has been made isolated.

Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Marco Crivellari <marco.crivellari@suse.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
Cc: linux-mm@kvack.org
mm/vmstat.c

index 65de88cdf40e62f4cc6748a438b81bad511448d4..ed19c0d42de621d699d5e8e73096dc90cf1cf688 100644 (file)
@@ -2144,11 +2144,13 @@ static void vmstat_shepherd(struct work_struct *w)
                 * infrastructure ever noticing. Skip regular flushing from vmstat_shepherd
                 * for all isolated CPUs to avoid interference with the isolated workload.
                 */
-               if (cpu_is_isolated(cpu))
-                       continue;
+               scoped_guard(rcu) {
+                       if (cpu_is_isolated(cpu))
+                               continue;
 
-               if (!delayed_work_pending(dw) && need_update(cpu))
-                       queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
+                       if (!delayed_work_pending(dw) && need_update(cpu))
+                               queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
+               }
 
                cond_resched();
        }