git.ipfire.org Git - thirdparty/linux.git/commitdiff
memcg: Prepare to protect against concurrent isolated cpuset change
authorFrederic Weisbecker <frederic@kernel.org>
Thu, 15 May 2025 15:10:51 +0000 (17:10 +0200)
committerFrederic Weisbecker <frederic@kernel.org>
Tue, 3 Feb 2026 14:19:38 +0000 (15:19 +0100)
The HK_TYPE_DOMAIN housekeeping cpumask will soon be made modifiable at
runtime. In order to synchronize against the memcg workqueue to make sure
that no asynchronous draining is pending or executing on a newly made
isolated CPU, target and queue a drain work under the same RCU critical
section.

Whenever housekeeping updates the HK_TYPE_DOMAIN cpumask, a memcg
workqueue flush will also be issued in a further change to make sure
that no work remains pending after a CPU has been made isolated.

Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Marco Crivellari <marco.crivellari@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
mm/memcontrol.c

index be810c1fbfc3e1898a66900fb57514a6ee09012b..2289a0299331809da0d192ad6e1acdec68caa71f 100644 (file)
@@ -2003,6 +2003,19 @@ static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
        return flush;
 }
 
+static void schedule_drain_work(int cpu, struct work_struct *work)
+{
+       /*
+        * Protect the housekeeping cpumask read and the work enqueue in
+        * the same RCU critical section so that a later cpuset isolated
+        * partition update only needs to wait for an RCU grace period
+        * and flush the pending work on newly isolated CPUs.
+        */
+       guard(rcu)();
+       if (!cpu_is_isolated(cpu))
+               schedule_work_on(cpu, work);
+}
+
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
  * of the hierarchy under it.
@@ -2032,8 +2045,8 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
                                      &memcg_st->flags)) {
                        if (cpu == curcpu)
                                drain_local_memcg_stock(&memcg_st->work);
-                       else if (!cpu_is_isolated(cpu))
-                               schedule_work_on(cpu, &memcg_st->work);
+                       else
+                               schedule_drain_work(cpu, &memcg_st->work);
                }
 
                if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
@@ -2042,8 +2055,8 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
                                      &obj_st->flags)) {
                        if (cpu == curcpu)
                                drain_local_obj_stock(&obj_st->work);
-                       else if (!cpu_is_isolated(cpu))
-                               schedule_work_on(cpu, &obj_st->work);
+                       else
+                               schedule_drain_work(cpu, &obj_st->work);
                }
        }
        migrate_enable();