From: Shakeel Butt
Date: Wed, 14 May 2025 18:41:52 +0000 (-0700)
Subject: memcg: memcg_rstat_updated re-entrant safe against irqs
X-Git-Tag: v6.16-rc1~92^2~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8a4b42b955286c122c4402b458acbc1d92953fbd;p=thirdparty%2Fkernel%2Flinux.git

memcg: memcg_rstat_updated re-entrant safe against irqs

Patch series "memcg: make memcg stats irq safe", v2.

This series converts memcg stats to be irq safe, i.e. memcg stats can be
updated in any context (task, softirq or hardirq) without disabling irqs.
This is still not nmi-safe on all architectures, but after this series,
converting memcg charging and stats to be nmi-safe will be easier.

This patch (of 7):

memcg_rstat_updated() is used to track memcg stats updates for optimizing
the flushes.  At the moment it is not re-entrant safe, and callers must
disable irqs before calling it.  However, to achieve the goal of updating
memcg stats without disabling irqs, memcg_rstat_updated() needs to be
re-entrant safe against irqs.

This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_*
ops.  On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch also
makes memcg_rstat_updated() nmi-safe.

[lorenzo.stoakes@oracle.com: fix build]
  Link: https://lkml.kernel.org/r/22f69e6e-7908-4e92-96ca-5c70d535c439@lucifer.local
Link: https://lkml.kernel.org/r/20250514184158.3471331-1-shakeel.butt@linux.dev
Link: https://lkml.kernel.org/r/20250514184158.3471331-2-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Signed-off-by: Lorenzo Stoakes
Reviewed-by: Vlastimil Babka
Tested-by: Alexei Starovoitov
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Muchun Song
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Signed-off-by: Andrew Morton
---

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2b6768f0e916b..01560e22dd060 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -503,8 +503,8 @@ struct memcg_vmstats_percpu {
 	unsigned int		stats_updates;
 
 	/* Cached pointers for fast iteration in memcg_rstat_updated() */
-	struct memcg_vmstats_percpu	*parent;
-	struct memcg_vmstats		*vmstats;
+	struct memcg_vmstats_percpu __percpu	*parent_pcpu;
+	struct memcg_vmstats			*vmstats;
 
 	/* The above should fit a single cacheline for memcg_rstat_updated() */
 
@@ -586,16 +586,21 @@ static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 
 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+	struct memcg_vmstats_percpu __percpu *statc_pcpu;
 	struct memcg_vmstats_percpu *statc;
-	int cpu = smp_processor_id();
+	int cpu;
 	unsigned int stats_updates;
 
 	if (!val)
 		return;
 
+	/* Don't assume callers have preemption disabled. */
+	cpu = get_cpu();
+
 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
-	statc = this_cpu_ptr(memcg->vmstats_percpu);
-	for (; statc; statc = statc->parent) {
+	statc_pcpu = memcg->vmstats_percpu;
+	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
+		statc = this_cpu_ptr(statc_pcpu);
 		/*
 		 * If @memcg is already flushable then all its ancestors are
 		 * flushable as well and also there is no need to increase
@@ -604,14 +609,15 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 		if (memcg_vmstats_needs_flush(statc->vmstats))
 			break;
 
-		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
-		WRITE_ONCE(statc->stats_updates, stats_updates);
+		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
+						    abs(val));
 		if (stats_updates < MEMCG_CHARGE_BATCH)
 			continue;
 
+		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
 		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
-		WRITE_ONCE(statc->stats_updates, 0);
 	}
+	put_cpu();
 }
 
 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
@@ -3689,7 +3695,8 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 
 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
-	struct memcg_vmstats_percpu *statc, *pstatc;
+	struct memcg_vmstats_percpu *statc;
+	struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
 	struct mem_cgroup *memcg;
 	int node, cpu;
 	int __maybe_unused i;
@@ -3720,9 +3727,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 
 	for_each_possible_cpu(cpu) {
 		if (parent)
-			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+			pstatc_pcpu = parent->vmstats_percpu;
 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
-		statc->parent = parent ? pstatc : NULL;
+		statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
 		statc->vmstats = memcg->vmstats;
 	}
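
Background note on the hazard being fixed (illustration only, not part of
the commit): the removed READ_ONCE()/WRITE_ONCE() pair is a non-atomic
load-add-store, so a hardirq that fires between the load and the store and
bumps the same per-CPU counter gets its update silently overwritten.  The
userspace sketch below reproduces that lost update deterministically; a
SIGALRM handler stands in for a hardirq, a C11 atomic stands in for
this_cpu_add_return(), and all names in it (plain_counter, rmw_counter,
fake_hardirq) are made up for the sketch.

#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>

static volatile sig_atomic_t plain_counter;	/* old READ_ONCE/WRITE_ONCE style */
static atomic_int rmw_counter;			/* this_cpu_add_return() stand-in */

static void fake_hardirq(int sig)
{
	(void)sig;
	plain_counter += 1;			/* lost if it lands inside an open RMW */
	atomic_fetch_add(&rmw_counter, 1);	/* indivisible, never lost */
}

int main(void)
{
	signal(SIGALRM, fake_hardirq);

	/* Old pattern: load, modify and store as three separate steps. */
	sig_atomic_t tmp = plain_counter;	/* load: reads 0 */
	raise(SIGALRM);				/* "hardirq" fires mid-RMW, bumps both counters */
	plain_counter = tmp + 1;		/* store: writes 1, wiping out the handler's +1 */

	/* New pattern: one indivisible read-modify-write. */
	atomic_fetch_add(&rmw_counter, 1);

	/* Prints plain=1 (one update lost) and rmw=2 (both kept). */
	printf("plain=%d rmw=%d\n", (int)plain_counter, (int)atomic_load(&rmw_counter));
	return 0;
}

The same reasoning applies to draining the counter: the old code added
stats_updates into the atomic and then zeroed the per-CPU counter with a
second store, a window in which a nested update could be wiped out,
whereas this_cpu_xchg(statc_pcpu->stats_updates, 0) reads and clears in
one step.  Note that this_cpu_* ops need not be cross-CPU atomic; they
only have to be indivisible with respect to interrupts on the local CPU,
which is exactly the re-entrancy this commit needs, while the
get_cpu()/put_cpu() pair keeps the task from migrating so that all
accesses in one call hit the same CPU's counters.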