From: Shakeel Butt
Date: Mon, 19 May 2025 06:31:42 +0000 (-0700)
Subject: memcg: make memcg_rstat_updated nmi safe
X-Git-Tag: v6.16-rc1~73^2~30
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3ac4638a734abbeb1d4a02b042e26a80a4205975;p=thirdparty%2Flinux.git

memcg: make memcg_rstat_updated nmi safe

Currently the kernel maintains memory-related stats updates per-cgroup to
optimize stats flushing.  The stats_updates counter is defined as
atomic64_t, which is not nmi-safe on some archs.  We don't really need a
64-bit atomic here, as the maximum value stats_updates can reach is
bounded by nr_cpus * MEMCG_CHARGE_BATCH; a normal atomic_t suffices.

The function cgroup_rstat_updated() is also still not nmi-safe, but there
is a parallel effort to make it nmi-safe, so until then let's skip it in
nmi context.

Link: https://lkml.kernel.org/r/20250519063142.111219-6-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Alexei Starovoitov
Cc: Johannes Weiner
Cc: Mathieu Desnoyers
Cc: Michal Hocko
Cc: Muchun Song
Cc: Peter Zijlstra
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74de4e5230946..7e64dbf578d78 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -531,7 +531,7 @@ struct memcg_vmstats {
 	unsigned long		events_pending[NR_MEMCG_EVENTS];
 
 	/* Stats updates since the last flush */
-	atomic64_t		stats_updates;
+	atomic_t		stats_updates;
 };
 
 /*
@@ -557,7 +557,7 @@ static u64 flush_last_time;
 
 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
-	return atomic64_read(&vmstats->stats_updates) >
+	return atomic_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
@@ -571,7 +571,9 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
 	if (!val)
 		return;
 
-	cgroup_rstat_updated(memcg->css.cgroup, cpu);
+	/* TODO: add to cgroup update tree once it is nmi-safe. */
+	if (!in_nmi())
+		cgroup_rstat_updated(memcg->css.cgroup, cpu);
 	statc_pcpu = memcg->vmstats_percpu;
 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
 		statc = this_cpu_ptr(statc_pcpu);
@@ -589,7 +591,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
 			continue;
 
 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
-		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
+		atomic_add(stats_updates, &statc->vmstats->stats_updates);
 	}
 }
 
@@ -597,7 +599,7 @@ static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
 {
 	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
 
-	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
+	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
 		force, needs_flush);
 
 	if (!force && !needs_flush)
@@ -4119,8 +4121,8 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 		}
 		WRITE_ONCE(statc->stats_updates, 0);
 		/* We are in a per-cpu loop here, only do the atomic write once */
-		if (atomic64_read(&memcg->vmstats->stats_updates))
-			atomic64_set(&memcg->vmstats->stats_updates, 0);
+		if (atomic_read(&memcg->vmstats->stats_updates))
+			atomic_set(&memcg->vmstats->stats_updates, 0);
 	}
 }
 
 static void mem_cgroup_fork(struct task_struct *task)
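
The switch to atomic_t rests on the bound stated in the commit message:
pending updates are flushed once they exceed MEMCG_CHARGE_BATCH *
num_online_cpus(), so the counter never grows anywhere near INT_MAX.  A
minimal user-space sketch of that back-of-the-envelope check follows; the
batch size of 64 and the 16384-CPU figure are illustrative assumptions,
not values taken from this patch.

/*
 * Back-of-the-envelope check (user-space sketch, not kernel code).
 * Assumes MEMCG_CHARGE_BATCH == 64 and a hypothetical 16384-CPU
 * machine; both values are assumptions for illustration.
 */
#include <limits.h>
#include <stdio.h>

#define MEMCG_CHARGE_BATCH	64	/* per-CPU batch size (assumed) */

int main(void)
{
	long nr_cpus = 16384;		/* hypothetical large machine */
	long threshold = (long)MEMCG_CHARGE_BATCH * nr_cpus;

	/*
	 * stats_updates triggers a flush once it crosses this threshold,
	 * so it stays far below what a 32-bit atomic_t can hold.
	 */
	printf("flush threshold: %ld, INT_MAX: %d\n", threshold, INT_MAX);
	return threshold < INT_MAX ? 0 : 1;
}

With these assumed values the threshold is 1,048,576, comfortably within
the range of a 32-bit atomic_t.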