git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: memcg: dump memcg protection info on oom or alloc failures
author: Shakeel Butt <shakeel.butt@linux.dev>
Fri, 7 Nov 2025 23:40:41 +0000 (15:40 -0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 20 Nov 2025 21:43:59 +0000 (13:43 -0800)
Currently the kernel dumps memory state on oom and allocation failures.  One
of the questions usually raised on those dumps is why the kernel has not
reclaimed the reclaimable memory instead of triggering oom.  One potential
reason is the usage of memory protection provided by memcg.  So, let's
also dump the memory protected by the memcg in such reports to ease the
debugging.

Link: https://lkml.kernel.org/r/20251107234041.3632644-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/oom_kill.c
mm/page_alloc.c

index 8c0f15e5978fa5fc4e6d2fc161e5619f7edcbc66..966f7c1a0128f3c19fe5024e86ce0e7e25e3b092 100644 (file)
@@ -1764,6 +1764,7 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
 
 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
 
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
 #else
 static inline bool mem_cgroup_kmem_disabled(void)
 {
@@ -1830,6 +1831,10 @@ static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
 {
        return true;
 }
+
+static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+}
 #endif /* CONFIG_MEMCG */
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
index 025da46d9959fd72e055144f49ba03bf21ec3542..bfc986da3289a7b08ff5d00af1a225adf1237ae8 100644 (file)
@@ -5635,3 +5635,16 @@ bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
 {
        return memcg ? cpuset_node_allowed(memcg->css.cgroup, nid) : true;
 }
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+       if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return;
+
+       if (!memcg)
+               memcg = root_mem_cgroup;
+
+       pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
+               K(atomic_long_read(&memcg->memory.children_min_usage)*PAGE_SIZE),
+               K(atomic_long_read(&memcg->memory.children_low_usage)*PAGE_SIZE));
+}
index c145b0feecc1f29971f54e49c028db985ece69b8..5eb11fbba704325751d0b2195551ee26a56e0f64 100644 (file)
@@ -472,6 +472,7 @@ static void dump_header(struct oom_control *oc)
                if (should_dump_unreclaim_slab())
                        dump_unreclaimable_slab();
        }
+       mem_cgroup_show_protected_memory(oc->memcg);
        if (sysctl_oom_dump_tasks)
                dump_tasks(oc);
 }
index e4efda1158b25080c2b8dc224261b6ec5e88fe51..26be5734253f35ff04f2c09dca26c25f5bf56025 100644 (file)
@@ -3977,6 +3977,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
                filter &= ~SHOW_MEM_FILTER_NODES;
 
        __show_mem(filter, nodemask, gfp_zone(gfp_mask));
+       mem_cgroup_show_protected_memory(NULL);
 }
 
 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)