mm: add per-order mTHP swap-in fallback/fallback_charge counters
author Wenchao Hao <haowenchao22@gmail.com>
Mon, 2 Dec 2024 12:47:30 +0000 (20:47 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:49 +0000 (22:40 -0800)
Currently, large folio swap-in is supported, but we lack a method to
analyze its success ratio.  Similar to anon_fault_fallback, we introduce
per-order mTHP swpin_fallback and swpin_fallback_charge counters for
calculating that ratio.  The new counters are located at the following
paths (a read-out sketch follows the list):

/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/
swpin_fallback
swpin_fallback_charge
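
For illustration only (not part of this patch), a minimal userspace sketch
that reads these counters for one mTHP size and derives a swap-in success
ratio; the hugepages-64kB directory name and the ratio formula
swpin / (swpin + swpin_fallback) are assumptions of the example, not
something this commit defines.

#include <stdio.h>

/* Hypothetical helper: read one counter file from the stats directory. */
static long read_counter(const char *dir, const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Example size; pick any hugepages-<size> directory on your system. */
	const char *dir =
		"/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats";
	long swpin = read_counter(dir, "swpin");
	long fallback = read_counter(dir, "swpin_fallback");
	long fallback_charge = read_counter(dir, "swpin_fallback_charge");

	if (swpin < 0 || fallback < 0 || fallback_charge < 0) {
		fprintf(stderr, "counters not available\n");
		return 1;
	}

	printf("swpin=%ld swpin_fallback=%ld swpin_fallback_charge=%ld\n",
	       swpin, fallback, fallback_charge);
	/* Assumed ratio: successful large-folio swap-ins over all attempts. */
	if (swpin + fallback > 0)
		printf("swap-in success ratio: %.2f%%\n",
		       100.0 * swpin / (swpin + fallback));
	return 0;
}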

Link: https://lkml.kernel.org/r/20241202124730.2407037-1-haowenchao22@gmail.com
Signed-off-by: Wenchao Hao <haowenchao22@gmail.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Lance Yang <ioworker0@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/mm/transhuge.rst
include/linux/huge_mm.h
mm/huge_memory.c
mm/memory.c

index d870f83775bcb480323dfa4ca40df1f1b11fe6c1..dff8d5985f0f2e60f4aa1b7a936396dd6002db77 100644 (file)
@@ -591,6 +591,16 @@ swpin
        is incremented every time a huge page is swapped in from a non-zswap
        swap device in one piece.
 
+swpin_fallback
+       is incremented if swapin fails to allocate or charge a huge page
+       and instead falls back to using huge pages with lower orders or
+       small pages.
+
+swpin_fallback_charge
+       is incremented if swapin fails to charge a huge page and instead
+       falls back to using huge pages with lower orders or small pages
+       even though the allocation was successful.
+
 swpout
        is incremented every time a huge page is swapped out to a non-zswap
        swap device in one piece without splitting.
index b94c2e8ee91885de2586dcf44952ad53756fd3ee..93e509b6c00eb353e3f2fef1257c8ef3a41f1814 100644 (file)
@@ -121,6 +121,8 @@ enum mthp_stat_item {
        MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
        MTHP_STAT_ZSWPOUT,
        MTHP_STAT_SWPIN,
+       MTHP_STAT_SWPIN_FALLBACK,
+       MTHP_STAT_SWPIN_FALLBACK_CHARGE,
        MTHP_STAT_SWPOUT,
        MTHP_STAT_SWPOUT_FALLBACK,
        MTHP_STAT_SHMEM_ALLOC,
index 45901dc6710c9228ca9476c28a86ea3231233c03..6d87db53db336725d0803278d7082177b9a0c657 100644 (file)
@@ -617,6 +617,8 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 #ifdef CONFIG_SHMEM
@@ -637,6 +639,8 @@ static struct attribute *anon_stats_attrs[] = {
 #ifndef CONFIG_SHMEM
        &zswpout_attr.attr,
        &swpin_attr.attr,
+       &swpin_fallback_attr.attr,
+       &swpin_fallback_charge_attr.attr,
        &swpout_attr.attr,
        &swpout_fallback_attr.attr,
 #endif
@@ -669,6 +673,8 @@ static struct attribute *any_stats_attrs[] = {
 #ifdef CONFIG_SHMEM
        &zswpout_attr.attr,
        &swpin_attr.attr,
+       &swpin_fallback_attr.attr,
+       &swpin_fallback_charge_attr.attr,
        &swpout_attr.attr,
        &swpout_fallback_attr.attr,
 #endif
index 560520e20ead4c19fb32d71f6cf8fc154c9ff3d6..2d97a17dd3ba7bc2ba539b712ce594da95a7051f 100644 (file)
@@ -4258,8 +4258,10 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
                        if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                                            gfp, entry))
                                return folio;
+                       count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
                        folio_put(folio);
                }
+               count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
                order = next_order(&orders, order);
        }