mm: shmem: fix ShmemHugePages at swapout
author	Hugh Dickins <hughd@google.com>	Thu, 5 Dec 2024 06:50:06 +0000 (22:50 -0800)
committer	Andrew Morton <akpm@linux-foundation.org>	Thu, 19 Dec 2024 03:04:42 +0000 (19:04 -0800)
/proc/meminfo ShmemHugePages has been showing overlarge amounts (more than
Shmem) after swapping out THPs: we forgot to update NR_SHMEM_THPS.

Add shmem_update_stats(), to avoid repetition, and risk of making that
mistake again: the call from shmem_delete_from_page_cache() is the bugfix;
the call from shmem_replace_folio() is reassuring, but not really a bugfix
(replace corrects misplaced swapin readahead, but huge swapin readahead
would be a mistake).

Link: https://lkml.kernel.org/r/5ba477c8-a569-70b5-923e-09ab221af45b@google.com
Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
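
[Editorial aside, not part of the patch: the symptom described above can be observed from userspace by comparing the two /proc/meminfo fields, since ShmemHugePages counts a subset of Shmem and must never exceed it. A minimal sketch; the file name check_shmem_thp.c is hypothetical.]

/*
 * check_shmem_thp.c - userspace sketch (not part of this patch) that
 * reads /proc/meminfo and flags the inconsistency described above:
 * ShmemHugePages exceeding Shmem after THP swapout.
 *
 * Build: cc -o check_shmem_thp check_shmem_thp.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the value (in kB) of the named /proc/meminfo field, or -1. */
static long meminfo_kb(const char *field)
{
	char line[256];
	long val = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		/* Match the exact field name, e.g. "Shmem:" not "ShmemHugePages:" */
		if (!strncmp(line, field, strlen(field)) &&
		    line[strlen(field)] == ':') {
			val = strtol(line + strlen(field) + 1, NULL, 10);
			break;
		}
	}
	fclose(f);
	return val;
}

int main(void)
{
	long shmem = meminfo_kb("Shmem");
	long shmem_thp = meminfo_kb("ShmemHugePages");

	if (shmem < 0 || shmem_thp < 0) {
		fprintf(stderr, "could not read /proc/meminfo\n");
		return 1;
	}
	printf("Shmem:          %ld kB\n", shmem);
	printf("ShmemHugePages: %ld kB\n", shmem_thp);
	if (shmem_thp > shmem)
		printf("inconsistent: ShmemHugePages exceeds Shmem\n");
	return 0;
}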
diff --git a/mm/shmem.c b/mm/shmem.c
index ccb9629a0f70d446caa1bbee1770fd5bac3ba982..f6fb053ac50dc4f873c8f002239daaf5eb0ff81e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -787,6 +787,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static void shmem_update_stats(struct folio *folio, int nr_pages)
+{
+       if (folio_test_pmd_mappable(folio))
+               __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+       __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+}
+
 /*
  * Somewhat like filemap_add_folio, but error if expected item has gone.
  */
@@ -821,10 +829,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
                xas_store(&xas, folio);
                if (xas_error(&xas))
                        goto unlock;
-               if (folio_test_pmd_mappable(folio))
-                       __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
-               __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
-               __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
+               shmem_update_stats(folio, nr);
                mapping->nrpages += nr;
 unlock:
                xas_unlock_irq(&xas);
@@ -852,8 +857,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
        error = shmem_replace_entry(mapping, folio->index, folio, radswap);
        folio->mapping = NULL;
        mapping->nrpages -= nr;
-       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
-       __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+       shmem_update_stats(folio, -nr);
        xa_unlock_irq(&mapping->i_pages);
        folio_put_refs(folio, nr);
        BUG_ON(error);
@@ -1969,10 +1973,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
        }
        if (!error) {
                mem_cgroup_replace_folio(old, new);
-               __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
-               __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
-               __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
-               __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
+               shmem_update_stats(new, nr_pages);
+               shmem_update_stats(old, -nr_pages);
        }
        xa_unlock_irq(&swap_mapping->i_pages);