git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
proc: convert smaps_account() to use a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 2 Apr 2024 20:12:49 +0000 (21:12 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:34 +0000 (20:56 -0700)
Replace seven calls to compound_head() with one.

Link: https://lkml.kernel.org/r/20240402201252.917342-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c

index b94101cd27068c88c66a5a8bcaf1aba751f5d050..e8d1008a838d7a00bc52b3dc52fc17d679586138 100644 (file)
@@ -444,6 +444,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked,
                bool migration)
 {
+       struct folio *folio = page_folio(page);
        int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
@@ -451,27 +452,28 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
         * First accumulate quantities that depend only on |size| and the type
         * of the compound page.
         */
-       if (PageAnon(page)) {
+       if (folio_test_anon(folio)) {
                mss->anonymous += size;
-               if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+               if (!folio_test_swapbacked(folio) && !dirty &&
+                   !folio_test_dirty(folio))
                        mss->lazyfree += size;
        }
 
-       if (PageKsm(page))
+       if (folio_test_ksm(folio))
                mss->ksm += size;
 
        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
-       if (young || page_is_young(page) || PageReferenced(page))
+       if (young || folio_test_young(folio) || folio_test_referenced(folio))
                mss->referenced += size;
 
        /*
         * Then accumulate quantities that may depend on sharing, or that may
         * differ page-by-page.
         *
-        * page_count(page) == 1 guarantees the page is mapped exactly once.
+        * refcount == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page mapped with PTE it would elevate
-        * page_count().
+        * the refcount.
         *
         * The page_mapcount() is called to get a snapshot of the mapcount.
         * Without holding the page lock this snapshot can be slightly wrong as
@@ -480,7 +482,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
         * especially for migration entries.  Treat regular migration entries
         * as mapcount == 1.
         */
-       if ((page_count(page) == 1) || migration) {
+       if ((folio_ref_count(folio) == 1) || migration) {
                smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
                        locked, true);
                return;