mm/memory-failure: convert memory_failure() to use a folio
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Fri, 12 Apr 2024 19:35:04 +0000 (20:35 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 6 May 2024 00:53:46 +0000 (17:53 -0700)
Saves dozens of calls to compound_head().
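
Illustrative sketch of the pattern (not part of the patch): page-based
helpers such as lock_page() and wait_on_page_writeback() each re-derive
the head page internally via compound_head(), while the folio variants
take the already-resolved folio:

    /* Before: every page-based call re-runs compound_head(p). */
    lock_page(p);
    wait_on_page_writeback(p);
    unlock_page(p);

    /* After: resolve the folio once; folio helpers need no lookup. */
    struct folio *folio = page_folio(p);
    folio_lock(folio);
    folio_wait_writeback(folio);
    folio_unlock(folio);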

Link: https://lkml.kernel.org/r/20240412193510.2356957-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1a5f3403dd2a9058de8420d545c366de2c41b50c..03ec7754787be3ee84e9e6788df6d2d23f690421 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2189,7 +2189,7 @@ out:
 int memory_failure(unsigned long pfn, int flags)
 {
        struct page *p;
-       struct page *hpage;
+       struct folio *folio;
        struct dev_pagemap *pgmap;
        int res = 0;
        unsigned long page_flags;
@@ -2277,8 +2277,8 @@ try_again:
                }
        }
 
-       hpage = compound_head(p);
-       if (PageTransHuge(hpage)) {
+       folio = page_folio(p);
+       if (folio_test_large(folio)) {
                /*
                 * The flag must be set after the refcount is bumped
                 * otherwise it may race with THP split.
@@ -2292,12 +2292,13 @@ try_again:
                 * or unhandlable page.  The refcount is bumped iff the
                 * page is a valid handlable page.
                 */
-               SetPageHasHWPoisoned(hpage);
+               folio_set_has_hwpoisoned(folio);
                if (try_to_split_thp_page(p) < 0) {
                        res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
                        goto unlock_mutex;
                }
                VM_BUG_ON_PAGE(!page_count(p), p);
+               folio = page_folio(p);
        }
 
        /*
@@ -2308,9 +2309,9 @@ try_again:
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
-       shake_page(p);
+       shake_folio(folio);
 
-       lock_page(p);
+       folio_lock(folio);
 
        /*
         * We're only intended to deal with the non-Compound page here.
@@ -2318,11 +2319,11 @@ try_again:
         * race window. If this happens, we could try again to hopefully
         * handle the page next round.
         */
-       if (PageCompound(p)) {
+       if (folio_test_large(folio)) {
                if (retry) {
                        ClearPageHWPoison(p);
-                       unlock_page(p);
-                       put_page(p);
+                       folio_unlock(folio);
+                       folio_put(folio);
                        flags &= ~MF_COUNT_INCREASED;
                        retry = false;
                        goto try_again;
@@ -2338,29 +2339,29 @@ try_again:
         * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
         * status correctly, we save a copy of the page flags at this time.
         */
-       page_flags = p->flags;
+       page_flags = folio->flags;
 
        if (hwpoison_filter(p)) {
                ClearPageHWPoison(p);
-               unlock_page(p);
-               put_page(p);
+               folio_unlock(folio);
+               folio_put(folio);
                res = -EOPNOTSUPP;
                goto unlock_mutex;
        }
 
        /*
-        * __munlock_folio() may clear a writeback page's LRU flag without
-        * page_lock. We need wait writeback completion for this page or it
-        * may trigger vfs BUG while evict inode.
+        * __munlock_folio() may clear a writeback folio's LRU flag without
+        * the folio lock. We need to wait for writeback completion for this
+        * folio or it may trigger a vfs BUG while evicting inode.
         */
-       if (!PageLRU(p) && !PageWriteback(p))
+       if (!folio_test_lru(folio) && !folio_test_writeback(folio))
                goto identify_page_state;
 
        /*
         * It's very difficult to mess with pages currently under IO
         * and in many cases impossible, so we just avoid it here.
         */
-       wait_on_page_writeback(p);
+       folio_wait_writeback(folio);
 
        /*
         * Now take care of user space mappings.
@@ -2374,7 +2375,8 @@ try_again:
        /*
         * Torn down by someone else?
         */
-       if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+       if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
+           folio->mapping == NULL) {
                res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
                goto unlock_page;
        }
@@ -2384,7 +2386,7 @@ identify_page_state:
        mutex_unlock(&mf_mutex);
        return res;
 unlock_page:
-       unlock_page(p);
+       folio_unlock(folio);
 unlock_mutex:
        mutex_unlock(&mf_mutex);
        return res;
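
A note on the split handling above (illustrative, not from the commit): once
try_to_split_thp_page(p) succeeds, the large folio no longer exists and p
belongs to an order-0 folio, so the folio pointer computed before the split
is stale. That is why the patch adds "folio = page_folio(p);" after the
split. Simplified sketch, with the error path reduced to a bare return:

    struct folio *folio = page_folio(p);    /* may point to a large folio */

    if (folio_test_large(folio)) {
        if (try_to_split_thp_page(p) < 0)
            return -EBUSY;                  /* real code uses action_result() */
        /* p now sits in a small folio; re-resolve the stale pointer. */
        folio = page_folio(p);
    }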