mm/memory-failure: convert shake_page() to shake_folio()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 12 Apr 2024 19:35:02 +0000 (20:35 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 6 May 2024 00:53:45 +0000 (17:53 -0700)
Remove two calls to compound_head().  Move the prototype to internal.h;
we definitely don't want code outside mm using it.
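
The change follows the usual compound_head()-to-folio conversion pattern.  As
a rough before/after sketch (identifiers mirror the hwpoison_inject() hunk
below; this is not a complete function):

	/* Before: look up the compound head page, test it with Page* helpers. */
	struct page *hpage = compound_head(p);
	if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))
		return 0;

	/* After: look up the folio once, test it with folio_test_* helpers. */
	struct folio *folio = page_folio(p);
	if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) &&
	    !is_free_buddy_page(p))
		return 0;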

Link: https://lkml.kernel.org/r/20240412193510.2356957-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/hwpoison-inject.c
mm/internal.h
mm/memory-failure.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 78e583b50e421be86db95a54461a8454b3f5cd4c..b9ac49c9eb008303dfdacccc2b96860072f660c5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4033,7 +4033,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue_kick(int cpu);
 extern int unpoison_memory(unsigned long pfn);
-extern void shake_page(struct page *p);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(unsigned long pfn, int flags);
 #ifdef CONFIG_MEMORY_FAILURE
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index d0548e382b6ba2ba6104b7cc513e9a07815263e4..c9d653f51e45bb2607edd2779578b855293f2dee 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -15,7 +15,7 @@ static int hwpoison_inject(void *data, u64 val)
 {
        unsigned long pfn = val;
        struct page *p;
-       struct page *hpage;
+       struct folio *folio;
        int err;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -25,16 +25,17 @@ static int hwpoison_inject(void *data, u64 val)
                return -ENXIO;
 
        p = pfn_to_page(pfn);
-       hpage = compound_head(p);
+       folio = page_folio(p);
 
        if (!hwpoison_filter_enable)
                goto inject;
 
-       shake_page(hpage);
+       shake_folio(folio);
        /*
         * This implies unable to support non-LRU pages except free page.
         */
-       if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))
+       if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) &&
+           !is_free_buddy_page(p))
                return 0;
 
        /*
@@ -42,7 +43,7 @@ static int hwpoison_inject(void *data, u64 val)
         * the targeted owner (or on a free page).
         * memory_failure() will redo the check reliably inside page lock.
         */
-       err = hwpoison_filter(hpage);
+       err = hwpoison_filter(&folio->page);
        if (err)
                return 0;
 
diff --git a/mm/internal.h b/mm/internal.h
index 5d5e49b86fe32f523ac1378e8744c0e4e2c7dbe7..6803c7b17c1f304eab1405f9f03ba9850ce52cbf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1037,6 +1037,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 /*
  * mm/memory-failure.c
  */
+void shake_folio(struct folio *folio);
 extern int hwpoison_filter(struct page *p);
 
 extern u32 hwpoison_filter_dev_major;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 12e5d2844cb15190d93f0a74d80079659558ac9a..4daf581e3878431b9f9dd43c0f43a4d852a087c1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -369,20 +369,25 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
  * Unknown page type encountered. Try to check whether it can turn PageLRU by
  * lru_add_drain_all.
  */
-void shake_page(struct page *p)
+void shake_folio(struct folio *folio)
 {
-       if (PageHuge(p))
+       if (folio_test_hugetlb(folio))
                return;
        /*
         * TODO: Could shrink slab caches here if a lightweight range-based
         * shrinker will be available.
         */
-       if (PageSlab(p))
+       if (folio_test_slab(folio))
                return;
 
        lru_add_drain_all();
 }
-EXPORT_SYMBOL_GPL(shake_page);
+EXPORT_SYMBOL_GPL(shake_folio);
+
+static void shake_page(struct page *page)
+{
+       shake_folio(page_folio(page));
+}
 
 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
                unsigned long address)
@@ -1639,7 +1644,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * shake_page() again to ensure that it's flushed.
         */
        if (mlocked)
-               shake_page(hpage);
+               shake_folio(folio);
 
        /*
         * Now that the dirty bit has been propagated to the