mm/huge_memory: preserve PG_has_hwpoisoned if a folio is split to >0 order
author    Zi Yan <ziy@nvidia.com>
          Thu, 23 Oct 2025 03:05:21 +0000 (23:05 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 24 Nov 2025 09:36:07 +0000 (10:36 +0100)
commit fa5a061700364bc28ee1cb1095372f8033645dcb upstream.

A folio split clears PG_has_hwpoisoned, but the flag should be preserved in
after-split folios that contain pages with the PG_hwpoisoned flag when the
folio is split to >0 order folios.  Scan all pages in a to-be-split folio
to determine which after-split folios need the flag.

An alternative is to change PG_has_hwpoisoned to PG_maybe_hwpoisoned to
avoid the scan and set it on all after-split folios, but the resulting
false positives have an undesirable negative impact.  To remove the false
positives, callers of folio_test_has_hwpoisoned() and
folio_contain_hwpoisoned_page() would need to do the scan themselves.
That would be a hassle for current and future callers, and more costly
than doing the scan in the split code.
More details are discussed in [1].
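To make the tradeoff concrete, here is a minimal hypothetical sketch of the
confirming scan that every caller would otherwise need.  The helper name
folio_really_has_hwpoisoned() is invented for illustration and is not
kernel API:

/*
 * Hypothetical sketch, not kernel API: under a PG_maybe_hwpoisoned design,
 * every caller that currently trusts folio_test_has_hwpoisoned() would
 * need a confirming per-page scan like this to filter out false positives
 * set at split time.
 */
static bool folio_really_has_hwpoisoned(struct folio *folio)
{
        long i, nr = folio_nr_pages(folio);

        if (!folio_test_has_hwpoisoned(folio))
                return false;           /* flag clear: definitely clean */
        for (i = 0; i < nr; i++)        /* flag set: must confirm */
                if (PageHWPoison(folio_page(folio, i)))
                        return true;
        return false;                   /* false positive from split */
}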

This issue can be exposed via:
1. splitting a has_hwpoisoned folio to >0 order via the debugfs interface;
2. truncating part of a has_hwpoisoned folio in
   truncate_inode_partial_folio().

Later accesses to a hwpoisoned page then become possible due to the missing
has_hwpoisoned folio flag, leading to MCE errors.  An illustrative
reproducer sketch for path 1 is given below.
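The following is a rough outline only, assuming CONFIG_MEMORY_FAILURE, root
privileges, and the debugfs split interface documented in
Documentation/admin-guide/mm/transhuge.rst.  Whether the poisoned folio
survives long enough to carry PG_has_hwpoisoned into the split depends on
kernel configuration, so treat it as illustrative rather than a guaranteed
trigger:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define THP_SIZE (2UL << 20)    /* assumes 2MB PMD-sized THP */

int main(void)
{
        char cmd[128];
        char *buf;
        int fd;

        /* Error handling omitted for brevity. */
        buf = mmap(NULL, THP_SIZE, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        madvise(buf, THP_SIZE, MADV_HUGEPAGE);
        memset(buf, 1, THP_SIZE);               /* fault the range in */

        /* Hard-poison one subpage for testing (privileged). */
        madvise(buf + 4096, 4096, MADV_HWPOISON);

        /*
         * Ask debugfs to split the range to order 2 (>0); before the fix,
         * the after-split folios lose PG_has_hwpoisoned.
         */
        fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
        snprintf(cmd, sizeof(cmd), "%d,0x%lx,0x%lx,2", getpid(),
                 (unsigned long)buf, (unsigned long)buf + THP_SIZE);
        write(fd, cmd, strlen(cmd));
        close(fd);
        return 0;
}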

Link: https://lore.kernel.org/all/CAHbLzkoOZm0PXxE9qwtF4gKR=cpRXrSrJ9V9Pm2DJexs985q4g@mail.gmail.com/
Link: https://lkml.kernel.org/r/20251023030521.473097-1-ziy@nvidia.com
Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <yang@os.amperecomputing.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Pankaj Raghav <kernel@pankajraghav.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0bb0ce0c106b0d0a39515f74d4b666d2bb39f3c5..d68a22c729fb387ab0c150156a89034c2b72926e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3091,9 +3091,17 @@ static void lru_add_page_tail(struct folio *folio, struct page *tail,
        }
 }
 
+static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
+{
+       for (; nr_pages; page++, nr_pages--)
+               if (PageHWPoison(page))
+                       return true;
+       return false;
+}
+
 static void __split_huge_page_tail(struct folio *folio, int tail,
                struct lruvec *lruvec, struct list_head *list,
-               unsigned int new_order)
+               unsigned int new_order, const bool handle_hwpoison)
 {
        struct page *head = &folio->page;
        struct page *page_tail = head + tail;
@@ -3170,6 +3178,11 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
                folio_set_large_rmappable(new_folio);
        }
 
+       /* Set has_hwpoisoned flag on new_folio if any of its pages is HWPoisoned */
+       if (handle_hwpoison &&
+           page_range_has_hwpoisoned(page_tail, 1 << new_order))
+               folio_set_has_hwpoisoned(new_folio);
+
        /* Finally unfreeze refcount. Additional reference from page cache. */
        page_ref_unfreeze(page_tail,
                1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
@@ -3194,6 +3207,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                pgoff_t end, unsigned int new_order)
 {
        struct folio *folio = page_folio(page);
+       /* Scan for poisoned pages when splitting a poisoned folio to large folios */
+       const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
        struct page *head = &folio->page;
        struct lruvec *lruvec;
        struct address_space *swap_cache = NULL;
@@ -3217,8 +3232,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        ClearPageHasHWPoisoned(head);
 
+       /* Check first new_nr pages since the loop below skips them */
+       if (handle_hwpoison &&
+           page_range_has_hwpoisoned(folio_page(folio, 0), new_nr))
+               folio_set_has_hwpoisoned(folio);
+
        for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
-               __split_huge_page_tail(folio, i, lruvec, list, new_order);
+               __split_huge_page_tail(folio, i, lruvec, list, new_order,
+                                      handle_hwpoison);
                /* Some pages can be beyond EOF: drop them from page cache */
                if (head[i].index >= end) {
                        struct folio *tail = page_folio(head + i);
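The tail loop above walks the folio top-down and stops before reaching
index 0, so the head folio (pages [0, new_nr)) is never passed to
__split_huge_page_tail(); that is why the patch checks those pages
separately before the loop.  A standalone userspace sketch of the same
index walk, with example orders chosen arbitrarily:

#include <stdio.h>

int main(void)
{
        /* Example only: split an order-4 folio (16 pages) to order-2 (4). */
        const long nr = 16, new_nr = 4;

        /* Mirrors the kernel loop: visits tails high-to-low, skipping
         * pages [0, new_nr). */
        for (long i = nr - new_nr; i >= new_nr; i -= new_nr)
                printf("after-split tail folio: pages [%ld, %ld)\n",
                       i, i + new_nr);

        /* The head folio keeps pages [0, new_nr); these need the separate
         * page_range_has_hwpoisoned() check added before the loop. */
        printf("head folio: pages [0, %ld)\n", new_nr);
        return 0;
}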