}
}
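+/*
+ * Return true if any page in the range [page, page + nr_pages) has the
+ * HWPoison flag set.
+ */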
+static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
+{
+ for (; nr_pages; page++, nr_pages--)
+ if (PageHWPoison(page))
+ return true;
+ return false;
+}
+
static void __split_huge_page_tail(struct folio *folio, int tail,
struct lruvec *lruvec, struct list_head *list,
- unsigned int new_order)
+ unsigned int new_order, const bool handle_hwpoison)
{
struct page *head = &folio->page;
struct page *page_tail = head + tail;
folio_set_large_rmappable(new_folio);
}
+ /* Set the has_hwpoisoned flag on new_folio if any of its pages is HWPoisoned */
+ if (handle_hwpoison &&
+ page_range_has_hwpoisoned(page_tail, 1 << new_order))
+ folio_set_has_hwpoisoned(new_folio);
+
/* Finally unfreeze refcount. Additional reference from page cache. */
page_ref_unfreeze(page_tail,
1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
pgoff_t end, unsigned int new_order)
{
struct folio *folio = page_folio(page);
+ /* Scan for poisoned pages when splitting a poisoned folio into large folios */
+ const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
struct page *head = &folio->page;
struct lruvec *lruvec;
struct address_space *swap_cache = NULL;
ClearPageHasHWPoisoned(head);
+ /* Check the first new_nr pages since the loop below skips them */
+ if (handle_hwpoison &&
+ page_range_has_hwpoisoned(folio_page(folio, 0), new_nr))
+ folio_set_has_hwpoisoned(folio);
+
for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
- __split_huge_page_tail(folio, i, lruvec, list, new_order);
+ __split_huge_page_tail(folio, i, lruvec, list, new_order,
+ handle_hwpoison);
/* Some pages can be beyond EOF: drop them from page cache */
if (head[i].index >= end) {
struct folio *tail = page_folio(head + i);