git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: always initialise folio->_deferred_list
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 21 Mar 2024 14:24:39 +0000 (14:24 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:55:59 +0000 (20:55 -0700)
Patch series "Various significant MM patches".

These patches all interact in annoying ways which make it tricky to send
them out in any way other than a big batch, even though there's not really
an overarching theme to connect them.

The big effects of this patch series are:

 - folio_test_hugetlb() becomes reliable, even when called without a
   page reference
 - We free up PG_slab, and we could always use more page flags
 - We no longer need to check PageSlab before calling page_mapcount()

This patch (of 9):

For compound pages which are at least order-2 (and hence have a
deferred_list), initialise it and then we can check at free that the page
is not part of a deferred list.  We recently found this useful to rule out
a source of corruption.

[peterx@redhat.com: always initialise folio->_deferred_list]
Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com
Link: https://lkml.kernel.org/r/20240321142448.1645400-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240321142448.1645400-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/memcontrol.c
mm/page_alloc.c

index 25191ab70631b903fa56fc97906fdc053ae0413e..4cc7133aaa4be2f001c379832e9bc68f7e686f40 100644 (file)
@@ -793,8 +793,6 @@ void folio_prep_large_rmappable(struct folio *folio)
 {
        if (!folio || !folio_test_large(folio))
                return;
-       if (folio_order(folio) > 1)
-               INIT_LIST_HEAD(&folio->_deferred_list);
        folio_set_large_rmappable(folio);
 }
 
index ce7be5c244429f71bc686399889fba7f4b6e1cf8..378181547b7bb1c7d772cf561495805895f6e0e5 100644 (file)
@@ -1796,7 +1796,8 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
                destroy_compound_gigantic_folio(folio, huge_page_order(h));
                free_gigantic_folio(folio, huge_page_order(h));
        } else {
-               __free_pages(&folio->page, huge_page_order(h));
+               INIT_LIST_HEAD(&folio->_deferred_list);
+               folio_put(folio);
        }
 }
 
index 85c3db43454de38465ff8e431250c710f97f7098..5c0c57c9cd196483e026d605d36f0a909d488f31 100644 (file)
@@ -525,6 +525,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
        atomic_set(&folio->_entire_mapcount, -1);
        atomic_set(&folio->_nr_pages_mapped, 0);
        atomic_set(&folio->_pincount, 0);
+       if (order > 1)
+               INIT_LIST_HEAD(&folio->_deferred_list);
 }
 
 static inline void prep_compound_tail(struct page *head, int tail_idx)
index 896b4bf05b9cf3193d5f11b179625a42322ddecf..45dd209012827b3100d9f078f7dd7297611779d5 100644 (file)
@@ -7400,6 +7400,9 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
        struct obj_cgroup *objcg;
 
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+       VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
+                       !folio_test_hugetlb(folio) &&
+                       !list_empty(&folio->_deferred_list), folio);
 
        /*
         * Nobody should be changing or seriously looking at
index e1241ecef27167f6f32bbd3771449ef485be5c2a..7e8f4b751801de385b4a9a5294b4e81ba9b1da83 100644 (file)
@@ -1007,10 +1007,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                }
                break;
        case 2:
-               /*
-                * the second tail page: ->mapping is
-                * deferred_list.next -- ignore value.
-                */
+               /* the second tail page: deferred_list overlaps ->mapping */
+               if (unlikely(!list_empty(&folio->_deferred_list))) {
+                       bad_page(page, "on deferred list");
+                       goto out;
+               }
                break;
        default:
                if (page->mapping != TAIL_MAPPING) {