mm/memory_hotplug: move debug_pagealloc_map_pages() into online_pages_range()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Tue, 3 Dec 2024 10:20:50 +0000 (11:20 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 14 Jan 2025 06:40:44 +0000 (22:40 -0800)
In the near future, we want to have a single way to hand over PageOffline
pages to the buddy, whereby they could have:

(a) Never been exposed to the buddy before: kept PageOffline when onlining
    the memory block.
(b) Been allocated from the buddy, for example using alloc_contig_range(),
    to then be set PageOffline.

Let's start by making generic_online_page()->__free_pages_core() less
special compared to ordinary page freeing (e.g., free_contig_range()),
and by performing the debug_pagealloc_map_pages() call unconditionally,
even when the online callback might decide to keep the pages offline.

All pages are already initialized with PageOffline, so nobody touches them
either way.
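
For context, debug_pagealloc_map_pages() is a thin wrapper around
__kernel_map_pages(); a simplified sketch, close to its definition in
include/linux/mm.h but trimmed here:

	/*
	 * With CONFIG_DEBUG_PAGEALLOC, pages freed to the buddy are unmapped
	 * from the kernel direct map; this maps them again before they are
	 * handed back, so a later free does not double-unmap them.
	 */
	static inline void debug_pagealloc_map_pages(struct page *page,
						     int numpages)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, 1);
	}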

Link: https://lkml.kernel.org/r/20241203102050.223318-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/memory_hotplug.c | 10 +++++++++-
 mm/page_alloc.c     |  6 ------
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c43b4e7fb2984f08a637539c5eec2da470a903ca..20af14e695c795bbebafe5a7c966ac7ed78dc205 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -650,6 +650,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
         * this and the first chunk to online will be pageblock_nr_pages.
         */
        for (pfn = start_pfn; pfn < end_pfn;) {
+               struct page *page = pfn_to_page(pfn);
                int order;
 
                /*
@@ -664,7 +665,14 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
                else
                        order = MAX_PAGE_ORDER;
 
-               (*online_page_callback)(pfn_to_page(pfn), order);
+               /*
+                * Exposing the page to the buddy by freeing can cause
+                * issues with debug_pagealloc enabled: some archs don't
+                * like double-unmappings. So treat them like any pages that
+                * were allocated from the buddy.
+                */
+               debug_pagealloc_map_pages(page, 1 << order);
+               (*online_page_callback)(page, order);
                pfn += (1UL << order);
        }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a887ba2cc91d1f12201ca67815550c322c6e9e8d..75de3711784eea0b671acc577f564ce27ffeab31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1295,12 +1295,6 @@ void __meminit __free_pages_core(struct page *page, unsigned int order,
                        set_page_count(p, 0);
                }
 
-               /*
-                * Freeing the page with debug_pagealloc enabled will try to
-                * unmap it; some archs don't like double-unmappings, so
-                * map it first.
-                */
-               debug_pagealloc_map_pages(page, nr_pages);
                adjust_managed_page_count(page, nr_pages);
        } else {
                for (loop = 0; loop < nr_pages; loop++, p++) {
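
With the call moved into online_pages_range(), the default online callback
becomes a plain free into the buddy; roughly, as a simplified sketch of
mm/memory_hotplug.c after this patch (not verbatim):

	void generic_online_page(struct page *page, unsigned int order)
	{
		/* The caller already mapped the range for debug_pagealloc. */
		__free_pages_core(page, order, MEMINIT_HOTPLUG);
	}

Custom callbacks that decide to keep some pages PageOffline (e.g.,
virtio-mem) now also see the range mapped for debug_pagealloc, matching
how pages allocated from the buddy behave.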