powerpc/mem: Move CMA reservations to arch_mm_preinit
author     Ritesh Harjani (IBM) <ritesh.list@gmail.com>
           Sat, 28 Feb 2026 18:47:59 +0000 (00:17 +0530)
committer  Madhavan Srinivasan <maddy@linux.ibm.com>
           Thu, 12 Mar 2026 05:27:31 +0000 (10:57 +0530)
Commit 4267739cabb8 ("arch, mm: consolidate initialization of SPARSE
memory model") changed the initialization order of pageblock_order
from...
start_kernel()
    - setup_arch()
       - initmem_init()
         - sparse_init()
           - set_pageblock_order();  // this sets the pageblock_order
       - xxx_cma_reserve();

to...
start_kernel()
    - setup_arch()
       - xxx_cma_reserve();
    - mm_core_init_early()
       - free_area_init()
          - sparse_init()
             - set_pageblock_order() // this sets the pageblock_order.

This means pageblock_order is not yet initialized when these CMA
reservation functions are called, hence we see CMA failures like:

[    0.000000] kvm_cma_reserve: reserving 3276 MiB for global area
[    0.000000] cma: pageblock_order not yet initialized. Called during early boot?
[    0.000000] cma: Failed to reserve 3276 MiB
....
[    0.000000][    T0] cma: pageblock_order not yet initialized. Called during early boot?
[    0.000000][    T0] cma: Failed to reserve 1024 MiB
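
To make the dependency concrete, here is a minimal standalone sketch
(plain userspace C, not the kernel's actual code; cma_reserve() and
the order value 9 are illustrative stand-ins) showing why a CMA-style
reservation must run after set_pageblock_order():

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned int pageblock_order;  /* 0 until set_pageblock_order() */

    /* Stand-in for set_pageblock_order(), called from sparse_init(). */
    static void set_pageblock_order(void)
    {
            pageblock_order = 9;          /* illustrative value */
    }

    /* Stand-in for a CMA reservation whose alignment needs pageblock_order. */
    static bool cma_reserve(unsigned long size_mib)
    {
            if (!pageblock_order) {
                    printf("cma: pageblock_order not yet initialized. Called during early boot?\n");
                    printf("cma: Failed to reserve %lu MiB\n", size_mib);
                    return false;
            }
            printf("cma: reserved %lu MiB, aligned to order-%u pageblocks\n",
                   size_mib, pageblock_order);
            return true;
    }

    int main(void)
    {
            cma_reserve(3276);            /* old order: fails, pageblock_order == 0 */
            set_pageblock_order();        /* sparse_init() has run */
            cma_reserve(3276);            /* fixed order: succeeds */
            return 0;
    }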

This patch moves these CMA reservations to arch_mm_preinit(), which
runs from mm_core_init() (i.e. after pageblock_order has been
initialized) but before memblock hands the free memory over to the
buddy allocator.

Fixes: 4267739cabb8 ("arch, mm: consolidate initialization of SPARSE memory model")
Suggested-by: Mike Rapoport <rppt@kernel.org>
Reported-and-tested-by: Sourabh Jain <sourabhjain@linux.ibm.com>
Closes: https://lore.kernel.org/linuxppc-dev/4c338a29-d190-44f3-8874-6cfa0a031f0b@linux.ibm.com/
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Tested-by: Dan Horák <dan@danny.cz>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/6e532cf0db5be99afbe20eed699163d5e86cd71f.1772303986.git.ritesh.list@gmail.com
arch/powerpc/kernel/setup-common.c
arch/powerpc/mm/mem.c

diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index cb5b73adc2506998d8c45fe776e75cb35a2404eb..b1761909c23fec9da726faeba94613207f60831c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,7 +35,6 @@
 #include <linux/of_irq.h>
 #include <linux/hugetlb.h>
 #include <linux/pgtable.h>
-#include <asm/kexec.h>
 #include <asm/io.h>
 #include <asm/paca.h>
 #include <asm/processor.h>
@@ -995,15 +994,6 @@ void __init setup_arch(char **cmdline_p)
 
        initmem_init();
 
-       /*
-        * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM and
-        * hugetlb. These must be called after initmem_init(), so that
-        * pageblock_order is initialised.
-        */
-       fadump_cma_init();
-       kdump_cma_reserve();
-       kvm_cma_reserve();
-
        early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
 
        if (ppc_md.setup_arch)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a985fc96b9530fae9544f51dfc8b3f43bf647c34..b7982d0243d487c98b4f92430959eb50245cdcd5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
 #include <asm/setup.h>
 #include <asm/fixmap.h>
 
+#include <asm/fadump.h>
+#include <asm/kexec.h>
+#include <asm/kvm_ppc.h>
+
 #include <mm/mmu_decl.h>
 
 unsigned long long memory_limit __initdata;
@@ -268,6 +272,16 @@ void __init paging_init(void)
 
 void __init arch_mm_preinit(void)
 {
+
+       /*
+        * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM
+        * and hugetlb. These must be called after pageblock_order is
+        * initialised.
+        */
+       fadump_cma_init();
+       kdump_cma_reserve();
+       kvm_cma_reserve();
+
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.