From: Mike Rapoport (Microsoft)
Date: Sun, 11 Jan 2026 08:21:02 +0000 (+0200)
Subject: mm/hugetlb: drop hugetlb_cma_check()
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7a9c0bf0aec621bba6d224e8c08713cf2cbcca0f;p=thirdparty%2Fkernel%2Flinux.git

mm/hugetlb: drop hugetlb_cma_check()

hugetlb_cma_check() was required when the ordering of hugetlb_cma_reserve()
and hugetlb_bootmem_alloc() was architecture dependent.  Since
hugetlb_cma_reserve() is now always called before hugetlb_bootmem_alloc(),
there is no need to check whether hugetlb_cma_reserve() was already called.

Drop the unneeded hugetlb_cma_check() function.

Link: https://lkml.kernel.org/r/20260111082105.290734-29-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: Muchun Song
Cc: Alexander Gordeev
Cc: Alex Shi
Cc: Andreas Larsson
Cc: "Borislav Petkov (AMD)"
Cc: Catalin Marinas
Cc: David Hildenbrand
Cc: David S. Miller
Cc: Dinh Nguyen
Cc: Geert Uytterhoeven
Cc: Guo Ren
Cc: Heiko Carstens
Cc: Helge Deller
Cc: Huacai Chen
Cc: Ingo Molnar
Cc: Johannes Berg
Cc: John Paul Adrian Glaubitz
Cc: Jonathan Corbet
Cc: Klara Modin
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Magnus Lindholm
Cc: Matt Turner
Cc: Max Filippov
Cc: Michael Ellerman
Cc: Michal Hocko
Cc: Michal Simek
Cc: Oscar Salvador
Cc: Palmer Dabbelt
Cc: Pratyush Yadav
Cc: Richard Weinberger
Cc: "Ritesh Harjani (IBM)"
Cc: Russell King
Cc: Stafford Horne
Cc: Suren Baghdasaryan
Cc: Thomas Bogendoerfer
Cc: Thomas Gleixner
Cc: Vasily Gorbik
Cc: Vineet Gupta
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
---

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1832da0f6236..fe4b9f2ebdb62 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4159,7 +4159,6 @@ static int __init hugetlb_init(void)
 		}
 	}
 
-	hugetlb_cma_check();
 	hugetlb_init_hstates();
 	gather_bootmem_prealloc();
 	report_hugepages();
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index b1eb5998282c5..f5e79103e110c 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -85,9 +85,6 @@ hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
 
 	return m;
 }
-
-static bool cma_reserve_called __initdata;
-
 static int __init cmdline_parse_hugetlb_cma(char *p)
 {
 	int nid, count = 0;
@@ -149,8 +146,10 @@ void __init hugetlb_cma_reserve(void)
 		return;
 
 	order = arch_hugetlb_cma_order();
-	if (!order)
+	if (!order) {
+		pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
 		return;
+	}
 
 	/*
 	 * HugeTLB CMA reservation is required for gigantic
@@ -159,7 +158,6 @@ void __init hugetlb_cma_reserve(void)
 	 * breaking this assumption.
 	 */
 	VM_WARN_ON(order <= MAX_PAGE_ORDER);
-	cma_reserve_called = true;
 
 	hugetlb_bootmem_set_nodes();
 
@@ -253,14 +251,6 @@ void __init hugetlb_cma_reserve(void)
 	hugetlb_cma_size = 0;
 }
 
-void __init hugetlb_cma_check(void)
-{
-	if (!hugetlb_cma_size || cma_reserve_called)
-		return;
-
-	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
 bool hugetlb_cma_exclusive_alloc(void)
 {
 	return hugetlb_cma_only;
diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
index 2c2ec8a7e1340..78186839df3a7 100644
--- a/mm/hugetlb_cma.h
+++ b/mm/hugetlb_cma.h
@@ -8,7 +8,6 @@ struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask, int nid,
 		nodemask_t *nodemask);
 struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
 		bool node_exact);
-void hugetlb_cma_check(void);
 bool hugetlb_cma_exclusive_alloc(void);
 unsigned long hugetlb_cma_total_size(void);
 void hugetlb_cma_validate_params(void);
@@ -31,10 +30,6 @@ struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
 	return NULL;
 }
 
-static inline void hugetlb_cma_check(void)
-{
-}
-
 static inline bool hugetlb_cma_exclusive_alloc(void)
 {
 	return false;
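
For context, a minimal, compilable sketch of the ordering argument in the
changelog: because the generic boot code now always runs the CMA reservation
before the bootmem allocation, the "arch doesn't support it" warning can be
issued directly in hugetlb_cma_reserve() and the old late check has nothing
left to catch.  This is an illustration only; the boot path and function
bodies below are simplified userspace stand-ins, not the actual kernel code.

/*
 * Illustrative sketch only: names mirror the kernel, bodies are hypothetical.
 */
#include <stdio.h>

static unsigned long hugetlb_cma_size = 1;	/* pretend hugetlb_cma= was given */

/* Model of an architecture without gigantic-page CMA support. */
static int arch_hugetlb_cma_order(void)
{
	return 0;
}

static void hugetlb_cma_reserve(void)
{
	if (!hugetlb_cma_size)
		return;

	if (!arch_hugetlb_cma_order()) {
		/* The warning is issued here, at the single call site. */
		printf("hugetlb_cma: the option isn't supported by current arch\n");
		return;
	}

	/* ... otherwise reserve the per-node CMA areas ... */
}

static void hugetlb_bootmem_alloc(void)
{
	/* Gigantic pages would be allocated here, possibly from CMA. */
}

int main(void)
{
	/*
	 * The reservation always runs before the bootmem allocation, so a
	 * later "was hugetlb_cma_reserve() ever called?" check (the old
	 * hugetlb_cma_check()) is redundant.
	 */
	hugetlb_cma_reserve();
	hugetlb_bootmem_alloc();
	return 0;
}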