git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: hugetlb_cma: optimize hugetlb_cma_alloc_frozen_folio()
author: Kefeng Wang <wangkefeng.wang@huawei.com>
Mon, 12 Jan 2026 15:09:53 +0000 (23:09 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Sat, 31 Jan 2026 22:22:43 +0000 (14:22 -0800)
Check hugetlb_cma_size first, which helps to avoid an unnecessary gfp check or
nodemask traversal when no hugetlb CMA area is configured.

Link: https://lkml.kernel.org/r/20260112150954.1802953-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb_cma.c

index 0ddf9755c0905ac9443a63e9708d7f19f7b56db8..d8fa93825992f0191d8f772a9db4b173620b0a1d 100644 (file)
@@ -16,7 +16,7 @@
 static struct cma *hugetlb_cma[MAX_NUMNODES];
 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
 static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
+static unsigned long hugetlb_cma_size __ro_after_init;
 
 void hugetlb_cma_free_frozen_folio(struct folio *folio)
 {
@@ -31,6 +31,9 @@ struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
        struct folio *folio;
        struct page *page = NULL;
 
+       if (!hugetlb_cma_size)
+               return NULL;
+
        if (hugetlb_cma[nid])
                page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);