From d925730734e9e936146b7ba691aa02f1b60f2c61 Mon Sep 17 00:00:00 2001
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Date: Mon, 12 Jan 2026 23:09:53 +0800
Subject: [PATCH] mm: hugetlb_cma: optimize hugetlb_cma_alloc_frozen_folio()

Check hugetlb_cma_size first, which helps to avoid an unnecessary gfp
check or nodemask traversal.

Link: https://lkml.kernel.org/r/20260112150954.1802953-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan
Cc: Brendan Jackman
Cc: David Hildenbrand
Cc: Jane Chu
Cc: Johannes Weiner
Cc: Matthew Wilcox (Oracle)
Cc: Muchun Song
Cc: Oscar Salvador
Cc: Sidhartha Kumar
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 mm/hugetlb_cma.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index 0ddf9755c0905..d8fa93825992f 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -16,7 +16,7 @@
 static struct cma *hugetlb_cma[MAX_NUMNODES];
 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
 static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
+static unsigned long hugetlb_cma_size __ro_after_init;
 
 void hugetlb_cma_free_frozen_folio(struct folio *folio)
 {
@@ -31,6 +31,9 @@ struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
 	struct folio *folio;
 	struct page *page = NULL;
 
+	if (!hugetlb_cma_size)
+		return NULL;
+
 	if (hugetlb_cma[nid])
 		page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);
 
-- 
2.47.3