git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: hugetlb: check NUMA_NO_NODE in only_alloc_fresh_hugetlb_folio()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 10 Sep 2025 13:39:58 +0000 (21:39 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 21 Sep 2025 21:22:12 +0000 (14:22 -0700)
Move the NUMA_NO_NODE check out of the buddy and gigantic folio allocation
paths to clean up the code a bit; this also avoids NUMA_NO_NODE being passed
as 'nid' to node_isset() in alloc_buddy_hugetlb_folio().
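
For readability, here is a minimal sketch of the resulting helper after this
patch, pieced together from the hunks below; the tail of
only_alloc_fresh_hugetlb_folio()'s parameter list, the arguments passed to
alloc_buddy_hugetlb_folio(), and the rest of the function body are assumptions
for illustration, not quoted from the tree.

    /*
     * Sketch assembled from the hunks below: not verbatim kernel source.
     * The NUMA_NO_NODE fallback now sits in the common caller, so neither
     * allocation path repeats it and node_isset() in
     * alloc_buddy_hugetlb_folio() never sees NUMA_NO_NODE as 'nid'.
     */
    static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
                    gfp_t gfp_mask, int nid, nodemask_t *nmask,
                    nodemask_t *node_alloc_noretry) /* tail of parameters assumed */
    {
            struct folio *folio;
            int order = huge_page_order(h);

            if (nid == NUMA_NO_NODE)
                    nid = numa_mem_id();    /* single, shared fallback to the local memory node */

            if (order_is_gigantic(order))
                    folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
            else
                    folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
                                                      node_alloc_noretry); /* argument list assumed */

            /* remaining body of the helper elided */
            return folio;
    }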

Link: https://lkml.kernel.org/r/20250910133958.301467-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 1783b9e7c338e0ed9d5c42538df57d7fadb787ac..d2471a0b6002d74e78715b9fd937934ae93ca3fd 100644
@@ -1479,8 +1479,6 @@ static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
        struct folio *folio;
        bool retried = false;
 
-       if (nid == NUMA_NO_NODE)
-               nid = numa_mem_id();
 retry:
        folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
        if (!folio) {
@@ -1942,8 +1940,6 @@ static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
                alloc_try_hard = false;
        if (alloc_try_hard)
                gfp_mask |= __GFP_RETRY_MAYFAIL;
-       if (nid == NUMA_NO_NODE)
-               nid = numa_mem_id();
 
        folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
 
@@ -1979,6 +1975,9 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
        struct folio *folio;
        int order = huge_page_order(h);
 
+       if (nid == NUMA_NO_NODE)
+               nid = numa_mem_id();
+
        if (order_is_gigantic(order))
                folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
        else