mm: hugetlb: directly pass order when allocating a hugetlb folio
author	Kefeng Wang <wangkefeng.wang@huawei.com>
	Wed, 10 Sep 2025 13:39:56 +0000 (21:39 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Sun, 21 Sep 2025 21:22:11 +0000 (14:22 -0700)
Pass the order directly instead of a struct hstate, which removes the
huge_page_order() call from all hugetlb folio allocation paths.  Also add
order_is_gigantic() to check whether an order is a gigantic order.
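
For example, the converted only_alloc_fresh_hugetlb_folio() (abbreviated
sketch of the hunk below) now computes the order once and passes it down
to both allocation helpers:

	int order = huge_page_order(h);

	if (order_is_gigantic(order))
		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
						  node_alloc_noretry);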

Link: https://lkml.kernel.org/r/20250910133958.301467-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/hugetlb_cma.c
mm/hugetlb_cma.h

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 526d27e88b3b274cec5c33892c2dfcfcf7e6eb3a..8e63e46b8e1f0ea5d88ed728f6de72b93b8901de 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -788,9 +788,14 @@ static inline unsigned huge_page_shift(struct hstate *h)
        return h->order + PAGE_SHIFT;
 }
 
+static inline bool order_is_gigantic(unsigned int order)
+{
+       return order > MAX_PAGE_ORDER;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
-       return huge_page_order(h) > MAX_PAGE_ORDER;
+       return order_is_gigantic(huge_page_order(h));
 }
 
 static inline unsigned int pages_per_huge_page(const struct hstate *h)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef6284ec85b651f5f845ad2990124fe08d2fc1cf..7f33e4a158c61bc7cee3f2a60fb831fa65f474c6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1473,17 +1473,16 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 #ifdef CONFIG_CONTIG_ALLOC
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
                int nid, nodemask_t *nodemask)
 {
        struct folio *folio;
-       int order = huge_page_order(h);
        bool retried = false;
 
        if (nid == NUMA_NO_NODE)
                nid = numa_mem_id();
 retry:
-       folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
+       folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
        if (!folio) {
                if (hugetlb_cma_exclusive_alloc())
                        return NULL;
@@ -1506,16 +1505,16 @@ retry:
 }
 
 #else /* !CONFIG_CONTIG_ALLOC */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-                                       int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+                                         nodemask_t *nodemask)
 {
        return NULL;
 }
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-                                       int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+                                         nodemask_t *nodemask)
 {
        return NULL;
 }
@@ -1926,11 +1925,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
        return NULL;
 }
 
-static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
-               gfp_t gfp_mask, int nid, nodemask_t *nmask,
-               nodemask_t *node_alloc_noretry)
+static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
+               int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
 {
-       int order = huge_page_order(h);
        struct folio *folio;
        bool alloc_try_hard = true;
 
@@ -1980,11 +1977,13 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
                nodemask_t *node_alloc_noretry)
 {
        struct folio *folio;
+       int order = huge_page_order(h);
 
-       if (hstate_is_gigantic(h))
-               folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+       if (order_is_gigantic(order))
+               folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
        else
-               folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
+               folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
+                                                 node_alloc_noretry);
        if (folio)
                init_new_hugetlb_folio(h, folio);
        return folio;
@@ -2872,7 +2871,7 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
         * alloc_contig_range and them. Return -ENOMEM as this has the effect
         * of bailing out right away without further retrying.
         */
-       if (folio_order(folio) > MAX_PAGE_ORDER)
+       if (order_is_gigantic(folio_order(folio)))
                return -ENOMEM;
 
        if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index f58ef4969e7a1c57a1ab818cc52343d7b00c26fa..e8e4dc7182d5457b31091d9562e064823d2c2af7 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -26,11 +26,10 @@ void hugetlb_cma_free_folio(struct folio *folio)
 }
 
 
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
                                      int nid, nodemask_t *nodemask)
 {
        int node;
-       int order = huge_page_order(h);
        struct folio *folio = NULL;
 
        if (hugetlb_cma[nid])
diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
index f7d7fb9880a2699528f95ccfc7a3203facba3738..2c2ec8a7e134027728e249451679fb5443f0f6bd 100644
--- a/mm/hugetlb_cma.h
+++ b/mm/hugetlb_cma.h
@@ -4,7 +4,7 @@
 
 #ifdef CONFIG_CMA
 void hugetlb_cma_free_folio(struct folio *folio);
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
                                      int nid, nodemask_t *nodemask);
 struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
                                                    bool node_exact);
@@ -18,8 +18,8 @@ static inline void hugetlb_cma_free_folio(struct folio *folio)
 {
 }
 
-static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
-           gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
+               int nid, nodemask_t *nodemask)
 {
        return NULL;
 }