hugetlb: increase number of hugepages that can be reserved via cmdline
author     Li Zhe <lizhe.67@bytedance.com>
           Fri, 19 Sep 2025 09:23:53 +0000 (17:23 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Sun, 28 Sep 2025 18:51:32 +0000 (11:51 -0700)
Commit 79359d6d24df ("hugetlb: perform vmemmap optimization on a list of
pages") batches the submission of HugeTLB vmemmap optimization (HVO)
during hugepage reservation.  With HVO enabled, hugepages obtained from
the buddy allocator are not submitted for optimization and their
struct-page memory is therefore not released—until the entire
reservation request has been satisfied.  As a result, any struct-page
memory freed in the course of the allocation cannot be reused for the
ongoing reservation, artificially limiting the number of huge pages that
can ultimately be provided.
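For illustration, here is a minimal C sketch of the pre-patch shape of
the allocation loop (alloc_one_pool_folio() is a hypothetical,
simplified stand-in for alloc_pool_huge_folio(); only
prep_and_add_allocated_folios() matches the real mm/hugetlb.c helper):

static void __init alloc_pool_folios_sketch(struct hstate *h, unsigned long num)
{
	LIST_HEAD(folio_list);
	unsigned long i;

	for (i = 0; i < num; ++i) {
		/* Hypothetical stand-in for alloc_pool_huge_folio(). */
		struct folio *folio = alloc_one_pool_folio(h);

		if (!folio)
			break;	/* buddy allocator exhausted; give up early */
		list_add(&folio->lru, &folio_list);
	}

	/*
	 * Batched HVO: struct-page memory is freed only here, after the
	 * whole loop, too late to help allocations that already failed.
	 */
	prep_and_add_allocated_folios(h, &folio_list);
}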

As commit b1222550fbf7 ("mm/hugetlb: do pre-HVO for bootmem allocated
pages") already applies early HVO to bootmem-allocated huge pages, this
patch extends the same benefit to non-bootmem pages: whenever available
memory is exhausted mid-reservation, the folios accumulated so far are
submitted for HVO, returning their struct-page memory to the buddy
allocator for use by the remaining allocations.  The change raises
the maximum 2 MiB hugepage reservation from just under 376 GB to more than
381 GB on a 384 GB x86 VM.
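For reference, reservations of this kind are requested with the
standard hugepagesz=/hugepages= boot parameters; a 2 MiB setup in the
spirit of the 384 GB test above might look like this (the page count is
illustrative; 381 GB of 2 MiB pages is roughly 195,000):

    hugepagesz=2M hugepages=195000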

Link: https://lkml.kernel.org/r/20250919092353.41671-1-lizhe.67@bytedance.com
Signed-off-by: Li Zhe <lizhe.67@bytedance.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index d2471a0b6002d74e78715b9fd937934ae93ca3fd..1f65609cb7245aa60d1dc2f538f29f92fc6969b7 100644
@@ -3538,7 +3538,14 @@ static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned l
        nodes_clear(node_alloc_noretry);
 
        for (i = 0; i < num; ++i) {
-               struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+               struct folio *folio;
+
+               if (hugetlb_vmemmap_optimizable_size(h) &&
+                   (si_mem_available() == 0) && !list_empty(&folio_list)) {
+                       prep_and_add_allocated_folios(h, &folio_list);
+                       INIT_LIST_HEAD(&folio_list);
+               }
+               folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
                                                &node_alloc_noretry, &next_node);
                if (!folio)
                        break;
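
The added check acts as a fallback rather than a per-iteration flush:
the pending folios are pushed through prep_and_add_allocated_folios()
only when the hstate is HVO-optimizable, si_mem_available() reports no
free memory left, and folio_list is non-empty.  Batched submission thus
remains the common case, and the flush kicks in only once the buddy
allocator would otherwise start failing; reinitializing folio_list
afterwards lets the loop keep batching the folios that follow.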