git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/hugetlb: add hugetlb_cma_only cmdline option
author Frank van der Linden <fvdl@google.com>
Fri, 28 Feb 2025 18:29:26 +0000 (18:29 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 05:06:31 +0000 (22:06 -0700)
Add an option to force hugetlb gigantic pages to be allocated using CMA
only (if hugetlb_cma is enabled).  This avoids a fallback to allocation
from the rest of system memory if the CMA allocation fails.  This makes
the size of hugetlb_cma a hard upper boundary for gigantic hugetlb page
allocations.

This is useful because, with a large CMA area, the kernel's unmovable
allocations will have less room to work with and it is undesirable for new
hugetlb gigantic page allocations to be done from that remaining area.  It
will eat into the space available for unmovable allocations, leading to
unwanted system behavior (OOMs because the kernel fails to do unmovable
allocations).

So, with this enabled, an administrator can force a hard upper bound for
runtime gigantic page allocations, and have more predictable system
behavior.

Link: https://lkml.kernel.org/r/20250228182928.2645936-26-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/kernel-parameters.txt
mm/hugetlb.c

index ae21d911d1c7cede1638753e2a95def61f905130..491628ac071a5327677d6cb5a86fe4922e65e204 100644 (file)
                        hugepages using the CMA allocator. If enabled, the
                        boot-time allocation of gigantic hugepages is skipped.
 
+       hugetlb_cma_only=
+                       [HW,CMA,EARLY] When allocating new HugeTLB pages, only
+                       try to allocate from the CMA areas.
+
+                       This option does nothing if hugetlb_cma= is not also
+                       specified.
+
        hugetlb_free_vmemmap=
                        [KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
                        enabled.
index d1134e9159276df87c41f6e4dd17fea6b54443b8..80d401593669ada6f42ada7087020e26e399ee93 100644 (file)
@@ -59,6 +59,7 @@ struct hstate hstates[HUGE_MAX_HSTATE];
 static struct cma *hugetlb_cma[MAX_NUMNODES];
 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
 #endif
+static bool hugetlb_cma_only;
 static unsigned long hugetlb_cma_size __initdata;
 
 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -1510,6 +1511,9 @@ retry:
        }
 #endif
        if (!folio) {
+               if (hugetlb_cma_only)
+                       return NULL;
+
                folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
                if (!folio)
                        return NULL;
@@ -4750,6 +4754,9 @@ static __init void hugetlb_parse_params(void)
 
                hcp->setup(hcp->val);
        }
+
+       if (!hugetlb_cma_size)
+               hugetlb_cma_only = false;
 }
 
 /*
@@ -7862,6 +7869,13 @@ static int __init cmdline_parse_hugetlb_cma(char *p)
 
 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
 
+static int __init cmdline_parse_hugetlb_cma_only(char *p)
+{
+       return kstrtobool(p, &hugetlb_cma_only);
+}
+
+early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
+
 void __init hugetlb_cma_reserve(int order)
 {
        unsigned long size, reserved, per_node;