]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/hugetlb: convert cmdline parameters from setup to early
authorFrank van der Linden <fvdl@google.com>
Fri, 28 Feb 2025 18:29:08 +0000 (18:29 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 05:06:26 +0000 (22:06 -0700)
Convert the cmdline parameters (hugepagesz, hugepages, default_hugepagesz
and hugetlb_free_vmemmap) to early parameters.

Since parse_early_param might run before MMU setup on some platforms
(powerpc), validation of huge page sizes as specified in command line
parameters would fail.  So instead, for the hstate-related values, just
record them and parse them on demand, from hugetlb_bootmem_alloc.

The allocation of hugetlb bootmem pages is now done in
hugetlb_bootmem_alloc, which is called explicitly at the start of
mm_core_init().  core_initcall would be too late, as that happens with
memblock already torn down.

This change will allow earlier allocation and initialization of bootmem
hugetlb pages in subsequent patches.

No functional change intended.

Link: https://lkml.kernel.org/r/20250228182928.2645936-8-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/kernel-parameters.txt
include/linux/hugetlb.h
mm/hugetlb.c
mm/hugetlb_vmemmap.c
mm/mm_init.c

index fb8752b42ec8582b8750d7e014c4d76166fa2fc1..ae21d911d1c7cede1638753e2a95def61f905130 100644 (file)
        hpet_mmap=      [X86, HPET_MMAP] Allow userspace to mmap HPET
                        registers.  Default set by CONFIG_HPET_MMAP_DEFAULT.
 
-       hugepages=      [HW] Number of HugeTLB pages to allocate at boot.
+       hugepages=      [HW,EARLY] Number of HugeTLB pages to allocate at boot.
                        If this follows hugepagesz (below), it specifies
                        the number of pages of hugepagesz to be allocated.
                        If this is the first HugeTLB parameter on the command
                                <node>:<integer>[,<node>:<integer>]
 
        hugepagesz=
-                       [HW] The size of the HugeTLB pages.  This is used in
-                       conjunction with hugepages (above) to allocate huge
-                       pages of a specific size at boot.  The pair
-                       hugepagesz=X hugepages=Y can be specified once for
-                       each supported huge page size. Huge page sizes are
-                       architecture dependent.  See also
+                       [HW,EARLY] The size of the HugeTLB pages.  This is
+                       used in conjunction with hugepages (above) to
+                       allocate huge pages of a specific size at boot. The
+                       pair hugepagesz=X hugepages=Y can be specified once
+                       for each supported huge page size. Huge page sizes
+                       are architecture dependent. See also
                        Documentation/admin-guide/mm/hugetlbpage.rst.
                        Format: size[KMG]
 
index 76a75ec03dd6b51bce90c827fa17a2635f6a392f..a596aaa178d1c06dceacc4e4bf54522ed7d6cefd 100644 (file)
@@ -174,6 +174,8 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
 
+void hugetlb_bootmem_alloc(void);
+
 /* arch callbacks */
 
 #ifndef CONFIG_HIGHPTE
@@ -1257,6 +1259,10 @@ static inline bool hugetlbfs_pagecache_present(
 {
        return false;
 }
+
+static inline void hugetlb_bootmem_alloc(void)
+{
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static inline spinlock_t *huge_pte_lock(struct hstate *h,
index 38ff808fcc8344ec864914d21f7cd2706197a3bb..d1178059a52b1e41882e6f6cd9fa7e7864d7f0c0 100644 (file)
@@ -40,6 +40,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
+#include <asm/setup.h>
 
 #include <linux/io.h>
 #include <linux/hugetlb.h>
@@ -62,6 +63,24 @@ static unsigned long hugetlb_cma_size __initdata;
 
 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
 
+/*
+ * Due to ordering constraints across the init code for various
+ * architectures, hugetlb hstate cmdline parameters can't simply
+ * be early_param. early_param might call the setup function
+ * before valid hugetlb page sizes are determined, leading to
+ * incorrect rejection of valid hugepagesz= options.
+ *
+ * So, record the parameters early and consume them whenever the
+ * init code is ready for them, by calling hugetlb_parse_params().
+ */
+
+/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
+#define HUGE_MAX_CMDLINE_ARGS  (2 * HUGE_MAX_HSTATE + 1)
+struct hugetlb_cmdline {
+       char *val;
+       int (*setup)(char *val);
+};
+
 /* for command line parsing */
 static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
@@ -69,6 +88,20 @@ static bool __initdata parsed_valid_hugepagesz = true;
 static bool __initdata parsed_default_hugepagesz;
 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
 
+static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
+static int hstate_cmdline_index __initdata;
+static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
+static int hugetlb_param_index __initdata;
+static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
+static __init void hugetlb_parse_params(void);
+
+#define hugetlb_early_param(str, func) \
+static __init int func##args(char *s) \
+{ \
+       return hugetlb_add_param(s, func); \
+} \
+early_param(str, func##args)
+
 /*
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  * free_huge_pages, and surplus_huge_pages.
@@ -3496,6 +3529,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
                for (i = 0; i < MAX_NUMNODES; i++)
                        INIT_LIST_HEAD(&huge_boot_pages[i]);
+               h->next_nid_to_alloc = first_online_node;
+               h->next_nid_to_free = first_online_node;
                initialized = true;
        }
 
@@ -4558,8 +4593,6 @@ void __init hugetlb_add_hstate(unsigned int order)
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
-       h->next_nid_to_alloc = first_online_node;
-       h->next_nid_to_free = first_online_node;
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/SZ_1K);
 
@@ -4584,6 +4617,42 @@ static void __init hugepages_clear_pages_in_node(void)
        }
 }
 
+static __init int hugetlb_add_param(char *s, int (*setup)(char *))
+{
+       size_t len;
+       char *p;
+
+       if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
+               return -EINVAL;
+
+       len = strlen(s) + 1;
+       if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
+               return -EINVAL;
+
+       p = &hstate_cmdline_buf[hstate_cmdline_index];
+       memcpy(p, s, len);
+       hstate_cmdline_index += len;
+
+       hugetlb_params[hugetlb_param_index].val = p;
+       hugetlb_params[hugetlb_param_index].setup = setup;
+
+       hugetlb_param_index++;
+
+       return 0;
+}
+
+static __init void hugetlb_parse_params(void)
+{
+       int i;
+       struct hugetlb_cmdline *hcp;
+
+       for (i = 0; i < hugetlb_param_index; i++) {
+               hcp = &hugetlb_params[i];
+
+               hcp->setup(hcp->val);
+       }
+}
+
 /*
  * hugepages command line processing
  * hugepages normally follows a valid hugepagsz or default_hugepagsz
@@ -4603,7 +4672,7 @@ static int __init hugepages_setup(char *s)
        if (!parsed_valid_hugepagesz) {
                pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
                parsed_valid_hugepagesz = true;
-               return 1;
+               return -EINVAL;
        }
 
        /*
@@ -4657,24 +4726,16 @@ static int __init hugepages_setup(char *s)
                }
        }
 
-       /*
-        * Global state is always initialized later in hugetlb_init.
-        * But we need to allocate gigantic hstates here early to still
-        * use the bootmem allocator.
-        */
-       if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
-               hugetlb_hstate_alloc_pages(parsed_hstate);
-
        last_mhp = mhp;
 
-       return 1;
+       return 0;
 
 invalid:
        pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
        hugepages_clear_pages_in_node();
-       return 1;
+       return -EINVAL;
 }
-__setup("hugepages=", hugepages_setup);
+hugetlb_early_param("hugepages", hugepages_setup);
 
 /*
  * hugepagesz command line processing
@@ -4693,7 +4754,7 @@ static int __init hugepagesz_setup(char *s)
 
        if (!arch_hugetlb_valid_size(size)) {
                pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
-               return 1;
+               return -EINVAL;
        }
 
        h = size_to_hstate(size);
@@ -4708,7 +4769,7 @@ static int __init hugepagesz_setup(char *s)
                if (!parsed_default_hugepagesz ||  h != &default_hstate ||
                    default_hstate.max_huge_pages) {
                        pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
-                       return 1;
+                       return -EINVAL;
                }
 
                /*
@@ -4718,14 +4779,14 @@ static int __init hugepagesz_setup(char *s)
                 */
                parsed_hstate = h;
                parsed_valid_hugepagesz = true;
-               return 1;
+               return 0;
        }
 
        hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
        parsed_valid_hugepagesz = true;
-       return 1;
+       return 0;
 }
-__setup("hugepagesz=", hugepagesz_setup);
+hugetlb_early_param("hugepagesz", hugepagesz_setup);
 
 /*
  * default_hugepagesz command line input
@@ -4739,14 +4800,14 @@ static int __init default_hugepagesz_setup(char *s)
        parsed_valid_hugepagesz = false;
        if (parsed_default_hugepagesz) {
                pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
-               return 1;
+               return -EINVAL;
        }
 
        size = (unsigned long)memparse(s, NULL);
 
        if (!arch_hugetlb_valid_size(size)) {
                pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
-               return 1;
+               return -EINVAL;
        }
 
        hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
@@ -4763,17 +4824,33 @@ static int __init default_hugepagesz_setup(char *s)
         */
        if (default_hstate_max_huge_pages) {
                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
-               for_each_online_node(i)
-                       default_hstate.max_huge_pages_node[i] =
-                               default_hugepages_in_node[i];
-               if (hstate_is_gigantic(&default_hstate))
-                       hugetlb_hstate_alloc_pages(&default_hstate);
+               /*
+                * Since this is an early parameter, we can't check
+                * NUMA node state yet, so loop through MAX_NUMNODES.
+                */
+               for (i = 0; i < MAX_NUMNODES; i++) {
+                       if (default_hugepages_in_node[i] != 0)
+                               default_hstate.max_huge_pages_node[i] =
+                                       default_hugepages_in_node[i];
+               }
                default_hstate_max_huge_pages = 0;
        }
 
-       return 1;
+       return 0;
+}
+hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
+
+void __init hugetlb_bootmem_alloc(void)
+{
+       struct hstate *h;
+
+       hugetlb_parse_params();
+
+       for_each_hstate(h) {
+               if (hstate_is_gigantic(h))
+                       hugetlb_hstate_alloc_pages(h);
+       }
 }
-__setup("default_hugepagesz=", default_hugepagesz_setup);
 
 static unsigned int allowed_mems_nr(struct hstate *h)
 {
index 7735972add012e22dfbc154efad27d296086acc0..5b484758f813e7b3b0dab7f66e29239a516e1d9f 100644 (file)
@@ -444,7 +444,11 @@ DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
-core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
+static int __init hugetlb_vmemmap_optimize_param(char *buf)
+{
+       return kstrtobool(buf, &vmemmap_optimize_enabled);
+}
+early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param);
 
 static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
                                           struct folio *folio, unsigned long flags)
index c767946e8f5fd461eaac4ef5a32de11931c95beb..45bc4b55fd6a52ce89b2a2c2ad893ec2e935a420 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/crash_dump.h>
 #include <linux/execmem.h>
 #include <linux/vmstat.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 #include "slab.h"
 #include "shuffle.h"
@@ -2641,6 +2642,8 @@ static void __init mem_init_print_info(void)
  */
 void __init mm_core_init(void)
 {
+       hugetlb_bootmem_alloc();
+
        /* Initializations relying on SMP setup */
        BUILD_BUG_ON(MAX_ZONELISTS > 2);
        build_all_zonelists(NULL);