git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: Move mm_cachep initialization to mm_init()
author: Peter Zijlstra <peterz@infradead.org>
date: Tue, 25 Oct 2022 19:38:18 +0000 (21:38 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
date: Tue, 8 Aug 2023 17:58:33 +0000 (19:58 +0200)
commit af80602799681c78f14fbe20b6185a56020dedee upstream.

In order to allow using mm_alloc() much earlier, move initializing
mm_cachep into mm_init().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/sched/task.h
init/main.c
kernel/fork.c

index a702d4d1a4958274a286c008df80c67b6b4d4583..d23977e9035d4454c9a24de9eed874579a78bf3b 100644 (file)
@@ -61,6 +61,7 @@ extern void sched_dead(struct task_struct *p);
 void __noreturn do_task_dead(void);
 void __noreturn make_task_dead(int signr);
 
+extern void mm_cache_init(void);
 extern void proc_caches_init(void);
 
 extern void fork_init(void);
index 1efd414cefebf9c0538b7342eb1d84b788798716..45b3a2beb383c2e85a085e22137c0d0494535674 100644 (file)
@@ -855,6 +855,7 @@ static void __init mm_init(void)
        init_espfix_bsp();
        /* Should be run after espfix64 is set up. */
        pti_init();
+       mm_cache_init();
 }
 
 #ifdef CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
index 0835c3e89311623b5111a434143cdc4b9fc4b183..ace0717c71e27ae0f04e95ae92866a8551003f78 100644 (file)
@@ -2916,10 +2916,27 @@ static void sighand_ctor(void *data)
        init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
        unsigned int mm_size;
 
+       /*
+        * The mm_cpumask is located at the end of mm_struct, and is
+        * dynamically sized based on the maximum CPU number this system
+        * can have, taking hotplug into account (nr_cpu_ids).
+        */
+       mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+       mm_cachep = kmem_cache_create_usercopy("mm_struct",
+                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+                       offsetof(struct mm_struct, saved_auxv),
+                       sizeof_field(struct mm_struct, saved_auxv),
+                       NULL);
+}
+
+void __init proc_caches_init(void)
+{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2937,19 +2954,6 @@ void __init proc_caches_init(void)
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
 
-       /*
-        * The mm_cpumask is located at the end of mm_struct, and is
-        * dynamically sized based on the maximum CPU number this system
-        * can have, taking hotplug into account (nr_cpu_ids).
-        */
-       mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-       mm_cachep = kmem_cache_create_usercopy("mm_struct",
-                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-                       offsetof(struct mm_struct, saved_auxv),
-                       sizeof_field(struct mm_struct, saved_auxv),
-                       NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();
        nsproxy_cache_init();