fork: clean-up ifdef logic around stack allocation
author     Pasha Tatashin <pasha.tatashin@soleen.com>
           Fri, 9 May 2025 06:29:26 +0000 (08:29 +0200)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 21 May 2025 17:48:22 +0000 (10:48 -0700)
Patch series "fork: Page operation cleanups in the fork code", v3.

This patchset consists of outtakes from a 1 year+ old patchset from Pasha,
which all stand on their own.  See:
https://lore.kernel.org/all/20240311164638.2015063-1-pasha.tatashin@soleen.com/

These are good cleanups for readability, so I split them off, rebased them
on v6.15-rc1, addressed review comments, and sent them separately.

All mentions of dynamic stack are removed from the patchset as we have no
idea whether that will go anywhere.

This patch (of 3):

There is an unneeded OR in the ifdef conditions used to select the functions
that allocate and free kernel stacks from the direct map or from vmap.

Therefore, clean up by reordering the conditions so that the OR is no longer
needed.
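
In outline, with the function bodies elided, the patch changes the
preprocessor structure from this:

    #if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
    # ifdef CONFIG_VMAP_STACK
      /* vmap-based stack allocation and freeing */
    # else
      /* page-based stack allocation and freeing */
    # endif
    #else
      /* kmem_cache-based stack allocation and freeing */
    #endif

to this:

    #ifdef CONFIG_VMAP_STACK
      /* vmap-based stack allocation and freeing */
    #else
    # if THREAD_SIZE >= PAGE_SIZE
      /* page-based stack allocation and freeing */
    # else
      /* kmem_cache-based stack allocation and freeing */
    # endif
    #endif

With CONFIG_VMAP_STACK as the outermost condition, the THREAD_SIZE test is
only reached in the !CONFIG_VMAP_STACK branch, so the compound
"THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)" condition is no
longer needed.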

[linus.walleij@linaro.org: rebased]
Link: https://lkml.kernel.org/r/20250509-fork-fixes-v3-1-e6c69dd356f2@linaro.org
Link: https://lkml.kernel.org/r/20250509-fork-fixes-v3-0-e6c69dd356f2@linaro.org
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/20240311164638.2015063-3-pasha.tatashin@soleen.com
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
kernel/fork.c

index c4b26cd8998b8e7b2b516e0bb0b1d4676ff644dc..7b9e1ad141baaeb158b1807ea9fc3ef246f5f3a7 100644
@@ -185,13 +185,7 @@ static inline void free_task_struct(struct task_struct *tsk)
        kmem_cache_free(task_struct_cachep, tsk);
 }
 
-/*
- * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
- * kmemcache based allocator.
- */
-# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
-
-#  ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_VMAP_STACK
 /*
  * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
  * flush.  Try to minimize the number of calls by caching stacks.
@@ -342,7 +336,13 @@ static void free_thread_stack(struct task_struct *tsk)
        tsk->stack_vm_area = NULL;
 }
 
-#  else /* !CONFIG_VMAP_STACK */
+#else /* !CONFIG_VMAP_STACK */
+
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+#if THREAD_SIZE >= PAGE_SIZE
 
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
@@ -374,8 +374,7 @@ static void free_thread_stack(struct task_struct *tsk)
        tsk->stack = NULL;
 }
 
-#  endif /* CONFIG_VMAP_STACK */
-# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
+#else /* !(THREAD_SIZE >= PAGE_SIZE) */
 
 static struct kmem_cache *thread_stack_cache;
 
@@ -414,7 +413,8 @@ void thread_stack_cache_init(void)
        BUG_ON(thread_stack_cache == NULL);
 }
 
-# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
+#endif /* THREAD_SIZE >= PAGE_SIZE */
+#endif /* CONFIG_VMAP_STACK */
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
 static struct kmem_cache *signal_cachep;
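
A note on the comment in the first hunk ("Try to minimize the number of
calls by caching stacks"): the vmap path parks freed stacks in per-CPU slots
rather than calling vfree() every time.  Below is a simplified sketch of that
scheme, modeled on the upstream kernel/fork.c code.  try_get_cached_stack()
is a hypothetical helper name used here for illustration (upstream, that loop
lives inline in alloc_thread_stack_node()), and the real code additionally
handles KASAN poisoning and memcg accounting:

    #include <linux/percpu.h>
    #include <linux/vmalloc.h>

    #define NR_CACHED_STACKS 2
    static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

    /*
     * On free: try to park the stack in an empty per-CPU slot instead of
     * calling vfree(), whose deferred unmaps eventually force a TLB flush.
     */
    static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
    {
            unsigned int i;

            for (i = 0; i < NR_CACHED_STACKS; i++) {
                    /* Atomically claim a free slot; skip occupied ones. */
                    if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
                            continue;
                    return true;
            }
            return false;   /* cache full: caller falls back to vfree() */
    }

    /*
     * On allocation: take a parked stack if one is available, so the
     * common fork/exit pattern avoids vmalloc()/vfree() churn.
     * (Hypothetical helper; inlined in alloc_thread_stack_node() upstream.)
     */
    static struct vm_struct *try_get_cached_stack(void)
    {
            unsigned int i;

            for (i = 0; i < NR_CACHED_STACKS; i++) {
                    struct vm_struct *s = this_cpu_xchg(cached_stacks[i], NULL);

                    if (s)
                            return s;
            }
            return NULL;    /* nothing cached: caller vmalloc()s a stack */
    }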