fork: clean-up naming of vm_stack/vm_struct variables in vmap stacks code
author    Pasha Tatashin <pasha.tatashin@soleen.com>
          Fri, 9 May 2025 06:29:27 +0000 (08:29 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Wed, 21 May 2025 17:48:23 +0000 (10:48 -0700)
There are two data types, "struct vm_struct" and "struct vm_stack", whose
local variables share the same names (vm_stack, vm, or s), which makes the
code confusing to read.

Change the code so the naming is consistent:

struct vm_struct is always called vm_area
struct vm_stack is always called vm_stack

One change, replacing vfree(vm_stack) with vfree(vm_area->addr), may look
like a semantic change, but it is not: vm_area->addr points to the vm_stack,
so both calls free the same allocation (see the sketch below). The change
was made purely to improve readability.
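
For context, a minimal sketch of why the two calls are equivalent, abridged
from the surrounding code in kernel/fork.c (post-patch naming; not part of
this diff): the struct vm_stack used for deferred freeing is stored at the
base of the stack mapping itself, so its address is exactly vm_area->addr.

    /* Deferred-free bookkeeping lives at the bottom of the stack mapping. */
    struct vm_stack {
            struct rcu_head rcu;
            struct vm_struct *stack_vm_area;
    };

    static void thread_stack_delayed_free(struct task_struct *tsk)
    {
            /* tsk->stack is the mapping base, i.e. vm_area->addr. */
            struct vm_stack *vm_stack = tsk->stack;

            vm_stack->stack_vm_area = tsk->stack_vm_area;
            call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
    }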

[linus.walleij@linaro.org: rebased and added new users of the variable names, addressed review comments]
Link: https://lore.kernel.org/20240311164638.2015063-4-pasha.tatashin@soleen.com
Link: https://lkml.kernel.org/r/20250509-fork-fixes-v3-2-e6c69dd356f2@linaro.org
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
kernel/fork.c

index 7b9e1ad141baaeb158b1807ea9fc3ef246f5f3a7..8b8457562740c114c640a8cc230876f6a286b246 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -198,14 +198,14 @@ struct vm_stack {
        struct vm_struct *stack_vm_area;
 };
 
-static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 {
        unsigned int i;
 
        for (i = 0; i < NR_CACHED_STACKS; i++) {
                struct vm_struct *tmp = NULL;
 
-               if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+               if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
                        return true;
        }
        return false;
@@ -214,11 +214,12 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
        struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+       struct vm_struct *vm_area = vm_stack->stack_vm_area;
 
        if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
                return;
 
-       vfree(vm_stack);
+       vfree(vm_area->addr);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -231,32 +232,32 @@ static void thread_stack_delayed_free(struct task_struct *tsk)
 
 static int free_vm_stack_cache(unsigned int cpu)
 {
-       struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+       struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
        int i;
 
        for (i = 0; i < NR_CACHED_STACKS; i++) {
-               struct vm_struct *vm_stack = cached_vm_stacks[i];
+               struct vm_struct *vm_area = cached_vm_stack_areas[i];
 
-               if (!vm_stack)
+               if (!vm_area)
                        continue;
 
-               vfree(vm_stack->addr);
-               cached_vm_stacks[i] = NULL;
+               vfree(vm_area->addr);
+               cached_vm_stack_areas[i] = NULL;
        }
 
        return 0;
 }
 
-static int memcg_charge_kernel_stack(struct vm_struct *vm)
+static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
 {
        int i;
        int ret;
        int nr_charged = 0;
 
-       BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+       BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
        for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-               ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+               ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
                if (ret)
                        goto err;
                nr_charged++;
@@ -264,38 +265,35 @@ static int memcg_charge_kernel_stack(struct vm_struct *vm)
        return 0;
 err:
        for (i = 0; i < nr_charged; i++)
-               memcg_kmem_uncharge_page(vm->pages[i], 0);
+               memcg_kmem_uncharge_page(vm_area->pages[i], 0);
        return ret;
 }
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-       struct vm_struct *vm;
+       struct vm_struct *vm_area;
        void *stack;
        int i;
 
        for (i = 0; i < NR_CACHED_STACKS; i++) {
-               struct vm_struct *s;
-
-               s = this_cpu_xchg(cached_stacks[i], NULL);
-
-               if (!s)
+               vm_area = this_cpu_xchg(cached_stacks[i], NULL);
+               if (!vm_area)
                        continue;
 
                /* Reset stack metadata. */
-               kasan_unpoison_range(s->addr, THREAD_SIZE);
+               kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
 
-               stack = kasan_reset_tag(s->addr);
+               stack = kasan_reset_tag(vm_area->addr);
 
                /* Clear stale pointers from reused stack. */
                memset(stack, 0, THREAD_SIZE);
 
-               if (memcg_charge_kernel_stack(s)) {
-                       vfree(s->addr);
+               if (memcg_charge_kernel_stack(vm_area)) {
+                       vfree(vm_area->addr);
                        return -ENOMEM;
                }
 
-               tsk->stack_vm_area = s;
+               tsk->stack_vm_area = vm_area;
                tsk->stack = stack;
                return 0;
        }
@@ -311,8 +309,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
        if (!stack)
                return -ENOMEM;
 
-       vm = find_vm_area(stack);
-       if (memcg_charge_kernel_stack(vm)) {
+       vm_area = find_vm_area(stack);
+       if (memcg_charge_kernel_stack(vm_area)) {
                vfree(stack);
                return -ENOMEM;
        }
@@ -321,7 +319,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
         * free_thread_stack() can be called in interrupt context,
         * so cache the vm_struct.
         */
-       tsk->stack_vm_area = vm;
+       tsk->stack_vm_area = vm_area;
        stack = kasan_reset_tag(stack);
        tsk->stack = stack;
        return 0;
@@ -517,11 +515,11 @@ void vm_area_free(struct vm_area_struct *vma)
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-               struct vm_struct *vm = task_stack_vm_area(tsk);
+               struct vm_struct *vm_area = task_stack_vm_area(tsk);
                int i;
 
                for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-                       mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+                       mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
                                              account * (PAGE_SIZE / 1024));
        } else {
                void *stack = task_stack_page(tsk);
@@ -537,12 +535,12 @@ void exit_task_stack_account(struct task_struct *tsk)
        account_kernel_stack(tsk, -1);
 
        if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-               struct vm_struct *vm;
+               struct vm_struct *vm_area;
                int i;
 
-               vm = task_stack_vm_area(tsk);
+               vm_area = task_stack_vm_area(tsk);
                for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-                       memcg_kmem_uncharge_page(vm->pages[i], 0);
+                       memcg_kmem_uncharge_page(vm_area->pages[i], 0);
        }
 }
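
For reference, the per-CPU cache that try_release_thread_stack_to_cache()
and free_vm_stack_cache() operate on is declared earlier in kernel/fork.c
(shown here for context only; it is unchanged by this patch):

    #define NR_CACHED_STACKS 2
    static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);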