git.ipfire.org Git - thirdparty/linux.git/commitdiff
selftests/bpf: Make sure stashed kptr in local kptr is freed recursively
author: Amery Hung <amery.hung@bytedance.com>
Tue, 27 Aug 2024 01:13:01 +0000 (01:13 +0000)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 29 Aug 2024 19:18:26 +0000 (12:18 -0700)
When dropping a local kptr, any kptr stashed into it is supposed to be
freed through bpf_obj_free_fields->__bpf_obj_drop_impl recursively. Add a
test to make sure it happens.

The test first stashes a referenced kptr to a "struct task_struct" into a
local kptr and gets the reference count of the task. Then, it drops the local
kptr and reads the reference count of the task again. Since
bpf_obj_free_fields and __bpf_obj_drop_impl will go through the local kptr
recursively during bpf_obj_drop, the dtor of the stashed task kptr should
eventually be called. The second reference count should be one less than
the first one.

Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240827011301.608620-1-amery.hung@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/task_kfunc_success.c

index 3138bb689b0b87bf8b8884de0dffa84652e22960..a551490150630a43be0691cf48b3a76f29a67eff 100644 (file)
@@ -143,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone
 SEC("tp_btf/task_newtask")
 int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
 {
-       struct task_struct *kptr;
+       struct task_struct *kptr, *acquired;
        struct __tasks_kfunc_map_value *v, *local;
+       int refcnt, refcnt_after_drop;
        long status;
 
        if (!is_test_kfunc_task())
@@ -190,7 +191,34 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
                return 0;
        }
 
+       /* Stash a copy into local kptr and check if it is released recursively */
+       acquired = bpf_task_acquire(kptr);
+       if (!acquired) {
+               err = 7;
+               bpf_obj_drop(local);
+               bpf_task_release(kptr);
+               return 0;
+       }
+       bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
+
+       acquired = bpf_kptr_xchg(&local->task, acquired);
+       if (acquired) {
+               err = 8;
+               bpf_obj_drop(local);
+               bpf_task_release(kptr);
+               bpf_task_release(acquired);
+               return 0;
+       }
+
        bpf_obj_drop(local);
+
+       bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
+       if (refcnt != refcnt_after_drop + 1) {
+               err = 9;
+               bpf_task_release(kptr);
+               return 0;
+       }
+
        bpf_task_release(kptr);
 
        return 0;