bpf: Migrate bpf_task_work_schedule_* kfuncs to KF_IMPLICIT_ARGS
author	Ihor Solodrai <ihor.solodrai@linux.dev>
	Tue, 20 Jan 2026 22:26:34 +0000 (14:26 -0800)
committer	Alexei Starovoitov <ast@kernel.org>
	Wed, 21 Jan 2026 00:22:20 +0000 (16:22 -0800)
Implement bpf_task_work_schedule_* kfuncs with an implicit
bpf_prog_aux argument, and remove the corresponding _impl functions
from the kernel.

Update special kfunc checks in the verifier accordingly.

Update the selftests to use the new API with implicit argument.
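
For illustration, the call-site change as seen from a BPF program (a
minimal sketch based on the selftest updates below; the task/work/arrmap
names are taken from file_reader.c):

  /* Before: the _impl kfunc required an explicit NULL for aux__prog. */
  bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL);

  /* After: the bpf_prog_aux pointer is an implicit argument supplied by
   * the verifier (KF_IMPLICIT_ARGS), so the trailing NULL is dropped.
   */
  bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback);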

Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev>
Link: https://lore.kernel.org/r/20260120222638.3976562-10-ihor.solodrai@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/helpers.c
kernel/bpf/verifier.c
tools/testing/selftests/bpf/progs/file_reader.c
tools/testing/selftests/bpf/progs/task_work.c
tools/testing/selftests/bpf/progs/task_work_fail.c
tools/testing/selftests/bpf/progs/task_work_stress.c
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c

index c76a9003b221889ed99c7114df205031127e5f69..f2f974b5fb3b142284791dbba8e80615df05dcda 100644 (file)
@@ -4274,41 +4274,39 @@ release_prog:
 }
 
 /**
- * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL
  * mode
  * @task: Task struct for which callback should be scheduled
  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
  * @map__map: bpf_map that embeds struct bpf_task_work in the values
  * @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
  *
  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
  */
-__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
-                                                  struct bpf_task_work *tw, void *map__map,
-                                                  bpf_task_work_callback_t callback,
-                                                  void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
+                                             void *map__map, bpf_task_work_callback_t callback,
+                                             struct bpf_prog_aux *aux)
 {
-       return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
+       return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
 }
 
 /**
- * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME
  * mode
  * @task: Task struct for which callback should be scheduled
  * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
  * @map__map: bpf_map that embeds struct bpf_task_work in the values
  * @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
  *
  * Return: 0 if task work has been scheduled successfully, negative error code otherwise
  */
-__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
-                                                  struct bpf_task_work *tw, void *map__map,
-                                                  bpf_task_work_callback_t callback,
-                                                  void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
+                                             void *map__map, bpf_task_work_callback_t callback,
+                                             struct bpf_prog_aux *aux)
 {
-       return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
+       return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME);
 }
 
 static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep,
@@ -4536,8 +4534,8 @@ BTF_ID_FLAGS(func, bpf_strncasestr);
 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
 #endif
 BTF_ID_FLAGS(func, bpf_stream_vprintk_impl)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_dynptr_from_file)
 BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
 BTF_KFUNCS_END(common_btf_ids)
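
On the BPF program side, the kfuncs are now declared without the aux
parameter. A minimal sketch of such a declaration, assuming the usual
extern __ksym convention (the actual declarations live in the selftest
headers and are not part of this diff):

  /* Assumes struct bpf_task_work and bpf_task_work_callback_t are
   * available to the program (e.g. via vmlinux.h). The trailing
   * struct bpf_prog_aux *aux argument is implicit (KF_IMPLICIT_ARGS):
   * the verifier supplies it, so the program-visible prototype omits it.
   */
  extern int bpf_task_work_schedule_signal(struct task_struct *task,
					   struct bpf_task_work *tw,
					   void *map__map,
					   bpf_task_work_callback_t callback) __ksym;
  extern int bpf_task_work_schedule_resume(struct task_struct *task,
					   struct bpf_task_work *tw,
					   void *map__map,
					   bpf_task_work_callback_t callback) __ksym;
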
index 51e8c9f7086805176b8471fd2af72115d98b6077..8e8570e9d167a49f2206eda51f7157f3dce598fd 100644 (file)
@@ -12457,8 +12457,8 @@ enum special_kfunc_type {
        KF_bpf_dynptr_from_file,
        KF_bpf_dynptr_file_discard,
        KF___bpf_trap,
-       KF_bpf_task_work_schedule_signal_impl,
-       KF_bpf_task_work_schedule_resume_impl,
+       KF_bpf_task_work_schedule_signal,
+       KF_bpf_task_work_schedule_resume,
        KF_bpf_arena_alloc_pages,
        KF_bpf_arena_free_pages,
        KF_bpf_arena_reserve_pages,
@@ -12534,16 +12534,16 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
 BTF_ID(func, bpf_dynptr_from_file)
 BTF_ID(func, bpf_dynptr_file_discard)
 BTF_ID(func, __bpf_trap)
-BTF_ID(func, bpf_task_work_schedule_signal_impl)
-BTF_ID(func, bpf_task_work_schedule_resume_impl)
+BTF_ID(func, bpf_task_work_schedule_signal)
+BTF_ID(func, bpf_task_work_schedule_resume)
 BTF_ID(func, bpf_arena_alloc_pages)
 BTF_ID(func, bpf_arena_free_pages)
 BTF_ID(func, bpf_arena_reserve_pages)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
-       return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
-              func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
+       return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
+              func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
 }
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
index 4d756b6235579ee4c42ba70274e4f5615afce43a..462712ff3b8a0b0377531b72b4b9ee6b6d55eb7d 100644 (file)
@@ -77,7 +77,7 @@ int on_open_validate_file_read(void *c)
                err = 1;
                return 0;
        }
-       bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL);
+       bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback);
        return 0;
 }
 
index 663a80990f8f8759f470d2f874d02485b072a5e1..a6009d105158849f6e8f9008dd7163dd14ecb561 100644 (file)
@@ -65,8 +65,7 @@ int oncpu_hash_map(struct pt_regs *args)
        work = bpf_map_lookup_elem(&hmap, &key);
        if (!work)
                return 0;
-
-       bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+       bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
        return 0;
 }
 
@@ -80,7 +79,7 @@ int oncpu_array_map(struct pt_regs *args)
        work = bpf_map_lookup_elem(&arrmap, &key);
        if (!work)
                return 0;
-       bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
+       bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work);
        return 0;
 }
 
@@ -102,6 +101,6 @@ int oncpu_lru_map(struct pt_regs *args)
        work = bpf_map_lookup_elem(&lrumap, &key);
        if (!work || work->data[0])
                return 0;
-       bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
+       bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work);
        return 0;
 }
index 1270953fd0926f83f9ad7112d78e1b86bd1e802c..82e4b891333385d26abbf9987e23bb545f8728de 100644 (file)
@@ -53,7 +53,7 @@ int mismatch_map(struct pt_regs *args)
        work = bpf_map_lookup_elem(&arrmap, &key);
        if (!work)
                return 0;
-       bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+       bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
        return 0;
 }
 
@@ -65,7 +65,7 @@ int no_map_task_work(struct pt_regs *args)
        struct bpf_task_work tw;
 
        task = bpf_get_current_task_btf();
-       bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
+       bpf_task_work_schedule_resume(task, &tw, &hmap, process_work);
        return 0;
 }
 
@@ -76,7 +76,7 @@ int task_work_null(struct pt_regs *args)
        struct task_struct *task;
 
        task = bpf_get_current_task_btf();
-       bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
+       bpf_task_work_schedule_resume(task, NULL, &hmap, process_work);
        return 0;
 }
 
@@ -91,6 +91,6 @@ int map_null(struct pt_regs *args)
        work = bpf_map_lookup_elem(&arrmap, &key);
        if (!work)
                return 0;
-       bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
+       bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work);
        return 0;
 }
index 55e555f7f41be694f9b3d98f324b402f9355f77d..1d4378f351ef1a36df86749ce4dc14e6502972ee 100644 (file)
@@ -51,8 +51,8 @@ int schedule_task_work(void *ctx)
                if (!work)
                        return 0;
        }
-       err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
-                                                process_work, NULL);
+       err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
+                                           process_work);
        if (err)
                __sync_fetch_and_add(&schedule_error, 1);
        else
index 5d5e1cd4d51d996cfa200a2845507765a8c7cfae..39aff82549c9c661d3f2c9eb9809af978bb0cb35 100644 (file)
@@ -156,7 +156,7 @@ int task_work_non_sleepable_prog(void *ctx)
        if (!task)
                return 0;
 
-       bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+       bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
        return 0;
 }
 
@@ -176,6 +176,6 @@ int task_work_sleepable_prog(void *ctx)
        if (!task)
                return 0;
 
-       bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+       bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
        return 0;
 }