bpf: Migrate bpf_wq_set_callback_impl() to KF_IMPLICIT_ARGS
author Ihor Solodrai <ihor.solodrai@linux.dev>
Tue, 20 Jan 2026 22:26:32 +0000 (14:26 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 21 Jan 2026 00:15:57 +0000 (16:15 -0800)
Implement bpf_wq_set_callback() with an implicit bpf_prog_aux
argument, and remove bpf_wq_set_callback_impl().

Update special kfunc checks in the verifier accordingly.

Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev>
Link: https://lore.kernel.org/r/20260120222638.3976562-8-ihor.solodrai@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/helpers.c
kernel/bpf/verifier.c
tools/testing/selftests/bpf/bpf_experimental.h
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
tools/testing/selftests/bpf/progs/wq_failures.c
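
For context, here is a minimal sketch of how a BPF program calls the kfunc after this change, mirroring the updated selftests in the diff below. It is not part of the patch: the three-argument extern declaration is an assumption (this patch only removes the old bpf_wq_set_callback_impl() declaration and its wrapper macro from bpf_experimental.h and does not show where a replacement declaration lives), and the map layout (struct elem embedding a struct bpf_wq) and the SEC("tc") attach point are likewise illustrative.

/* Hedged sketch, not part of the patch: BPF-program-side usage of the
 * new three-argument bpf_wq_set_callback() kfunc. With KF_IMPLICIT_ARGS
 * the verifier supplies the struct bpf_prog_aux pointer implicitly, so
 * the program no longer passes a trailing NULL aux argument.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct elem {
        struct bpf_wq w;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
} wq_map SEC(".maps");

/* bpf_wq_init() is declared in the selftests' bpf_experimental.h (see the
 * diff below); the bpf_wq_set_callback() declaration here is assumed for
 * illustration only.
 */
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback(struct bpf_wq *wq,
                               int (callback_fn)(void *map, int *key, void *value),
                               unsigned int flags) __weak __ksym;

static int wq_cb(void *map, int *key, void *value)
{
        return 0;
}

SEC("tc")
int set_wq_cb(void *ctx)
{
        struct elem *val;
        int key = 0;

        val = bpf_map_lookup_elem(&wq_map, &key);
        if (!val)
                return 0;
        if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
                return 0;
        /* Three explicit arguments; no *_impl() wrapper anymore. */
        if (bpf_wq_set_callback(&val->w, wq_cb, 0) != 0)
                return 0;
        return 0;
}

char _license[] SEC("license") = "GPL";
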

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9eaa4185e0a79b903c6fc2ccb310f521a4b14a1d..c76a9003b221889ed99c7114df205031127e5f69 100644 (file)
@@ -3120,12 +3120,11 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
        return 0;
 }
 
-__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
-                                        int (callback_fn)(void *map, int *key, void *value),
-                                        unsigned int flags,
-                                        void *aux__prog)
+__bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq,
+                                   int (callback_fn)(void *map, int *key, void *value),
+                                   unsigned int flags,
+                                   struct bpf_prog_aux *aux)
 {
-       struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
        struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
 
        if (flags)
@@ -4488,7 +4487,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_memset)
 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
 #endif
 BTF_ID_FLAGS(func, bpf_wq_init)
-BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
+BTF_ID_FLAGS(func, bpf_wq_set_callback, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_wq_start)
 BTF_ID_FLAGS(func, bpf_preempt_disable)
 BTF_ID_FLAGS(func, bpf_preempt_enable)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index adc24a2ce5b6f19cf135b2b644e36b227ae7242f..51e8c9f7086805176b8471fd2af72115d98b6077 100644 (file)
@@ -520,7 +520,7 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
 static bool is_callback_calling_kfunc(u32 btf_id);
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
 
-static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
 static bool is_task_work_add_kfunc(u32 func_id);
 
 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
@@ -562,7 +562,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
 
        /* bpf_wq and bpf_task_work callbacks are always sleepable. */
        if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
-           (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+           (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
                return true;
 
        verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
@@ -12437,7 +12437,7 @@ enum special_kfunc_type {
        KF_bpf_percpu_obj_new_impl,
        KF_bpf_percpu_obj_drop_impl,
        KF_bpf_throw,
-       KF_bpf_wq_set_callback_impl,
+       KF_bpf_wq_set_callback,
        KF_bpf_preempt_disable,
        KF_bpf_preempt_enable,
        KF_bpf_iter_css_task_new,
@@ -12501,7 +12501,7 @@ BTF_ID(func, bpf_dynptr_clone)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
-BTF_ID(func, bpf_wq_set_callback_impl)
+BTF_ID(func, bpf_wq_set_callback)
 BTF_ID(func, bpf_preempt_disable)
 BTF_ID(func, bpf_preempt_enable)
 #ifdef CONFIG_CGROUPS
@@ -12994,7 +12994,7 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
 
 static bool is_async_callback_calling_kfunc(u32 btf_id)
 {
-       return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] ||
+       return is_bpf_wq_set_callback_kfunc(btf_id) ||
               is_task_work_add_kfunc(btf_id);
 }
 
@@ -13004,9 +13004,9 @@ static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
               insn->imm == special_kfunc_list[KF_bpf_throw];
 }
 
-static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
+static bool is_bpf_wq_set_callback_kfunc(u32 btf_id)
 {
-       return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+       return btf_id == special_kfunc_list[KF_bpf_wq_set_callback];
 }
 
 static bool is_callback_calling_kfunc(u32 btf_id)
@@ -14085,7 +14085,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                meta.r0_rdonly = false;
        }
 
-       if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
+       if (is_bpf_wq_set_callback_kfunc(meta.func_id)) {
                err = push_callback_call(env, insn, insn_idx, meta.subprogno,
                                         set_timer_callback_state);
                if (err) {
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 2cd9165c734839427041339acd3f562ed4e462a2..68a49b1f77ae431d1ebc17bdc824f1fe59324dfa 100644 (file)
@@ -580,11 +580,6 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
 
 extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
 extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
-extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
-               int (callback_fn)(void *map, int *key, void *value),
-               unsigned int flags__k, void *aux__ign) __ksym;
-#define bpf_wq_set_callback(timer, cb, flags) \
-       bpf_wq_set_callback_impl(timer, cb, flags, NULL)
 
 struct bpf_iter_kmem_cache;
 extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
diff --git a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
index 7efa9521105e1b2597f23b1f07928f89c1bba0f2..5d5e1cd4d51d996cfa200a2845507765a8c7cfae 100644 (file)
@@ -96,7 +96,7 @@ int wq_non_sleepable_prog(void *ctx)
 
        if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
                return 0;
-       if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+       if (bpf_wq_set_callback(&val->w, wq_cb, 0) != 0)
                return 0;
        return 0;
 }
@@ -114,7 +114,7 @@ int wq_sleepable_prog(void *ctx)
 
        if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
                return 0;
-       if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+       if (bpf_wq_set_callback(&val->w, wq_cb, 0) != 0)
                return 0;
        return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index d06f6d40594a6585f9813f9e54f54022959d391a..3767f5595bbc2db9b003d1dc2641d150c016c190 100644 (file)
@@ -97,7 +97,7 @@ __failure
 /* check that the first argument of bpf_wq_set_callback()
  * is a correct bpf_wq pointer.
  */
-__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
+__msg(": (85) call bpf_wq_set_callback#") /* anchor message */
 __msg("arg#0 doesn't point to a map value")
 long test_wrong_wq_pointer(void *ctx)
 {
@@ -123,7 +123,7 @@ __failure
 /* check that the first argument of bpf_wq_set_callback()
  * is a correct bpf_wq pointer.
  */
-__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
+__msg(": (85) call bpf_wq_set_callback#") /* anchor message */
 __msg("off 1 doesn't point to 'struct bpf_wq' that is at 0")
 long test_wrong_wq_pointer_offset(void *ctx)
 {