.arg3_type = ARG_ANYTHING,
};
+/*
+ * Atomically (wrt. concurrent callers) update the (prog, callback_fn) pair of
+ * @cb without holding a lock, transferring one prog reference into @cb.
+ * Passing prog == NULL && callback_fn == NULL clears the callback and drops
+ * the reference previously held by @cb.
+ *
+ * Returns 0 on success, or a negative errno if the initial guard reference on
+ * @prog could not be taken (prog already going away).
+ */
+static int bpf_async_update_prog_callback(struct bpf_async_cb *cb, void *callback_fn,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *prev;
+
+ /* Acquire a guard reference on prog to prevent it from being freed during the loop */
+ if (prog) {
+ prog = bpf_prog_inc_not_zero(prog);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ do {
+ /*
+ * Take one reference per iteration to store into cb->prog.
+ * NOTE(review): the return value is deliberately not checked
+ * here — the guard reference taken above keeps the refcount
+ * non-zero for the duration of the loop, so this
+ * bpf_prog_inc_not_zero() cannot fail in this window.
+ */
+ if (prog)
+ prog = bpf_prog_inc_not_zero(prog);
+ prev = xchg(&cb->prog, prog);
+ rcu_assign_pointer(cb->callback_fn, callback_fn);
+
+ /*
+ * Release the previous prog. If another CPU is contending to
+ * set cb->prog, no references are leaked: each iteration
+ * acquires exactly one reference (above) and releases exactly
+ * one (here or via a racing CPU's xchg).
+ */
+ if (prev)
+ bpf_prog_put(prev);
+
+ /*
+ * Retry if a racing update overwrote either field, so that on
+ * exit cb->prog and cb->callback_fn form a consistent pair.
+ */
+ } while (READ_ONCE(cb->prog) != prog || READ_ONCE(cb->callback_fn) != callback_fn);
+
+ /* Drop the guard reference; cb->prog still holds its own reference. */
+ if (prog)
+ bpf_prog_put(prog);
+
+ return 0;
+}
+
static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
struct bpf_prog *prog)
{
- struct bpf_prog *prev;
struct bpf_async_cb *cb;
int ret = 0;
ret = -EPERM;
goto out;
}
- prev = cb->prog;
- if (prev != prog) {
- /* Bump prog refcnt once. Every bpf_timer_set_callback()
- * can pick different callback_fn-s within the same prog.
- */
- prog = bpf_prog_inc_not_zero(prog);
- if (IS_ERR(prog)) {
- ret = PTR_ERR(prog);
- goto out;
- }
- if (prev)
- /* Drop prev prog refcnt when swapping with new prog */
- bpf_prog_put(prev);
- cb->prog = prog;
- }
- rcu_assign_pointer(cb->callback_fn, callback_fn);
+ ret = bpf_async_update_prog_callback(cb, callback_fn, prog);
out:
__bpf_spin_unlock_irqrestore(&async->lock);
return ret;
.arg3_type = ARG_ANYTHING,
};
-static void drop_prog_refcnt(struct bpf_async_cb *async)
-{
- struct bpf_prog *prog = async->prog;
-
- if (prog) {
- bpf_prog_put(prog);
- async->prog = NULL;
- rcu_assign_pointer(async->callback_fn, NULL);
- }
-}
-
BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
struct bpf_hrtimer *t, *cur_t;
goto out;
}
drop:
- drop_prog_refcnt(&t->cb);
+ bpf_async_update_prog_callback(&t->cb, NULL, NULL);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
/* Cancel the timer and wait for associated callback to finish
cb = async->cb;
if (!cb)
goto out;
- drop_prog_refcnt(cb);
+ bpf_async_update_prog_callback(cb, NULL, NULL);
/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
* this timer, since it won't be initialized.
*/