perf: Ensure bpf_perf_link path is properly serialized
author Peter Zijlstra <peterz@infradead.org>
Fri, 17 Jan 2025 09:54:50 +0000 (10:54 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Jun 2025 13:32:32 +0000 (15:32 +0200)
[ Upstream commit 7ed9138a72829d2035ecbd8dbd35b1bc3c137c40 ]

Ravi reported that the bpf_perf_link_attach() usage of
perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
PERF_EVENT_IOC_SET_BPF case.

Reported-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
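
For readers following the patch below: the fix uses the common kernel pattern of splitting a function into an unlocked __perf_event_set_bpf_prog() helper plus a locked public wrapper. _perf_ioctl() already runs under perf_event_ctx_lock() (which takes ctx->mutex), so it switches to the __ helper, while the exported perf_event_set_bpf_prog() now takes and drops the lock itself, serializing the bpf_perf_link_attach() path. A minimal userspace sketch of the pattern, with a pthread mutex standing in for ctx->mutex and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t mutex;	/* stands in for perf_event_context::mutex */
	int state;
};

/* Unlocked helper: the caller must already hold ctx->mutex,
 * as _perf_ioctl() does via perf_event_ctx_lock(). */
static int __set_state(struct ctx *ctx, int val)
{
	ctx->state = val;	/* the actual work, done under the lock */
	return 0;
}

/* Public entry point: serializes on the mutex itself, which is what
 * perf_event_set_bpf_prog() now does for the bpf_perf_link path. */
static int set_state(struct ctx *ctx, int val)
{
	int ret;

	pthread_mutex_lock(&ctx->mutex);
	ret = __set_state(ctx, val);
	pthread_mutex_unlock(&ctx->mutex);

	return ret;
}

int main(void)
{
	struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER, .state = 0 };

	set_state(&c, 1);	/* external caller: takes the lock itself */
	printf("state = %d\n", c.state);
	return 0;
}

Internal callers that already hold the lock use the __ variant; every other entry point goes through the locking wrapper, so it cannot race the locked ioctl path.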
kernel/events/core.c

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8352376d8215447a4d975141d1f63be557c4c5e3..9ce82904f761d2380f2f6b56071ab2291d304a3d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6031,6 +6031,9 @@ static int perf_event_set_output(struct perf_event *event,
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
                          struct perf_event_attr *attr);
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie);
 
 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
@@ -6099,7 +6102,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
                if (IS_ERR(prog))
                        return PTR_ERR(prog);
 
-               err = perf_event_set_bpf_prog(event, prog, 0);
+               err = __perf_event_set_bpf_prog(event, prog, 0);
                if (err) {
                        bpf_prog_put(prog);
                        return err;
@@ -10756,8 +10759,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
        return false;
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
-                           u64 bpf_cookie)
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie)
 {
        bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
 
@@ -10795,6 +10799,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
        return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
 }
 
+int perf_event_set_bpf_prog(struct perf_event *event,
+                           struct bpf_prog *prog,
+                           u64 bpf_cookie)
+{
+       struct perf_event_context *ctx;
+       int ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
+       perf_event_ctx_unlock(event, ctx);
+
+       return ret;
+}
+
 void perf_event_free_bpf_prog(struct perf_event *event)
 {
        if (!perf_event_is_tracing(event)) {
@@ -10814,7 +10832,15 @@ static void perf_event_free_filter(struct perf_event *event)
 {
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie)
+{
+       return -ENOENT;
+}
+
+int perf_event_set_bpf_prog(struct perf_event *event,
+                           struct bpf_prog *prog,
                            u64 bpf_cookie)
 {
        return -ENOENT;
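
Note: the !CONFIG_BPF_EVENTS stub is duplicated on purpose. _perf_ioctl() now calls __perf_event_set_bpf_prog() through the forward declaration added above, so when BPF events are compiled out both the static __ helper and the exported perf_event_set_bpf_prog() wrapper must still exist, each returning -ENOENT.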