perf/core: Simplify the perf_event_alloc() error path
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Mon, 4 Nov 2024 13:39:13 +0000 (14:39 +0100)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 4 Mar 2025 08:42:14 +0000 (09:42 +0100)
The error cleanup sequence in perf_event_alloc() is a subset of the
existing _free_event() function (as it must be).

Split this out into __free_event() and simplify the error path.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135517.967889521@infradead.org
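
For orientation, here is a minimal standalone sketch (not kernel code;
obj_alloc(), obj_free() and ATTACH_A are invented names) of the pattern
this patch converges on: each init step records a flag in attach_state
once it has succeeded, and every failure site jumps to a single err:
label whose teardown consults those flags, so it only undoes the steps
that actually completed.

#include <stdio.h>
#include <stdlib.h>

#define ATTACH_A 0x01                   /* step A completed */

struct obj {
        unsigned int attach_state;
        char *buf;
};

/* vs obj_alloc() error: undoes exactly what attach_state records. */
static void obj_free(struct obj *o)
{
        free(o->buf);                   /* free(NULL) is a no-op, like kfree() */
        if (o->attach_state & ATTACH_A)
                printf("undoing step A\n");
        free(o);
}

static struct obj *obj_alloc(int fail_step)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return NULL;

        if (fail_step == 1)
                goto err;               /* no flag set yet, nothing to undo */
        o->attach_state |= ATTACH_A;    /* record success *after* the step */

        o->buf = malloc(64);
        if (!o->buf || fail_step == 2)
                goto err;

        return o;

err:    /* one label with flag-driven teardown, instead of a label ladder */
        obj_free(o);
        return NULL;
}

int main(void)
{
        struct obj *o = obj_alloc(0);

        if (o)
                obj_free(o);            /* success path reuses the teardown */
        return 0;
}

The real patch does the same with PERF_ATTACH_EXCLUSIVE and
PERF_ATTACH_CALLCHAIN, letting perf_event_alloc() funnel every failure
into one err: label that calls __free_event().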
include/linux/perf_event.h
kernel/events/core.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c4525bae2fe9f98f731234d560e74f6dd499df46..8c0117bcbdb9f00477fd5cef5d992e351dd209e1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -673,13 +673,15 @@ struct swevent_hlist {
        struct rcu_head                 rcu_head;
 };
 
-#define PERF_ATTACH_CONTEXT    0x01
-#define PERF_ATTACH_GROUP      0x02
-#define PERF_ATTACH_TASK       0x04
-#define PERF_ATTACH_TASK_DATA  0x08
-#define PERF_ATTACH_ITRACE     0x10
-#define PERF_ATTACH_SCHED_CB   0x20
-#define PERF_ATTACH_CHILD      0x40
+#define PERF_ATTACH_CONTEXT    0x0001
+#define PERF_ATTACH_GROUP      0x0002
+#define PERF_ATTACH_TASK       0x0004
+#define PERF_ATTACH_TASK_DATA  0x0008
+#define PERF_ATTACH_ITRACE     0x0010
+#define PERF_ATTACH_SCHED_CB   0x0020
+#define PERF_ATTACH_CHILD      0x0040
+#define PERF_ATTACH_EXCLUSIVE  0x0080
+#define PERF_ATTACH_CALLCHAIN  0x0100
 
 struct bpf_prog;
 struct perf_cgroup;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6ccf363d73cc65344491926421008939797904d0..1b8b1c8f41ff2fb572af7b0cd680c84ab623d6a6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5289,6 +5289,8 @@ static int exclusive_event_init(struct perf_event *event)
                        return -EBUSY;
        }
 
+       event->attach_state |= PERF_ATTACH_EXCLUSIVE;
+
        return 0;
 }
 
@@ -5296,14 +5298,13 @@ static void exclusive_event_destroy(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
 
-       if (!is_exclusive_pmu(pmu))
-               return;
-
        /* see comment in exclusive_event_init() */
        if (event->attach_state & PERF_ATTACH_TASK)
                atomic_dec(&pmu->exclusive_cnt);
        else
                atomic_inc(&pmu->exclusive_cnt);
+
+       event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
 }
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
@@ -5362,40 +5363,20 @@ static void perf_pending_task_sync(struct perf_event *event)
        rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }
 
-static void _free_event(struct perf_event *event)
+/* vs perf_event_alloc() error */
+static void __free_event(struct perf_event *event)
 {
-       irq_work_sync(&event->pending_irq);
-       irq_work_sync(&event->pending_disable_irq);
-       perf_pending_task_sync(event);
+       if (event->attach_state & PERF_ATTACH_CALLCHAIN)
+               put_callchain_buffers();
 
-       unaccount_event(event);
+       kfree(event->addr_filter_ranges);
 
-       security_perf_event_free(event);
-
-       if (event->rb) {
-               /*
-                * Can happen when we close an event with re-directed output.
-                *
-                * Since we have a 0 refcount, perf_mmap_close() will skip
-                * over us; possibly making our ring_buffer_put() the last.
-                */
-               mutex_lock(&event->mmap_mutex);
-               ring_buffer_attach(event, NULL);
-               mutex_unlock(&event->mmap_mutex);
-       }
+       if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
+               exclusive_event_destroy(event);
 
        if (is_cgroup_event(event))
                perf_detach_cgroup(event);
 
-       if (!event->parent) {
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
-       }
-
-       perf_event_free_bpf_prog(event);
-       perf_addr_filters_splice(event, NULL);
-       kfree(event->addr_filter_ranges);
-
        if (event->destroy)
                event->destroy(event);
 
@@ -5406,22 +5387,58 @@ static void _free_event(struct perf_event *event)
        if (event->hw.target)
                put_task_struct(event->hw.target);
 
-       if (event->pmu_ctx)
+       if (event->pmu_ctx) {
+               /*
+                * put_pmu_ctx() needs an event->ctx reference, because of
+                * epc->ctx.
+                */
+               WARN_ON_ONCE(!event->ctx);
+               WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
                put_pmu_ctx(event->pmu_ctx);
+       }
 
        /*
-        * perf_event_free_task() relies on put_ctx() being 'last', in particular
-        * all task references must be cleaned up.
+        * perf_event_free_task() relies on put_ctx() being 'last', in
+        * particular all task references must be cleaned up.
         */
        if (event->ctx)
                put_ctx(event->ctx);
 
-       exclusive_event_destroy(event);
-       module_put(event->pmu->module);
+       if (event->pmu)
+               module_put(event->pmu->module);
 
        call_rcu(&event->rcu_head, free_event_rcu);
 }
 
+/* vs perf_event_alloc() success */
+static void _free_event(struct perf_event *event)
+{
+       irq_work_sync(&event->pending_irq);
+       irq_work_sync(&event->pending_disable_irq);
+       perf_pending_task_sync(event);
+
+       unaccount_event(event);
+
+       security_perf_event_free(event);
+
+       if (event->rb) {
+               /*
+                * Can happen when we close an event with re-directed output.
+                *
+                * Since we have a 0 refcount, perf_mmap_close() will skip
+                * over us; possibly making our ring_buffer_put() the last.
+                */
+               mutex_lock(&event->mmap_mutex);
+               ring_buffer_attach(event, NULL);
+               mutex_unlock(&event->mmap_mutex);
+       }
+
+       perf_event_free_bpf_prog(event);
+       perf_addr_filters_splice(event, NULL);
+
+       __free_event(event);
+}
+
 /*
  * Used to free events which have a known refcount of 1, such as in error paths
  * where the event isn't exposed yet and inherited events.
@@ -12093,8 +12110,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                        event->destroy(event);
        }
 
-       if (ret)
+       if (ret) {
+               event->pmu = NULL;
                module_put(pmu->module);
+       }
 
        return ret;
 }
@@ -12422,7 +12441,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         * See perf_output_read().
         */
        if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-               goto err_ns;
+               goto err;
 
        if (!has_branch_stack(event))
                event->attr.branch_sample_type = 0;
@@ -12430,7 +12449,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        pmu = perf_init_event(event);
        if (IS_ERR(pmu)) {
                err = PTR_ERR(pmu);
-               goto err_ns;
+               goto err;
        }
 
        /*
@@ -12440,25 +12459,25 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         */
        if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
                err = -EINVAL;
-               goto err_pmu;
+               goto err;
        }
 
        if (event->attr.aux_output &&
            (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
             event->attr.aux_pause || event->attr.aux_resume)) {
                err = -EOPNOTSUPP;
-               goto err_pmu;
+               goto err;
        }
 
        if (event->attr.aux_pause && event->attr.aux_resume) {
                err = -EINVAL;
-               goto err_pmu;
+               goto err;
        }
 
        if (event->attr.aux_start_paused) {
                if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
                        err = -EOPNOTSUPP;
-                       goto err_pmu;
+                       goto err;
                }
                event->hw.aux_paused = 1;
        }
@@ -12466,12 +12485,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        if (cgroup_fd != -1) {
                err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
                if (err)
-                       goto err_pmu;
+                       goto err;
        }
 
        err = exclusive_event_init(event);
        if (err)
-               goto err_pmu;
+               goto err;
 
        if (has_addr_filter(event)) {
                event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
@@ -12479,7 +12498,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                                                    GFP_KERNEL);
                if (!event->addr_filter_ranges) {
                        err = -ENOMEM;
-                       goto err_per_task;
+                       goto err;
                }
 
                /*
@@ -12504,41 +12523,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                        err = get_callchain_buffers(attr->sample_max_stack);
                        if (err)
-                               goto err_addr_filters;
+                               goto err;
+                       event->attach_state |= PERF_ATTACH_CALLCHAIN;
                }
        }
 
        err = security_perf_event_alloc(event);
        if (err)
-               goto err_callchain_buffer;
+               goto err;
 
        /* symmetric to unaccount_event() in _free_event() */
        account_event(event);
 
        return event;
 
-err_callchain_buffer:
-       if (!event->parent) {
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
-       }
-err_addr_filters:
-       kfree(event->addr_filter_ranges);
-
-err_per_task:
-       exclusive_event_destroy(event);
-
-err_pmu:
-       if (is_cgroup_event(event))
-               perf_detach_cgroup(event);
-       if (event->destroy)
-               event->destroy(event);
-       module_put(pmu->module);
-err_ns:
-       if (event->hw.target)
-               put_task_struct(event->hw.target);
-       call_rcu(&event->rcu_head, free_event_rcu);
-
+err:
+       __free_event(event);
        return ERR_PTR(err);
 }
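
One subtlety of sharing __free_event() between the error and success
paths is that no resource may be released twice. Below is a sketch of
the NULL-guard idiom behind the perf_try_init_event() hunk above
(module_like, try_init() and teardown() are invented names): when init
fails, it drops the module reference itself and clears the pointer, so
the shared teardown sees nothing left to put.

#include <stdio.h>

struct module_like { int refs; };
struct res { struct module_like *mod; };

static void put_module_like(struct module_like *m)
{
        m->refs--;
}

static int try_init(struct res *r, struct module_like *m, int fail)
{
        m->refs++;                      /* init takes a reference */
        r->mod = m;
        if (fail) {
                r->mod = NULL;          /* shared teardown must not put it again */
                put_module_like(m);     /* this path drops the reference itself */
                return -1;
        }
        return 0;
}

static void teardown(struct res *r)
{
        if (r->mod)                     /* only put what we still hold */
                put_module_like(r->mod);
}

int main(void)
{
        struct module_like m = { 0 };
        struct res r = { NULL };

        if (try_init(&r, &m, 1))        /* force the failing path */
                teardown(&r);           /* safe: no double put */
        printf("refs = %d\n", m.refs);  /* prints 0 */
        return 0;
}

This mirrors why the patch sets event->pmu = NULL before module_put()
in perf_try_init_event(): __free_event() only calls module_put() when
event->pmu is non-NULL.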