perf/core: Simplify perf_event_alloc()
author		Peter Zijlstra <peterz@infradead.org>
		Mon, 4 Nov 2024 13:39:17 +0000 (14:39 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Tue, 4 Mar 2025 08:42:40 +0000 (09:42 +0100)
Using the previous simplifications, transition perf_event_alloc() to
the scope-based cleanup helpers from <linux/cleanup.h> -- replacing the
goto-based error paths with automatic teardown and reducing error path
magic.

[ mingo: Ported it to recent kernels. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.410755241@infradead.org
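
[ A minimal userspace sketch of the DEFINE_FREE()/__free()/return_ptr()
  pattern this patch adopts, for readers new to the scope-based cleanup
  helpers. The macro bodies below are simplified stand-ins for the
  <linux/cleanup.h> originals (they assume GCC/Clang, which provide
  __attribute__((cleanup)) and __auto_type); struct event, event_alloc()
  and free_event() are hypothetical illustrations, not kernel code. ]

#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified userspace stand-ins for the kernel's <linux/cleanup.h>
 * helpers, built on the GCC/Clang cleanup attribute. The kernel's real
 * definitions add type checking; the shape is the same.
 */
#define DEFINE_FREE(name, type, free_expr)				\
	static inline void __free_##name(void *p)			\
	{ type _T = *(type *)p; free_expr; }

#define __free(name)	__attribute__((__cleanup__(__free_##name)))

/* Read the pointer, disarm the cleanup by NULLing it, hand back the value. */
#define no_free_ptr(p)	({ __auto_type __v = (p); (p) = NULL; __v; })
#define return_ptr(p)	return no_free_ptr(p)

/* Hypothetical object standing in for struct perf_event. */
struct event { int id; };

static void free_event(struct event *e)
{
	printf("auto-freeing event %d\n", e->id);
	free(e);
}

DEFINE_FREE(free_event, struct event *, if (_T) free_event(_T))

static struct event *event_alloc(int id)
{
	/* Armed from here on: freed on every return that doesn't disarm it. */
	struct event *event __free(free_event) = calloc(1, sizeof(*event));
	if (!event)
		return NULL;

	event->id = id;
	if (id < 0)		/* validation failure: no goto, no manual free */
		return NULL;

	return_ptr(event);	/* success: NULL the local, transfer ownership */
}

int main(void)
{
	struct event *bad = event_alloc(-1);	/* prints "auto-freeing event -1" */
	struct event *ok = event_alloc(1);	/* nothing printed */

	printf("bad=%p ok=%p\n", (void *)bad, (void *)ok);
	if (ok)
		free_event(ok);			/* caller owns it now */
	return 0;
}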
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fd352361259ca189eeafa7adb345167493d731a5..348a379d4f05102fddf1153286dc5aa7e4151a42 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5410,6 +5410,8 @@ static void __free_event(struct perf_event *event)
        call_rcu(&event->rcu_head, free_event_rcu);
 }
 
+DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
+
 /* vs perf_event_alloc() success */
 static void _free_event(struct perf_event *event)
 {
@@ -12291,7 +12293,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                 void *context, int cgroup_fd)
 {
        struct pmu *pmu;
-       struct perf_event *event;
        struct hw_perf_event *hwc;
        long err = -EINVAL;
        int node;
@@ -12306,8 +12307,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        }
 
        node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
-       event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
-                                     node);
+       struct perf_event *event __free(__free_event) =
+               kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
        if (!event)
                return ERR_PTR(-ENOMEM);
 
@@ -12414,65 +12415,53 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         * See perf_output_read().
         */
        if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-               goto err;
+               return ERR_PTR(-EINVAL);
 
        if (!has_branch_stack(event))
                event->attr.branch_sample_type = 0;
 
        pmu = perf_init_event(event);
-       if (IS_ERR(pmu)) {
-               err = PTR_ERR(pmu);
-               goto err;
-       }
+       if (IS_ERR(pmu))
+               return (void*)pmu;
 
        /*
         * Disallow uncore-task events. Similarly, disallow uncore-cgroup
         * events (they don't make sense as the cgroup will be different
         * on other CPUs in the uncore mask).
         */
-       if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
-               err = -EINVAL;
-               goto err;
-       }
+       if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
+               return ERR_PTR(-EINVAL);
 
        if (event->attr.aux_output &&
            (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
-            event->attr.aux_pause || event->attr.aux_resume)) {
-               err = -EOPNOTSUPP;
-               goto err;
-       }
+            event->attr.aux_pause || event->attr.aux_resume))
+               return ERR_PTR(-EOPNOTSUPP);
 
-       if (event->attr.aux_pause && event->attr.aux_resume) {
-               err = -EINVAL;
-               goto err;
-       }
+       if (event->attr.aux_pause && event->attr.aux_resume)
+               return ERR_PTR(-EINVAL);
 
        if (event->attr.aux_start_paused) {
-               if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
-                       err = -EOPNOTSUPP;
-                       goto err;
-               }
+               if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
+                       return ERR_PTR(-EOPNOTSUPP);
                event->hw.aux_paused = 1;
        }
 
        if (cgroup_fd != -1) {
                err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
                if (err)
-                       goto err;
+                       return ERR_PTR(err);
        }
 
        err = exclusive_event_init(event);
        if (err)
-               goto err;
+               return ERR_PTR(err);
 
        if (has_addr_filter(event)) {
                event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
                                                    sizeof(struct perf_addr_filter_range),
                                                    GFP_KERNEL);
-               if (!event->addr_filter_ranges) {
-                       err = -ENOMEM;
-                       goto err;
-               }
+               if (!event->addr_filter_ranges)
+                       return ERR_PTR(-ENOMEM);
 
                /*
                 * Clone the parent's vma offsets: they are valid until exec()
@@ -12496,23 +12485,19 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                        err = get_callchain_buffers(attr->sample_max_stack);
                        if (err)
-                               goto err;
+                               return ERR_PTR(err);
                        event->attach_state |= PERF_ATTACH_CALLCHAIN;
                }
        }
 
        err = security_perf_event_alloc(event);
        if (err)
-               goto err;
+               return ERR_PTR(err);
 
        /* symmetric to unaccount_event() in _free_event() */
        account_event(event);
 
-       return event;
-
-err:
-       __free_event(event);
-       return ERR_PTR(err);
+       return_ptr(event);
 }
 
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
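
[ Why return_ptr() closes the success path: it expands to
  return no_free_ptr(event), and no_free_ptr() reads the pointer, sets
  the local variable to NULL, and returns the saved value. The
  __free(__free_event) cleanup attached at allocation then sees NULL and
  skips teardown, while every earlier return leaves the pointer armed so
  __free_event() runs automatically -- the job the removed err: label
  used to do by hand. ]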