perf/core: Clean up perf_try_init_event()
author    Peter Zijlstra <peterz@infradead.org>
          Wed, 5 Feb 2025 10:21:28 +0000 (11:21 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 29 May 2025 09:12:57 +0000 (11:12 +0200)
[ Upstream commit da02f54e81db2f7bf6af9d1d0cfc5b41ec6d0dcb ]

Make sure that perf_try_init_event() doesn't leave event->pmu or
event->destroy set on failure.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20250205102449.110145835@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
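
Why this matters: perf_try_init_event() is used by perf_init_event() while
probing candidate PMUs for an event, so whatever a failed attempt leaves
behind in the event is visible to the next attempt. Below is a condensed
sketch of that caller side (an illustration of the probing pattern, not the
literal kernel code; the real lookup order and error handling are more
involved):

	/*
	 * Condensed sketch of the caller (not the literal kernel code):
	 * perf_init_event() probes candidate PMUs in turn, so a failed
	 * perf_try_init_event() must leave the event as it found it.
	 */
	static struct pmu *perf_init_event_sketch(struct perf_event *event)
	{
		struct pmu *pmu;
		int ret;

		list_for_each_entry_rcu(pmu, &pmus, entry) {
			ret = perf_try_init_event(pmu, event);
			if (!ret)
				return pmu;		/* this PMU took the event */
			if (ret != -ENOENT)
				return ERR_PTR(ret);	/* hard failure: stop probing */
			/*
			 * -ENOENT means "not my event": try the next PMU.
			 * That only works if the failed attempt left
			 * event->pmu and event->destroy NULL, which is
			 * what this patch guarantees.
			 */
		}
		return ERR_PTR(-ENOENT);
	}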
kernel/events/core.c

index 93ce810384c92ce6efc7249f4fa35cab52a64c26..de838d3819ca7f791772a23c18011e2f70589441 100644
@@ -12020,40 +12020,51 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
        if (ctx)
                perf_event_ctx_unlock(event->group_leader, ctx);
 
-       if (!ret) {
-               if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
-                   has_extended_regs(event))
-                       ret = -EOPNOTSUPP;
+       if (ret)
+               goto err_pmu;
 
-               if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
-                   event_has_any_exclude_flag(event))
-                       ret = -EINVAL;
+       if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
+           has_extended_regs(event)) {
+               ret = -EOPNOTSUPP;
+               goto err_destroy;
+       }
 
-               if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
-                       const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
-                       struct cpumask *pmu_cpumask = perf_scope_cpumask(pmu->scope);
-                       int cpu;
-
-                       if (pmu_cpumask && cpumask) {
-                               cpu = cpumask_any_and(pmu_cpumask, cpumask);
-                               if (cpu >= nr_cpu_ids)
-                                       ret = -ENODEV;
-                               else
-                                       event->event_caps |= PERF_EV_CAP_READ_SCOPE;
-                       } else {
-                               ret = -ENODEV;
-                       }
-               }
+       if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
+           event_has_any_exclude_flag(event)) {
+               ret = -EINVAL;
+               goto err_destroy;
+       }
 
-               if (ret && event->destroy)
-                       event->destroy(event);
+       if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
+               const struct cpumask *cpumask;
+               struct cpumask *pmu_cpumask;
+               int cpu;
+
+               cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
+               pmu_cpumask = perf_scope_cpumask(pmu->scope);
+
+               ret = -ENODEV;
+               if (!pmu_cpumask || !cpumask)
+                       goto err_destroy;
+
+               cpu = cpumask_any_and(pmu_cpumask, cpumask);
+               if (cpu >= nr_cpu_ids)
+                       goto err_destroy;
+
+               event->event_caps |= PERF_EV_CAP_READ_SCOPE;
        }
 
-       if (ret) {
-               event->pmu = NULL;
-               module_put(pmu->module);
+       return 0;
+
+err_destroy:
+       if (event->destroy) {
+               event->destroy(event);
+               event->destroy = NULL;
        }
 
+err_pmu:
+       event->pmu = NULL;
+       module_put(pmu->module);
        return ret;
 }
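
To make the invariant concrete, here is a compact, self-contained userspace
model of the pattern (plain C; names such as CAP_WANTED, noisy_destroy and
init_ok are invented for the demo — this is an illustration, not kernel
code). A probe loop tries two mock PMUs; the first passes event_init() but
fails a capability check, and the err_destroy/err_pmu unwind clears both
pointers before the next probe runs:

	#include <stdio.h>
	#include <errno.h>	/* EOPNOTSUPP (Linux value used here) */

	struct event;

	struct pmu {
		const char *name;
		unsigned int capabilities;
		int (*event_init)(struct event *ev);
	};

	struct event {
		struct pmu *pmu;
		void (*destroy)(struct event *ev);
	};

	#define CAP_WANTED 0x1	/* stand-in for a capability check */

	static void noisy_destroy(struct event *ev)
	{
		printf("  destroy callback of '%s' runs\n", ev->pmu->name);
	}

	/* init succeeds and registers a destroy callback; whether the
	 * probe sticks is decided by the later capability check. */
	static int init_ok(struct event *ev)
	{
		ev->destroy = noisy_destroy;
		return 0;
	}

	static int try_init_event(struct pmu *pmu, struct event *ev)
	{
		int ret;

		ev->pmu = pmu;
		ret = pmu->event_init(ev);
		if (ret)
			goto err_pmu;

		if (!(pmu->capabilities & CAP_WANTED)) {
			ret = -EOPNOTSUPP;
			goto err_destroy;
		}
		return 0;

	err_destroy:
		if (ev->destroy) {
			ev->destroy(ev);
			ev->destroy = NULL;	/* the fix: no stale callback */
		}
	err_pmu:
		ev->pmu = NULL;			/* the fix: no stale pmu */
		return ret;
	}

	int main(void)
	{
		struct pmu a = { .name = "a", .capabilities = 0, .event_init = init_ok };
		struct pmu b = { .name = "b", .capabilities = CAP_WANTED, .event_init = init_ok };
		struct pmu *pmus[] = { &a, &b };
		struct event ev = { 0 };

		for (unsigned int i = 0; i < 2; i++) {
			printf("probing pmu '%s'\n", pmus[i]->name);
			if (!try_init_event(pmus[i], &ev)) {
				printf("  event bound to '%s'\n", ev.pmu->name);
				break;
			}
			/* here ev.pmu and ev.destroy are NULL again */
		}
		return 0;
	}

With the pre-patch code, the destroy callback registered by the first PMU
would survive its failed probe, leaving a stale pointer that a later error
path could invoke even though that PMU had already rejected the event; the
goto-based unwind above is the standard kernel idiom for tearing down in
reverse order of setup.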