perf/core: Simplify the perf_pmu_register() error path
author    Peter Zijlstra <peterz@infradead.org>
          Mon, 4 Nov 2024 13:39:14 +0000 (14:39 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Tue, 4 Mar 2025 08:42:26 +0000 (09:42 +0100)
The error path of perf_pmu_register() is of course very similar to a
subset of perf_pmu_unregister(). Extract this common part into
perf_pmu_free() and simplify things.

[ mingo: Forward ported it ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.090915501@infradead.org
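
For illustration only, the sketch below shows the general idiom the patch applies, in plain userspace C rather than kernel code: a single teardown helper that tolerates a partially initialized object, shared by the registration error path and the unregister path, with every failure in the register function falling through to one label. All names here (struct widget, widget_free, widget_register, widget_unregister) are hypothetical and stand in for the pmu equivalents.

    /*
     * Userspace sketch of the "one free helper, one error label" pattern.
     * Not kernel code; names are made up for illustration.
     */
    #include <stdlib.h>
    #include <string.h>

    struct widget {
            int   id;        /* -1 until an id has been assigned */
            int  *counters;  /* NULL until allocated */
            char *name;      /* NULL until allocated */
    };

    /* Frees whatever has been set up so far; safe on a half-built widget. */
    static void widget_free(struct widget *w)
    {
            free(w->counters);
            w->counters = NULL;
            free(w->name);
            w->name = NULL;
            w->id = -1;
    }

    /* Single error label: every failure falls through to widget_free(). */
    static int widget_register(struct widget *w, const char *name)
    {
            int ret = -1;

            w->id = -1;

            w->counters = calloc(16, sizeof(*w->counters));
            if (!w->counters)
                    goto fail;

            w->name = strdup(name);
            if (!w->name)
                    goto fail;

            w->id = 42;     /* stands in for the id allocation succeeding */
            return 0;

    fail:
            widget_free(w);
            return ret;
    }

    static void widget_unregister(struct widget *w)
    {
            /* Same teardown as the error path. */
            widget_free(w);
    }

    int main(void)
    {
            struct widget w = { .id = -1 };

            if (widget_register(&w, "example") == 0)
                    widget_unregister(&w);
            return 0;
    }

In the actual patch the same property comes from initializing the state before anything can fail: pmu->type is set to -1 up front (so the free label only calls idr_remove() when an id was really allocated) and pmu->dev is cleared after put_device() in pmu_dev_alloc()'s error path, so perf_pmu_free() can tell from the fields themselves how far registration got.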
kernel/events/core.c

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1b8b1c8f41ff2fb572af7b0cd680c84ab623d6a6..ee5cdd692383a4080a2ad0069a421a0a5d7ff7f3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11675,11 +11675,6 @@ static int perf_event_idx_default(struct perf_event *event)
        return 0;
 }
 
-static void free_pmu_context(struct pmu *pmu)
-{
-       free_percpu(pmu->cpu_pmu_context);
-}
-
 /*
  * Let userspace know that this PMU supports address range filtering:
  */
@@ -11885,6 +11880,7 @@ del_dev:
 
 free_dev:
        put_device(pmu->dev);
+       pmu->dev = NULL;
        goto out;
 }
 
@@ -11906,25 +11902,38 @@ static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
        return true;
 }
 
+static void perf_pmu_free(struct pmu *pmu)
+{
+       free_percpu(pmu->pmu_disable_count);
+       if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
+               if (pmu->nr_addr_filters)
+                       device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+               device_del(pmu->dev);
+               put_device(pmu->dev);
+       }
+       free_percpu(pmu->cpu_pmu_context);
+}
+
 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 {
        int cpu, ret, max = PERF_TYPE_MAX;
 
+       pmu->type = -1;
+
        mutex_lock(&pmus_lock);
        ret = -ENOMEM;
        pmu->pmu_disable_count = alloc_percpu(int);
        if (!pmu->pmu_disable_count)
                goto unlock;
 
-       pmu->type = -1;
        if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
                ret = -EINVAL;
-               goto free_pdc;
+               goto free;
        }
 
        if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
                ret = -EINVAL;
-               goto free_pdc;
+               goto free;
        }
 
        pmu->name = name;
@@ -11934,24 +11943,23 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 
        ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
        if (ret < 0)
-               goto free_pdc;
+               goto free;
 
        WARN_ON(type >= 0 && ret != type);
 
-       type = ret;
-       pmu->type = type;
+       pmu->type = ret;
        atomic_set(&pmu->exclusive_cnt, 0);
 
        if (pmu_bus_running && !pmu->dev) {
                ret = pmu_dev_alloc(pmu);
                if (ret)
-                       goto free_idr;
+                       goto free;
        }
 
        ret = -ENOMEM;
        pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
        if (!pmu->cpu_pmu_context)
-               goto free_dev;
+               goto free;
 
        for_each_possible_cpu(cpu) {
                struct perf_cpu_pmu_context *cpc;
@@ -11992,8 +12000,10 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
        /*
         * Now that the PMU is complete, make it visible to perf_try_init_event().
         */
-       if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
-               goto free_context;
+       if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
+               ret = -EINVAL;
+               goto free;
+       }
        list_add_rcu(&pmu->entry, &pmus);
 
        ret = 0;
@@ -12002,20 +12012,10 @@ unlock:
 
        return ret;
 
-free_context:
-       free_percpu(pmu->cpu_pmu_context);
-
-free_dev:
-       if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
-               device_del(pmu->dev);
-               put_device(pmu->dev);
-       }
-
-free_idr:
-       idr_remove(&pmu_idr, pmu->type);
-
-free_pdc:
-       free_percpu(pmu->pmu_disable_count);
+free:
+       if (pmu->type >= 0)
+               idr_remove(&pmu_idr, pmu->type);
+       perf_pmu_free(pmu);
        goto unlock;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_register);
@@ -12034,14 +12034,7 @@ void perf_pmu_unregister(struct pmu *pmu)
        synchronize_srcu(&pmus_srcu);
        synchronize_rcu();
 
-       free_percpu(pmu->pmu_disable_count);
-       if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
-               if (pmu->nr_addr_filters)
-                       device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
-               device_del(pmu->dev);
-               put_device(pmu->dev);
-       }
-       free_pmu_context(pmu);
+       perf_pmu_free(pmu);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);