git.ipfire.org Git - thirdparty/linux.git/commitdiff
perf/core: Simplify perf_pmu_register()
author: Peter Zijlstra <peterz@infradead.org>
Mon, 4 Nov 2024 13:39:15 +0000 (14:39 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 4 Mar 2025 08:42:29 +0000 (09:42 +0100)
Using the previously introduced perf_pmu_free() and a new IDR helper,
simplify the perf_pmu_register error paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.198937277@infradead.org
include/linux/idr.h
kernel/events/core.c

index da5f5fa4a3a6ae2be38dec9bb7c5ac73ff8df6e2..cd729be369b36dd6e3aff2ec94ca2a80f5d95d31 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/cleanup.h>
 
 struct idr {
        struct radix_tree_root  idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
 void *idr_replace(struct idr *, void *, unsigned long id);
 void idr_destroy(struct idr *);
 
+struct __class_idr {
+       struct idr *idr;
+       int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+            if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+            ((struct __class_idr){
+               .idr = idr,
+               .id = idr_alloc(idr, ptr, start, end, gfp),
+            }),
+            struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
 /**
  * idr_init_base() - Initialise an IDR.
  * @idr: IDR handle.
index ee5cdd692383a4080a2ad0069a421a0a5d7ff7f3..215dad53aa1b50e951590cfb9ab75c4085e019b4 100644 (file)
@@ -11914,52 +11914,49 @@ static void perf_pmu_free(struct pmu *pmu)
        free_percpu(pmu->cpu_pmu_context);
 }
 
-int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
+
+int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
 {
-       int cpu, ret, max = PERF_TYPE_MAX;
+       int cpu, max = PERF_TYPE_MAX;
 
-       pmu->type = -1;
+       struct pmu *pmu __free(pmu_unregister) = _pmu;
+       guard(mutex)(&pmus_lock);
 
-       mutex_lock(&pmus_lock);
-       ret = -ENOMEM;
        pmu->pmu_disable_count = alloc_percpu(int);
        if (!pmu->pmu_disable_count)
-               goto unlock;
+               return -ENOMEM;
 
-       if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
-               ret = -EINVAL;
-               goto free;
-       }
+       if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
+               return -EINVAL;
 
-       if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
-               ret = -EINVAL;
-               goto free;
-       }
+       if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
+                     "Can not register a pmu with an invalid scope.\n"))
+               return -EINVAL;
 
        pmu->name = name;
 
        if (type >= 0)
                max = type;
 
-       ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
-       if (ret < 0)
-               goto free;
+       CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+       if (pmu_type.id < 0)
+               return pmu_type.id;
 
-       WARN_ON(type >= 0 && ret != type);
+       WARN_ON(type >= 0 && pmu_type.id != type);
 
-       pmu->type = ret;
+       pmu->type = pmu_type.id;
        atomic_set(&pmu->exclusive_cnt, 0);
 
        if (pmu_bus_running && !pmu->dev) {
-               ret = pmu_dev_alloc(pmu);
+               int ret = pmu_dev_alloc(pmu);
                if (ret)
-                       goto free;
+                       return ret;
        }
 
-       ret = -ENOMEM;
        pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
        if (!pmu->cpu_pmu_context)
-               goto free;
+               return -ENOMEM;
 
        for_each_possible_cpu(cpu) {
                struct perf_cpu_pmu_context *cpc;
@@ -12000,32 +11997,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
        /*
         * Now that the PMU is complete, make it visible to perf_try_init_event().
         */
-       if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
-               ret = -EINVAL;
-               goto free;
-       }
+       if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+               return -EINVAL;
        list_add_rcu(&pmu->entry, &pmus);
 
-       ret = 0;
-unlock:
-       mutex_unlock(&pmus_lock);
-
-       return ret;
-
-free:
-       if (pmu->type >= 0)
-               idr_remove(&pmu_idr, pmu->type);
-       perf_pmu_free(pmu);
-       goto unlock;
+       take_idr_id(pmu_type);
+       _pmu = no_free_ptr(pmu); // let it rip
+       return 0;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-       mutex_lock(&pmus_lock);
-       list_del_rcu(&pmu->entry);
-       idr_remove(&pmu_idr, pmu->type);
-       mutex_unlock(&pmus_lock);
+       scoped_guard (mutex, &pmus_lock) {
+               list_del_rcu(&pmu->entry);
+               idr_remove(&pmu_idr, pmu->type);
+       }
 
        /*
         * We dereference the pmu list under both SRCU and regular RCU, so