git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
perf/x86/amd/uncore: Avoid PMU registration if counters are unavailable
author: Sandipan Das <sandipan.das@amd.com>
Wed, 26 Jun 2024 07:44:04 +0000 (13:14 +0530)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 3 Aug 2024 06:59:43 +0000 (08:59 +0200)
[ Upstream commit f997e208b6c96858a2f6c0855debfbdb9b52f131 ]

X86_FEATURE_PERFCTR_NB and X86_FEATURE_PERFCTR_LLC are derived from
CPUID leaf 0x80000001 ECX bits 24 and 28 respectively and denote the
availability of DF and L3 counters. When these bits are not set, the
corresponding PMUs have no counters and hence, should not be registered.

Fixes: 07888daa056e ("perf/x86/amd/uncore: Move discovery and registration")
Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20240626074404.1044230-1-sandipan.das@amd.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/events/amd/uncore.c

index 4ccb8fa483e613af8bade68e3718bda0e91acd38..b78e05ab4a73762ef0cb96c6aafd5db41fd906c4 100644 (file)
@@ -654,17 +654,20 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 {
        struct attribute **df_attr = amd_uncore_df_format_attr;
        struct amd_uncore_pmu *pmu;
+       int num_counters;
 
        /* Run just once */
        if (uncore->init_done)
                return amd_uncore_ctx_init(uncore, cpu);
 
+       num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       if (!num_counters)
+               goto done;
+
        /* No grouping, single instance for a system */
        uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
-       if (!uncore->pmus) {
-               uncore->num_pmus = 0;
+       if (!uncore->pmus)
                goto done;
-       }
 
        /*
         * For Family 17h and above, the Northbridge counters are repurposed
@@ -674,7 +677,7 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
        pmu = &uncore->pmus[0];
        strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_df" : "amd_nb",
                sizeof(pmu->name));
-       pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       pmu->num_counters = num_counters;
        pmu->msr_base = MSR_F15H_NB_PERF_CTL;
        pmu->rdpmc_base = RDPMC_BASE_NB;
        pmu->group = amd_uncore_ctx_gid(uncore, cpu);
@@ -785,17 +788,20 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 {
        struct attribute **l3_attr = amd_uncore_l3_format_attr;
        struct amd_uncore_pmu *pmu;
+       int num_counters;
 
        /* Run just once */
        if (uncore->init_done)
                return amd_uncore_ctx_init(uncore, cpu);
 
+       num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       if (!num_counters)
+               goto done;
+
        /* No grouping, single instance for a system */
        uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
-       if (!uncore->pmus) {
-               uncore->num_pmus = 0;
+       if (!uncore->pmus)
                goto done;
-       }
 
        /*
         * For Family 17h and above, L3 cache counters are available instead
@@ -805,7 +811,7 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
        pmu = &uncore->pmus[0];
        strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2",
                sizeof(pmu->name));
-       pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       pmu->num_counters = num_counters;
        pmu->msr_base = MSR_F16H_L2I_PERF_CTL;
        pmu->rdpmc_base = RDPMC_BASE_LLC;
        pmu->group = amd_uncore_ctx_gid(uncore, cpu);