git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
perf: arm_pmu: Request specific affinities for per CPU NMIs/interrupts
author: Will Deacon <will@kernel.org>
Mon, 20 Oct 2025 12:29:35 +0000 (13:29 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 27 Oct 2025 16:16:35 +0000 (17:16 +0100)
Let the PMU driver request both NMIs and normal interrupts with an affinity mask
matching the PMU affinity.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-19-maz@kernel.org
drivers/perf/arm_pmu.c
drivers/perf/arm_pmu_acpi.c
drivers/perf/arm_pmu_platform.c
include/linux/perf/arm_pmu.h

index 22c601b46c858af59eca5883718420df99d79e10..959ceb3d1f556849e8100725a815ac3a0a7a9763 100644 (file)
@@ -26,7 +26,8 @@
 
 #include <asm/irq_regs.h>
 
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+                                 const int irq);
 
 struct pmu_irq_ops {
        void (*enable_pmuirq)(unsigned int irq);
@@ -64,7 +65,9 @@ static void armpmu_enable_percpu_pmuirq(unsigned int irq)
 static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
                                   void __percpu *devid)
 {
-       if (armpmu_count_irq_users(irq) == 1)
+       struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+       if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
                free_percpu_irq(irq, devid);
 }
 
@@ -89,7 +92,9 @@ static void armpmu_disable_percpu_pmunmi(unsigned int irq)
 static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
                                      void __percpu *devid)
 {
-       if (armpmu_count_irq_users(irq) == 1)
+       struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+       if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
                free_percpu_nmi(irq, devid);
 }
 
@@ -580,11 +585,11 @@ static const struct attribute_group armpmu_common_attr_group = {
        .attrs = armpmu_common_attrs,
 };
 
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
 {
        int cpu, count = 0;
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, affinity) {
                if (per_cpu(cpu_irq, cpu) == irq)
                        count++;
        }
@@ -592,12 +597,13 @@ static int armpmu_count_irq_users(const int irq)
        return count;
 }
 
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
 {
        const struct pmu_irq_ops *ops = NULL;
        int cpu;
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, affinity) {
                if (per_cpu(cpu_irq, cpu) != irq)
                        continue;
 
@@ -609,22 +615,25 @@ static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
        return ops;
 }
 
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
 {
        if (per_cpu(cpu_irq, cpu) == 0)
                return;
        if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
                return;
 
-       per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+       per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
 
        per_cpu(cpu_irq, cpu) = 0;
        per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
 {
        int err = 0;
+       struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+       const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+                                                  cpu_possible_mask; /* ACPI */
        const irq_handler_t handler = armpmu_dispatch_irq;
        const struct pmu_irq_ops *irq_ops;
 
@@ -646,25 +655,24 @@ int armpmu_request_irq(int irq, int cpu)
                            IRQF_NOBALANCING | IRQF_NO_AUTOEN |
                            IRQF_NO_THREAD;
 
-               err = request_nmi(irq, handler, irq_flags, "arm-pmu",
-                                 per_cpu_ptr(&cpu_armpmu, cpu));
+               err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
 
                /* If cannot get an NMI, get a normal interrupt */
                if (err) {
                        err = request_irq(irq, handler, irq_flags, "arm-pmu",
-                                         per_cpu_ptr(&cpu_armpmu, cpu));
+                                         armpmu);
                        irq_ops = &pmuirq_ops;
                } else {
                        has_nmi = true;
                        irq_ops = &pmunmi_ops;
                }
-       } else if (armpmu_count_irq_users(irq) == 0) {
-               err = request_percpu_nmi(irq, handler, "arm-pmu", NULL, &cpu_armpmu);
+       } else if (armpmu_count_irq_users(affinity, irq) == 0) {
+               err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
 
                /* If cannot get an NMI, get a normal interrupt */
                if (err) {
-                       err = request_percpu_irq(irq, handler, "arm-pmu",
-                                                &cpu_armpmu);
+                       err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+                                                         affinity, pcpu_armpmu);
                        irq_ops = &percpu_pmuirq_ops;
                } else {
                        has_nmi = true;
@@ -672,7 +680,7 @@ int armpmu_request_irq(int irq, int cpu)
                }
        } else {
                /* Per cpudevid irq was already requested by another CPU */
-               irq_ops = armpmu_find_irq_ops(irq);
+               irq_ops = armpmu_find_irq_ops(affinity, irq);
 
                if (WARN_ON(!irq_ops))
                        err = -EINVAL;
index 05dda19c5359a39849ac9a3c6b1a5cf14eb0614a..e80f76d95e68b20b26c6ab2c93d513f503b6f289 100644 (file)
@@ -218,7 +218,7 @@ static int arm_pmu_acpi_parse_irqs(void)
                 * them with their PMUs.
                 */
                per_cpu(pmu_irqs, cpu) = irq;
-               err = armpmu_request_irq(irq, cpu);
+               err = armpmu_request_irq(&probed_pmus, irq, cpu);
                if (err)
                        goto out_err;
        }
index 9c0494d8a867aa06ada1f3f04a2a5de6dbc259a3..1c9e50a13201528edf4cfbdc2504d305b49d4913 100644 (file)
@@ -165,7 +165,7 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
                if (!irq)
                        continue;
 
-               err = armpmu_request_irq(irq, cpu);
+               err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
                if (err)
                        break;
        }
@@ -181,7 +181,7 @@ static void armpmu_free_irqs(struct arm_pmu *armpmu)
        for_each_cpu(cpu, &armpmu->supported_cpus) {
                int irq = per_cpu(hw_events->irq, cpu);
 
-               armpmu_free_irq(irq, cpu);
+               armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
        }
 }
 
index 93c9a26492fcfd287797d535803c48f2711b8e7f..6690bd77aa4ee0173af928eb4014f0f0f1699227 100644 (file)
@@ -190,8 +190,8 @@ bool arm_pmu_irq_is_nmi(void);
 struct arm_pmu *armpmu_alloc(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
 
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"