4.9-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Fri, 25 Jun 2021 10:24:12 +0000 (12:24 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Fri, 25 Jun 2021 10:24:12 +0000 (12:24 +0200)
added patches:
arm64-perf-disable-pmu-while-processing-counter-overflows.patch

queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch b/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch
new file mode 100644
index 0000000..cb04be7
--- /dev/null
+++ b/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch
@@ -0,0 +1,112 @@
+From 3cce50dfec4a5b0414c974190940f47dd32c6dee Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 10 Jul 2018 09:58:03 +0100
+Subject: arm64: perf: Disable PMU while processing counter overflows
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 3cce50dfec4a5b0414c974190940f47dd32c6dee upstream.
+
+The arm64 PMU updates the event counters and reprograms the
+counters in the overflow IRQ handler without disabling the
+PMU. This could potentially cause skews in group counters,
+where the overflowed counters may potentially lose some event
+counts while they are reprogrammed. To prevent this, disable
+the PMU while we process the counter overflows and enable it
+right back when we are done.
+
+This patch also moves the PMU stop/start routines to avoid a
+forward declaration.
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Aman Priyadarshi <apeureka@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c |   50 ++++++++++++++++++++++-------------------
+ 1 file changed, 28 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -748,6 +748,28 @@ static void armv8pmu_disable_event(struc
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+ 
++static void armv8pmu_start(struct arm_pmu *cpu_pmu)
++{
++      unsigned long flags;
++      struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++      raw_spin_lock_irqsave(&events->pmu_lock, flags);
++      /* Enable all counters */
++      armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
++      raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
++static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
++{
++      unsigned long flags;
++      struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++      raw_spin_lock_irqsave(&events->pmu_lock, flags);
++      /* Disable all counters */
++      armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
++      raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
+ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+ {
+       u32 pmovsr;
+@@ -773,6 +795,11 @@ static irqreturn_t armv8pmu_handle_irq(i
+        */
+       regs = get_irq_regs();
+ 
++      /*
++       * Stop the PMU while processing the counter overflows
++       * to prevent skews in group events.
++       */
++      armv8pmu_stop(cpu_pmu);
+       for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+               struct perf_event *event = cpuc->events[idx];
+               struct hw_perf_event *hwc;
+@@ -797,6 +824,7 @@ static irqreturn_t armv8pmu_handle_irq(i
+               if (perf_event_overflow(event, &data, regs))
+                       cpu_pmu->disable(event);
+       }
++      armv8pmu_start(cpu_pmu);
+ 
+       /*
+        * Handle the pending perf events.
+@@ -810,28 +838,6 @@ static irqreturn_t armv8pmu_handle_irq(i
+       return IRQ_HANDLED;
+ }
+ 
+-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+-{
+-      unsigned long flags;
+-      struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+-      raw_spin_lock_irqsave(&events->pmu_lock, flags);
+-      /* Enable all counters */
+-      armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+-      raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+-{
+-      unsigned long flags;
+-      struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+-      raw_spin_lock_irqsave(&events->pmu_lock, flags);
+-      /* Disable all counters */
+-      armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+-      raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+                                 struct perf_event *event)
+ {
diff --git a/queue-4.9/series b/queue-4.9/series
index cb13f8fe63ed1240a9e4a23cfb7ae75fc2f71191..b5dd84f305c995bd48bb21fed93545c37e49a239 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -51,3 +51,4 @@ inet-use-bigger-hash-table-for-ip-id-generation.patch
 i40e-be-much-more-verbose-about-what-we-can-and-cannot-offload.patch
 arm-9081-1-fix-gcc-10-thumb2-kernel-regression.patch
 makefile-move-wno-unused-but-set-variable-out-of-gcc-only-block.patch
+arm64-perf-disable-pmu-while-processing-counter-overflows.patch
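
To make the ordering the patch enforces easy to see outside of kernel context, here is a minimal, self-contained C sketch. It is not kernel code: fake_pmu, pmu_stop(), pmu_start() and process_overflow() are hypothetical stand-ins for the PMCR_EL0.E toggling done by armv8pmu_stop()/armv8pmu_start() and the per-counter update loop in armv8pmu_handle_irq(). The point it illustrates is simply: stop all counters, reprogram the overflowed ones, then re-enable the PMU, so the other counters in a group do not keep running while an overflowed one is being reprogrammed.

/* Schematic illustration of the overflow-handling order; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

/* Hypothetical stand-in for the per-CPU PMU state. */
struct fake_pmu {
	bool enabled;                   /* models PMCR_EL0.E               */
	bool overflowed[NUM_COUNTERS];  /* models the overflow status bits */
};

static void pmu_stop(struct fake_pmu *pmu)
{
	/* Like armv8pmu_stop(): clear the enable bit so no counter advances. */
	pmu->enabled = false;
}

static void pmu_start(struct fake_pmu *pmu)
{
	/* Like armv8pmu_start(): set the enable bit so all counters resume. */
	pmu->enabled = true;
}

static void process_overflow(struct fake_pmu *pmu, int idx)
{
	/* Update the event count and reprogram the period for counter idx. */
	pmu->overflowed[idx] = false;
	printf("reprogrammed counter %d (PMU running: %d)\n", idx, pmu->enabled);
}

int main(void)
{
	struct fake_pmu pmu = {
		.enabled = true,
		.overflowed = { false, true, false, true },
	};

	/*
	 * Mirrors the patched armv8pmu_handle_irq() flow: the PMU is
	 * stopped before the loop and restarted after it, so nothing
	 * counts while the overflowed counters are being reprogrammed.
	 */
	pmu_stop(&pmu);
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (pmu.overflowed[idx])
			process_overflow(&pmu, idx);
	}
	pmu_start(&pmu);

	return 0;
}

The real handler additionally serializes the PMCR writes under events->pmu_lock with interrupts disabled in armv8pmu_start()/armv8pmu_stop(); the sketch only shows the ordering, not the locking.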