--- /dev/null
+From 3cce50dfec4a5b0414c974190940f47dd32c6dee Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 10 Jul 2018 09:58:03 +0100
+Subject: arm64: perf: Disable PMU while processing counter overflows
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 3cce50dfec4a5b0414c974190940f47dd32c6dee upstream.
+
+The arm64 PMU updates the event counters and reprograms the
+counters in the overflow IRQ handler without disabling the
+PMU. This could potentially cause skews in group counters,
+where the overflowed counters may potentially lose some event
+counts while they are reprogrammed. To prevent this, disable
+the PMU while we process the counter overflows and enable it
+right back when we are done.
+
+This patch also moves the PMU stop/start routines to avoid a
+forward declaration.
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Aman Priyadarshi <apeureka@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c | 50 ++++++++++++++++++++++-------------------
+ 1 file changed, 28 insertions(+), 22 deletions(-)
+
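+The resulting overflow handler follows the stop/process/start
+pattern sketched below. This is a condensed view pieced together
+from the hunks that follow, with unrelated declarations and checks
+elided; refer to the diff for the exact code:
+
+	static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+	{
+		...
+		regs = get_irq_regs();
+
+		/*
+		 * Freeze all counters so group members cannot drift apart
+		 * while an overflowed counter is being reprogrammed.
+		 */
+		armv8pmu_stop(cpu_pmu);
+
+		for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+			struct perf_event *event = cpuc->events[idx];
+			...
+			if (perf_event_overflow(event, &data, regs))
+				cpu_pmu->disable(event);
+		}
+
+		/* Re-enable counting only once every overflow is handled. */
+		armv8pmu_start(cpu_pmu);
+		...
+		return IRQ_HANDLED;
+	}
+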
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -748,6 +748,28 @@ static void armv8pmu_disable_event(struc
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+
++static void armv8pmu_start(struct arm_pmu *cpu_pmu)
++{
++ unsigned long flags;
++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++ raw_spin_lock_irqsave(&events->pmu_lock, flags);
++ /* Enable all counters */
++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
++static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
++{
++ unsigned long flags;
++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++ raw_spin_lock_irqsave(&events->pmu_lock, flags);
++ /* Disable all counters */
++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
+ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+ {
+ u32 pmovsr;
+@@ -773,6 +795,11 @@ static irqreturn_t armv8pmu_handle_irq(i
+ */
+ regs = get_irq_regs();
+
++ /*
++ * Stop the PMU while processing the counter overflows
++ * to prevent skews in group events.
++ */
++ armv8pmu_stop(cpu_pmu);
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+@@ -797,6 +824,7 @@ static irqreturn_t armv8pmu_handle_irq(i
+ if (perf_event_overflow(event, &data, regs))
+ cpu_pmu->disable(event);
+ }
++ armv8pmu_start(cpu_pmu);
+
+ /*
+ * Handle the pending perf events.
+@@ -810,28 +838,6 @@ static irqreturn_t armv8pmu_handle_irq(i
+ return IRQ_HANDLED;
+ }
+
+-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+-{
+- unsigned long flags;
+- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+- /* Enable all counters */
+- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+-{
+- unsigned long flags;
+- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+- /* Disable all counters */
+- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+ {