From: Greg Kroah-Hartman
Date: Fri, 25 Jun 2021 10:24:12 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v5.12.14~47
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=6c8960fff8ddb23265cc9e141eb9e2c49499d0f6;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	arm64-perf-disable-pmu-while-processing-counter-overflows.patch
---

diff --git a/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch b/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch
new file mode 100644
index 00000000000..cb04be7f233
--- /dev/null
+++ b/queue-4.9/arm64-perf-disable-pmu-while-processing-counter-overflows.patch
@@ -0,0 +1,112 @@
+From 3cce50dfec4a5b0414c974190940f47dd32c6dee Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose
+Date: Tue, 10 Jul 2018 09:58:03 +0100
+Subject: arm64: perf: Disable PMU while processing counter overflows
+
+From: Suzuki K Poulose
+
+commit 3cce50dfec4a5b0414c974190940f47dd32c6dee upstream.
+
+The arm64 PMU updates the event counters and reprograms the
+counters in the overflow IRQ handler without disabling the
+PMU. This could potentially cause skews for group counters,
+where the overflowed counters may lose some event counts
+while they are reprogrammed. To prevent this, disable the
+PMU while we process the counter overflows and enable it
+right back when we are done.
+
+This patch also moves the PMU stop/start routines to avoid a
+forward declaration.
+
+Suggested-by: Mark Rutland
+Cc: Will Deacon
+Acked-by: Mark Rutland
+Signed-off-by: Suzuki K Poulose
+Signed-off-by: Will Deacon
+Signed-off-by: Aman Priyadarshi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm64/kernel/perf_event.c |   50 ++++++++++++++++++++++-------------------
+ 1 file changed, 28 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -748,6 +748,28 @@ static void armv8pmu_disable_event(struc
+ 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+ 
++static void armv8pmu_start(struct arm_pmu *cpu_pmu)
++{
++	unsigned long flags;
++	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++	raw_spin_lock_irqsave(&events->pmu_lock, flags);
++	/* Enable all counters */
++	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
++	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
++static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
++{
++	unsigned long flags;
++	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
++
++	raw_spin_lock_irqsave(&events->pmu_lock, flags);
++	/* Disable all counters */
++	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
++	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
++}
++
+ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+ {
+ 	u32 pmovsr;
+@@ -773,6 +795,11 @@ static irqreturn_t armv8pmu_handle_irq(i
+ 	 */
+ 	regs = get_irq_regs();
+ 
++	/*
++	 * Stop the PMU while processing the counter overflows
++	 * to prevent skews in group events.
++	 */
++	armv8pmu_stop(cpu_pmu);
+ 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ 		struct perf_event *event = cpuc->events[idx];
+ 		struct hw_perf_event *hwc;
+@@ -797,6 +824,7 @@ static irqreturn_t armv8pmu_handle_irq(i
+ 		if (perf_event_overflow(event, &data, regs))
+ 			cpu_pmu->disable(event);
+ 	}
++	armv8pmu_start(cpu_pmu);
+ 
+ 	/*
+ 	 * Handle the pending perf events.
+@@ -810,28 +838,6 @@ static irqreturn_t armv8pmu_handle_irq(i
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+-{
+-	unsigned long flags;
+-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+-	/* Enable all counters */
+-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+-{
+-	unsigned long flags;
+-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+-
+-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+-	/* Disable all counters */
+-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+-}
+-
+ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ 				  struct perf_event *event)
+ {
diff --git a/queue-4.9/series b/queue-4.9/series
index cb13f8fe63e..b5dd84f305c 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -51,3 +51,4 @@ inet-use-bigger-hash-table-for-ip-id-generation.patch
 i40e-be-much-more-verbose-about-what-we-can-and-cannot-offload.patch
 arm-9081-1-fix-gcc-10-thumb2-kernel-regression.patch
 makefile-move-wno-unused-but-set-variable-out-of-gcc-only-block.patch
+arm64-perf-disable-pmu-while-processing-counter-overflows.patch
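
A minimal user-space sketch (not kernel code) of the race the patch above
closes: while the overflow handler reprograms one counter, a still-running
PMU keeps delivering events, so the counter being rewritten misses counts
that its group siblings receive. The counter model, helper names, and event
numbers below are invented purely for illustration; the real fix is the
armv8pmu_stop()/armv8pmu_start() bracketing shown in the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 2

static uint64_t counter[NUM_COUNTERS];
static bool pmu_enabled = true;

/* Events that arrive while the "PMU" is enabled hit every counter in the
 * group; a disabled PMU counts nothing. */
static void deliver_events(uint64_t n)
{
	int i;

	if (!pmu_enabled)
		return;
	for (i = 0; i < NUM_COUNTERS; i++)
		counter[i] += n;
}

/* Handle an "overflow" of counter idx: snapshot its count, then rewrite
 * the register.  Events delivered inside this window are lost for idx but
 * still land in its siblings -- that loss is the skew. */
static uint64_t handle_overflow(int idx, uint64_t events_in_window)
{
	uint64_t snapshot = counter[idx];

	deliver_events(events_in_window);	/* the race window */
	counter[idx] = 0;			/* rewrite discards them */
	return snapshot;
}

static void run(bool stop_pmu_first)
{
	uint64_t c0;

	counter[0] = counter[1] = 0;
	pmu_enabled = true;
	deliver_events(100);			/* both counters reach 100 */

	if (stop_pmu_first)
		pmu_enabled = false;		/* models armv8pmu_stop() */
	c0 = handle_overflow(0, 5);		/* 5 events hit the window */
	pmu_enabled = true;			/* models armv8pmu_start() */

	printf("%s: c0=%llu c1=%llu skew=%llu\n",
	       stop_pmu_first ? "pmu stopped" : "pmu running",
	       (unsigned long long)(c0 + counter[0]),
	       (unsigned long long)counter[1],
	       (unsigned long long)(counter[1] - (c0 + counter[0])));
}

int main(void)
{
	run(false);	/* before the patch: c1 ends up 5 ahead of c0 */
	run(true);	/* after the patch: the group stays consistent */
	return 0;
}

Stopping the PMU makes the reprogramming window invisible to every counter
at once: a handful of events go uncounted, but the group members stay
mutually consistent, which is what perf group semantics require.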