From: Greg Kroah-Hartman
Date: Sun, 31 Jan 2016 19:23:03 +0000 (-0800)
Subject: 4.4-stable patches
X-Git-Tag: v4.1.17~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=36f71afb006868d702842a30c5929929c5504166;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
	arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
---

diff --git a/queue-4.4/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch b/queue-4.4/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
new file mode 100644
index 00000000000..76b0757eed8
--- /dev/null
+++ b/queue-4.4/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
@@ -0,0 +1,90 @@
+From f436b2ac90a095746beb6729b8ee8ed87c9eaede Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi
+Date: Wed, 13 Jan 2016 14:50:03 +0000
+Subject: arm64: kernel: fix architected PMU registers unconditional access
+
+From: Lorenzo Pieralisi
+
+commit f436b2ac90a095746beb6729b8ee8ed87c9eaede upstream.
+
+The Performance Monitors extension is an optional feature of the
+AArch64 architecture; therefore, in order to access Performance
+Monitors registers safely, the kernel should detect the architected
+PMU unit presence through the ID_AA64DFR0_EL1 register PMUVer field
+before accessing them.
+
+This patch implements a guard by reading the ID_AA64DFR0_EL1 register
+PMUVer field to detect the architected PMU presence and prevent accessing
+PMU system registers if the Performance Monitors extension is not
+implemented in the core.
+
+Cc: Peter Maydell
+Cc: Mark Rutland
+Fixes: 60792ad349f3 ("arm64: kernel: enforce pmuserenr_el0 initialization and restore")
+Signed-off-by: Lorenzo Pieralisi
+Reported-by: Guenter Roeck
+Tested-by: Guenter Roeck
+Signed-off-by: Will Deacon
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm64/kernel/head.S    |    5 +++++
+ arch/arm64/mm/proc-macros.S |   12 ++++++++++++
+ arch/arm64/mm/proc.S        |    4 ++--
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -512,9 +512,14 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// C
+ #endif
+ 
+ 	/* EL2 debug */
++	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
++	sbfx	x0, x0, #8, #4
++	cmp	x0, #1
++	b.lt	4f				// Skip if no PMU present
+ 	mrs	x0, pmcr_el0			// Disable debug access traps
+ 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
+ 	msr	mdcr_el2, x0			// all PMU counters from EL1
++4:
+ 
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+--- a/arch/arm64/mm/proc-macros.S
++++ b/arch/arm64/mm/proc-macros.S
+@@ -62,3 +62,15 @@
+ 	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ #endif
+ 	.endm
++
++/*
++ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
++ */
++	.macro	reset_pmuserenr_el0, tmpreg
++	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
++	sbfx	\tmpreg, \tmpreg, #8, #4
++	cmp	\tmpreg, #1			// Skip if no PMU present
++	b.lt	9000f
++	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
++9000:
++	.endm
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
+ 	 */
+ 	ubfx	x11, x11, #1, #1
+ 	msr	oslar_el1, x11
+-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
++	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	mov	x0, x12
+ 	dsb	nsh				// Make sure local tlb invalidation completed
+ 	isb
+@@ -156,7 +156,7 @@ ENTRY(__cpu_setup)
+ 	msr	cpacr_el1, x0			// Enable FP/ASIMD
+ 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+ 	msr	mdscr_el1, x0			// access to the DCC from EL0
+-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
++	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	/*
+ 	 * Memory region attributes for LPAE:
+ 	 *
diff --git a/queue-4.4/series b/queue-4.4/series
index 51a889dc906..6243747eefd 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -65,3 +65,4 @@ powerpc-module-handle-r_ppc64_entry-relocations.patch
 arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
 arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
 arm64-kernel-enforce-pmuserenr_el0-initialization-and-restore.patch
+arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
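
Note on the guard: the new assembly reads ID_AA64DFR0_EL1, sign-extends the
4-bit PMUVer field at bits [11:8] ("sbfx x0, x0, #8, #4"), and branches past
the PMU register write unless PMUVer >= 1, i.e. an architected PMU is present
(0 means no PMU; 0xf, which sign-extends to -1, means IMPLEMENTATION DEFINED).
A minimal C sketch of the same check follows; read_id_aa64dfr0_el1() and
write_pmuserenr_el0() are hypothetical stand-ins for the mrs/msr accesses,
not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accessor: on real arm64 this would be an mrs instruction
 * (e.g. the kernel's read_sysreg(id_aa64dfr0_el1)). Stubbed here with a
 * sample value whose PMUVer field (bits [11:8]) reads as 1 (PMUv3). */
static uint64_t read_id_aa64dfr0_el1(void)
{
	return (uint64_t)1 << 8;
}

/* Hypothetical accessor standing in for "msr pmuserenr_el0, xzr". */
static void write_pmuserenr_el0(uint64_t val)
{
	printf("pmuserenr_el0 <- %#llx\n", (unsigned long long)val);
}

/* Sign-extend ID_AA64DFR0_EL1.PMUVer (bits [11:8]), mirroring the patch's
 * "sbfx x0, x0, #8, #4": 0 = no PMU, -1 (0xf) = IMPLEMENTATION DEFINED,
 * >= 1 = architected PMU. */
static int pmuver(void)
{
	int v = (int)((read_id_aa64dfr0_el1() >> 8) & 0xf);
	return v >= 8 ? v - 16 : v;
}

static void reset_pmuserenr_el0(void)
{
	if (pmuver() >= 1)		/* skip if no architected PMU */
		write_pmuserenr_el0(0);	/* disable PMU access from EL0 */
}

int main(void)
{
	reset_pmuserenr_el0();
	return 0;
}

The same ordering matters in the patch itself: the ID register is consulted
before any PMU system register is touched, so cores without the optional
Performance Monitors extension never execute the trapping accesses.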