From f436b2ac90a095746beb6729b8ee8ed87c9eaede Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Date: Wed, 13 Jan 2016 14:50:03 +0000
Subject: arm64: kernel: fix architected PMU registers unconditional access

From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>

commit f436b2ac90a095746beb6729b8ee8ed87c9eaede upstream.

The Performance Monitors extension is an optional feature of the
AArch64 architecture; therefore, before accessing Performance
Monitors registers, the kernel must detect the presence of the
architected PMU through the PMUVer field of the ID_AA64DFR0_EL1
register.

This patch implements that guard: it reads the ID_AA64DFR0_EL1
PMUVer field to detect the architected PMU and skips all PMU system
register accesses if the Performance Monitors extension is not
implemented in the core.
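
As a minimal C sketch of the check being added (not part of the patch;
pmu_present() is a hypothetical helper name, and the inline asm assumes
an EL1 context where ID_AA64DFR0_EL1 is readable):

	#include <stdbool.h>
	#include <stdint.h>

	static inline bool pmu_present(void)
	{
		uint64_t dfr0;

		/* PMUVer is ID_AA64DFR0_EL1 bits [11:8] */
		asm volatile("mrs %0, id_aa64dfr0_el1" : "=r" (dfr0));

		/*
		 * Signed extract, like sbfx: 0x0 (no PMU) stays 0 and
		 * 0xf (IMPLEMENTATION DEFINED PMU) sign-extends to -1,
		 * so both fail the >= 1 test for an architected PMU.
		 */
		return (((int64_t)(dfr0 << 52)) >> 60) >= 1;
	}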

Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Fixes: 60792ad349f3 ("arm64: kernel: enforce pmuserenr_el0 initialization and restore")
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm64/kernel/head.S    |    5 +++++
 arch/arm64/mm/proc-macros.S |   12 ++++++++++++
 arch/arm64/mm/proc.S        |    4 ++--
 3 files changed, 19 insertions(+), 2 deletions(-)

--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -512,9 +512,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // C
 #endif
 
 	/* EL2 debug */
+	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	x0, x0, #8, #4
+	cmp	x0, #1
+	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
 	msr	mdcr_el2, x0			// all PMU counters from EL1
+4:
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
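
For reference, a hedged C restatement of the sequence guarded above
(mdcr_el2_from_pmcr() is a hypothetical name): PMCR_EL0.N, bits
[15:11], holds the number of event counters, and copying it into
MDCR_EL2 sets HPMN, bits [4:0], while leaving the EL2 debug/PMU trap
bits clear. Without the new PMUVer guard, the mrs from PMCR_EL0 itself
is UNDEFINED on cores with no PMU, which is what this hunk fixes.

	#include <stdint.h>

	static inline uint64_t mdcr_el2_from_pmcr(uint64_t pmcr)
	{
		/* HPMN = PMCR_EL0.N; trap bits (TPM, TPMCR, TDA, ...) stay 0 */
		return (pmcr >> 11) & 0x1f;
	}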
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -62,3 +62,15 @@
 	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 #endif
 	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
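
Similarly, a hedged C restatement of the new macro (reusing the
hypothetical pmu_present() sketch from above):

	static inline void reset_pmuserenr_el0(void)
	{
		/*
		 * PMUSERENR_EL0 resets to an UNKNOWN value, so clear it
		 * to keep EL0 away from the counters, but only when the
		 * register actually exists.
		 */
		if (pmu_present())
			asm volatile("msr pmuserenr_el0, xzr");
	}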
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh				// Make sure local tlb invalidation completed
 	isb
@@ -156,7 +156,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
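
Finally, a self-contained userspace demo (hypothetical, not part of the
patch) of why the signed field extract plus "cmp #1; b.lt" skips both
interesting PMUVer encodings:

	#include <stdio.h>
	#include <stdint.h>

	/* software model of "sbfx xd, xn, #lsb, #width" */
	static int64_t sbfx(uint64_t reg, unsigned lsb, unsigned width)
	{
		return ((int64_t)(reg << (64 - lsb - width))) >> (64 - width);
	}

	int main(void)
	{
		uint64_t pmuver[] = { 0x0, 0x1, 0xf };	/* ID field values */

		for (int i = 0; i < 3; i++) {
			int64_t v = sbfx(pmuver[i] << 8, 8, 4);

			printf("PMUVer 0x%llx -> %lld -> %s\n",
			       (unsigned long long)pmuver[i], (long long)v,
			       v >= 1 ? "init PMU registers" : "skip");
		}
		return 0;
	}

Output: 0x0 (no PMU) and 0xf (IMPLEMENTATION DEFINED, sign-extended to
-1) are skipped; 0x1 (PMUv3) passes and the PMU registers get
initialized.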