--- /dev/null
+From 603afdc9438ac546181e843f807253d75d3dbc45 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Fri, 13 Sep 2019 10:57:50 +0100
+Subject: arm64: Allow CAVIUM_TX2_ERRATUM_219 to be selected
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 603afdc9438ac546181e843f807253d75d3dbc45 upstream.
+
+Allow the user to select the workaround for TX2-219, and update
+the silicon-errata.rst file to reflect this.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/arm64/silicon-errata.rst | 2 ++
+ arch/arm64/Kconfig | 17 +++++++++++++++++
+ 2 files changed, 19 insertions(+)
+
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -107,6 +107,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Cavium | ThunderX2 SMMUv3| #126 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+ +----------------+-----------------+-----------------+-----------------------------+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -601,6 +601,23 @@ config CAVIUM_ERRATUM_30115
+
+ If unsure, say Y.
+
++config CAVIUM_TX2_ERRATUM_219
++ bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
++ default y
++ help
++ On Cavium ThunderX2, a load, store or prefetch instruction between a
++ TTBR update and the corresponding context synchronizing operation can
++ cause a spurious Data Abort to be delivered to any hardware thread in
++ the CPU core.
++
++ Work around the issue by avoiding the problematic code sequence and
++ trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
++ trap handler performs the corresponding register access, skips the
++ instruction and ensures context synchronization by virtue of the
++ exception return.
++
++ If unsure, say Y.
++
+ config QCOM_FALKOR_ERRATUM_1003
+ bool "Falkor E1003: Incorrect translation due to ASID change"
+ default y
--- /dev/null
+From 9405447ef79bc93101373e130f72e9e6cbf17dbb Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 9 Apr 2019 16:22:24 +0100
+Subject: arm64: Avoid Cavium TX2 erratum 219 when switching TTBR
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9405447ef79bc93101373e130f72e9e6cbf17dbb upstream.
+
+As a PRFM instruction racing against a TTBR update can have undesirable
+effects on TX2, NOP-out such PRFM on cores that are affected by
+the TX2-219 erratum.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpucaps.h | 3 ++-
+ arch/arm64/kernel/cpu_errata.c | 5 +++++
+ arch/arm64/kernel/entry.S | 2 ++
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -53,7 +53,8 @@
+ #define ARM64_HAS_DCPODP 43
+ #define ARM64_WORKAROUND_1463225 44
+ #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
+
+-#define ARM64_NCAPS 46
++#define ARM64_NCAPS 47
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -851,6 +851,11 @@ const struct arm64_cpu_capabilities arm6
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ },
+ #endif
+ {
+ }
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1070,7 +1070,9 @@ alternative_insn isb, nop, ARM64_WORKARO
+ #else
+ ldr x30, =vectors
+ #endif
++alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
++alternative_else_nop_endif
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
--- /dev/null
+From 93916beb70143c46bf1d2bacf814be3a124b253b Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 9 Apr 2019 16:26:21 +0100
+Subject: arm64: Enable workaround for Cavium TX2 erratum 219 when running SMT
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 93916beb70143c46bf1d2bacf814be3a124b253b upstream.
+
+It appears that the only case where we need to apply the TX2_219_TVM
+mitigation is when the core is in SMT mode. So let's condition the
+enabling on detecting a CPU whose MPIDR_EL1.Aff0 is non-zero.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/cpu_errata.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -12,6 +12,7 @@
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
++#include <asm/smp_plat.h>
+
+ static bool __maybe_unused
+ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm6
+ return (need_wa > 0);
+ }
+
++static const __maybe_unused struct midr_range tx2_family_cpus[] = {
++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++ {},
++};
++
++static bool __maybe_unused
++needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ int i;
++
++ if (!is_affected_midr_range_list(entry, scope) ||
++ !is_hyp_mode_available())
++ return false;
++
++ for_each_possible_cpu(i) {
++ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
++ return true;
++ }
++
++ return false;
++}
++
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+
+ static const struct midr_range arm64_harden_el2_vectors[] = {
+@@ -857,6 +882,14 @@ const struct arm64_cpu_capabilities arm6
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ },
+ #endif
++#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ .matches = needs_tx2_tvm_workaround,
++ },
++#endif
+ {
+ }
+ };
--- /dev/null
+From d3ec3a08fa700c8b46abb137dce4e2514a6f9668 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 7 Feb 2019 16:01:21 +0000
+Subject: arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit d3ec3a08fa700c8b46abb137dce4e2514a6f9668 upstream.
+
+In order to workaround the TX2-219 erratum, it is necessary to trap
+TTBRx_EL1 accesses to EL2. This is done by setting HCR_EL2.TVM on
+guest entry, which has the side effect of trapping all the other
+VM-related sysregs as well.
+
+To minimize the overhead, a fast path is used so that we don't
+have to go all the way back to the main sysreg handling code,
+unless the rest of the hypervisor expects to see these accesses.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpucaps.h | 3 +
+ arch/arm64/kvm/hyp/switch.c | 69 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 69 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -52,7 +52,8 @@
+ #define ARM64_HAS_IRQ_PRIO_MASKING 42
+ #define ARM64_HAS_DCPODP 43
+ #define ARM64_WORKAROUND_1463225 44
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
+
+-#define ARM64_NCAPS 45
++#define ARM64_NCAPS 46
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(
+ {
+ u64 hcr = vcpu->arch.hcr_el2;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
++ hcr |= HCR_TVM;
++
+ write_sysreg(hcr, hcr_el2);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_trap
+ * the crucial bit is "On taking a vSError interrupt,
+ * HCR_EL2.VSE is cleared to 0."
+ */
+- if (vcpu->arch.hcr_el2 & HCR_VSE)
+- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
++ if (vcpu->arch.hcr_el2 & HCR_VSE) {
++ vcpu->arch.hcr_el2 &= ~HCR_VSE;
++ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
++ }
+
+ if (has_vhe())
+ deactivate_traps_vhe();
+@@ -393,6 +398,61 @@ static bool __hyp_text __hyp_handle_fpsi
+ return true;
+ }
+
++static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
++{
++ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
++ int rt = kvm_vcpu_sys_get_rt(vcpu);
++ u64 val = vcpu_get_reg(vcpu, rt);
++
++ /*
++ * The normal sysreg handling code expects to see the traps,
++ * let's not do anything here.
++ */
++ if (vcpu->arch.hcr_el2 & HCR_TVM)
++ return false;
++
++ switch (sysreg) {
++ case SYS_SCTLR_EL1:
++ write_sysreg_el1(val, SYS_SCTLR);
++ break;
++ case SYS_TTBR0_EL1:
++ write_sysreg_el1(val, SYS_TTBR0);
++ break;
++ case SYS_TTBR1_EL1:
++ write_sysreg_el1(val, SYS_TTBR1);
++ break;
++ case SYS_TCR_EL1:
++ write_sysreg_el1(val, SYS_TCR);
++ break;
++ case SYS_ESR_EL1:
++ write_sysreg_el1(val, SYS_ESR);
++ break;
++ case SYS_FAR_EL1:
++ write_sysreg_el1(val, SYS_FAR);
++ break;
++ case SYS_AFSR0_EL1:
++ write_sysreg_el1(val, SYS_AFSR0);
++ break;
++ case SYS_AFSR1_EL1:
++ write_sysreg_el1(val, SYS_AFSR1);
++ break;
++ case SYS_MAIR_EL1:
++ write_sysreg_el1(val, SYS_MAIR);
++ break;
++ case SYS_AMAIR_EL1:
++ write_sysreg_el1(val, SYS_AMAIR);
++ break;
++ case SYS_CONTEXTIDR_EL1:
++ write_sysreg_el1(val, SYS_CONTEXTIDR);
++ break;
++ default:
++ return false;
++ }
++
++ __kvm_skip_instr(vcpu);
++ return true;
++}
++
+ /*
+ * Return true when we were able to fixup the guest exit and should return to
+ * the guest, false when we should restore the host state and return to the
+@@ -412,6 +472,11 @@ static bool __hyp_text fixup_guest_exit(
+ if (*exit_code != ARM_EXCEPTION_TRAP)
+ goto exit;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
++ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
++ handle_tx2_tvm(vcpu))
++ return true;
++
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
--- /dev/null
+From 1e72e673b9d102ff2e8333e74b3308d012ddf75b Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 14 Oct 2019 18:19:18 +0100
+Subject: EDAC/ghes: Fix Use after free in ghes_edac remove path
+
+From: James Morse <james.morse@arm.com>
+
+commit 1e72e673b9d102ff2e8333e74b3308d012ddf75b upstream.
+
+ghes_edac models a single logical memory controller, and uses a global
+ghes_init variable to ensure only the first ghes_edac_register() will
+do anything.
+
+ghes_edac is registered the first time a GHES entry in the HEST is
+probed. There may be multiple entries, so subsequent attempts to
+register ghes_edac are silently ignored as the work has already been
+done.
+
+When a GHES entry is unregistered, it calls ghes_edac_unregister(),
+which free()s the memory behind the global variables in ghes_edac.
+
+But there may be multiple GHES entries, the next call to
+ghes_edac_unregister() will dereference the free()d memory, and attempt
+to free it a second time.
+
+This may also be triggered on a platform with one GHES entry, if the
+driver is unbound/re-bound and unbound. The re-bind step will do
+nothing because of ghes_init, the second unbind will then do the same
+work as the first.
+
+Doing the unregister work on the first call is unsafe, as another
+CPU may be processing a notification in ghes_edac_report_mem_error(),
+using the memory we are about to free.
+
+ghes_init is already half of the reference counting. We only need
+to do the register work for the first call, and the unregister work
+for the last. Add the unregister check.
+
+This means we no longer free ghes_edac's memory while there are
+GHES entries that may receive a notification.
+
+This was detected by KASAN and DEBUG_TEST_DRIVER_REMOVE.
+
+ [ bp: merge into a single patch. ]
+
+Fixes: 0fe5f281f749 ("EDAC, ghes: Model a single, logical memory controller")
+Reported-by: John Garry <john.garry@huawei.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Robert Richter <rrichter@marvell.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20191014171919.85044-2-james.morse@arm.com
+Link: https://lkml.kernel.org/r/304df85b-8b56-b77e-1a11-aa23769f2e7c@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/edac/ghes_edac.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *g
+ if (!ghes_pvt)
+ return;
+
++ if (atomic_dec_return(&ghes_init))
++ return;
++
+ mci = ghes_pvt->mci;
++ ghes_pvt = NULL;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
+ }
--- /dev/null
+From 1638b8f096ca165965189b9626564c933c79fe63 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 21 Oct 2019 12:07:15 +0200
+Subject: lib/vdso: Make clock_getres() POSIX compliant again
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1638b8f096ca165965189b9626564c933c79fe63 upstream.
+
+A recent commit removed the NULL pointer check from the clock_getres()
+implementation causing a test case to fault.
+
+POSIX requires an explicit NULL pointer check for clock_getres() aside of
+the validity check of the clock_id argument for obscure reasons.
+
+Add it back for both 32bit and 64bit.
+
+Note, this is only a partial revert of the offending commit which does not
+bring back the broken fallback invocation in the 32bit compat
+implementations of clock_getres() and clock_gettime().
+
+Fixes: a9446a906f52 ("lib/vdso/32: Remove inconsistent NULL pointer checks")
+Reported-by: Andreas Schwab <schwab@linux-m68k.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1910211202260.1904@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/vdso/gettimeofday.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/lib/vdso/gettimeofday.c
++++ b/lib/vdso/gettimeofday.c
+@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_
+ return -1;
+ }
+
+- res->tv_sec = 0;
+- res->tv_nsec = ns;
+-
++ if (likely(res)) {
++ res->tv_sec = 0;
++ res->tv_nsec = ns;
++ }
+ return 0;
+ }
+
+@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t cl
+ ret = clock_getres_fallback(clock, &ts);
+ #endif
+
+- if (likely(!ret)) {
++ if (likely(!ret && res)) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
--- /dev/null
+From 513f7f747e1cba81f28a436911fba0b485878ebd Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 4 Oct 2019 19:23:37 +0200
+Subject: parisc: Fix vmap memory leak in ioremap()/iounmap()
+
+From: Helge Deller <deller@gmx.de>
+
+commit 513f7f747e1cba81f28a436911fba0b485878ebd upstream.
+
+Sven noticed that calling ioremap() and iounmap() multiple times leads
+to a vmap memory leak:
+ vmap allocation for size 4198400 failed:
+ use vmalloc=<size> to increase size
+
+It seems we missed calling vunmap() in iounmap().
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Noticed-by: Sven Schnelle <svens@stackframe.org>
+Cc: <stable@vger.kernel.org> # v3.16+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/mm/ioremap.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/arch/parisc/mm/ioremap.c
++++ b/arch/parisc/mm/ioremap.c
+@@ -3,7 +3,7 @@
+ * arch/parisc/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
++ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long p
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
+- vfree(addr);
++ vunmap(addr);
+ return NULL;
+ }
+
+@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long p
+ }
+ EXPORT_SYMBOL(__ioremap);
+
+-void iounmap(const volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *io_addr)
+ {
+- if (addr > high_memory)
+- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
++ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
++
++ if (is_vmalloc_addr((void *)addr))
++ vunmap((void *)addr);
+ }
+ EXPORT_SYMBOL(iounmap);
xtensa-fix-change_bit-in-exclusive-access-option.patch
s390-zcrypt-fix-memleak-at-release.patch
s390-kaslr-add-support-for-r_390_glob_dat-relocation-type.patch
+lib-vdso-make-clock_getres-posix-compliant-again.patch
+parisc-fix-vmap-memory-leak-in-ioremap-iounmap.patch
+edac-ghes-fix-use-after-free-in-ghes_edac-remove-path.patch
+arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
+arm64-avoid-cavium-tx2-erratum-219-when-switching-ttbr.patch
+arm64-enable-workaround-for-cavium-tx2-erratum-219-when-running-smt.patch
+arm64-allow-cavium_tx2_erratum_219-to-be-selected.patch