git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 Jan 2020 21:26:41 +0000 (22:26 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 Jan 2020 21:26:41 +0000 (22:26 +0100)
added patches:
arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch

queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch b/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
new file mode 100644
index 0000000..1d77b37
--- /dev/null
+++ b/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
@@ -0,0 +1,139 @@
+From d3ec3a08fa700c8b46abb137dce4e2514a6f9668 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 7 Feb 2019 16:01:21 +0000
+Subject: arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit d3ec3a08fa700c8b46abb137dce4e2514a6f9668 upstream.
+
+In order to work around the TX2-219 erratum, it is necessary to trap
+TTBRx_EL1 accesses to EL2. This is done by setting HCR_EL2.TVM on
+guest entry, which has the side effect of trapping all the other
+VM-related sysregs as well.
+
+To minimize the overhead, a fast path is used so that we don't
+have to go all the way back to the main sysreg handling code,
+unless the rest of the hypervisor expects to see these accesses.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpucaps.h |    3 +
+ arch/arm64/kvm/hyp/switch.c      |   69 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 69 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -53,7 +53,8 @@
+ #define ARM64_HAS_STAGE2_FWB                  32
+ #define ARM64_WORKAROUND_1463225              33
+ #define ARM64_SSBS                            34
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM   35
+
+-#define ARM64_NCAPS                           35
++#define ARM64_NCAPS                           36
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -130,6 +130,9 @@ static void __hyp_text __activate_traps(
+ {
+       u64 hcr = vcpu->arch.hcr_el2;
+
++      if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
++              hcr |= HCR_TVM;
++
+       write_sysreg(hcr, hcr_el2);
+
+       if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+@@ -172,8 +175,10 @@ static void __hyp_text __deactivate_trap
+        * the crucial bit is "On taking a vSError interrupt,
+        * HCR_EL2.VSE is cleared to 0."
+        */
+-      if (vcpu->arch.hcr_el2 & HCR_VSE)
+-              vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
++      if (vcpu->arch.hcr_el2 & HCR_VSE) {
++              vcpu->arch.hcr_el2 &= ~HCR_VSE;
++              vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
++      }
+
+       if (has_vhe())
+               deactivate_traps_vhe();
+@@ -379,6 +384,61 @@ static bool __hyp_text __hyp_switch_fpsi
+       return true;
+ }
+
++static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
++{
++      u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
++      int rt = kvm_vcpu_sys_get_rt(vcpu);
++      u64 val = vcpu_get_reg(vcpu, rt);
++
++      /*
++       * The normal sysreg handling code expects to see the traps,
++       * let's not do anything here.
++       */
++      if (vcpu->arch.hcr_el2 & HCR_TVM)
++              return false;
++
++      switch (sysreg) {
++      case SYS_SCTLR_EL1:
++              write_sysreg_el1(val, SYS_SCTLR);
++              break;
++      case SYS_TTBR0_EL1:
++              write_sysreg_el1(val, SYS_TTBR0);
++              break;
++      case SYS_TTBR1_EL1:
++              write_sysreg_el1(val, SYS_TTBR1);
++              break;
++      case SYS_TCR_EL1:
++              write_sysreg_el1(val, SYS_TCR);
++              break;
++      case SYS_ESR_EL1:
++              write_sysreg_el1(val, SYS_ESR);
++              break;
++      case SYS_FAR_EL1:
++              write_sysreg_el1(val, SYS_FAR);
++              break;
++      case SYS_AFSR0_EL1:
++              write_sysreg_el1(val, SYS_AFSR0);
++              break;
++      case SYS_AFSR1_EL1:
++              write_sysreg_el1(val, SYS_AFSR1);
++              break;
++      case SYS_MAIR_EL1:
++              write_sysreg_el1(val, SYS_MAIR);
++              break;
++      case SYS_AMAIR_EL1:
++              write_sysreg_el1(val, SYS_AMAIR);
++              break;
++      case SYS_CONTEXTIDR_EL1:
++              write_sysreg_el1(val, SYS_CONTEXTIDR);
++              break;
++      default:
++              return false;
++      }
++
++      __kvm_skip_instr(vcpu);
++      return true;
++}
++
+ /*
+  * Return true when we were able to fixup the guest exit and should return to
+  * the guest, false when we should restore the host state and return to the
+@@ -398,6 +458,11 @@ static bool __hyp_text fixup_guest_exit(
+       if (*exit_code != ARM_EXCEPTION_TRAP)
+               goto exit;
+
++      if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
++          kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
++          handle_tx2_tvm(vcpu))
++              return true;
++
+       /*
+        * We trap the first access to the FP/SIMD to save the host context
+        * and restore the guest context lazily.
diff --git a/queue-4.19/series b/queue-4.19/series
index 03432a2b23c2a40d071b6192862cb11f9293f59e..13a6867835fc4ccabbd03543d94a717b02f7eee2 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -62,3 +62,4 @@ powerpc-spinlocks-include-correct-header-for-static-key.patch
 cpufreq-imx6q-read-ocotp-through-nvmem-for-imx6ul-imx6ull.patch
 arm-dts-imx6ul-use-nvmem-cells-for-cpu-speed-grading.patch
 pci-switchtec-read-all-64-bits-of-part_event_bitmap.patch
+arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch