From: Greg Kroah-Hartman
Date: Fri, 10 Jan 2020 21:26:41 +0000 (+0100)
Subject: 4.19-stable patches
X-Git-Tag: v4.4.209~13
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d8cdbeff5ab9b9c7b156c6724e5395213369b780;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
---

diff --git a/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch b/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
new file mode 100644
index 00000000000..1d77b378773
--- /dev/null
+++ b/queue-4.19/arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch
@@ -0,0 +1,139 @@
+From d3ec3a08fa700c8b46abb137dce4e2514a6f9668 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier
+Date: Thu, 7 Feb 2019 16:01:21 +0000
+Subject: arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
+
+From: Marc Zyngier
+
+commit d3ec3a08fa700c8b46abb137dce4e2514a6f9668 upstream.
+
+In order to workaround the TX2-219 erratum, it is necessary to trap
+TTBRx_EL1 accesses to EL2. This is done by setting HCR_EL2.TVM on
+guest entry, which has the side effect of trapping all the other
+VM-related sysregs as well.
+
+To minimize the overhead, a fast path is used so that we don't
+have to go all the way back to the main sysreg handling code,
+unless the rest of the hypervisor expects to see these accesses.
+
+Cc:
+Signed-off-by: Marc Zyngier
+Signed-off-by: Will Deacon
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm64/include/asm/cpucaps.h |    3 +
+ arch/arm64/kvm/hyp/switch.c      |   69 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 69 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -53,7 +53,8 @@
+ #define ARM64_HAS_STAGE2_FWB			32
+ #define ARM64_WORKAROUND_1463225		33
+ #define ARM64_SSBS				34
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	35
+ 
+-#define ARM64_NCAPS				35
++#define ARM64_NCAPS				36
+ 
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -130,6 +130,9 @@ static void __hyp_text __activate_traps(
+ {
+ 	u64 hcr = vcpu->arch.hcr_el2;
+ 
++	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
++		hcr |= HCR_TVM;
++
+ 	write_sysreg(hcr, hcr_el2);
+ 
+ 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+@@ -172,8 +175,10 @@ static void __hyp_text __deactivate_trap
+ 	 * the crucial bit is "On taking a vSError interrupt,
+ 	 * HCR_EL2.VSE is cleared to 0."
+ 	 */
+-	if (vcpu->arch.hcr_el2 & HCR_VSE)
+-		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
++	if (vcpu->arch.hcr_el2 & HCR_VSE) {
++		vcpu->arch.hcr_el2 &= ~HCR_VSE;
++		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
++	}
+ 
+ 	if (has_vhe())
+ 		deactivate_traps_vhe();
+@@ -379,6 +384,61 @@ static bool __hyp_text __hyp_switch_fpsi
+ 	return true;
+ }
+ 
++static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
++{
++	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
++	int rt = kvm_vcpu_sys_get_rt(vcpu);
++	u64 val = vcpu_get_reg(vcpu, rt);
++
++	/*
++	 * The normal sysreg handling code expects to see the traps,
++	 * let's not do anything here.
++	 */
++	if (vcpu->arch.hcr_el2 & HCR_TVM)
++		return false;
++
++	switch (sysreg) {
++	case SYS_SCTLR_EL1:
++		write_sysreg_el1(val, SYS_SCTLR);
++		break;
++	case SYS_TTBR0_EL1:
++		write_sysreg_el1(val, SYS_TTBR0);
++		break;
++	case SYS_TTBR1_EL1:
++		write_sysreg_el1(val, SYS_TTBR1);
++		break;
++	case SYS_TCR_EL1:
++		write_sysreg_el1(val, SYS_TCR);
++		break;
++	case SYS_ESR_EL1:
++		write_sysreg_el1(val, SYS_ESR);
++		break;
++	case SYS_FAR_EL1:
++		write_sysreg_el1(val, SYS_FAR);
++		break;
++	case SYS_AFSR0_EL1:
++		write_sysreg_el1(val, SYS_AFSR0);
++		break;
++	case SYS_AFSR1_EL1:
++		write_sysreg_el1(val, SYS_AFSR1);
++		break;
++	case SYS_MAIR_EL1:
++		write_sysreg_el1(val, SYS_MAIR);
++		break;
++	case SYS_AMAIR_EL1:
++		write_sysreg_el1(val, SYS_AMAIR);
++		break;
++	case SYS_CONTEXTIDR_EL1:
++		write_sysreg_el1(val, SYS_CONTEXTIDR);
++		break;
++	default:
++		return false;
++	}
++
++	__kvm_skip_instr(vcpu);
++	return true;
++}
++
+ /*
+  * Return true when we were able to fixup the guest exit and should return to
+  * the guest, false when we should restore the host state and return to the
+@@ -398,6 +458,11 @@ static bool __hyp_text fixup_guest_exit(
+ 	if (*exit_code != ARM_EXCEPTION_TRAP)
+ 		goto exit;
+ 
++	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
++	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
++	    handle_tx2_tvm(vcpu))
++		return true;
++
+ 	/*
+ 	 * We trap the first access to the FP/SIMD to save the host context
+ 	 * and restore the guest context lazily.
diff --git a/queue-4.19/series b/queue-4.19/series
index 03432a2b23c..13a6867835f 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -62,3 +62,4 @@ powerpc-spinlocks-include-correct-header-for-static-key.patch
 cpufreq-imx6q-read-ocotp-through-nvmem-for-imx6ul-imx6ull.patch
 arm-dts-imx6ul-use-nvmem-cells-for-cpu-speed-grading.patch
 pci-switchtec-read-all-64-bits-of-part_event_bitmap.patch
+arm64-kvm-trap-vm-ops-when-arm64_workaround_cavium_tx2_219_tvm-is-set.patch