From 86f925027118e28653d1926ba352ea620af46493 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Mon, 15 Mar 2021 12:31:42 +0100
Subject: [PATCH] 5.10-stable patches

added patches:
	kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch
---
 ...isolation-between-vcpus-of-a-same-vm.patch | 146 ++++++++++++++++++
 queue-5.10/series                             |   1 +
 2 files changed, 147 insertions(+)
 create mode 100644 queue-5.10/kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch

diff --git a/queue-5.10/kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch b/queue-5.10/kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch
new file mode 100644
index 00000000000..bb98b16aa92
--- /dev/null
+++ b/queue-5.10/kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch
@@ -0,0 +1,146 @@
+From foo@baz Mon Mar 15 12:27:09 PM CET 2021
+From: Marc Zyngier <maz@kernel.org>
+Date: Mon, 15 Mar 2021 11:11:11 +0000
+Subject: KVM: arm64: Ensure I-cache isolation between vcpus of a same VM
+To: gregkh@linuxfoundation.org
+Cc: kvmarm@lists.cs.columbia.edu, kernel-team@android.com, stable@vger.kernel.org, Will Deacon, Catalin Marinas
+Message-ID: <20210315111111.4136402-1-maz@kernel.org>
+
+From: Marc Zyngier <maz@kernel.org>
+
+Commit 01dc9262ff5797b675c32c0c6bc682777d23de05 upstream.
+
+It recently became apparent that the ARMv8 architecture has interesting
+rules regarding attributes being used when fetching instructions
+if the MMU is off at Stage-1.
+
+In this situation, the CPU is allowed to fetch from the PoC and
+allocate into the I-cache (unless the memory is mapped with
+the XN attribute at Stage-2).
+
+If we transpose this to vcpus sharing a single physical CPU,
+it is possible for a vcpu running with its MMU off to influence
+another vcpu running with its MMU on, as the latter is expected to
+fetch from the PoU (and self-patching code doesn't flush below that
+level).
+
+In order to solve this, reuse the vcpu-private TLB invalidation
+code to apply the same policy to the I-cache, nuking it every time
+the vcpu runs on a physical CPU that ran another vcpu of the same
+VM in the past.
+
+This involves renaming __kvm_tlb_flush_local_vmid() to
+__kvm_flush_cpu_context(), and inserting a local i-cache invalidation
+there.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Will Deacon
+Acked-by: Catalin Marinas
+Link: https://lore.kernel.org/r/20210303164505.68492-1-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h   |    4 ++--
+ arch/arm64/kvm/arm.c               |    7 ++++++-
+ arch/arm64/kvm/hyp/nvhe/hyp-main.c |    4 ++--
+ arch/arm64/kvm/hyp/nvhe/tlb.c      |    3 ++-
+ arch/arm64/kvm/hyp/vhe/tlb.c       |    3 ++-
+ 5 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -49,7 +49,7 @@
+ #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
+ #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
+ #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
+-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
++#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
+ #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
+ #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
+@@ -180,10 +180,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs
+ #define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+ 
+ extern void __kvm_flush_vm_context(void);
++extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
+ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+ 				     int level);
+ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
+ 
+ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
+ 
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -352,11 +352,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+ 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
+ 
+ 	/*
++	 * We guarantee that both TLBs and I-cache are private to each
++	 * vcpu. If detecting that a vcpu from the same VM has
++	 * previously run on the same physical CPU, call into the
++	 * hypervisor code to nuke the relevant contexts.
++	 *
+ 	 * We might get preempted before the vCPU actually runs, but
+ 	 * over-invalidation doesn't affect correctness.
+	 */
+ 	if (*last_ran != vcpu->vcpu_id) {
+-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
++		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
+ 		*last_ran = vcpu->vcpu_id;
+ 	}
+ 
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -46,11 +46,11 @@ static void handle_host_hcall(unsigned l
+ 		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+ 		break;
+ 	}
+-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
++	case KVM_HOST_SMCCC_FUNC(__kvm_flush_cpu_context): {
+ 		unsigned long r1 = host_ctxt->regs.regs[1];
+ 		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+ 
+-		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
++		__kvm_flush_cpu_context(kern_hyp_va(mmu));
+ 		break;
+ 	}
+ 	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
+--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
++++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
+@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_
+ 	__tlb_switch_to_host(&cxt);
+ }
+ 
+-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
++void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
+ {
+ 	struct tlb_inv_context cxt;
+ 
+@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct k
+ 	__tlb_switch_to_guest(mmu, &cxt);
+ 
+ 	__tlbi(vmalle1);
++	asm volatile("ic iallu");
+ 	dsb(nsh);
+ 	isb();
+ 
+--- a/arch/arm64/kvm/hyp/vhe/tlb.c
++++ b/arch/arm64/kvm/hyp/vhe/tlb.c
+@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_
+ 	__tlb_switch_to_host(&cxt);
+ }
+ 
+-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
++void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
+ {
+ 	struct tlb_inv_context cxt;
+ 
+@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct k
+ 	__tlb_switch_to_guest(mmu, &cxt);
+ 
+ 	__tlbi(vmalle1);
++	asm volatile("ic iallu");
+ 	dsb(nsh);
+ 	isb();
+ 
diff --git a/queue-5.10/series b/queue-5.10/series
index 8d36a635eba..74dc90ca0bc 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -283,3 +283,4 @@ kvm-arm64-reject-vm-creation-when-the-default-ipa-size-is-unsupported.patch
 kvm-arm64-fix-exclusive-limit-for-ipa-size.patch
 mm-userfaultfd-fix-memory-corruption-due-to-writeprotect.patch
 mm-madvise-replace-ptrace-attach-requirement-for-process_madvise.patch
+kvm-arm64-ensure-i-cache-isolation-between-vcpus-of-a-same-vm.patch
-- 
2.47.3
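For readers following along, the sketch below is a minimal userspace model of
the last_vcpu_ran policy that the kvm_arch_vcpu_load() hunk above implements.
It is not kernel code: flush_cpu_context(), NR_PHYS_CPUS and struct s2_mmu are
illustrative stand-ins, and the stub flush stands in for the real
__kvm_flush_cpu_context() hypercall (TLBI VMALLE1 + IC IALLU, then DSB/ISB).

#include <stdio.h>

#define NR_PHYS_CPUS 4

struct s2_mmu {
	/* -1 means "no vcpu of this VM has run on that physical CPU yet". */
	int last_vcpu_ran[NR_PHYS_CPUS];
};

static void flush_cpu_context(int phys_cpu)
{
	/* Stand-in for the hypercall that nukes the private TLB + I-cache. */
	printf("cpu%d: flush vcpu-private TLB and I-cache context\n", phys_cpu);
}

static void vcpu_load(struct s2_mmu *mmu, int vcpu_id, int phys_cpu)
{
	/*
	 * Flush whenever a *different* vcpu of the same VM was the last
	 * one to run on this physical CPU. Over-invalidation is harmless;
	 * a missed invalidation is not.
	 */
	if (mmu->last_vcpu_ran[phys_cpu] != vcpu_id) {
		flush_cpu_context(phys_cpu);
		mmu->last_vcpu_ran[phys_cpu] = vcpu_id;
	}
}

int main(void)
{
	struct s2_mmu mmu = { .last_vcpu_ran = { -1, -1, -1, -1 } };

	vcpu_load(&mmu, 0, 0);	/* first run on cpu0: flush        */
	vcpu_load(&mmu, 0, 0);	/* same vcpu again: no flush       */
	vcpu_load(&mmu, 1, 0);	/* different vcpu on cpu0: flush   */
	return 0;
}

The design leans on the property noted in the patched comment: the caller may
be preempted before the vcpu actually runs, so the check is allowed to fire
spuriously, but it must never miss a vcpu switch on a physical CPU.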