arm64: KVM: flush VM pages before letting the guest enable caches
author	Marc Zyngier <marc.zyngier@arm.com>
	Wed, 15 Jan 2014 12:50:23 +0000 (12:50 +0000)
committer	Jiri Slaby <jslaby@suse.cz>
	Thu, 30 Apr 2015 09:15:03 +0000 (11:15 +0200)
commit 9d218a1fcf4c6b759d442ef702842fae92e1ea61 upstream.

When the guest runs with caches disabled (like in an early boot
sequence, for example), all the writes go directly to RAM,
bypassing the caches altogether.

Once the MMU and caches are enabled, whatever sits in the cache
suddenly becomes visible, which isn't what the guest expects.

A way to avoid this potential disaster is to invalidate the cache
when the MMU is being turned on. For this, we hook into the SCTLR_EL1
trapping code, and scan the stage-2 page tables, invalidating the
pages/sections that have already been mapped in.
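
The "MMU+Caches enabled?" test that gates the flush amounts to checking the
M (MMU enable, bit 0) and C (data cache enable, bit 2) bits of the guest's
SCTLR_EL1. Below is a minimal, standalone sketch of that predicate, assuming
only the raw SCTLR_EL1 value; the function name is made up for illustration,
the in-tree helper is vcpu_has_cache_enabled():

	#include <stdbool.h>

	/*
	 * Illustrative only: true once the guest has enabled both its MMU
	 * and its data cache, i.e. the point at which whatever already sits
	 * in the cache becomes visible to the guest.
	 */
	static inline bool guest_mmu_and_dcache_on(unsigned long sctlr_el1)
	{
		/* SCTLR_EL1.M is bit 0, SCTLR_EL1.C is bit 2 */
		return (sctlr_el1 & 0x5UL) == 0x5UL;
	}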

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5c946dfdcb94baecfdf7bf9c930686414bfeea38..0de650faf1af700c2958befdf1f3b6023747fb25 100644
@@ -143,6 +143,8 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 
 #define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
 
+void stage2_flush_vm(struct kvm *kvm);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index e747dc10c033e86b5750d44ddb146eb34cc66d5a..61c5a92f6d9d225488a0050f95136b8684c86b37 100644
@@ -162,6 +162,89 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        }
 }
 
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pte_t *pte;
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pmd_t *pmd;
+       phys_addr_t next;
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = kvm_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       stage2_flush_ptes(kvm, pmd, addr, next);
+               }
+       } while (pmd++, addr = next, addr != end);
+}
+
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pud_t *pud;
+       phys_addr_t next;
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = kvm_pud_addr_end(addr, end);
+               if (!pud_none(*pud)) {
+                       stage2_flush_pmds(kvm, pud, addr, next);
+               }
+       } while (pud++, addr = next, addr != end);
+}
+
+static void stage2_flush_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+{
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+       phys_addr_t next;
+       pgd_t *pgd;
+
+       pgd = kvm->arch.pgd + pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               stage2_flush_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+void stage2_flush_vm(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_flush_memslot(kvm, memslot);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * free_boot_hyp_pgd - free HYP boot page tables
  *
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 802bd971f1def9381fc902f88388beb52ba0c862..3b038b39ba9bbbe471cf402b8296f84c79ea7b4b 100644
@@ -142,5 +142,7 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
        }
 }
 
+void stage2_flush_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2097e5ecba42a6342537f447f67599b1824a5be7..03244582bc555af4b303fca4370890e278114d16 100644
@@ -27,6 +27,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <trace/events/kvm.h>
@@ -154,8 +155,10 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
 {
        access_vm_reg(vcpu, p, r);
 
-       if (vcpu_has_cache_enabled(vcpu))       /* MMU+Caches enabled? */
+       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
                vcpu->arch.hcr_el2 &= ~HCR_TVM;
+               stage2_flush_vm(vcpu->kvm);
+       }
 
        return true;
 }