KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
author    Will Deacon <will@kernel.org>
          Tue, 11 Aug 2020 10:27:24 +0000 (11:27 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 26 Aug 2020 09:42:24 +0000 (11:42 +0200)
commit fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 upstream.

The 'flags' field of 'struct mmu_notifier_range' is used to indicate
whether invalidate_range_{start,end}() are permitted to block. In the
case of kvm_mmu_notifier_invalidate_range_start(), this field is not
forwarded on to the architecture-specific implementation of
kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
whether or not to block.
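
The blockability of an invalidation is carried in range->flags; the generic
helper mmu_notifier_range_blockable() in <linux/mmu_notifier.h> tests the
MMU_NOTIFIER_RANGE_BLOCKABLE bit. A minimal sketch of such a check,
illustrative only and not part of this patch:

    #include <linux/mmu_notifier.h>

    /* Sketch: true if invalidate_range_{start,end}() may sleep for this range. */
    static bool invalidation_may_block(const struct mmu_notifier_range *range)
    {
            /* Equivalent to: range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE */
            return mmu_notifier_range_blockable(range);
    }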

Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
architectures know whether or not they are permitted to block.
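
For illustration only, and not the implementation of any architecture touched
by this patch, a backend could consult the new parameter along these lines
(MMU_NOTIFIER_RANGE_BLOCKABLE comes from <linux/mmu_notifier.h>):

    #include <linux/kvm_host.h>
    #include <linux/mmu_notifier.h>

    int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
                            unsigned long end, unsigned flags)
    {
            /*
             * Illustrative policy: note when the caller forbids sleeping so
             * that the page-table walk below can avoid operations that block.
             */
            if (!(flags & MMU_NOTIFIER_RANGE_BLOCKABLE))
                    pr_debug("non-blockable unmap of [%#lx, %#lx)\n", start, end);

            /* ... architecture-specific unmap of the HVA range goes here ... */
            return 0;
    }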

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20200811102725.7121-2-will@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/kvm_host.h
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/mmu.c
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/e500_mmu_host.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
virt/kvm/arm/mmu.c
virt/kvm/kvm_main.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 26fca93cd6972ee9dbd07cd646a1a3fb9e990da7..397e20a359752652ae1670925277b9f0094af42b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -440,7 +440,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end);
+                       unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index caa2b936125ccf08eb49a756a5c99de960a24be4..8861e9d4eb1f95ee8e9d40687e98dd6486958cb9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -939,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end);
+                       unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae93b851848d54511bbe1cb352e963..2514e51d908b408113b2bef69713b2403b6c3cd6 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -518,7 +518,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
        return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+                       unsigned flags)
 {
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1dc63101ffe18538dd88b33aae4a7223fe88e079..b82e46ecd7fbdd563b97af49dbf9ea2892f683ba 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,8 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
 extern int kvm_unmap_hva_range(struct kvm *kvm,
-                              unsigned long start, unsigned long end);
+                              unsigned long start, unsigned long end,
+                              unsigned flags);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 5690a1f9b9767afca2ee2a80b8883705d4b89bf4..13f107dff880e2818b3320cb3c6d1b51bf6fceff 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -837,7 +837,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
        kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+                       unsigned flags)
 {
        return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index df9989cf7ba3f3a537743641ad04da16019833bd..9b402c345154fc828b2b861532e853045e25ca66 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
        return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+                       unsigned flags)
 {
        /* kvm_unmap_hva flushes everything anyways */
        kvm_unmap_hva(kvm, start);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 86e2e0272c576ee939a68a6d22a69a8dbba7f658..d4c5d1d6c6f5558ce738b9321f92c5a3cb18b286 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1606,7 +1606,8 @@ asmlinkage void kvm_spurious_fault(void);
        _ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+                       unsigned flags);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 70cf2c1a1423c91fc2fe540cd7f9da872f70fc35..59d096cacb26c3b6ab022464317237cfe8085fdd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1972,7 +1972,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+                       unsigned flags)
 {
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 8a9d13e8e904fbd647a3322c1c291e2fb7329bc6..9510965789e3860efb57f89d525ad6c8ad4fc4c0 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2046,7 +2046,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end)
+                       unsigned long start, unsigned long end, unsigned flags)
 {
        if (!kvm->arch.pgd)
                return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 77aa91fb08d2b90cb1ce33d15c167c85b0ef4d97..66b7a9dbb77dc12ac8ad51d86c22c21d10e9798f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -428,7 +428,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
-       need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+       need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+                                            range->flags);
        need_tlb_flush |= kvm->tlbs_dirty;
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)