git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: VMX: Hold mmu_lock until page is released when updating APIC access page
author Sean Christopherson <seanjc@google.com>
Thu, 10 Oct 2024 18:23:52 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 25 Oct 2024 17:00:48 +0000 (13:00 -0400)
Hold mmu_lock across kvm_release_pfn_clean() when refreshing the APIC
access page address to ensure that KVM doesn't mark a page/folio as
accessed after it has been unmapped.  Practically speaking, marking a folio
accessed is benign in this scenario, as KVM does hold a reference (it's
really just marking folios dirty that is problematic), but there's no
reason not to be paranoid (moving the APIC access page isn't a hot path),
and no reason to be different from other mmu_notifier-protected flows in
KVM.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-51-seanjc@google.com>

arch/x86/kvm/vmx/vmx.c

index 81ed596e4454c565b9a5ea001243cfbcf2d50bf2..bee90089c8a7b08f768bccda984ecbdad759864e 100644 (file)
@@ -6832,25 +6832,22 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
                return;
 
        read_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
+       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
                kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
-               read_unlock(&vcpu->kvm->mmu_lock);
-               goto out;
-       }
-
-       vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
-       read_unlock(&vcpu->kvm->mmu_lock);
+       else
+               vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
 
-       /*
-        * No need for a manual TLB flush at this point, KVM has already done a
-        * flush if there were SPTEs pointing at the previous page.
-        */
-out:
        /*
         * Do not pin apic access page in memory, the MMU notifier
         * will call us again if it is migrated or swapped out.
         */
        kvm_release_pfn_clean(pfn);
+
+       /*
+        * No need for a manual TLB flush at this point, KVM has already done a
+        * flush if there were SPTEs pointing at the previous page.
+        */
+       read_unlock(&vcpu->kvm->mmu_lock);
 }
 
 void vmx_hwapic_isr_update(int max_isr)