KVM: x86/mmu: Add helper to "finish" handling a guest page fault
author     Sean Christopherson <seanjc@google.com>
           Thu, 10 Oct 2024 18:23:42 +0000 (11:23 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Fri, 25 Oct 2024 16:59:08 +0000 (12:59 -0400)
Add a helper to finish/complete the handling of a guest page fault,
e.g. to mark the page accessed and put any held references.  In the
near future, this will allow improving the logic without having to
copy+paste changes into all page fault paths.  And in the less near
future, it will allow sharing the "finish" API across all
architectures.

No functional change intended.
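
[Editorial aside: the @r result parameter is unused for now, but it hints
at where this is headed.  A minimal sketch of the kind of follow-up a
single helper enables -- hypothetical, not part of this commit; the
RET_PF_FIXED/map_writable check and the kvm_release_pfn_dirty() call are
illustrative assumptions:

/*
 * Hypothetical follow-up, NOT part of this commit: with every fault
 * path funneled through one helper, result-dependent cleanup lands in
 * exactly one place, e.g. releasing the pfn as dirty only when the
 * fault actually installed a writable mapping.
 */
static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
				      struct kvm_page_fault *fault, int r)
{
	if (r == RET_PF_FIXED && fault->map_writable)
		kvm_release_pfn_dirty(fault->pfn);	/* illustrative */
	else
		kvm_release_pfn_clean(fault->pfn);
}
]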

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-41-seanjc@google.com>

arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 25aed61c67275f377ff0de9fce2d79c126760c99..226648fd2150c9c6f716969c3dd568c098172b6a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4364,6 +4364,12 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
        return max_level;
 }
 
+static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
+                                     struct kvm_page_fault *fault, int r)
+{
+       kvm_release_pfn_clean(fault->pfn);
+}
+
 static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
                                       struct kvm_page_fault *fault)
 {
@@ -4529,7 +4535,7 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
         * mmu_lock is acquired.
         */
        if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
-               kvm_release_pfn_clean(fault->pfn);
+               kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
                return RET_PF_RETRY;
        }
 
@@ -4605,8 +4611,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        r = direct_map(vcpu, fault);
 
 out_unlock:
+       kvm_mmu_finish_page_fault(vcpu, fault, r);
        write_unlock(&vcpu->kvm->mmu_lock);
-       kvm_release_pfn_clean(fault->pfn);
        return r;
 }
 
@@ -4692,8 +4698,8 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
        r = kvm_tdp_mmu_map(vcpu, fault);
 
 out_unlock:
+       kvm_mmu_finish_page_fault(vcpu, fault, r);
        read_unlock(&vcpu->kvm->mmu_lock);
-       kvm_release_pfn_clean(fault->pfn);
        return r;
 }
 #endif
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index a476a542801741ae2b8f7b032f425d36223bb9fa..35d0c3f1a7895d6d5bc86bd7aca1b262c1a140cb 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -836,8 +836,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        r = FNAME(fetch)(vcpu, fault, &walker);
 
 out_unlock:
+       kvm_mmu_finish_page_fault(vcpu, fault, r);
        write_unlock(&vcpu->kvm->mmu_lock);
-       kvm_release_pfn_clean(fault->pfn);
        return r;
 }
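
[Editorial aside: with the helper in place, all three fault paths share
the same tail; schematically, with names taken from the hunks above,
setup and error handling elided, and read_lock()/read_unlock() on the
TDP MMU path:

	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);
	...
	r = direct_map(vcpu, fault);	/* or kvm_tdp_mmu_map() / FNAME(fetch)() */

out_unlock:
	kvm_mmu_finish_page_fault(vcpu, fault, r);
	write_unlock(&vcpu->kvm->mmu_lock);
	return r;

Note that the release now runs with mmu_lock still held, whereas the old
kvm_release_pfn_clean() calls ran after the unlock; the reordering keeps
all cleanup inside the helper and is consistent with the "no functional
change intended" claim above.]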