git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest
author: Sean Christopherson <seanjc@google.com>
Thu, 10 Oct 2024 18:24:14 +0000 (11:24 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 25 Oct 2024 17:00:49 +0000 (13:00 -0400)
Convert MIPS to kvm_faultin_pfn()+kvm_release_faultin_page(), which
are new APIs to consolidate arch code and provide consistent behavior
across all KVM architectures.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-73-seanjc@google.com>

arch/mips/kvm/mmu.c

index 69463ab24d97f6c2612df6580ccec157afa2de54..d2c3b6b41f1817dcee71867b9c81fc692bc1d4eb 100644 (file)
@@ -557,6 +557,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
        bool writeable;
        unsigned long prot_bits;
        unsigned long mmu_seq;
+       struct page *page;
 
        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -578,7 +579,7 @@ retry:
        mmu_seq = kvm->mmu_invalidate_seq;
        /*
         * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
-        * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+        * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
         *
@@ -590,7 +591,7 @@ retry:
        smp_rmb();
 
        /* Slow path - ask KVM core whether we can access this GPA */
-       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+       pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
@@ -602,10 +603,10 @@ retry:
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
-                * gfn_to_pfn_prot().
+                * kvm_faultin_pfn().
                 */
                spin_unlock(&kvm->mmu_lock);
-               kvm_release_pfn_clean(pfn);
+               kvm_release_page_unused(page);
                goto retry;
        }
 
@@ -632,10 +633,7 @@ retry:
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);
 
-       if (writeable)
-               kvm_set_pfn_dirty(pfn);
-       kvm_release_pfn_clean(pfn);
-
+       kvm_release_faultin_page(kvm, page, false, writeable);
        spin_unlock(&kvm->mmu_lock);
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);