KVM: RISC-V: Use kvm_faultin_pfn() when mapping pfns into the guest
author    Sean Christopherson <seanjc@google.com>
          Thu, 10 Oct 2024 18:24:01 +0000 (11:24 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 25 Oct 2024 17:00:48 +0000 (13:00 -0400)
Convert RISC-V to __kvm_faultin_pfn()+kvm_release_faultin_page(), which
are new APIs to consolidate arch code and provide consistent behavior
across all KVM architectures.

Opportunistically fix a s/priort/prior typo in the related comment.

Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Acked-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-60-seanjc@google.com>
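
[Editor's note] For context, below is a condensed, illustrative sketch of the
fault-in/release pattern these APIs establish, mirroring the call sites in the
diff that follows. sketch_gstage_fault() and arch_install_mapping() are
hypothetical placeholder names, not kernel functions, and error handling is
trimmed; treat this as a sketch, not the actual RISC-V implementation.

  #include <linux/kvm_host.h>

  /* Hypothetical placeholder for the arch-specific page-table install step. */
  static int arch_install_mapping(struct kvm_vcpu *vcpu, gfn_t gfn,
                                  kvm_pfn_t hfn, bool writable);

  /*
   * Illustrative only: fault in a host pfn for @gfn, install it under
   * mmu_lock, then drop the reference kvm_faultin_pfn() took via @page.
   * kvm_release_faultin_page() dirties the page only if it was faulted
   * in writable and the map attempt did not fail outright (-EEXIST counts
   * as "used", since another vCPU already installed the mapping).
   */
  static int sketch_gstage_fault(struct kvm_vcpu *vcpu, gfn_t gfn,
                                 bool is_write)
  {
          struct kvm *kvm = vcpu->kvm;
          unsigned long mmu_seq;
          struct page *page;
          bool writable;
          kvm_pfn_t hfn;
          int ret;

          /* Snapshot the invalidate sequence before faulting in the pfn. */
          mmu_seq = kvm->mmu_invalidate_seq;
          smp_rmb();

          hfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
          if (is_error_noslot_pfn(hfn))
                  return -EFAULT;

          spin_lock(&kvm->mmu_lock);
          if (mmu_invalidate_retry(kvm, mmu_seq)) {
                  /* Results went stale; caller retries the fault. */
                  ret = -EAGAIN;
                  goto out_unlock;
          }

          ret = arch_install_mapping(vcpu, gfn, hfn, writable);

  out_unlock:
          kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
          spin_unlock(&kvm->mmu_lock);
          return ret;
  }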

diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 2e9aee51814273fb5b9cc7db5eef2b15f60cb220..e11ad1b616f32e17cb733e58c552d6401a8a7611 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -601,6 +601,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
        bool logging = (memslot->dirty_bitmap &&
                        !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
        unsigned long vma_pagesize, mmu_seq;
+       struct page *page;
 
        /* We need minimum second+third level pages */
        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
@@ -631,7 +632,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
        /*
         * Read mmu_invalidate_seq so that KVM can detect if the results of
-        * vma_lookup() or gfn_to_pfn_prot() become stale priort to acquiring
+        * vma_lookup() or __kvm_faultin_pfn() become stale prior to acquiring
         * kvm->mmu_lock.
         *
         * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
@@ -647,7 +648,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                return -EFAULT;
        }
 
-       hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
+       hfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
        if (hfn == KVM_PFN_ERR_HWPOISON) {
                send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
                                vma_pageshift, current);
@@ -681,11 +682,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                kvm_err("Failed to map in G-stage\n");
 
 out_unlock:
-       if ((!ret || ret == -EEXIST) && writable)
-               kvm_release_pfn_dirty(hfn);
-       else
-               kvm_release_pfn_clean(hfn);
-
+       kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
        spin_unlock(&kvm->mmu_lock);
        return ret;
 }
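
[Editor's note] The out_unlock hunk folds the old open-coded dirty/clean
choice into the helper's last two arguments. As a rough illustration only
(sketch_release() is a hypothetical name and this is not the kernel's actual
kvm_release_faultin_page() body), the single call behaves like:

  /*
   * Illustrative expansion of the call above.  "unused" is true only when
   * the map attempt failed outright; -EEXIST means another vCPU already
   * installed the mapping, so the page still counts as consumed and is
   * dirtied if it was faulted in writable.
   */
  static void sketch_release(struct kvm *kvm, struct page *page,
                             bool unused, bool dirty)
  {
          if (!page)
                  return;

          if (!unused && dirty)
                  kvm_release_page_dirty(page);
          else
                  kvm_release_page_clean(page);
  }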