git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: guest_memfd: Provide "struct page" as output from kvm_gmem_get_pfn()
author Sean Christopherson <seanjc@google.com>
Thu, 10 Oct 2024 18:23:48 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 25 Oct 2024 17:00:47 +0000 (13:00 -0400)
Provide the "struct page" associated with a guest_memfd pfn as an output
from __kvm_gmem_get_pfn() so that KVM guest page fault handlers can
directly put the page instead of having to rely on
kvm_pfn_to_refcounted_page().

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-47-seanjc@google.com>

arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/sev.c
include/linux/kvm_host.h
virt/kvm/guest_memfd.c

index 2bea2d20c57107829b3416186bddc5398c08fa2a..c657c3c449c87a40e673f5879fa66fbf20d82c43 100644 (file)
@@ -4407,7 +4407,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
        }
 
        r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
-                            &max_order);
+                            &fault->refcounted_page, &max_order);
        if (r) {
                kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
                return r;
index 4557ff3804ae27ea6c148405bdf4bdaa2d3a1460..c6c852485900117d71ca7881999e9bd172c531a1 100644 (file)
@@ -3849,6 +3849,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
        if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
                gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
                struct kvm_memory_slot *slot;
+               struct page *page;
                kvm_pfn_t pfn;
 
                slot = gfn_to_memslot(vcpu->kvm, gfn);
@@ -3859,7 +3860,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
                 * The new VMSA will be private memory guest memory, so
                 * retrieve the PFN from the gmem backend.
                 */
-               if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, NULL))
+               if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
                        return -EINVAL;
 
                /*
@@ -3888,7 +3889,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
                 * changes then care should be taken to ensure
                 * svm->sev_es.vmsa is pinned through some other means.
                 */
-               kvm_release_pfn_clean(pfn);
+               kvm_release_page_clean(page);
        }
 
        /*
@@ -4688,6 +4689,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
        struct kvm_memory_slot *slot;
        struct kvm *kvm = vcpu->kvm;
        int order, rmp_level, ret;
+       struct page *page;
        bool assigned;
        kvm_pfn_t pfn;
        gfn_t gfn;
@@ -4714,7 +4716,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
                return;
        }
 
-       ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &order);
+       ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &page, &order);
        if (ret) {
                pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
                                    gpa);
@@ -4772,7 +4774,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out:
        trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
 out_no_trace:
-       put_page(pfn_to_page(pfn));
+       kvm_release_page_unused(page);
 }
 
 static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
index a63b0325d3e280d22bf6349cc0729166ec698e3b..6efdc00b4254ceed23fea5fbd749466385e3a9fd 100644 (file)
@@ -2487,11 +2487,13 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 
 #ifdef CONFIG_KVM_PRIVATE_MEM
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+                    gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+                    int *max_order);
 #else
 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
-                                  kvm_pfn_t *pfn, int *max_order)
+                                  kvm_pfn_t *pfn, struct page **page,
+                                  int *max_order)
 {
        KVM_BUG_ON(1, kvm);
        return -EIO;
index 8a878e57c5d4efb8d195264036bcd04a8de45ef8..47a9f68f7b247f4cba0c958b4c7cd9458e7c46b4 100644 (file)
@@ -594,7 +594,8 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
 }
 
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+                    gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+                    int *max_order)
 {
        pgoff_t index = kvm_gmem_get_index(slot, gfn);
        struct file *file = kvm_gmem_get_file(slot);
@@ -615,7 +616,10 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
 
        folio_unlock(folio);
-       if (r < 0)
+
+       if (!r)
+               *page = folio_file_page(folio, index);
+       else
                folio_put(folio);
 
 out: