KVM: PPC: Explicitly require struct page memory for Ultravisor sharing
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 10 Oct 2024 18:24:20 +0000 (11:24 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 25 Oct 2024 17:00:50 +0000 (13:00 -0400)
Explicitly require "struct page" memory when sharing memory between
guest and host via an Ultravisor.  Given the number of pfn_to_page()
calls in the code, it's safe to assume that KVM already requires that the
pfn returned by gfn_to_pfn() is backed by struct page, i.e. this is
likely a bug fix, not a reduction in KVM capabilities.

Switching to gfn_to_page() will eventually allow removing gfn_to_pfn()
and kvm_pfn_to_refcounted_page().
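
For reference, a minimal sketch of the conversion pattern applied below;
the demo_*() helpers are hypothetical and for illustration only:

  #include <linux/kvm_host.h>

  /* Old pattern: raw pfn plus error-pfn check. */
  static int demo_map_gfn_old(struct kvm *kvm, gfn_t gfn)
  {
  	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

  	if (is_error_noslot_pfn(pfn))
  		return -EFAULT;

  	/* ... use pfn ... */
  	kvm_release_pfn_clean(pfn);
  	return 0;
  }

  /* New pattern: struct page required; a NULL return covers all failures. */
  static int demo_map_gfn_new(struct kvm *kvm, gfn_t gfn)
  {
  	struct page *page = gfn_to_page(kvm, gfn);

  	if (!page)
  		return -EFAULT;

  	/* ... use page_to_pfn(page) ... */
  	kvm_release_page_clean(page);
  	return 0;
  }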

Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-79-seanjc@google.com>

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 92f33115144b2837a28595eb467db4e836db52d3..3a6592a31a10a4bc7a61ab3e93aec2dcb64bdbbe 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -879,9 +879,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
 {
 
        int ret = H_PARAMETER;
-       struct page *uvmem_page;
+       struct page *page, *uvmem_page;
        struct kvmppc_uvmem_page_pvt *pvt;
-       unsigned long pfn;
        unsigned long gfn = gpa >> page_shift;
        int srcu_idx;
        unsigned long uvmem_pfn;
@@ -901,8 +900,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
 
 retry:
        mutex_unlock(&kvm->arch.uvmem_lock);
-       pfn = gfn_to_pfn(kvm, gfn);
-       if (is_error_noslot_pfn(pfn))
+       page = gfn_to_page(kvm, gfn);
+       if (!page)
                goto out;
 
        mutex_lock(&kvm->arch.uvmem_lock);
@@ -911,16 +910,16 @@ retry:
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
                pvt->remove_gfn = false; /* it continues to be a valid GFN */
-               kvm_release_pfn_clean(pfn);
+               kvm_release_page_unused(page);
                goto retry;
        }
 
-       if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
+       if (!uv_page_in(kvm->arch.lpid, page_to_pfn(page) << page_shift, gpa, 0,
                                page_shift)) {
                kvmppc_gfn_shared(gfn, kvm);
                ret = H_SUCCESS;
        }
-       kvm_release_pfn_clean(pfn);
+       kvm_release_page_clean(page);
        mutex_unlock(&kvm->arch.uvmem_lock);
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -1083,21 +1082,21 @@ out:
 
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 {
-       unsigned long pfn;
+       struct page *page;
        int ret = U_SUCCESS;
 
-       pfn = gfn_to_pfn(kvm, gfn);
-       if (is_error_noslot_pfn(pfn))
+       page = gfn_to_page(kvm, gfn);
+       if (!page)
                return -EFAULT;
 
        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out;
 
-       ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
-                        0, PAGE_SHIFT);
+       ret = uv_page_in(kvm->arch.lpid, page_to_pfn(page) << PAGE_SHIFT,
+                        gfn << PAGE_SHIFT, 0, PAGE_SHIFT);
 out:
-       kvm_release_pfn_clean(pfn);
+       kvm_release_page_clean(page);
        mutex_unlock(&kvm->arch.uvmem_lock);
        return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
 }
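
Note on the release helpers used above: the retry path in kvmppc_share_page()
never touches the page, hence kvm_release_page_unused(), while the paths on
which the Ultravisor may have read the page use kvm_release_page_clean(). A
simplified sketch of the assumed semantics (paraphrased from
virt/kvm/kvm_main.c; demo_put_page() is a hypothetical helper, not part of
this patch):

  #include <linux/kvm_host.h>

  static void demo_put_page(struct page *page, bool touched)
  {
  	if (touched)
  		kvm_release_page_clean(page);	/* mark accessed, then put */
  	else
  		kvm_release_page_unused(page);	/* put without marking accessed */
  }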