KVM: Drop @atomic param from gfn=>pfn and hva=>pfn APIs
author    Sean Christopherson <seanjc@google.com>
          Thu, 10 Oct 2024 18:23:14 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 25 Oct 2024 16:57:58 +0000 (12:57 -0400)
Drop @atomic from the myriad "to_pfn" APIs now that all callers pass
"false", and remove a comment blurb about KVM running only the "GUP fast"
part in atomic context.

No functional change intended.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-13-seanjc@google.com>
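
For callers the change is purely mechanical: the boolean that used to occupy
the @atomic slot is gone and the remaining arguments shift left by one. A
minimal before/after sketch of a typical call site (condensed from the hunks
below, not any one specific caller):

    /* Before: @atomic was always false at every remaining call site. */
    pfn = __gfn_to_pfn_memslot(memslot, gfn, false /* atomic */,
                               false /* interruptible */, NULL /* async */,
                               write_fault, &writable, NULL /* hva */);

    /* After: @interruptible, @async, @write_fault, @writable, @hva remain. */
    pfn = __gfn_to_pfn_memslot(memslot, gfn, false /* interruptible */,
                               NULL /* async */, write_fault, &writable,
                               NULL /* hva */);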

Documentation/virt/kvm/locking.rst
arch/arm64/kvm/mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/x86/kvm/mmu/mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h
virt/kvm/pfncache.c

diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 693090bfc66d6ad9fb7bcc723ff9bdb34c90ef4e..f463ac42ac7a7b2e44bf98b56c1cb0962fb673ad 100644
@@ -135,8 +135,8 @@ We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
 For direct sp, we can easily avoid it since the spte of direct sp is fixed
 to gfn.  For indirect sp, we disabled fast page fault for simplicity.
 
-A solution for indirect sp could be to pin the gfn, for example via
-gfn_to_pfn_memslot_atomic, before the cmpxchg.  After the pinning:
+A solution for indirect sp could be to pin the gfn before the cmpxchg.  After
+the pinning:
 
 - We have held the refcount of pfn; that means the pfn can not be freed and
   be reused for another gfn.
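
The pinning scheme described above, as a hypothetical sketch (kvm_pin_gfn() is
an illustrative name, not an existing KVM API; try_cmpxchg64() mirrors what the
x86 fast page fault path uses on sptes):

    /*
     * Pin first: holding a refcount on the pfn guarantees it cannot be
     * freed and recycled for another gfn while the cmpxchg is in flight.
     */
    kvm_pfn_t pfn = kvm_pin_gfn(slot, gfn);    /* hypothetical helper */

    if (!try_cmpxchg64(sptep, &old_spte, new_spte)) {
        /* Lost the race; retry or bail. */
    }

    kvm_release_pfn_clean(pfn);                /* drop the pin either way */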
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0f7658aefa1a3ddb0ee5a5e6a6cc25857d80b9c6..9fbc79fad292a98ca1439e43dc354d478de6004a 100644
@@ -1570,7 +1570,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);
 
-       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                   write_fault, &writable, NULL);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1b51b1c4713bf2347cd1c27c2cde80b95baf8285..8cd02ca4b1b8df139a00fe518f8962afe5003736 100644
@@ -613,7 +613,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                write_ok = true;
        } else {
                /* Call KVM generic code to do the slow-path check */
-               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, &write_ok, NULL);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 408d98f8a51479364de60bfd3e8d0e3f5902d0c3..26a969e935e37e40122267e0a5e2786890af2c23 100644
@@ -852,7 +852,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                unsigned long pfn;
 
                /* Call KVM generic code to do the slow-path check */
-               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, upgrade_p, NULL);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 37c2f8d11e051f1d59555cf7789d542f94989dc6..e5e0bf7593e726bdb369d5e7875354cf47c39ef4 100644
@@ -4387,9 +4387,9 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                return kvm_faultin_pfn_private(vcpu, fault);
 
        async = false;
-       fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, false,
-                                         &async, fault->write,
-                                         &fault->map_writable, &fault->hva);
+       fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, &async,
+                                         fault->write, &fault->map_writable,
+                                         &fault->hva);
        if (!async)
                return RET_PF_CONTINUE; /* *pfn has correct page already */
 
@@ -4409,9 +4409,9 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
         * to wait for IO.  Note, gup always bails if it is unable to quickly
         * get a page and a fatal signal, i.e. SIGKILL, is pending.
         */
-       fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
-                                         NULL, fault->write,
-                                         &fault->map_writable, &fault->hva);
+       fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, true, NULL,
+                                         fault->write, &fault->map_writable,
+                                         &fault->hva);
        return RET_PF_CONTINUE;
 }
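
Taken together, the two hunks above preserve x86's two-step strategy: the
first attempt passes a non-NULL @async so the fault can be punted to an async
page fault, and only the synchronous retry passes @interruptible=true.
Condensed from the hunks (not verbatim kernel code):

    bool async = false;

    /* First attempt: allow async page faults, don't let signals interrupt. */
    fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn,
                                      false /* interruptible */, &async,
                                      fault->write, &fault->map_writable,
                                      &fault->hva);
    if (!async)
        return RET_PF_CONTINUE;

    /* ... try to queue an async page fault instead; if that fails ... */

    /* Synchronous retry: wait for IO, but let a fatal signal bail out. */
    fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn,
                                      true /* interruptible */, NULL /* async */,
                                      fault->write, &fault->map_writable,
                                      &fault->hva);
    return RET_PF_CONTINUE;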
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6a3976c1a2182ce13ae29e6a49079e647773ead7..32e23e05a8c330c6caa7a64bf5b097a904b26ed8 100644
@@ -1232,9 +1232,8 @@ kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-                              bool atomic, bool interruptible, bool *async,
+                              bool interruptible, bool *async,
                               bool write_fault, bool *writable, hva_t *hva);
 
 void kvm_release_pfn_clean(kvm_pfn_t pfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index aa7ae0f0f90eaaf9b5e50163969bfa1cc02a7559..c7506eb2308650ab9610dede78579058deb396ea 100644
@@ -2756,8 +2756,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.  It's also the
- * only part that runs if we can in atomic context.
+ * true indicates success, otherwise false is returned.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                            bool *writable, kvm_pfn_t *pfn)
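
For context, the fast path that the deleted comment sentence referred to is
essentially a single GUP-fast attempt; a simplified sketch of its shape
(details may differ from the real function):

    static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                                bool *writable, kvm_pfn_t *pfn)
    {
        struct page *page[1];

        /*
         * GUP-fast pins a writable pfn, so it only helps if the caller
         * wants, or at least tolerates, a writable mapping.
         */
        if (!(write_fault || writable))
            return false;

        if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
            *pfn = page_to_pfn(page[0]);
            if (writable)
                *writable = true;
            return true;
        }
        return false;
    }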
@@ -2922,7 +2921,6 @@ out:
 /*
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function is forbidden from sleeping
  * @interruptible: whether the process can be interrupted by non-fatal signals
  * @async: whether this function need to wait IO complete if the
  *         host page is not in the memory
@@ -2934,22 +2932,16 @@ out:
  * 2): @write_fault = false && @writable, @writable will tell the caller
  *     whether the mapping is writable.
  */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
-                    bool *async, bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool *async,
+                    bool write_fault, bool *writable)
 {
        struct vm_area_struct *vma;
        kvm_pfn_t pfn;
        int npages, r;
 
-       /* we can do it either atomically or asynchronously, not both */
-       BUG_ON(atomic && async);
-
        if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
                return pfn;
 
-       if (atomic)
-               return KVM_PFN_ERR_FAULT;
-
        npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
                                 writable, &pfn);
        if (npages == 1)
@@ -2986,7 +2978,7 @@ exit:
 }
 
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-                              bool atomic, bool interruptible, bool *async,
+                              bool interruptible, bool *async,
                               bool write_fault, bool *writable, hva_t *hva)
 {
        unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
@@ -3008,33 +3000,24 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                writable = NULL;
        }
 
-       return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
-                         writable);
+       return hva_to_pfn(addr, interruptible, async, write_fault, writable);
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable)
 {
-       return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
-                                   NULL, write_fault, writable, NULL);
+       return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
+                                   write_fault, writable, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
-                                   NULL, NULL);
+       return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
-{
-       return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
-                                   NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
-
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
        return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 715f19669d01f72912af9b7393ccd01f65c0527e..a3fa86f60d6c396eb09bb0b57dd4295e71e82541 100644
@@ -20,8 +20,8 @@
 #define KVM_MMU_UNLOCK(kvm)            spin_unlock(&(kvm)->mmu_lock)
 #endif /* KVM_HAVE_MMU_RWLOCK */
 
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
-                    bool *async, bool write_fault, bool *writable);
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool *async,
+                    bool write_fault, bool *writable);
 
 #ifdef CONFIG_HAVE_KVM_PFNCACHE
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index f0039efb9e1e34315450bcb589429a8cecb37a73..58c706a610e5b405bb0db5b06d947f6c111f655a 100644
@@ -198,7 +198,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
                }
 
                /* We always request a writeable mapping */
-               new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
+               new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
                        goto out_error;
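
With gfn_to_pfn_memslot_atomic() removed, any remaining user (e.g. out of
tree) has to migrate to the sleeping variant; a hypothetical migration,
assuming the caller can tolerate sleeping:

    /*
     * Before: safe in atomic context, but returned KVM_PFN_ERR_FAULT
     * whenever the GUP-fast path missed.
     */
    pfn = gfn_to_pfn_memslot_atomic(slot, gfn);

    /* After: may sleep, so the call must move out of atomic context. */
    pfn = gfn_to_pfn_memslot(slot, gfn);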