KVM: TDX: Fold tdx_sept_drop_private_spte() into tdx_sept_remove_private_spte()
author    Sean Christopherson <seanjc@google.com>
Thu, 30 Oct 2025 20:09:33 +0000 (13:09 -0700)
committer Sean Christopherson <seanjc@google.com>
Wed, 5 Nov 2025 19:05:42 +0000 (11:05 -0800)
Fold tdx_sept_drop_private_spte() into tdx_sept_remove_private_spte() as a
step towards having "remove" be the one and only function that deals with
removing/zapping/dropping a SPTE, e.g. to avoid having to differentiate
between "zap", "drop", and "remove".  Eliminating the "drop" helper also
gets rid of what is effectively dead code due to redundant checks, e.g. on
an HKID being assigned.

No functional change intended.

Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Link: https://patch.msgid.link/20251030200951.3402865-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
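
As background for the diff below, the code being moved relies on a retry-once
idiom for SEAMCALLs that can contend with TDH.VP.ENTER. The following is a
minimal illustrative sketch of that pattern, not the kernel code itself:
seamcall_op() is a hypothetical stand-in for a TDX-module call such as
tdh_mem_page_remove(), while tdx_operand_busy() and the
tdx_no_vcpus_enter_*() helpers are the real ones that appear in the diff.

/*
 * Minimal sketch of the "retry once with vCPUs kicked out" idiom.
 * seamcall_op() is a hypothetical stand-in for a TDX-module call,
 * e.g. tdh_mem_page_remove(), that can fail with TDX_OPERAND_BUSY
 * if it contends with a vCPU invoking TDH.VP.ENTER.
 */
static int retry_seamcall_once(struct kvm *kvm)
{
	u64 err = seamcall_op(kvm);

	if (unlikely(tdx_operand_busy(err))) {
		/*
		 * Prevent all vCPUs from re-entering the guest, i.e.
		 * from invoking TDH.VP.ENTER; with the contention gone,
		 * the second attempt is expected to succeed.
		 */
		tdx_no_vcpus_enter_start(kvm);
		err = seamcall_op(kvm);
		tdx_no_vcpus_enter_stop(kvm);
	}

	return err ? -EIO : 0;
}

With tdx_sept_drop_private_spte() folded away, tdx_sept_remove_private_spte()
applies this pattern directly to TDH.MEM.PAGE.REMOVE and then performs the
WBINVD and page reset inline.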
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 052a086335f9445869aa4ce6db256a8441bbb6d9..667cd089eec8d07588d025dee63f942e657c9a5c 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1648,55 +1648,6 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
        return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
 }
 
-static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
-                                     enum pg_level level, struct page *page)
-{
-       int tdx_level = pg_level_to_tdx_sept_level(level);
-       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
-       gpa_t gpa = gfn_to_gpa(gfn);
-       u64 err, entry, level_state;
-
-       /* TODO: handle large pages. */
-       if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
-               return -EIO;
-
-       if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
-               return -EIO;
-
-       /*
-        * The write lock is held when zapping a private page, so there is no
-        * race with other vCPU S-EPT operations.  Racing with TDH.VP.ENTER is
-        * still possible due to 0-step mitigation and guest TDCALLs.
-        */
-       err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
-                                 &level_state);
-
-       if (unlikely(tdx_operand_busy(err))) {
-               /*
-                * The second attempt is expected to succeed after kicking all
-                * other vCPUs out of the guest to block TDH.VP.ENTER.
-                */
-               tdx_no_vcpus_enter_start(kvm);
-               err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
-                                         &level_state);
-               tdx_no_vcpus_enter_stop(kvm);
-       }
-
-       if (KVM_BUG_ON(err, kvm)) {
-               pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
-               return -EIO;
-       }
-
-       err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
-
-       if (KVM_BUG_ON(err, kvm)) {
-               pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
-               return -EIO;
-       }
-       tdx_quirk_reset_page(page);
-       return 0;
-}
-
 static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
                                     enum pg_level level, void *private_spt)
 {
@@ -1858,7 +1809,11 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
 static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
                                        enum pg_level level, kvm_pfn_t pfn)
 {
+       int tdx_level = pg_level_to_tdx_sept_level(level);
+       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
        struct page *page = pfn_to_page(pfn);
+       gpa_t gpa = gfn_to_gpa(gfn);
+       u64 err, entry, level_state;
        int ret;
 
        /*
@@ -1869,6 +1824,10 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
        if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
                return -EIO;
 
+       /* TODO: handle large pages. */
+       if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
+               return -EIO;
+
        ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
        if (ret <= 0)
                return ret;
@@ -1879,7 +1838,38 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
         */
        tdx_track(kvm);
 
-       return tdx_sept_drop_private_spte(kvm, gfn, level, page);
+       /*
+        * The write lock is held when zapping a private page, so there is no
+        * race with other vCPU S-EPT operations.  Racing with TDH.VP.ENTER is
+        * still possible due to 0-step mitigation and guest TDCALLs.
+        */
+       err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+                                 &level_state);
+
+       if (unlikely(tdx_operand_busy(err))) {
+               /*
+                * The second attempt is expected to succeed after kicking all
+                * other vCPUs out of the guest to block TDH.VP.ENTER.
+                */
+               tdx_no_vcpus_enter_start(kvm);
+               err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+                                         &level_state);
+               tdx_no_vcpus_enter_stop(kvm);
+       }
+
+       if (KVM_BUG_ON(err, kvm)) {
+               pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
+               return -EIO;
+       }
+
+       err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
+       if (KVM_BUG_ON(err, kvm)) {
+               pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
+               return -EIO;
+       }
+
+       tdx_quirk_reset_page(page);
+       return 0;
 }
 
 void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,