Revert "KVM: arm64: Split kvm_pgtable_stage2_destroy()"
author     Oliver Upton <oliver.upton@linux.dev>
           Wed, 10 Sep 2025 18:09:29 +0000 (11:09 -0700)
committer  Oliver Upton <oliver.upton@linux.dev>
           Wed, 10 Sep 2025 18:11:22 +0000 (11:11 -0700)
This reverts commit 0e89ca13ee5ff41b437bb2a003c0eaf34ea43555.

The functional change that depended on this refactoring has been found
to be quite problematic. Reverting the whole pile to start fresh when
new fixes are available.

Message-ID: <20250910180930.3679473-3-oliver.upton@linux.dev>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/include/asm/kvm_pkvm.h
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/pkvm.c
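
(Not part of the patch.) For orientation, the net API effect of this revert: stage-2 teardown goes back to a single kvm_pgtable_stage2_destroy() entry point instead of the split kvm_pgtable_stage2_destroy_range()/kvm_pgtable_stage2_destroy_pgd() pair removed below. A minimal standalone C sketch of that call shape follows; every type and name here (pgt_stub, destroy_*_stub) is an illustrative stub, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stub handle; the real struct kvm_pgtable carries much more state. */
struct pgt_stub {
	uint32_t ia_bits;	/* guest IPA width */
	void *pgd;
};

/* The two steps the reverted commit exposed separately... */
static void destroy_range_stub(struct pgt_stub *pgt, uint64_t addr, uint64_t size)
{
	printf("free page-table pages covering [%#llx, %#llx)\n",
	       (unsigned long long)addr, (unsigned long long)(addr + size));
	(void)pgt;
}

static void destroy_pgd_stub(struct pgt_stub *pgt)
{
	printf("free the PGD pages themselves\n");
	pgt->pgd = NULL;
}

/* ...are again performed behind one entry point after the revert. */
static void destroy_stub(struct pgt_stub *pgt)
{
	destroy_range_stub(pgt, 0, 1ULL << pgt->ia_bits);
	destroy_pgd_stub(pgt);
}

int main(void)
{
	struct pgt_stub pgt = { .ia_bits = 40, .pgd = (void *)0x1 };

	destroy_stub(&pgt);	/* call shape seen at e.g. kvm_free_stage2_pgd() */
	return 0;
}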

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 1246216616b518ec2d35992c12a48a5ae4449568..2888b5d037573621c4e126a42f5f21ff9e30b9bd 100644
@@ -355,11 +355,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
        return pteref;
 }
 
-static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
-{
-       return pteref;
-}
-
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
        /*
@@ -389,11 +384,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
        return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
 }
 
-static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
-{
-       return rcu_dereference_raw(pteref);
-}
-
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
        if (walker->flags & KVM_PGTABLE_WALK_SHARED)
@@ -561,26 +551,6 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2
  */
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 
-/**
- * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
- * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:      Intermediate physical address at which to place the mapping.
- * @size:      Size of the mapping.
- *
- * The page-table is assumed to be unreachable by any hardware walkers prior
- * to freeing and therefore no TLB invalidation is performed.
- */
-void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-                                       u64 addr, u64 size);
-
-/**
- * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
- * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
- *
- * It is assumed that the rest of the page-table is freed before this operation.
- */
-void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
-
 /**
  * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
  * @mm_ops:    Memory management callbacks.
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index 35f9d94780048c606c3717cb7a3295f9a695f2e9..ea58282f59bb4fe8841064bfafffe9af3c17b667 100644
@@ -179,9 +179,7 @@ struct pkvm_mapping {
 
 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                             struct kvm_pgtable_mm_ops *mm_ops);
-void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-                                       u64 addr, u64 size);
-void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
                            enum kvm_pgtable_prot prot, void *mc,
                            enum kvm_pgtable_walk_flags flags);
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c36f282a175dfc336a83ee913726de90194ad5cb..c351b4abd5dbfbcbe738ba0f9efc41a1c99ecc05 100644
@@ -1551,38 +1551,21 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
        return 0;
 }
 
-void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-                                      u64 addr, u64 size)
+void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 {
+       size_t pgd_sz;
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
                          KVM_PGTABLE_WALK_TABLE_POST,
        };
 
-       WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
-}
-
-void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
-{
-       size_t pgd_sz;
-
+       WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
        pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-
-       /*
-        * Since the pgtable is unlinked at this point, and not shared with
-        * other walkers, safely deference pgd with kvm_dereference_pteref_raw()
-        */
-       pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
+       pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
        pgt->pgd = NULL;
 }
 
-void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
-{
-       kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
-       kvm_pgtable_stage2_destroy_pgd(pgt);
-}
-
 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 {
        kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
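
(Aside, not part of the patch.) The restored body above can reuse kvm_dereference_pteref(&walker, pgt->pgd) because the free walker only sets KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, not KVM_PGTABLE_WALK_SHARED, so the rcu_dereference_check() inside kvm_dereference_pteref() is satisfied without the removed _raw helper. A rough standalone sketch of that check; the flag value and all names are stubs, not the kernel's.

#include <assert.h>
#include <stdbool.h>

#define WALK_SHARED_STUB (1u << 2)	/* stand-in for KVM_PGTABLE_WALK_SHARED */

struct walker_stub {
	unsigned int flags;
};

typedef unsigned long pte_stub;

/*
 * Mirrors the condition handed to rcu_dereference_check() in the hunk above:
 * the dereference is legal if the RCU read lock is held OR the walker is
 * exclusive (no shared flag), which is the case on the destroy path.
 */
static pte_stub *deref_pteref_stub(struct walker_stub *walker, pte_stub *pteref,
				   bool rcu_read_lock_held)
{
	assert(rcu_read_lock_held || !(walker->flags & WALK_SHARED_STUB));
	return pteref;
}

int main(void)
{
	pte_stub pgd;
	struct walker_stub free_walker = { .flags = 0 };	/* exclusive walker */

	/* Destroy path: no RCU read-side section needed. */
	(void)deref_pteref_stub(&free_walker, &pgd, false);
	return 0;
}
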
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 862bfa272f0a97d3801be94c4d748bb61ce3b177..7363942925038eeac208bf0f58cc2f81566db19a 100644
@@ -904,14 +904,6 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
        return 0;
 }
 
-static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
-{
-       unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
-
-       KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, 0, BIT(ia_bits));
-       KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
-}
-
 /**
  * kvm_init_stage2_mmu - Initialise a S2 MMU structure
  * @kvm:       The pointer to the KVM structure
@@ -988,7 +980,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
        return 0;
 
 out_destroy_pgtable:
-       kvm_stage2_destroy(pgt);
+       KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
 out_free_pgtable:
        kfree(pgt);
        return err;
@@ -1089,7 +1081,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
        write_unlock(&kvm->mmu_lock);
 
        if (pgt) {
-               kvm_stage2_destroy(pgt);
+               KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
                kfree(pgt);
        }
 }
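
(Aside, not part of the patch.) The KVM_PGT_FN() wrapper at the two call sites above selects between the regular stage-2 implementation and its pkvm_ counterpart, so after the revert both callers resolve to either kvm_pgtable_stage2_destroy() or pkvm_pgtable_stage2_destroy(). A loose standalone illustration of that kind of dispatch, assuming nothing about the real macro beyond what the call sites show; all names here are stubs.

#include <stdbool.h>
#include <stdio.h>

struct pgt_stub { const char *vm; };

/* Stand-in for kvm_pgtable_stage2_destroy(): walk the full range, free the PGD. */
static void kvm_destroy_stub(struct pgt_stub *pgt)
{
	printf("%s: regular stage-2 teardown\n", pgt->vm);
}

/* Stand-in for pkvm_pgtable_stage2_destroy(): unmap the whole IPA space. */
static void pkvm_destroy_stub(struct pgt_stub *pgt)
{
	printf("%s: pKVM teardown, unmap [0, ~0ULL)\n", pgt->vm);
}

/* Loose analogue of the KVM_PGT_FN()-selected call in the hunks above. */
static void destroy_dispatch(struct pgt_stub *pgt, bool protected_vm)
{
	if (protected_vm)
		pkvm_destroy_stub(pgt);
	else
		kvm_destroy_stub(pgt);
}

int main(void)
{
	struct pgt_stub normal = { "normal VM" }, prot = { "protected VM" };

	destroy_dispatch(&normal, false);
	destroy_dispatch(&prot, true);
	return 0;
}
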
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 61827cf6fea4aafa926f71f5f15e50655535a8b5..fcd70bfe44fb8cce247900c77721a202b46563d8 100644
@@ -316,16 +316,9 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
        return 0;
 }
 
-void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-                                       u64 addr, u64 size)
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 {
-       __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
-}
-
-void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
-{
-       /* Expected to be called after all pKVM mappings have been released. */
-       WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root));
+       __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
 }
 
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,