KVM: x86/mmu: Drop per-VM zapped_obsolete_pages list
author     Vipin Sharma <vipinsh@google.com>
           Fri, 1 Nov 2024 20:14:37 +0000 (13:14 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 5 Nov 2024 03:22:53 +0000 (19:22 -0800)
Drop the per-VM zapped_obsolete_pages list now that the usage from the
defunct mmu_shrinker is gone, and instead use a local list to track pages
in kvm_zap_obsolete_pages(), the sole remaining user of
zapped_obsolete_pages.

Opportunistically add an assertion to verify and document that slots_lock
must be held, i.e. that there can only be one active instance of
kvm_zap_obsolete_pages() at any given time, and by doing so also prove
that using a local list instead of a per-VM list doesn't change any
functionality (beyond trivialities like list initialization).

Signed-off-by: Vipin Sharma <vipinsh@google.com>
Link: https://lore.kernel.org/r/20241101201437.1604321-2-vipinsh@google.com
[sean: split to separate patch, write changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
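For orientation, a condensed sketch of how kvm_zap_obsolete_pages() is shaped after this change (not the verbatim kernel source; the obsolete-page checks and batching/yield handling inside the loop are elided here and are unchanged by this patch): the per-VM zapped_obsolete_pages list is replaced by an on-stack invalid_list, and the new lockdep assertion documents that slots_lock serializes callers.

static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;
	LIST_HEAD(invalid_list);	/* on-stack list, replaces kvm->arch.zapped_obsolete_pages */
	bool unstable;

	/*
	 * slots_lock serializes callers, so at most one instance of this
	 * function can be active at a time and a local list is sufficient.
	 */
	lockdep_assert_held(&kvm->slots_lock);

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/* ... skip non-obsolete pages, batch and yield as before ... */

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
				&invalid_list, &nr_zapped);
		batch += nr_zapped;

		if (unstable)
			goto restart;
	}

	/*
	 * Commit the zap: flush TLBs and free every page accumulated on the
	 * local list, leaving nothing on a per-VM list.
	 */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}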
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa8007bc520aae46dae50c7d2c871580eecd50d8..433e65974e2beca9cc72f225721760fddaa5c514 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1306,7 +1306,6 @@ struct kvm_arch {
        bool pre_fault_allowed;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct list_head active_mmu_pages;
-       struct list_head zapped_obsolete_pages;
        /*
         * A list of kvm_mmu_page structs that, if zapped, could possibly be
         * replaced by an NX huge page.  A shadow page is on this list if its
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8919ca104374c4bf4f0a8ed78bab46b1db95dfaa..38128e7b9af1fb72a8001fb2a8c56ec3abe56c9c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6367,8 +6367,11 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        int nr_zapped, batch = 0;
+       LIST_HEAD(invalid_list);
        bool unstable;
 
+       lockdep_assert_held(&kvm->slots_lock);
+
 restart:
        list_for_each_entry_safe_reverse(sp, node,
              &kvm->arch.active_mmu_pages, link) {
@@ -6400,7 +6403,7 @@ restart:
                }
 
                unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
-                               &kvm->arch.zapped_obsolete_pages, &nr_zapped);
+                               &invalid_list, &nr_zapped);
                batch += nr_zapped;
 
                if (unstable)
@@ -6416,7 +6419,7 @@ restart:
         * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are
         * running with an obsolete MMU.
         */
-       kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
 /*
@@ -6483,7 +6486,6 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        kvm->arch.shadow_mmio_value = shadow_mmio_value;
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
        INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);