Revert "KVM: MMU: fast invalidate all pages"
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fa153d771f4759b0862fb742cb154558a2667c82..6d602d4c3ca4abdbbe35fe2667208cc0149665e6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2060,12 +2060,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
-       /*
-        * The active_mmu_pages list is the FIFO list, do not move the
-        * page until it is zapped. kvm_zap_obsolete_pages depends on
-        * this feature. See the comments in kvm_zap_obsolete_pages().
-        */
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
@@ -2214,7 +2208,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-               if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
+               if ((_sp)->role.invalid) {    \
                } else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                        \
@@ -2266,11 +2260,6 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
 static void mmu_audit_disable(void) { }
 #endif
 
-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-       return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
-}
-
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
 {
@@ -2495,7 +2484,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
-       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        clear_page(sp->spt);
        trace_kvm_mmu_get_page(sp, true);
 
@@ -4206,14 +4194,6 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
                        return false;
 
                if (cached_root_available(vcpu, new_cr3, new_role)) {
-                       /*
-                        * It is possible that the cached previous root page is
-                        * obsolete because of a change in the MMU
-                        * generation number. However, that is accompanied by
-                        * KVM_REQ_MMU_RELOAD, which will free the root that we
-                        * have set here and allocate a new one.
-                        */
-
                        kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
                        if (!skip_tlb_flush) {
                                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -5865,82 +5845,6 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_zap_obsolete_pages(struct kvm *kvm)
-{
-       struct kvm_mmu_page *sp, *node;
-       LIST_HEAD(invalid_list);
-
-restart:
-       list_for_each_entry_safe_reverse(sp, node,
-             &kvm->arch.active_mmu_pages, link) {
-               /*
-                * No obsolete page exists before new created page since
-                * active_mmu_pages is the FIFO list.
-                */
-               if (!is_obsolete_sp(kvm, sp))
-                       break;
-
-               /*
-                * Do not repeatedly zap a root page to avoid unnecessary
-                * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-                * progress:
-                *    vcpu 0                        vcpu 1
-                *                         call vcpu_enter_guest():
-                *                            1): handle KVM_REQ_MMU_RELOAD
-                *                                and require mmu-lock to
-                *                                load mmu
-                * repeat:
-                *    1): zap root page and
-                *        send KVM_REQ_MMU_RELOAD
-                *
-                *    2): if (cond_resched_lock(mmu-lock))
-                *
-                *                            2): hold mmu-lock and load mmu
-                *
-                *                            3): see KVM_REQ_MMU_RELOAD bit
-                *                                on vcpu->requests is set
-                *                                then return 1 to call
-                *                                vcpu_enter_guest() again.
-                *            goto repeat;
-                *
-                * Since we are reversely walking the list and the invalid
-                * list will be moved to the head, skip the invalid page
-                * can help us to avoid the infinity list walking.
-                */
-               if (sp->role.invalid)
-                       continue;
-
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-                       cond_resched_lock(&kvm->mmu_lock);
-                       goto restart;
-               }
-
-               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-                       goto restart;
-       }
-
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-}
-
-/*
- * Fast invalidate all shadow pages and use lock-break technique
- * to zap obsolete pages.
- *
- * It's required when memslot is being deleted or VM is being
- * destroyed, in these cases, we should ensure that KVM MMU does
- * not use any resource of the being-deleted slot or all slots
- * after calling the function.
- */
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
-{
-       spin_lock(&kvm->mmu_lock);
-       kvm->arch.mmu_valid_gen++;
-
-       kvm_zap_obsolete_pages(kvm);
-       spin_unlock(&kvm->mmu_lock);
-}
-
 static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;