From: Paolo Bonzini
Date: Sat, 14 Sep 2024 13:38:43 +0000 (-0400)
Subject: Merge tag 'kvm-x86-mmu-6.12' of https://github.com/kvm-x86/linux into HEAD
X-Git-Tag: v6.12-rc1~11^2~4
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5d55a052e369ff0d98a4dbd614635bc931c45d11;p=thirdparty%2Flinux.git

Merge tag 'kvm-x86-mmu-6.12' of https://github.com/kvm-x86/linux into HEAD

KVM x86 MMU changes for 6.12:

 - Overhaul the "unprotect and retry" logic to more precisely identify cases
   where retrying is actually helpful, and to harden all retry paths against
   putting the guest into an infinite retry loop.

 - Add support for yielding, e.g. to honor NEED_RESCHED, when zapping rmaps in
   the shadow MMU.

 - Refactor pieces of the shadow MMU related to aging SPTEs in preparation for
   adding MGLRU support in KVM.

 - Misc cleanups
---

5d55a052e369ff0d98a4dbd614635bc931c45d11
diff --cc arch/x86/kvm/mmu/mmu.c
index b278efb1d179b,b751e7e2a05e4..e081f785fb230
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@@ -6999,43 -7047,6 +7049,35 @@@ void kvm_arch_flush_shadow_all(struct k
  	kvm_mmu_zap_all(kvm);
  }
  
 +/*
 + * Zap leaf SPTEs in the memslot's range when a memslot is moved/deleted.
 + *
 + * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required; worst
 + * case we'll have unused shadow pages lying around until they are
 + * recycled due to age or when the VM is destroyed.
 + */
 +static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
 +{
 +	struct kvm_gfn_range range = {
 +		.slot = slot,
 +		.start = slot->base_gfn,
 +		.end = slot->base_gfn + slot->npages,
 +		.may_block = true,
 +	};
- 	bool flush = false;
 +
 +	write_lock(&kvm->mmu_lock);
- 
- 	if (kvm_memslots_have_rmaps(kvm))
- 		flush = kvm_handle_gfn_range(kvm, &range, kvm_zap_rmap);
- 
- 	if (tdp_mmu_enabled)
- 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, &range, flush);
- 
- 	if (flush)
++	if (kvm_unmap_gfn_range(kvm, &range))
 +		kvm_flush_remote_tlbs_memslot(kvm, slot);
 +
 +	write_unlock(&kvm->mmu_lock);
 +}
 +
 +static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
 +{
 +	return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
 +	       kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
 +}
 +
  void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  				   struct kvm_memory_slot *slot)
  {
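
The hunk above is truncated right as kvm_arch_flush_shadow_memslot() begins, so
the resolution of its body is not visible here. As a reading aid, here is a
minimal sketch of how the two helpers introduced above plausibly fit together,
assuming kvm_mmu_zap_all_fast() as the legacy zap-everything path (an
assumption; only the function's opening line appears in the truncated diff):

/*
 * Illustrative sketch only, not necessarily the exact merge resolution:
 * default VMs that still have the KVM_X86_QUIRK_SLOT_ZAP_ALL quirk
 * enabled keep the historical zap-everything behavior; all other VMs
 * zap only the leaf SPTEs covering the affected memslot.
 */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	if (kvm_memslot_flush_zap_all(kvm))
		kvm_mmu_zap_all_fast(kvm);
	else
		kvm_mmu_zap_memslot_leafs(kvm, slot);
}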
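
For completeness, the quirk checked by kvm_memslot_flush_zap_all() is
controlled from userspace via the standard quirk-disabling mechanism,
KVM_CAP_DISABLE_QUIRKS2. A short usage sketch, assuming vm_fd is a VM file
descriptor from KVM_CREATE_VM (the helper name and error handling are
illustrative):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/*
 * Sketch: opt a VM out of the historical "zap everything on memslot
 * move/delete" behavior by disabling KVM_X86_QUIRK_SLOT_ZAP_ALL.
 * Requires a kernel that has this series; vm_fd is assumed to come
 * from KVM_CREATE_VM.
 */
static int disable_slot_zap_all(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DISABLE_QUIRKS2,
		.args[0] = KVM_X86_QUIRK_SLOT_ZAP_ALL,
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0) {
		perror("KVM_ENABLE_CAP(DISABLE_QUIRKS2)");
		return -1;
	}
	return 0;
}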
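
The changelog's yielding bullet is not part of the hunk shown here. As
background on the pattern it refers to, below is a generic sketch of honoring
NEED_RESCHED while walking rmaps under mmu_lock; this is not the series'
actual code, and zap_one_rmap_entry() plus the gfn loop bounds are
hypothetical placeholders:

/*
 * Generic sketch of yielding inside an mmu_lock'd walk, not the actual
 * code from the series.  Pending TLB flushes must be performed before
 * dropping mmu_lock, as other tasks may touch the page tables while
 * the lock is released.
 */
static bool zap_rmaps_yielding(struct kvm *kvm, gfn_t start, gfn_t end)
{
	bool flush = false;
	gfn_t gfn;

	for (gfn = start; gfn < end; gfn++) {
		flush |= zap_one_rmap_entry(kvm, gfn);

		/* Drop mmu_lock and reschedule if someone needs the CPU. */
		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

	return flush;
}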