git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: nVMX: Sync all PGDs on nested transition with shadow paging
authorSean Christopherson <seanjc@google.com>
Wed, 9 Jun 2021 23:42:21 +0000 (16:42 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Jul 2021 14:59:36 +0000 (16:59 +0200)
[ Upstream commit 07ffaf343e34b555c9e7ea39a9c81c439a706f13 ]

Trigger a full TLB flush on behalf of the guest on nested VM-Enter and
VM-Exit when VPID is disabled for L2.  kvm_mmu_new_pgd() syncs only the
current PGD, which can theoretically leave stale, unsync'd entries in a
previous guest PGD, which could be consumed if L2 is allowed to load CR3
with PCID_NOFLUSH=1.

Rename KVM_REQ_HV_TLB_FLUSH to KVM_REQ_TLB_FLUSH_GUEST so that it can
be utilized for its obvious purpose of emulating a guest TLB flush.

Note, there is no change to the actual TLB flush executed by KVM, even
though the fast PGD switch uses KVM_REQ_TLB_FLUSH_CURRENT.  When VPID is
disabled for L2, vpid02 is guaranteed to be '0', and thus
nested_get_vpid02() will return the VPID that is shared by L1 and L2.

Generate the request outside of kvm_mmu_new_pgd(), as getting the common
helper to correctly identify which request is needed is quite painful.
E.g. using KVM_REQ_TLB_FLUSH_GUEST when nested EPT is in play is wrong as
a TLB flush from the L1 kernel's perspective does not invalidate EPT
mappings.  And, by using KVM_REQ_TLB_FLUSH_GUEST, nVMX can do future
simplification by moving the logic into nested_vmx_transition_tlb_flush().

Fixes: 41fab65e7c44 ("KVM: nVMX: Skip MMU sync on nested VMX transition when possible")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609234235.1244004-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

index 0702adf2460b0cf00e742613cbabeb90801da95f..0758ff3008c6dcbfcfe75dc25c6aaebe69970047 100644 (file)
@@ -85,7 +85,7 @@
 #define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
-#define KVM_REQ_HV_TLB_FLUSH \
+#define KVM_REQ_TLB_FLUSH_GUEST \
        KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY              KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED     KVM_ARCH_REQ(29)
index f00830e5202fec90f68d2206bd3f8609490a4e3a..fdd1eca717fd64f0faee5d5e9a211313f7c2fd6d 100644 (file)
@@ -1704,7 +1704,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
         * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
         * analyze it here, flush TLB regardless of the specified address space.
         */
-       kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
+       kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
                                    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
index 8cb5a95e0c54285a498abff14777a2d1a6bce625..eca3db08d1831f313f7d8f0616076d4d6bf734dc 100644 (file)
@@ -1132,12 +1132,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 
        /*
         * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
-        * flushes are handled by nested_vmx_transition_tlb_flush().  See
-        * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
+        * flushes are handled by nested_vmx_transition_tlb_flush().
         */
-       if (!nested_ept)
-               kvm_mmu_new_pgd(vcpu, cr3, true,
-                               !nested_vmx_transition_mmu_sync(vcpu));
+       if (!nested_ept) {
+               kvm_mmu_new_pgd(vcpu, cr3, true, true);
+
+               /*
+                * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
+                * across all PCIDs, i.e. all PGDs need to be synchronized.
+                * See nested_vmx_transition_mmu_sync() for more details.
+                */
+               if (nested_vmx_transition_mmu_sync(vcpu))
+                       kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+       }
 
        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
index d46a6182d0e91ca7a2584e44942d130ff5b0d4c4..615dd236e84292e0aeacfb0dd20f2a1f074963ee 100644 (file)
@@ -9022,7 +9022,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                }
                if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
                        kvm_vcpu_flush_tlb_current(vcpu);
-               if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+               if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
                        kvm_vcpu_flush_tlb_guest(vcpu);
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {