Merge tag 'kvm-x86-svm-6.8' of https://github.com/kvm-x86/linux into HEAD
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7121463123584c5d382da47ba3a7f34a67eba706..2171b0cda8d4eea075e39f454eea45bd1af72e2c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
        { .index = MSR_IA32_LASTINTTOIP,                .always = false },
+       { .index = MSR_IA32_XSS,                        .always = false },
        { .index = MSR_EFER,                            .always = false },
        { .index = MSR_IA32_CR_PAT,                     .always = false },
        { .index = MSR_AMD64_SEV_ES_GHCB,               .always = true  },
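
(Annotation, not part of the patch.) direct_access_msrs lists the MSRs that KVM may expose to a guest through the VMCB's MSR permission map: broadly, .always = true entries are passed through unconditionally at vCPU setup, while .always = false entries, like the new MSR_IA32_XSS one, stay intercepted until KVM explicitly opens them up for a given guest. As a rough sketch of the underlying layout, assuming the bit arrangement described in the AMD APM (two bits per MSR across three 2 KiB regions) and a made-up helper name:

/*
 * Illustrative sketch only (not from the patch): per the AMD APM, the MSR
 * permission map uses two bits per MSR (lower bit = read intercept, upper
 * bit = write intercept) across three 2 KiB regions.  A clear bit lets the
 * guest access the MSR directly; direct_access_msrs names the MSRs whose
 * bits KVM is willing to clear.
 */
static int msrpm_bit_offset(u32 msr)
{
        if (msr <= 0x1fff)
                return msr * 2;                                 /* map bytes 0x0000 - 0x07ff */
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                return 0x800 * 8 + (msr - 0xc0000000) * 2;      /* map bytes 0x0800 - 0x0fff */
        if (msr >= 0xc0010000 && msr <= 0xc0011fff)
                return 0x1000 * 8 + (msr - 0xc0010000) * 2;     /* map bytes 0x1000 - 0x17ff */
        return -1;                                              /* MSR not covered by the map */
}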
@@ -1855,15 +1856,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
-       if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+       if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
-                       svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+                       if (!vcpu->arch.guest_state_protected)
+                               svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }
 
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
-                       svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+                       if (!vcpu->arch.guest_state_protected)
+                               svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
 #endif
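
(Annotation, not part of the patch.) The hunk above moves the guest_state_protected check inward: vcpu->arch.efer now tracks EFER.LMA even for guests whose register state is protected, and only the write to the encrypted VMCB save area is skipped. The rule being tracked is the architectural long-mode activation rule: LMA is set when LME is enabled and paging is turned on, and cleared when paging is turned off. A minimal restatement of that rule, using a hypothetical helper name:

/*
 * Illustrative only (not part of the patch): the long-mode activation rule
 * the hunk implements, restated for a single CR0 write.
 */
static u64 efer_after_cr0_write(u64 efer, unsigned long old_cr0,
                                unsigned long new_cr0)
{
        bool enabling_paging  = !(old_cr0 & X86_CR0_PG) && (new_cr0 & X86_CR0_PG);
        bool disabling_paging =  (old_cr0 & X86_CR0_PG) && !(new_cr0 & X86_CR0_PG);

        if (efer & EFER_LME) {
                if (enabling_paging)
                        efer |= EFER_LMA;
                if (disabling_paging)
                        efer &= ~EFER_LMA;
        }
        return efer;
}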
@@ -3560,8 +3563,15 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
        if (svm->nmi_l1_to_l2)
                return;
 
-       svm->nmi_masked = true;
-       svm_set_iret_intercept(svm);
+       /*
+        * No need to manually track NMI masking when vNMI is enabled, hardware
+        * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
+        * case where software directly injects an NMI.
+        */
+       if (!is_vnmi_enabled(svm)) {
+               svm->nmi_masked = true;
+               svm_set_iret_intercept(svm);
+       }
        ++vcpu->stat.nmi_injections;
 }
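
(Annotation, not part of the patch.) The flip side of this hunk, sketched below under the assumption that the hardware-maintained V_NMI_BLOCKING_MASK bit in int_ctl is the source of truth whenever vNMI is enabled, so the software bookkeeping via nmi_masked and the IRET intercept is only needed on the legacy path. The helper name is made up for illustration.

/*
 * Illustrative sketch, not the patch itself: with vNMI, hardware tracks NMI
 * blocking in the VMCB, so querying the masked state reads int_ctl instead
 * of the software flag.
 */
static bool example_get_nmi_mask(struct vcpu_svm *svm)
{
        if (is_vnmi_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);

        return svm->nmi_masked;
}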
 
@@ -5076,6 +5086,13 @@ static __init void svm_set_cpu_caps(void)
                kvm_cpu_cap_set(X86_FEATURE_SVM);
                kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
 
+               /*
+                * KVM currently flushes TLBs on *every* nested SVM transition,
+                * and so for all intents and purposes KVM supports flushing by
+                * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
+                */
+               kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
+
                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);
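
(Annotation, not part of the patch.) For context on the new FLUSHBYASID bit: a hypervisor requests TLB maintenance through the VMCB's TLB_CONTROL field, so once the feature is advertised, L1 may ask for a single-ASID flush instead of a full flush. Because KVM already flushes on every nested transition, any such request is honored by construction. A minimal sketch, assuming the TLB_CONTROL_* encodings from asm/svm.h and a made-up helper name:

/*
 * Illustrative only: with FLUSHBYASID advertised, an L1 hypervisor can ask
 * for a flush of a single ASID rather than all ASIDs.  KVM's
 * flush-on-every-nested-transition behavior (see the comment above) makes
 * either request trivially safe to honor.
 */
static void example_request_asid_flush(struct vmcb *vmcb)
{
        vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; /* vs. TLB_CONTROL_FLUSH_ALL_ASID */
}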