KVM: nSVM: Propagate SVM_EXIT_CR0_SEL_WRITE correctly for LMSW emulation
author		Yosry Ahmed <yosry.ahmed@linux.dev>
		Fri, 24 Oct 2025 19:29:17 +0000 (19:29 +0000)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Fri, 2 Jan 2026 11:57:21 +0000 (12:57 +0100)
commit 5674a76db0213f9db1e4d08e847ff649b46889c0 upstream.

When emulating L2 instructions, svm_check_intercept() checks whether a
write to CR0 should trigger a synthesized #VMEXIT with
SVM_EXIT_CR0_SEL_WRITE. For MOV-to-CR0, SVM_EXIT_CR0_SEL_WRITE is only
triggered if any bit other than CR0.MP and CR0.TS is updated. However,
according to the APM (24593—Rev. 3.42—March 2024, Table 15-7):

  The LMSW instruction treats the selective CR0-write
  intercept as a non-selective intercept (i.e., it intercepts
  regardless of the value being written).

Skip checking the changed bits for x86_intercept_lmsw and always inject
SVM_EXIT_CR0_SEL_WRITE.

Fixes: cfec82cb7d31 ("KVM: SVM: Add intercept check for emulated cr accesses")
Cc: stable@vger.kernel.org
Reported-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251024192918.3191141-3-yosry.ahmed@linux.dev
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/svm/svm.c

index 4154b220fec95da73b086f770cb644ebd1a028aa..a856f063b82502304e75897eb31412851ba60f67 100644 (file)
@@ -4553,20 +4553,20 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                if (info->intercept == x86_intercept_clts)
                        break;
 
-               cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
-               val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
-
+               /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */
                if (info->intercept == x86_intercept_lmsw) {
-                       cr0 &= 0xfUL;
-                       val &= 0xfUL;
-                       /* lmsw can't clear PE - catch this here */
-                       if (cr0 & X86_CR0_PE)
-                               val |= X86_CR0_PE;
+                       icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+                       break;
                }
 
+               /*
+                * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit
+                * other than SVM_CR0_SELECTIVE_MASK is changed.
+                */
+               cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+               val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
                if (cr0 ^ val)
                        icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-
                break;
        }
        case SVM_EXIT_READ_DR0:
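
For readability, the CR0-write branch of svm_check_intercept() after this change
reads roughly as below. This is a sketch assembled from the hunk above, not a
verbatim copy of the function: the case label, the cr0/val local declarations,
and the earlier exit-code bookkeeping in the function are assumed rather than
shown.

	case SVM_EXIT_WRITE_CR0: {
		/* cr0/val locals and earlier exit-code fixups assumed above. */
		if (info->intercept == x86_intercept_clts)
			break;

		/*
		 * Per the APM, LMSW treats the selective CR0-write intercept
		 * as non-selective: synthesize SVM_EXIT_CR0_SEL_WRITE
		 * regardless of the value being written.
		 */
		if (info->intercept == x86_intercept_lmsw) {
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
			break;
		}

		/*
		 * MOV-to-CR0 is selective: only escalate to
		 * SVM_EXIT_CR0_SEL_WRITE when a bit outside
		 * SVM_CR0_SELECTIVE_MASK (i.e. other than CR0.TS/CR0.MP)
		 * changes.
		 */
		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		break;
	}

In effect, when L1 enables the selective CR0-write intercept and KVM emulates an
LMSW executed by L2, KVM now synthesizes the SVM_EXIT_CR0_SEL_WRITE exit to L1
even if the written value leaves CR0 unchanged.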