KVM: nSVM: Propagate SVM_EXIT_CR0_SEL_WRITE correctly for LMSW emulation
author     Yosry Ahmed <yosry.ahmed@linux.dev>
           Fri, 24 Oct 2025 19:29:17 +0000 (19:29 +0000)
committer  Sean Christopherson <seanjc@google.com>
           Wed, 5 Nov 2025 21:32:36 +0000 (13:32 -0800)
When emulating L2 instructions, svm_check_intercept() checks whether a
write to CR0 should trigger a synthesized #VMEXIT with
SVM_EXIT_CR0_SEL_WRITE. Currently, SVM_EXIT_CR0_SEL_WRITE is only used
if a bit other than CR0.MP or CR0.TS changes, and that check is applied
to LMSW as well as MOV-to-CR0. However, according to the APM
(24593—Rev. 3.42—March 2024, Table 15-7):

  The LMSW instruction treats the selective CR0-write
  intercept as a non-selective intercept (i.e., it intercepts
  regardless of the value being written).

Skip the changed-bits check for x86_intercept_lmsw and always synthesize
SVM_EXIT_CR0_SEL_WRITE.

Fixes: cfec82cb7d31 ("KVM: SVM: Add intercept check for emulated cr accesses")
Cc: stable@vger.kernel.org
Reported-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251024192918.3191141-3-yosry.ahmed@linux.dev
Signed-off-by: Sean Christopherson <seanjc@google.com>
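
As a reading aid for the diff below, here is a minimal, self-contained
sketch of the post-patch decision; the helper name and mask macro are
hypothetical stand-ins for the kernel's svm_check_intercept() logic and
SVM_CR0_SELECTIVE_MASK, not actual kernel code:

#include <stdbool.h>

/* Stand-in for SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP). */
#define CR0_SELECTIVE_MASK	((1UL << 3) | (1UL << 1))

/*
 * Should an emulated CR0 write be reported to L1 as SVM_EXIT_CR0_SEL_WRITE,
 * assuming L1 has enabled INTERCEPT_SELECTIVE_CR0?
 */
static bool cr0_write_is_selective_exit(bool is_lmsw, unsigned long old_cr0,
					unsigned long new_val)
{
	/* LMSW treats the selective intercept as non-selective (APM Table 15-7). */
	if (is_lmsw)
		return true;

	/* MOV-to-CR0: only if a bit outside CR0.MP/CR0.TS actually changes. */
	return (old_cr0 & ~CR0_SELECTIVE_MASK) != (new_val & ~CR0_SELECTIVE_MASK);
}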
arch/x86/kvm/svm/svm.c

index f14709a511aa4ed9d2a1f8ae5170e9389791face..bd8df212a59d38c57bbe5f817f024964590fd9c5 100644
@@ -4546,20 +4546,20 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                                        INTERCEPT_SELECTIVE_CR0)))
                        break;
 
-               cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
-               val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
-
+               /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */
                if (info->intercept == x86_intercept_lmsw) {
-                       cr0 &= 0xfUL;
-                       val &= 0xfUL;
-                       /* lmsw can't clear PE - catch this here */
-                       if (cr0 & X86_CR0_PE)
-                               val |= X86_CR0_PE;
+                       icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+                       break;
                }
 
+               /*
+                * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit
+                * other than SVM_CR0_SELECTIVE_MASK is changed.
+                */
+               cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+               val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
                if (cr0 ^ val)
                        icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-
                break;
        }
        case SVM_EXIT_READ_DR0:
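
For context, a hypothetical L2 guest snippet (not part of the patch) that
exercises the fixed path when KVM ends up emulating the instruction and
reaching svm_check_intercept(): it flips only CR0.TS via LMSW, so the old
masked comparison saw no change and the LMSW was not propagated to L1 as
SVM_EXIT_CR0_SEL_WRITE; with the fix, L1 sees that #VMEXIT whenever it has
set INTERCEPT_SELECTIVE_CR0. The helper names are illustrative only.

static inline unsigned short read_msw(void)
{
	unsigned short msw;

	/* SMSW reads the low 16 bits of CR0 (the machine status word). */
	asm volatile("smsw %0" : "=r"(msw));
	return msw;
}

static inline void write_msw(unsigned short msw)
{
	/* LMSW writes CR0.PE/MP/EM/TS; it cannot clear CR0.PE. */
	asm volatile("lmsw %0" : : "r"(msw) : "memory");
}

static void l2_toggle_ts_via_lmsw(void)
{
	/* Only CR0.TS (bit 3) changes, i.e. only a SVM_CR0_SELECTIVE_MASK bit. */
	write_msw(read_msw() ^ (1u << 3));
}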