KVM: nSVM: Avoid incorrect injection of SVM_EXIT_CR0_SEL_WRITE
author     Yosry Ahmed <yosry.ahmed@linux.dev>
           Fri, 24 Oct 2025 19:29:18 +0000 (19:29 +0000)
committer  Sean Christopherson <seanjc@google.com>
           Wed, 5 Nov 2025 21:33:46 +0000 (13:33 -0800)
When emulating L2 instructions, svm_check_intercept() checks whether a
write to CR0 should trigger a synthesized #VMEXIT with
SVM_EXIT_CR0_SEL_WRITE. However, it does not check whether L1 enabled
the intercept for SVM_EXIT_WRITE_CR0, which has higher priority
according to the APM (24593, Rev. 3.42, March 2024, Table 15-7):

  When both selective and non-selective CR0-write intercepts are active at
  the same time, the non-selective intercept takes priority. With respect
  to exceptions, the priority of this intercept is the same as the generic
  CR0-write intercept.

Make sure L1 does NOT intercept SVM_EXIT_WRITE_CR0 before checking if
SVM_EXIT_CR0_SEL_WRITE needs to be injected.
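
To illustrate the intended ordering, here is a minimal standalone sketch;
the enum, helper name, and boolean parameters are made up for illustration
and are not the kernel's (the real change is in the diff below):

  #include <stdbool.h>

  enum cr0_write_exit { CR0_NO_EXIT, CR0_WRITE_EXIT, CR0_SEL_WRITE_EXIT };

  /*
   * Pick which CR0-write #VMEXIT L1 should see for an emulated L2 write.
   * The non-selective (generic) CR0-write intercept is checked first
   * because it has priority per APM Table 15-7; the selective intercept
   * fires only when a bit other than CR0.TS or CR0.MP changes
   * (simplified; LMSW is a special case, see the diff below).
   */
  enum cr0_write_exit pick_cr0_write_exit(bool l1_intercepts_cr0_write,
                                          bool l1_intercepts_sel_cr0,
                                          bool changes_non_ts_mp_bits)
  {
          if (l1_intercepts_cr0_write)
                  return CR0_WRITE_EXIT;

          if (l1_intercepts_sel_cr0 && changes_non_ts_mp_bits)
                  return CR0_SEL_WRITE_EXIT;

          return CR0_NO_EXIT;
  }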

Opportunistically tweak the "not CR0" logic to bail early explicitly, so
that it's more obvious that only CR0 has a selective intercept, and that
modifying icpt_info.exit_code is functionally necessary for the subsequent
call to nested_svm_exit_handled() to check the correct exit code.
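
As a concrete, hypothetical example (not from the patch) of why the
exit_code adjustment matters for the non-CR0 case:

  An emulated "mov %rax, %cr4" reaches this code with info->modrm_reg == 4,
  so icpt_info.exit_code becomes SVM_EXIT_WRITE_CR0 + 4 == SVM_EXIT_WRITE_CR4
  (0x10 + 4 == 0x14), and the common handling via nested_svm_exit_handled()
  then tests L1's CR4-write intercept rather than its CR0-write intercept.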

Fixes: cfec82cb7d31 ("KVM: SVM: Add intercept check for emulated cr accesses")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251024192918.3191141-4-yosry.ahmed@linux.dev
[sean: isolate non-CR0 write logic, tweak comments accordingly]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/svm.c

index bd8df212a59d38c57bbe5f817f024964590fd9c5..1ae7b3c5a7c554573f19c5ca939ce13a2859d1db 100644
@@ -4535,15 +4535,29 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
        case SVM_EXIT_WRITE_CR0: {
                unsigned long cr0, val;
 
-               if (info->intercept == x86_intercept_cr_write)
+               /*
+                * Adjust the exit code accordingly if a CR other than CR0 is
+                * being written, and skip straight to the common handling as
+                * only CR0 has an additional selective intercept.
+                */
+               if (info->intercept == x86_intercept_cr_write && info->modrm_reg) {
                        icpt_info.exit_code += info->modrm_reg;
+                       break;
+               }
 
-               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
-                   info->intercept == x86_intercept_clts)
+               /*
+                * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a
+                * selective CR0 intercept is triggered (the common logic will
+                * treat the selective intercept as being enabled).  Note, the
+                * unconditional intercept has higher priority, i.e. this is
+                * only relevant if *only* the selective intercept is enabled.
+                */
+               if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) ||
+                   !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))
                        break;
 
-               if (!(vmcb12_is_intercept(&svm->nested.ctl,
-                                       INTERCEPT_SELECTIVE_CR0)))
+               /* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */
+               if (info->intercept == x86_intercept_clts)
                        break;
 
                /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */