git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: nSVM: Restrict mapping vmcb12 on nested VMRUN
author: Yosry Ahmed <yosry@kernel.org>
Tue, 3 Mar 2026 00:34:15 +0000 (00:34 +0000)
committer: Sean Christopherson <seanjc@google.com>
Thu, 5 Mar 2026 00:09:05 +0000 (16:09 -0800)
All accesses to the vmcb12 in the guest memory on nested VMRUN are
limited to nested_svm_vmrun() copying vmcb12 fields and writing them on
failed consistency checks. However, vmcb12 remains mapped throughout
nested_svm_vmrun().  Mapping and unmapping around usages is possible,
but it becomes easy-ish to introduce bugs where 'vmcb12' is used after
being unmapped.

Move reading the vmcb12, copying it to the cache, and the consistency
checks from nested_svm_vmrun() into a new helper,
nested_svm_copy_vmcb12_to_cache(), to limit the scope of the mapping.

Signed-off-by: Yosry Ahmed <yosry@kernel.org>
Link: https://patch.msgid.link/20260303003421.2185681-22-yosry@kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/nested.c

index 0c3f2db6ac0bc23f83eddc2bf55530e9a0e3f32d..c61b4923963e4a1f0a013297fa51c68fa683fd3f 100644 (file)
@@ -1041,12 +1041,39 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, bool from_vmrun)
        return 0;
 }
 
-int nested_svm_vmrun(struct kvm_vcpu *vcpu)
+static int nested_svm_copy_vmcb12_to_cache(struct kvm_vcpu *vcpu, u64 vmcb12_gpa)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       int ret;
-       struct vmcb *vmcb12;
        struct kvm_host_map map;
+       struct vmcb *vmcb12;
+       int r = 0;
+
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
+               return -EFAULT;
+
+       vmcb12 = map.hva;
+       nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+       nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
+
+       if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
+           !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
+               vmcb12->control.exit_code = SVM_EXIT_ERR;
+               vmcb12->control.exit_info_1 = 0;
+               vmcb12->control.exit_info_2 = 0;
+               vmcb12->control.event_inj = 0;
+               vmcb12->control.event_inj_err = 0;
+               svm_set_gif(svm, false);
+               r = -EINVAL;
+       }
+
+       kvm_vcpu_unmap(vcpu, &map);
+       return r;
+}
+
+int nested_svm_vmrun(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int ret, err;
        u64 vmcb12_gpa;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
 
@@ -1067,32 +1094,23 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
                return ret;
        }
 
+       if (WARN_ON_ONCE(!svm->nested.initialized))
+               return -EINVAL;
+
        vmcb12_gpa = svm->vmcb->save.rax;
-       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) {
+       err = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
+       if (err == -EFAULT) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
 
+       /*
+        * Advance RIP if #GP or #UD are not injected, but otherwise stop if
+        * copying and checking vmcb12 failed.
+        */
        ret = kvm_skip_emulated_instruction(vcpu);
-
-       vmcb12 = map.hva;
-
-       if (WARN_ON_ONCE(!svm->nested.initialized))
-               return -EINVAL;
-
-       nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
-       nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
-
-       if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
-           !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
-               vmcb12->control.exit_code    = SVM_EXIT_ERR;
-               vmcb12->control.exit_info_1  = 0;
-               vmcb12->control.exit_info_2  = 0;
-               vmcb12->control.event_inj = 0;
-               vmcb12->control.event_inj_err = 0;
-               svm_set_gif(svm, false);
-               goto out;
-       }
+       if (err)
+               return ret;
 
        /*
         * Since vmcb01 is not in use, we can use it to store some of the L1
@@ -1109,23 +1127,18 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
        svm->nested.nested_run_pending = 1;
 
-       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true))
-               goto out_exit_err;
-
-       if (nested_svm_merge_msrpm(vcpu))
-               goto out;
-
-out_exit_err:
-       svm->nested.nested_run_pending = 0;
-
-       svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
-       svm->vmcb->control.exit_info_1  = 0;
-       svm->vmcb->control.exit_info_2  = 0;
+       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true) ||
+           !nested_svm_merge_msrpm(vcpu)) {
+               svm->nested.nested_run_pending = 0;
+               svm->nmi_l1_to_l2 = false;
+               svm->soft_int_injected = false;
 
-       nested_svm_vmexit(svm);
+               svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
+               svm->vmcb->control.exit_info_1  = 0;
+               svm->vmcb->control.exit_info_2  = 0;
 
-out:
-       kvm_vcpu_unmap(vcpu, &map);
+               nested_svm_vmexit(svm);
+       }
 
        return ret;
 }