git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 May 2026 08:21:51 +0000 (10:21 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 11 May 2026 08:21:51 +0000 (10:21 +0200)
added patches:
kvm-svm-check-validity-of-vmcb-controls-when-returning-from-smm.patch

queue-6.12/kvm-svm-check-validity-of-vmcb-controls-when-returning-from-smm.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/kvm-svm-check-validity-of-vmcb-controls-when-returning-from-smm.patch b/queue-6.12/kvm-svm-check-validity-of-vmcb-controls-when-returning-from-smm.patch
new file mode 100644 (file)
index 0000000..34b20df
--- /dev/null
@@ -0,0 +1,76 @@
+From be5fa8737d42c5ba16d2ea72c23681f8abbb07e8 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 9 Mar 2026 12:40:52 +0100
+Subject: KVM: SVM: check validity of VMCB controls when returning from SMM
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit be5fa8737d42c5ba16d2ea72c23681f8abbb07e8 upstream.
+
+The VMCB12 is stored in guest memory and can be mangled while in SMM; it
+is then reloaded by svm_leave_smm(), but it is not checked again for
+validity.
+
+Move the cached vmcb12 control and save consistency checks out of
+svm_set_nested_state() and into a helper, and reuse it in
+svm_leave_smm().
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c |   12 ++++++++++--
+ arch/x86/kvm/svm/svm.c    |    4 ++++
+ arch/x86/kvm/svm/svm.h    |    1 +
+ 3 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -416,6 +416,15 @@ void nested_copy_vmcb_save_to_cache(stru
+       __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
+ }
++int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu)
++{
++      if (!nested_vmcb_check_save(vcpu) ||
++          !nested_vmcb_check_controls(vcpu))
++              return -EINVAL;
++
++      return 0;
++}
++
+ /*
+  * Synchronize fields that are written by the processor, so that
+  * they can be copied back into the vmcb12.
+@@ -883,8 +892,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vc
+       nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+       nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
+-      if (!nested_vmcb_check_save(vcpu) ||
+-          !nested_vmcb_check_controls(vcpu)) {
++      if (nested_svm_check_cached_vmcb12(vcpu) < 0) {
+               vmcb12->control.exit_code    = SVM_EXIT_ERR;
+               vmcb12->control.exit_code_hi = -1u;
+               vmcb12->control.exit_info_1  = 0;
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4934,6 +4934,10 @@ static int svm_leave_smm(struct kvm_vcpu
+       vmcb12 = map.hva;
+       nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+       nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
++
++      if (nested_svm_check_cached_vmcb12(vcpu) < 0)
++              goto unmap_save;
++
+       ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
+       if (ret)
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -672,6 +672,7 @@ static inline int nested_svm_simple_vmex
+ int nested_svm_exit_handled(struct vcpu_svm *svm);
+ int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
++int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu);
+ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
+                              bool has_error_code, u32 error_code);
+ int nested_svm_exit_special(struct vcpu_svm *svm);
index b892e62d23778338dec102cd6eb5228c49e6de8f..ad80278e2c5b94adbaeb0126724d1c5b3bf11972 100644 (file)
@@ -20,3 +20,4 @@ iommu-amd-serialize-sequence-allocation-under-concur.patch
 flow_dissector-do-not-dissect-pppoe-pfc-frames.patch
 net-txgbe-fix-rtnl-assertion-warning-when-remove-mod.patch
 net-af_key-zero-aligned-sockaddr-tail-in-pf_key-expo.patch
+kvm-svm-check-validity-of-vmcb-controls-when-returning-from-smm.patch