5.13-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 16 Aug 2021 17:06:47 +0000 (19:06 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 16 Aug 2021 17:06:47 +0000 (19:06 +0200)
added patches:
kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch
kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch

queue-5.13/kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch [new file with mode: 0644]
queue-5.13/kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch [new file with mode: 0644]
queue-5.13/series

diff --git a/queue-5.13/kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch b/queue-5.13/kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch
new file mode 100644
index 0000000..9b866c5
--- /dev/null
+++ b/queue-5.13/kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch
@@ -0,0 +1,39 @@
+From c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Mon, 19 Jul 2021 16:05:00 +0300
+Subject: KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc upstream.
+
+If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable
+Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
+then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only
+possible by making L0 intercept these instructions.
+
+Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
+and thus read/write portions of the host physical memory.
+
+Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -149,6 +149,9 @@ void recalc_intercepts(struct vcpu_svm *
+ 
+       for (i = 0; i < MAX_INTERCEPT; i++)
+               c->intercepts[i] |= g->intercepts[i];
++
++      vmcb_set_intercept(c, INTERCEPT_VMLOAD);
++      vmcb_set_intercept(c, INTERCEPT_VMSAVE);
+ }
+ 
+ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
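To see why the two added vmcb_set_intercept() calls close the hole, here is a minimal stand-alone C model of the merge that recalc_intercepts() performs. It is only a sketch under simplifying assumptions: the kernel keeps the intercepts in a u32 array indexed up to MAX_INTERCEPT, while this model collapses them into a single word, and the two bit positions are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real intercepts live in a
 * u32 array, not a single word. */
#define INTERCEPT_VMLOAD (1u << 0)
#define INTERCEPT_VMSAVE (1u << 1)

/* Effective intercepts while L2 runs: the union of what L0 needs
 * (l0_bits, from vmcb01) and what L1 asked for (l1_bits, from vmcb12). */
static uint32_t merge_intercepts(uint32_t l0_bits, uint32_t l1_bits)
{
	uint32_t c = l0_bits | l1_bits;

	/* The fix: Virtual VMLOAD/VMSAVE is not offered to the nested
	 * hypervisor, so an unintercepted VMLOAD/VMSAVE in L2 would
	 * operate on L1's *physical* memory, i.e. host memory.  L0 must
	 * therefore trap these instructions no matter what L1 chose. */
	c |= INTERCEPT_VMLOAD;
	c |= INTERCEPT_VMSAVE;
	return c;
}

int main(void)
{
	/* A malicious L1 clears both intercepts for its L2 guest. */
	uint32_t effective = merge_intercepts(0, 0);

	printf("VMLOAD intercepted: %s\n",
	       (effective & INTERCEPT_VMLOAD) ? "yes" : "no");
	printf("VMSAVE intercepted: %s\n",
	       (effective & INTERCEPT_VMSAVE) ? "yes" : "no");
	return 0;
}

Before the fix the merge was a plain OR, so clearing the bits in vmcb12 left them clear in the merged control area; the unconditional set afterwards is what guarantees L0 regains control and can emulate the instructions against L1's view of memory.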
diff --git a/queue-5.13/kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch b/queue-5.13/kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch
new file mode 100644
index 0000000..e49c1c9
--- /dev/null
+++ b/queue-5.13/kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch
@@ -0,0 +1,89 @@
+From 0f923e07124df069ba68d8bb12324398f4b6b709 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 15 Jul 2021 01:56:24 +0300
+Subject: KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 0f923e07124df069ba68d8bb12324398f4b6b709 upstream.
+
+* Invert the mask of bits that we pick from L2 in
+  nested_vmcb02_prepare_control
+
+* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
+
+This fixes a security issue that allowed a malicious L1 to run L2 with
+AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
+AVIC to read/write the host physical memory at some offsets.
+
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/svm.h |    2 ++
+ arch/x86/kvm/svm/nested.c  |    9 ++++++---
+ arch/x86/kvm/svm/svm.c     |    9 +++++----
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -178,6 +178,8 @@ struct __attribute__ ((__packed__)) vmcb
+ 
+ #define V_IGN_TPR_SHIFT 20
+ #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+ 
++#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
++
+ #define V_INTR_MASKING_SHIFT 24
+ #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -480,7 +480,10 @@ static void nested_vmcb02_prepare_save(s
+ 
+ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
+ {
+-      const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
++      const u32 int_ctl_vmcb01_bits =
++              V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
++
++      const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+ 
+       /*
+        * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
+@@ -511,8 +514,8 @@ static void nested_vmcb02_prepare_contro
+               svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+ 
+       svm->vmcb->control.int_ctl             =
+-              (svm->nested.ctl.int_ctl & ~mask) |
+-              (svm->vmcb01.ptr->control.int_ctl & mask);
++              (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
++              (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
+ 
+       svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
+       svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1552,17 +1552,18 @@ static void svm_set_vintr(struct vcpu_sv
+ 
+ static void svm_clear_vintr(struct vcpu_svm *svm)
+ {
+-      const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
+       svm_clr_intercept(svm, INTERCEPT_VINTR);
+ 
+       /* Drop int_ctl fields related to VINTR injection.  */
+-      svm->vmcb->control.int_ctl &= mask;
++      svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
+       if (is_guest_mode(&svm->vcpu)) {
+-              svm->vmcb01.ptr->control.int_ctl &= mask;
++              svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
+ 
+               WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
+                       (svm->nested.ctl.int_ctl & V_TPR_MASK));
+-              svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
++
++              svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
++                      V_IRQ_INJECTION_BITS_MASK;
+       }
+ 
+       vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
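The essence of this patch is switching the int_ctl merge from a deny-list (~mask) to an allow-list. The following stand-alone C sketch models the new merge; the bit values are copied from arch/x86/include/asm/svm.h, but merge_int_ctl() and the AVIC demonstration in main() are illustrative assumptions rather than kernel code.

#include <stdint.h>
#include <stdio.h>

/* Values as defined in arch/x86/include/asm/svm.h. */
#define V_TPR_MASK             0x0fU
#define V_IRQ_MASK             (1u << 8)
#define V_GIF_MASK             (1u << 9)
#define V_INTR_PRIO_MASK       (0x0fU << 16)
#define V_IGN_TPR_MASK         (1u << 20)
#define V_INTR_MASKING_MASK    (1u << 24)
#define V_GIF_ENABLE_MASK      (1u << 25)
#define AVIC_ENABLE_MASK       (1u << 31)

#define V_IRQ_INJECTION_BITS_MASK \
	(V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

/* Allow-list merge for vmcb02's int_ctl: every bit must be explicitly
 * granted, either from L0's vmcb01 or from L1's vmcb12. */
static uint32_t merge_int_ctl(uint32_t vmcb01, uint32_t vmcb12)
{
	const uint32_t int_ctl_vmcb01_bits =
		V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
	const uint32_t int_ctl_vmcb12_bits =
		V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	return (vmcb12 & int_ctl_vmcb12_bits) | (vmcb01 & int_ctl_vmcb01_bits);
}

int main(void)
{
	/* A malicious L1 sets AVIC_ENABLE in the vmcb12 it hands to VMRUN. */
	uint32_t merged = merge_int_ctl(V_INTR_MASKING_MASK,
					V_TPR_MASK | AVIC_ENABLE_MASK);

	/* With the allow-list the AVIC bit cannot leak into vmcb02. */
	printf("AVIC leaked into vmcb02: %s\n",
	       (merged & AVIC_ENABLE_MASK) ? "yes" : "no");
	return 0;
}

Under the old deny-list, any bit outside the small mask, AVIC_ENABLE_MASK included, was copied from vmcb12 straight into the running control area, handing L2 an AVIC whose backing pages L0 never initialized. The same inversion is applied in svm_clear_vintr() above, which now names the VIRQ-injection bits it drops and restores instead of relying on everything-but-mask.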
diff --git a/queue-5.13/series b/queue-5.13/series
index 9a1098485c5213f9c0c57ca749a28a0f6e14393d..b9756ad4b8e61c7ade6dee753dacf21a83664669 100644
--- a/queue-5.13/series
+++ b/queue-5.13/series
@@ -149,3 +149,5 @@ ceph-add-some-lockdep-assertions-around-snaprealm-handling.patch
 ceph-clean-up-locking-annotation-for-ceph_get_snap_realm-and-__lookup_snap_realm.patch
 ceph-take-snap_empty_lock-atomically-with-snaprealm-refcount-change.patch
 kasan-slub-reset-tag-when-printing-address.patch
+kvm-nsvm-avoid-picking-up-unsupported-bits-from-l2-in-int_ctl-cve-2021-3653.patch
+kvm-nsvm-always-intercept-vmload-vmsave-when-nested-cve-2021-3656.patch