--- /dev/null
+From c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Mon, 19 Jul 2021 16:05:00 +0300
+Subject: KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc upstream.
+
+If L1 disables the VMLOAD/VMSAVE intercepts and doesn't enable
+Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
+then VMLOAD/VMSAVE must operate on L1's physical memory, which is only
+possible if L0 intercepts these instructions.
+
+Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
+and thus read/write portions of the host physical memory.
+
+Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
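+
+A standalone model of the merge that recalc_intercepts() performs may
+help: the sketch below unions L0's and L1's intercept words and then
+ORs VMLOAD/VMSAVE in unconditionally, as the hunk below does.  The
+word index and bit positions are made up for the sketch and do not
+match the kernel's vmcb layout.
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define MAX_INTERCEPT	5
+	#define WORD_SVM	4		/* illustrative word index */
+	#define BIT_VMLOAD	(1u << 2)	/* illustrative positions  */
+	#define BIT_VMSAVE	(1u << 3)
+
+	int main(void)
+	{
+		uint32_t h[MAX_INTERCEPT] = { 0 }; /* L0 (host) intercepts */
+		uint32_t g[MAX_INTERCEPT] = { 0 }; /* L1 cleared both bits */
+		uint32_t c[MAX_INTERCEPT];
+		int i;
+
+		/* merge guest and host intercepts, as before the fix */
+		for (i = 0; i < MAX_INTERCEPT; i++)
+			c[i] = h[i] | g[i];
+
+		/* the fix: L0 intercepts VMLOAD/VMSAVE while nested */
+		c[WORD_SVM] |= BIT_VMLOAD | BIT_VMSAVE;
+
+		/* prints 0xc: both bits set although L1 cleared them */
+		printf("effective word %d: %#x\n",
+		       WORD_SVM, (unsigned)c[WORD_SVM]);
+		return 0;
+	}
+
+Without the final OR, zeros in both h[] and g[] leave the instructions
+unintercepted, and L2 can run VMLOAD/VMSAVE against host physical
+memory.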
+ arch/x86/kvm/svm/nested.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -147,6 +147,9 @@ void recalc_intercepts(struct vcpu_svm *
+
+ for (i = 0; i < MAX_INTERCEPT; i++)
+ c->intercepts[i] |= g->intercepts[i];
++
++ vmcb_set_intercept(c, INTERCEPT_VMLOAD);
++ vmcb_set_intercept(c, INTERCEPT_VMSAVE);
+ }
+
+ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
--- /dev/null
+From 0f923e07124df069ba68d8bb12324398f4b6b709 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 15 Jul 2021 01:56:24 +0300
+Subject: KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 0f923e07124df069ba68d8bb12324398f4b6b709 upstream.
+
+* Invert the mask of bits that we pick from L2 in
+  nested_vmcb02_prepare_control (nested_prepare_vmcb_control in this
+  tree), so that only known-safe bits are taken from vmcb12
+
+* Invert and explicitly use the bitmask of VIRQ-related bits in
+  svm_clear_vintr
+
+This fixes a security issue where a malicious L1 could run L2 with
+AVIC enabled, letting L2 exploit the uninitialized but enabled AVIC
+to read/write host physical memory at certain offsets.
+
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
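+
+A userspace sketch of the int_ctl merge shows the deny-list versus
+allow-list difference.  The V_* values below mirror asm/svm.h
+(AVIC_ENABLE_MASK is bit 31 in the same header); the hostile input is
+made up for the example.
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define V_TPR_MASK		0x0fu
+	#define V_IRQ_MASK		(1u << 8)
+	#define V_GIF_MASK		(1u << 9)
+	#define V_INTR_PRIO_MASK	(0x0fu << 16)
+	#define V_IGN_TPR_MASK		(1u << 20)
+	#define V_INTR_MASKING_MASK	(1u << 24)
+	#define V_GIF_ENABLE_MASK	(1u << 25)
+	#define AVIC_ENABLE_MASK	(1u << 31)
+
+	#define V_IRQ_INJECTION_BITS_MASK \
+		(V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
+	int main(void)
+	{
+		/* a malicious L1 sets AVIC_ENABLE in its vmcb12 */
+		uint32_t vmcb12 = AVIC_ENABLE_MASK | V_IRQ_MASK;
+		uint32_t vmcb01 = V_INTR_MASKING_MASK;
+
+		const uint32_t vmcb01_bits = V_INTR_MASKING_MASK |
+					     V_GIF_MASK | V_GIF_ENABLE_MASK;
+		const uint32_t vmcb12_bits = V_TPR_MASK |
+					     V_IRQ_INJECTION_BITS_MASK;
+
+		/* old deny-list: AVIC_ENABLE slips through ~vmcb01_bits */
+		uint32_t old = (vmcb12 & ~vmcb01_bits) |
+			       (vmcb01 & vmcb01_bits);
+
+		/* new allow-list: only known bits are taken from L2 */
+		uint32_t new = (vmcb12 & vmcb12_bits) |
+			       (vmcb01 & vmcb01_bits);
+
+		/* prints old=0x81000100 new=0x1000100 avic-leaked=1 */
+		printf("old=%#x new=%#x avic-leaked=%d\n",
+		       (unsigned)old, (unsigned)new,
+		       !!(old & AVIC_ENABLE_MASK));
+		return 0;
+	}
+
+With the deny-list, every vmcb12 bit outside the three L1-owned bits
+was copied into the running VMCB, including AVIC_ENABLE; the allow-list
+copies only the TPR and VIRQ-injection bits that nSVM actually emulates.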
+ arch/x86/include/asm/svm.h | 2 ++
+ arch/x86/kvm/svm/nested.c | 11 +++++++----
+ arch/x86/kvm/svm/svm.c | 8 ++++----
+ 3 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -166,6 +166,8 @@ struct __attribute__ ((__packed__)) vmcb
+ #define V_IGN_TPR_SHIFT 20
+ #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
++#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
++
+ #define V_INTR_MASKING_SHIFT 24
+ #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -429,7 +429,10 @@ static void nested_prepare_vmcb_save(str
+
+ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
+ {
+- const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
++ const u32 int_ctl_vmcb01_bits =
++ V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
++
++ const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
+ if (nested_npt_enabled(svm))
+ nested_svm_init_mmu_context(&svm->vcpu);
+@@ -437,9 +440,9 @@ static void nested_prepare_vmcb_control(
+ svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
+ svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+
+- svm->vmcb->control.int_ctl =
+- (svm->nested.ctl.int_ctl & ~mask) |
+- (svm->nested.hsave->control.int_ctl & mask);
++ svm->vmcb->control.int_ctl =
++ (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
++ (svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);
+
+ svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
+ svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1486,17 +1486,17 @@ static void svm_set_vintr(struct vcpu_sv
+
+ static void svm_clear_vintr(struct vcpu_svm *svm)
+ {
+- const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
+ svm_clr_intercept(svm, INTERCEPT_VINTR);
+
+ /* Drop int_ctl fields related to VINTR injection. */
+- svm->vmcb->control.int_ctl &= mask;
++ svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
+ if (is_guest_mode(&svm->vcpu)) {
+- svm->nested.hsave->control.int_ctl &= mask;
++ svm->nested.hsave->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
+
+ WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
+ (svm->nested.ctl.int_ctl & V_TPR_MASK));
+- svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
++ svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
++ V_IRQ_INJECTION_BITS_MASK;
+ }
+
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTR);