--- /dev/null
+From foo@baz Mon Aug 16 09:25:03 PM CEST 2021
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 16 Aug 2021 16:02:36 +0200
+Subject: KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
+To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
+Cc: stable@vger.kernel.org, Maxim Levitsky <mlevitsk@redhat.com>
+Message-ID: <20210816140240.11399-8-pbonzini@redhat.com>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ upstream commit c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc ]
+
+If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable
+Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor),
+then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only
+possible by making L0 intercept these instructions.
+
+Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted,
+and thus read/write portions of the host physical memory.
+
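+As an illustration only (field names follow the hunk below; "vmcb12" is
+shorthand for the vmcb that L1 hands to VMRUN, called nested_vmcb in
+this tree), the scenario being closed is:
+
+    /* L1 may legitimately clear both intercept bits in its vmcb: */
+    vmcb12->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
+    vmcb12->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
+
+    /*
+     * Before the fix, the merged mask used to run L2 was just
+     * h->intercept | g->intercept, so both bits stayed clear and
+     * L2's VMLOAD/VMSAVE executed directly in hardware against
+     * L1-physical (L0-managed) memory.  ORing the bits back in
+     * unconditionally, as the hunk below does, forces a VMEXIT
+     * to L0 instead.
+     */
+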
+Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature")
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -389,6 +389,9 @@ static void recalc_intercepts(struct vcp
+ c->intercept_dr = h->intercept_dr | g->intercept_dr;
+ c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+ c->intercept = h->intercept | g->intercept;
++
++ c->intercept |= (1ULL << INTERCEPT_VMLOAD);
++ c->intercept |= (1ULL << INTERCEPT_VMSAVE);
+ }
+
+ static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
--- /dev/null
+From foo@baz Mon Aug 16 09:25:03 PM CEST 2021
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 16 Aug 2021 16:02:29 +0200
+Subject: KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
+To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
+Cc: stable@vger.kernel.org, Maxim Levitsky <mlevitsk@redhat.com>
+Message-ID: <20210816140240.11399-1-pbonzini@redhat.com>
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ upstream commit 0f923e07124df069ba68d8bb12324398f4b6b709 ]
+
+* Invert the mask of bits that we pick from L2 in
+ nested_vmcb02_prepare_control
+
+* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
+
+This fixes a security issue that allowed a malicious L1 to run L2 with
+AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
+AVIC to read/write the host physical memory at some offsets.
+
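+In this backport both upstream changes are folded into the single
+int_ctl computation in nested_svm_vmrun(), shown in the hunk below.
+As a sketch (int_ctl stands for svm->vmcb->control.int_ctl, and
+nested_int_ctl for nested_vmcb->control.int_ctl), the recomputation
+is a keep/pick split:
+
+    /* Keep only the host-owned bits of the current int_ctl ... */
+    int_ctl &= V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+
+    /* ... and pick up from L1's vmcb only bits it may control. */
+    int_ctl |= nested_int_ctl & (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK);
+
+    /*
+     * Any other bit, most importantly the AVIC enable bit (bit 31),
+     * is now dropped instead of being copied from L2's view.
+     */
+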
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/svm.h | 2 ++
+ arch/x86/kvm/svm.c | 8 +++++++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -117,6 +117,8 @@ struct __attribute__ ((__packed__)) vmcb
+ #define V_IGN_TPR_SHIFT 20
+ #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
++#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
++
+ #define V_INTR_MASKING_SHIFT 24
+ #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3164,7 +3164,13 @@ static bool nested_svm_vmrun(struct vcpu
+ svm->nested.intercept = nested_vmcb->control.intercept;
+
+ svm_flush_tlb(&svm->vcpu, true);
+- svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
++
++ svm->vmcb->control.int_ctl &=
++ V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
++
++ svm->vmcb->control.int_ctl |= nested_vmcb->control.int_ctl &
++ (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK);
++
+ if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
+ svm->vcpu.arch.hflags |= HF_VINTR_MASK;
+ else