--- /dev/null
+From a3ba26ecfb569f4aa3f867e80c02aa65f20aadad Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 9 Apr 2021 09:38:42 -0500
+Subject: KVM: SVM: Make sure GHCB is mapped before updating
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit a3ba26ecfb569f4aa3f867e80c02aa65f20aadad upstream.
+
+Access to the GHCB is mainly in the VMGEXIT path and it is known that the
+GHCB will be mapped. But there are two paths where it is possible the GHCB
+might not be mapped.
+
+The sev_vcpu_deliver_sipi_vector() routine will update the GHCB to inform
+the caller of the AP Reset Hold NAE event that a SIPI has been delivered.
+However, if a SIPI is performed without a corresponding AP Reset Hold,
+then the GHCB might not be mapped (depending on the previous VMEXIT),
+which will result in a NULL pointer dereference.
+
+The svm_complete_emulated_msr() routine will update the GHCB to inform
+the caller of a RDMSR/WRMSR operation about any errors. While it is likely
+that the GHCB will be mapped in this situation, add a safeguard
+in this path to be certain a NULL pointer dereference is not encountered.
+
+Fixes: f1c6366e3043 ("KVM: SVM: Add required changes to support intercepts under SEV-ES")
+Fixes: 647daca25d24 ("KVM: SVM: Add support for booting APs in an SEV-ES guest")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: stable@vger.kernel.org
+Message-Id: <a5d3ebb600a91170fc88599d5a575452b3e31036.1617979121.git.thomas.lendacky@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 3 +++
+ arch/x86/kvm/svm/svm.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2106,5 +2106,8 @@ void sev_vcpu_deliver_sipi_vector(struct
+ * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ * non-zero value.
+ */
++ if (!svm->ghcb)
++ return;
++
+ ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+ }
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2811,7 +2811,7 @@ static int svm_get_msr(struct kvm_vcpu *
+ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+- if (!sev_es_guest(svm->vcpu.kvm) || !err)
++ if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ return kvm_complete_insn_gp(&svm->vcpu, err);
+
+ ghcb_set_sw_exit_info_1(svm->ghcb, 1);
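(Not part of the patch above — a minimal userspace C sketch of the guard idiom
in the svm_complete_emulated_msr() hunk. The struct layout, the sev_es flag,
complete_msr() and the warn_on_once() helper are simplified stand-ins, not
kernel code; the point is that placing WARN_ON_ONCE(!svm->ghcb) last in the
condition both logs the unexpected unmapped-GHCB state and short-circuits into
the safe completion path instead of dereferencing a NULL pointer.)

#include <stdio.h>

/* Crude stand-in for the kernel macro: warn once, pass the value through. */
static int warn_on_once(int cond, const char *what)
{
    static int warned;

    if (cond && !warned) {
        warned = 1;
        fprintf(stderr, "WARNING: %s\n", what);
    }
    return cond;
}
#define WARN_ON_ONCE(cond) warn_on_once(!!(cond), #cond)

struct ghcb { unsigned long sw_exit_info_1; };
struct vcpu_svm { struct ghcb *ghcb; int sev_es; };

static void complete_msr(struct vcpu_svm *svm, int err)
{
    /*
     * If the GHCB is not mapped, WARN_ON_ONCE() evaluates to true and the
     * early return is taken, so svm->ghcb is never dereferenced below.
     */
    if (!err || !svm->sev_es || WARN_ON_ONCE(!svm->ghcb)) {
        printf("completed normally, err=%d\n", err);
        return;
    }
    svm->ghcb->sw_exit_info_1 = 1; /* only reached with a mapped GHCB */
}

int main(void)
{
    struct vcpu_svm svm = { .ghcb = NULL, .sev_es = 1 };

    complete_msr(&svm, 1); /* would crash here without the guard */
    return 0;
}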
--- /dev/null
+From a217a6593cec8b315d4c2f344bae33660b39b703 Mon Sep 17 00:00:00 2001
+From: Lai Jiangshan <laijs@linux.alibaba.com>
+Date: Tue, 4 May 2021 21:50:14 +0200
+Subject: KVM/VMX: Invoke NMI non-IST entry instead of IST entry
+
+From: Lai Jiangshan <laijs@linux.alibaba.com>
+
+commit a217a6593cec8b315d4c2f344bae33660b39b703 upstream.
+
+In VMX, the host NMI handler needs to be invoked after NMI VM-Exit.
+Before commit 1a5488ef0dcf6 ("KVM: VMX: Invoke NMI handler via indirect
+call instead of INTn"), this was done by INTn ("int $2"). But INTn
+microcode is relatively expensive, so the commit reworked NMI VM-Exit
+handling to invoke the kernel handler by function call.
+
+But this missed a detail. The NMI entry point for direct invocation is
+fetched from the IDT table and called on the kernel stack. But on 64-bit
+the NMI entry installed in the IDT expects to be invoked on the IST stack.
+It relies on the "NMI executing" variable on the IST stack to work
+correctly, which is at a fixed position in the IST stack. When the entry
+point is unexpectedly called on the kernel stack, the RSP-addressed "NMI
+executing" variable is obviously also on the kernel stack and is
+"uninitialized" and can cause the NMI entry code to run in the wrong way.
+
+Provide a non-IST entry point for VMX which shares the C-function with
+the regular NMI entry and invoke the new asm entry point instead.
+
+On 32-bit this just maps to the regular NMI entry point as 32-bit has no
+ISTs and is not affected.
+
+[ tglx: Made it independent for backporting, massaged changelog ]
+
+Fixes: 1a5488ef0dcf6 ("KVM: VMX: Invoke NMI handler via indirect call instead of INTn")
+Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Lai Jiangshan <laijs@linux.alibaba.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/87r1imi8i1.ffs@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/idtentry.h | 15 +++++++++++++++
+ arch/x86/kernel/nmi.c | 10 ++++++++++
+ arch/x86/kvm/vmx/vmx.c | 16 +++++++++-------
+ 3 files changed, 34 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, xenpv_
+ #endif
+
+ /* NMI */
++
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++/*
++ * Special NOIST entry point for VMX which invokes this on the kernel
++ * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
++ * 'executing' marker.
++ *
++ * On 32bit this just uses the regular NMI entry point because 32-bit does
++ * not have ISTs.
++ */
++DECLARE_IDTENTRY(X86_TRAP_NMI, exc_nmi_noist);
++#else
++#define asm_exc_nmi_noist asm_exc_nmi
++#endif
++
+ DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
+ #ifdef CONFIG_XEN_PV
+ DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -524,6 +524,16 @@ nmi_restart:
+ mds_user_clear_cpu_buffers();
+ }
+
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++DEFINE_IDTENTRY_RAW(exc_nmi_noist)
++{
++ exc_nmi(regs);
++}
++#endif
++#if IS_MODULE(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
++#endif
++
+ void stop_nmi(void)
+ {
+ ignore_nmis++;
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -36,6 +36,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/fpu/internal.h>
++#include <asm/idtentry.h>
+ #include <asm/io.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/kexec.h>
+@@ -6394,18 +6395,17 @@ static void vmx_apicv_post_state_restore
+
+ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
+
+-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
++static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
++ unsigned long entry)
+ {
+- unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+- gate_desc *desc = (gate_desc *)host_idt_base + vector;
+-
+ kvm_before_interrupt(vcpu);
+- vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
++ vmx_do_interrupt_nmi_irqoff(entry);
+ kvm_after_interrupt(vcpu);
+ }
+
+ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
+ {
++ const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
+ u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
+
+ /* if exit due to PF check for async PF */
+@@ -6416,18 +6416,20 @@ static void handle_exception_nmi_irqoff(
+ kvm_machine_check();
+ /* We need to handle NMIs before interrupts are enabled */
+ else if (is_nmi(intr_info))
+- handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
++ handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
+ }
+
+ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+ {
+ u32 intr_info = vmx_get_intr_info(vcpu);
++ unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
++ gate_desc *desc = (gate_desc *)host_idt_base + vector;
+
+ if (WARN_ONCE(!is_external_intr(intr_info),
+ "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
+ return;
+
+- handle_interrupt_nmi_irqoff(vcpu, intr_info);
++ handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+ }
+
+ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
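(Not part of the patch above — a toy userspace C sketch, with mock names and no
kernel types, of the calling-convention change: handle_interrupt_nmi_irqoff()
now receives the handler entry address directly, so the NMI exit path can pass
the dedicated non-IST entry while external interrupts keep resolving their
handler from the host IDT. exc_nmi_noist(), external_irq_entry() and mock_idt[]
below are invented stand-ins for the real asm stubs and IDT.)

#include <stdio.h>

typedef void (*entry_fn)(void);

/* Mock entry points; the real ones are asm stubs invoked by address. */
static void exc_nmi_noist(void)      { puts("NMI handled without IST assumptions"); }
static void external_irq_entry(void) { puts("external interrupt handled"); }

/* Mock host IDT: vector -> entry point; put an external interrupt at 32. */
static entry_fn mock_idt[256] = { [32] = external_irq_entry };

/* After the patch, the helper takes an entry address, not an intr_info word. */
static void handle_interrupt_nmi_irqoff(entry_fn entry)
{
    /* kvm_before_interrupt()/kvm_after_interrupt() are omitted in this mock. */
    entry();
}

int main(void)
{
    /* NMI path: call a fixed non-IST entry instead of the IDT-installed one. */
    handle_interrupt_nmi_irqoff(exc_nmi_noist);

    /* External interrupt path: the entry is still looked up from the IDT. */
    handle_interrupt_nmi_irqoff(mock_idt[32]);
    return 0;
}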