--- /dev/null
+From 9f46c187e2e680ecd9de7983e4d081c3391acc76 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 20 May 2022 13:48:11 -0400
+Subject: KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9f46c187e2e680ecd9de7983e4d081c3391acc76 upstream.
+
+With shadow paging enabled, the INVPCID instruction results in a call
+to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
+invlpg callback is not set and the result is a NULL pointer dereference.
+Fix it trivially by checking for mmu->invlpg before every call.
+
+There are other possibilities:
+
+- check for CR0.PG, because KVM (like all Intel processors after P5)
+ flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
+ nop with paging disabled
+
+- check for EFER.LMA, because KVM syncs and flushes when switching
+ MMU contexts outside of 64-bit mode
+
+All of these are tricky; go for the simple solution. This is CVE-2022-1789.
+
+Reported-by: Yongkang Jia <kangel@zju.edu.cn>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[fix conflict due to missing b9e5603c2a3accbadfec570ac501a54431a6bdba]
+Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5178,14 +5178,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu
+ uint i;
+
+ if (pcid == kvm_get_active_pcid(vcpu)) {
+- mmu->invlpg(vcpu, gva, mmu->root_hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->root_hpa);
+ tlb_flush = true;
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
+ pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
+- mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+ tlb_flush = true;
+ }
+ }
--- /dev/null
+From 2f15d027c05fac406decdb5eceb9ec0902b68f53 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Thu, 22 Apr 2021 11:29:48 +0200
+Subject: KVM: x86: Properly handle APF vs disabled LAPIC situation
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 2f15d027c05fac406decdb5eceb9ec0902b68f53 upstream.
+
+Async PF 'page ready' event may happen when LAPIC is (temporarily) disabled.
+In particular, Sebastien reports that when Linux kernel is directly booted
+by Cloud Hypervisor, LAPIC is 'software disabled' when APF mechanism is
+initialized. On initialization KVM tries to inject 'wakeup all' event and
+puts the corresponding token to the slot. It is, however, failing to inject
+an interrupt (kvm_apic_set_irq() -> __apic_accept_irq() -> !apic_enabled())
+so the guest never gets notified and the whole APF mechanism gets stuck.
+The same issue is likely to happen if the guest temporarily disables LAPIC
+and a previously unavailable page becomes available.
+
+Do two things to resolve the issue:
+- Avoid dequeuing 'page ready' events from APF queue when LAPIC is
+ disabled.
+- Trigger an attempt to deliver pending 'page ready' events when LAPIC
+ becomes enabled (SPIV or MSR_IA32_APICBASE).
+
+Reported-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20210422092948.568327-1-vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[Guoqing: backport to 5.10-stable ]
+Signed-off-by: Guoqing Jiang <guoqing.jiang@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 6 ++++++
+ arch/x86/kvm/x86.c | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -297,6 +297,10 @@ static inline void apic_set_spiv(struct
+
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+ }
++
++ /* Check if there are APF page ready requests pending */
++ if (enabled)
++ kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+ }
+
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+@@ -2260,6 +2264,8 @@ void kvm_lapic_set_base(struct kvm_vcpu
+ if (value & MSR_IA32_APICBASE_ENABLE) {
+ kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
+ static_key_slow_dec_deferred(&apic_hw_disabled);
++ /* Check if there are APF page ready requests pending */
++ kvm_make_request(KVM_REQ_APF_READY, vcpu);
+ } else {
+ static_key_slow_inc(&apic_hw_disabled.key);
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11146,7 +11146,7 @@ bool kvm_arch_can_dequeue_async_page_pre
+ if (!kvm_pv_async_pf_enabled(vcpu))
+ return true;
+ else
+- return apf_pageready_slot_free(vcpu);
++ return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
+ }
+
+ void kvm_arch_start_assignment(struct kvm *kvm)