--- /dev/null
+From b5331379bc62611d1026173a09c73573384201d9 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Tue, 11 Aug 2020 11:27:25 +0100
+Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set
+
+From: Will Deacon <will@kernel.org>
+
+commit b5331379bc62611d1026173a09c73573384201d9 upstream.
+
+When an MMU notifier call results in unmapping a range that spans multiple
+PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary,
+since this avoids running into RCU stalls during VM teardown. Unfortunately,
+if the VM is destroyed as a result of OOM, then blocking is not permitted
+and the call to the scheduler triggers the following BUG():
+
+ | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394
+ | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper
+ | INFO: lockdep is turned off.
+ | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1
+ | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
+ | Call trace:
+ | dump_backtrace+0x0/0x284
+ | show_stack+0x1c/0x28
+ | dump_stack+0xf0/0x1a4
+ | ___might_sleep+0x2bc/0x2cc
+ | unmap_stage2_range+0x160/0x1ac
+ | kvm_unmap_hva_range+0x1a0/0x1c8
+ | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8
+ | __mmu_notifier_invalidate_range_start+0x218/0x31c
+ | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0
+ | __oom_reap_task_mm+0x128/0x268
+ | oom_reap_task+0xac/0x298
+ | oom_reaper+0x178/0x17c
+ | kthread+0x1e4/0x1fc
+ | ret_from_fork+0x10/0x30
+
+Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we
+only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier
+flags.
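+
+In this 4.19 backport there is no range-flags argument: the notifier's
+'blockable' boolean stands in for MMU_NOTIFIER_RANGE_BLOCKABLE and is
+threaded down to the stage-2 walk as 'may_block'. A condensed sketch of
+the resulting flow, collected from the hunks of this patch and its
+prerequisite ("KVM: Pass MMU notifier range flags to
+kvm_unmap_hva_range()"):
+
+	/* virt/kvm/kvm_main.c: the notifier passes 'blockable' on */
+	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
+
+	/* virt/kvm/arm/mmu.c: the handler forwards it via the opaque
+	 * 'data' pointer, and __unmap_stage2_range() gates the resched:
+	 */
+	if (may_block && next != end)
+		cond_resched_lock(&kvm->mmu_lock);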
+
+Cc: <stable@vger.kernel.org>
+Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd")
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Message-Id: <20200811102725.7121-3-will@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[will: Backport to 4.19; use 'blockable' instead of non-existent MMU_NOTIFIER_RANGE_BLOCKABLE flag]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/arm/mmu.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm
+  * destroying the VM), otherwise another faulting VCPU may come in and mess
+  * with things behind our backs.
+  */
+-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
++				 bool may_block)
+ {
+ 	pgd_t *pgd;
+ 	phys_addr_t addr = start, end = start + size;
+@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kv
+ 		 * If the range is too large, release the kvm->mmu_lock
+ 		 * to prevent starvation and lockup detector warnings.
+ 		 */
+-		if (next != end)
++		if (may_block && next != end)
+ 			cond_resched_lock(&kvm->mmu_lock);
+ 	} while (pgd++, addr = next, addr != end);
+ }
+
++static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++{
++	__unmap_stage2_range(kvm, start, size, true);
++}
++
+ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+ 			      phys_addr_t addr, phys_addr_t end)
+ {
+@@ -1820,7 +1826,9 @@ static int handle_hva_to_gpa(struct kvm
+
+ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+ {
+-	unmap_stage2_range(kvm, gpa, size);
++	bool may_block = *(bool *)data;
++
++	__unmap_stage2_range(kvm, gpa, size, may_block);
+ 	return 0;
+ }
+
+@@ -1831,7 +1839,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
+ 		return 0;
+
+ 	trace_kvm_unmap_hva_range(start, end);
+-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
++	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable);
+ 	return 0;
+ }
+
--- /dev/null
+From fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Tue, 11 Aug 2020 11:27:24 +0100
+Subject: KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
+
+From: Will Deacon <will@kernel.org>
+
+commit fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 upstream.
+
+The 'flags' field of 'struct mmu_notifier_range' is used to indicate
+whether invalidate_range_{start,end}() are permitted to block. In the
+case of kvm_mmu_notifier_invalidate_range_start(), this field is not
+forwarded on to the architecture-specific implementation of
+kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
+whether or not to block.
+
+Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
+architectures are aware as to whether or not they are permitted to block.
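+
+In this 4.19 backport the extra parameter is a 'blockable' boolean (the
+upstream range flags do not exist here), which is exactly what
+kvm_mmu_notifier_invalidate_range_start() already receives. The updated
+prototype and its caller in virt/kvm/kvm_main.c, as in the hunks below
+(the prototype's line-wrapping varies slightly per architecture):
+
+	int kvm_unmap_hva_range(struct kvm *kvm,
+				unsigned long start, unsigned long end,
+				bool blockable);
+
+	/* virt/kvm/kvm_main.c */
+	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);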
+
+Cc: <stable@vger.kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Message-Id: <20200811102725.7121-2-will@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[will: Backport to 4.19; use 'blockable' instead of non-existent range flags]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_host.h     |    2 +-
+ arch/arm64/include/asm/kvm_host.h   |    2 +-
+ arch/mips/include/asm/kvm_host.h    |    2 +-
+ arch/mips/kvm/mmu.c                 |    3 ++-
+ arch/powerpc/include/asm/kvm_host.h |    3 ++-
+ arch/powerpc/kvm/book3s.c           |    3 ++-
+ arch/powerpc/kvm/e500_mmu_host.c    |    3 ++-
+ arch/x86/include/asm/kvm_host.h     |    3 ++-
+ arch/x86/kvm/mmu.c                  |    3 ++-
+ virt/kvm/arm/mmu.c                  |    2 +-
+ virt/kvm/kvm_main.c                 |    2 +-
+ 11 files changed, 17 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+-			unsigned long start, unsigned long end);
++			unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -370,7 +370,7 @@ int __kvm_arm_vcpu_set_events(struct kvm
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+-			unsigned long start, unsigned long end);
++			unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -936,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+-			unsigned long start, unsigned long end);
++			unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+--- a/arch/mips/kvm/mmu.c
++++ b/arch/mips/kvm/mmu.c
+@@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct
+ 	return 1;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++			bool blockable)
+ {
+ 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -68,7 +68,8 @@
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+
+ extern int kvm_unmap_hva_range(struct kvm *kvm,
+-			       unsigned long start, unsigned long end);
++			       unsigned long start, unsigned long end,
++			       bool blockable);
+ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -812,7 +812,8 @@ void kvmppc_core_commit_memory_region(st
+ 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++			bool blockable)
+ {
+ 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ }
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm
+ 	return 0;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++			bool blockable)
+ {
+ 	/* kvm_unmap_hva flushes everything anyways */
+ 	kvm_unmap_hva(kvm, start);
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1465,7 +1465,8 @@ asmlinkage void __noreturn kvm_spurious_
+ 	____kvm_handle_fault_on_reboot(insn, "")
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++			bool blockable);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -1956,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kv
+ 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++			bool blockable)
+ {
+ 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ }
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1825,7 +1825,7 @@ static int kvm_unmap_hva_handler(struct
+ }
+
+ int kvm_unmap_hva_range(struct kvm *kvm,
+-			unsigned long start, unsigned long end)
++			unsigned long start, unsigned long end, bool blockable)
+ {
+ 	if (!kvm->arch.pgd)
+ 		return 0;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -410,7 +410,7 @@ static int kvm_mmu_notifier_invalidate_r
+ 	 * count is also read inside the mmu_lock critical section.
+ 	 */
+ 	kvm->mmu_notifier_count++;
+-	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
++	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
+ 	need_tlb_flush |= kvm->tlbs_dirty;
+ 	/* we've to flush the tlb before the pages can be freed */
+ 	if (need_tlb_flush)