--- /dev/null
+From ee1dfad5325ff1cfb2239e564cd411b3bfe8667a Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Mon, 14 Sep 2020 13:04:19 -0400
+Subject: dm: fix bio splitting and its bio completion order for regular IO
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit ee1dfad5325ff1cfb2239e564cd411b3bfe8667a upstream.
+
+dm_queue_split() is removed because __split_and_process_bio() _must_
+handle splitting bios to ensure proper bio submission and completion
+ordering as a bio is split.
+
+Otherwise, multiple recursive calls to ->submit_bio will cause multiple
+split bios to be allocated from the same ->bio_split mempool at the same
+time. This would result in deadlock in low memory conditions because no
+progress could be made (only one bio is available in ->bio_split
+mempool).
+
+This fix has been verified to retain the performance gain, from
+avoiding excess splitting, that commit 120c9257f5f1 provided.
+
+Fixes: 120c9257f5f1 ("Revert "dm: always call blk_queue_split() in dm_process_bio()"")
+Cc: stable@vger.kernel.org # 5.0+, requires custom backport due to 5.9 changes
+Reported-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
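+
+As background for the removal below: a minimal annotated sketch of the
+pattern dm_queue_split() relied on and why it can deadlock. bio_split(),
+bio_chain() and generic_make_request() are the real block-layer helpers;
+the wrapper function and its arguments are illustrative only.
+
+static void split_once_sketch(struct request_queue *q, struct bio **bio,
+			      unsigned len)
+{
+	/*
+	 * Draws a bio from the queue's reserved ->bio_split pool; as the
+	 * changelog notes, effectively only one bio is available there.
+	 */
+	struct bio *split = bio_split(*bio, len, GFP_NOIO, &q->bio_split);
+
+	/* The remainder (*bio) now completes only after 'split' does. */
+	bio_chain(split, *bio);
+
+	/*
+	 * Under recursive submission (current->bio_list != NULL) this does
+	 * not process the remainder; it is merely queued until the current
+	 * submission unwinds.  If the split bio has to be split again
+	 * before that happens, a second allocation from the already-drained
+	 * ->bio_split pool is required, and in low-memory conditions no
+	 * progress can be made.
+	 */
+	generic_make_request(*bio);
+	*bio = split;
+}
+
+Letting __split_and_process_bio() handle regular-IO splitting avoids
+stacking multiple ->bio_split allocations inside a single submission.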
+ drivers/md/dm.c | 23 ++---------------------
+ 1 file changed, 2 insertions(+), 21 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1728,23 +1728,6 @@ out:
+ return ret;
+ }
+
+-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
+-{
+- unsigned len, sector_count;
+-
+- sector_count = bio_sectors(*bio);
+- len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
+-
+- if (sector_count > len) {
+- struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
+-
+- bio_chain(split, *bio);
+- trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
+- generic_make_request(*bio);
+- *bio = split;
+- }
+-}
+-
+ static blk_qc_t dm_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+ {
+@@ -1772,14 +1755,12 @@ static blk_qc_t dm_process_bio(struct ma
+ if (current->bio_list) {
+ if (is_abnormal_io(bio))
+ blk_queue_split(md->queue, &bio);
+- else
+- dm_queue_split(md, ti, &bio);
++ /* regular IO is split by __split_and_process_bio */
+ }
+
+ if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+ return __process_bio(md, map, bio, ti);
+- else
+- return __split_and_process_bio(md, map, bio);
++ return __split_and_process_bio(md, map, bio);
+ }
+
+ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
--- /dev/null
+From f3cd4850504ff612d0ea77a0aaf29b66c98fcefe Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 24 Sep 2020 14:55:54 -0600
+Subject: io_uring: ensure open/openat2 name is cleaned on cancelation
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit f3cd4850504ff612d0ea77a0aaf29b66c98fcefe upstream.
+
+If we cancel these requests, we'll leak the memory associated with the
+filename. Add them to the table of ops that need cleaning, if
+REQ_F_NEED_CLEANUP is set.
+
+Cc: stable@vger.kernel.org
+Fixes: e62753e4e292 ("io_uring: call statx directly")
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
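+
+For context, the leaked name is pinned at prep time, where getname() is
+paired with REQ_F_NEED_CLEANUP. A rough sketch of that pairing follows;
+it is illustrative, not the exact upstream prep helper.
+
+static int io_openat_prep_sketch(struct io_kiocb *req,
+				 const struct io_uring_sqe *sqe)
+{
+	/* Pin the userspace filename for the lifetime of the request. */
+	req->open.filename = getname(u64_to_user_ptr(READ_ONCE(sqe->addr)));
+	if (IS_ERR(req->open.filename)) {
+		int ret = PTR_ERR(req->open.filename);
+
+		req->open.filename = NULL;
+		return ret;
+	}
+	/* Mark the request as owning state that must be torn down. */
+	req->flags |= REQ_F_NEED_CLEANUP;
+	return 0;
+}
+
+The normal execution path drops the name itself, but a canceled request
+is torn down through io_cleanup_req(), which previously fell through the
+OPENAT/OPENAT2 cases without a putname(); the two added lines close that
+leak.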
+ fs/io_uring.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5254,6 +5254,8 @@ static void io_cleanup_req(struct io_kio
+ break;
+ case IORING_OP_OPENAT:
+ case IORING_OP_OPENAT2:
++ if (req->open.filename)
++ putname(req->open.filename);
+ break;
+ case IORING_OP_SPLICE:
+ case IORING_OP_TEE:
--- /dev/null
+From c4ad98e4b72cb5be30ea282fce935248f2300e62 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 15 Sep 2020 11:42:17 +0100
+Subject: KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit c4ad98e4b72cb5be30ea282fce935248f2300e62 upstream.
+
+KVM currently assumes that an instruction abort can never be a write.
+This is in general true, except when the abort is triggered by
+a S1PTW on instruction fetch that tries to update the S1 page tables
+(to set AF, for example).
+
+This can happen if the page tables have been paged out and brought
+back in without seeing a direct write to them (they are thus marked
+read only), and the fault handling code will make the PT executable(!)
+instead of writable. The guest gets stuck forever.
+
+In these conditions, the permission fault must be considered as
+a write so that the Stage-1 update can take place. This is essentially
+the I-side equivalent of the problem fixed by 60e21a0ef54c ("arm64: KVM:
+Take S1 walks into account when determining S2 write faults").
+
+Update kvm_is_write_fault() to return true on IABT+S1PTW, and introduce
+kvm_vcpu_trap_is_exec_fault() that only returns true when the fault was
+not taken on a S1 page table walk. Additionally, kvm_vcpu_dabt_iss1tw()
+is renamed to kvm_vcpu_abt_iss1tw(), as the above makes it plain that it
+isn't specific to data aborts.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Will Deacon <will@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200915104218.1284701-2-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
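+
+The resulting fault classification, condensed from the hunks below into
+one illustrative helper (the kvm_vcpu_* predicates are the real ones; the
+wrapper itself is only a sketch):
+
+/*
+ * After this patch:
+ *  - DABT with WNR or S1PTW set: write fault.
+ *  - IABT without S1PTW:         exec fault, not a write fault.
+ *  - IABT with S1PTW (e.g. an AF update during instruction fetch):
+ *    write fault and no longer an exec fault, so user_mem_abort() maps
+ *    the page holding the stage-1 tables writable rather than executable.
+ */
+static bool is_write_fault_sketch(const struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_abt_iss1tw(vcpu))		/* S1 walk needs to write */
+		return true;
+	if (kvm_vcpu_trap_is_iabt(vcpu))	/* pure instruction fetch */
+		return false;
+	return kvm_vcpu_dabt_iswrite(vcpu);	/* WNR set on the D-side */
+}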
+ arch/arm64/include/asm/kvm_emulate.h | 12 ++++++++++--
+ arch/arm64/kvm/hyp/switch.c | 2 +-
+ arch/arm64/kvm/mmio.c | 2 +-
+ arch/arm64/kvm/mmu.c | 2 +-
+ 4 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -319,7 +319,7 @@ static __always_inline int kvm_vcpu_dabt
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+
+-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -327,7 +327,7 @@ static __always_inline bool kvm_vcpu_dab
+ static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++ kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -356,6 +356,11 @@ static inline bool kvm_vcpu_trap_is_iabt
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+@@ -393,6 +398,9 @@ static __always_inline int kvm_vcpu_sys_
+
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
++ if (kvm_vcpu_abt_iss1tw(vcpu))
++ return true;
++
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -599,7 +599,7 @@ static bool __hyp_text fixup_guest_exit(
+ kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_dabt_isextabt(vcpu) &&
+- !kvm_vcpu_dabt_iss1tw(vcpu);
++ !kvm_vcpu_abt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+--- a/arch/arm64/kvm/mmio.c
++++ b/arch/arm64/kvm/mmio.c
+@@ -146,7 +146,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu,
+ }
+
+ /* Page table accesses IO mem: tell guest to fix its TTBR */
+- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+ }
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1845,7 +1845,7 @@ static int user_mem_abort(struct kvm_vcp
+ unsigned long vma_pagesize, flags = 0;
+
+ write_fault = kvm_is_write_fault(vcpu);
+- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ VM_BUG_ON(write_fault && exec_fault);
+
+ if (fault_status == FSC_PERM && !write_fault && !exec_fault) {