From: Greg Kroah-Hartman
Date: Mon, 28 Sep 2020 17:55:52 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v4.4.238~23
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=45b87ca1f1bf8ac9ce2d9dc5187e8aebd3902a31;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	dm-fix-bio-splitting-and-its-bio-completion-order-for-regular-io.patch
	kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
---

diff --git a/queue-5.4/dm-fix-bio-splitting-and-its-bio-completion-order-for-regular-io.patch b/queue-5.4/dm-fix-bio-splitting-and-its-bio-completion-order-for-regular-io.patch
new file mode 100644
index 00000000000..61f299d9f46
--- /dev/null
+++ b/queue-5.4/dm-fix-bio-splitting-and-its-bio-completion-order-for-regular-io.patch
@@ -0,0 +1,75 @@
+From ee1dfad5325ff1cfb2239e564cd411b3bfe8667a Mon Sep 17 00:00:00 2001
+From: Mike Snitzer
+Date: Mon, 14 Sep 2020 13:04:19 -0400
+Subject: dm: fix bio splitting and its bio completion order for regular IO
+
+From: Mike Snitzer
+
+commit ee1dfad5325ff1cfb2239e564cd411b3bfe8667a upstream.
+
+dm_queue_split() is removed because __split_and_process_bio() _must_
+handle splitting bios to ensure proper bio submission and completion
+ordering as a bio is split.
+
+Otherwise, multiple recursive calls to ->submit_bio will cause multiple
+split bios to be allocated from the same ->bio_split mempool at the same
+time. This would result in deadlock in low memory conditions because no
+progress could be made (only one bio is available in ->bio_split
+mempool).
+
+This fix has been verified to still fix the loss of performance, due
+to excess splitting, that commit 120c9257f5f1 provided.
+
+Fixes: 120c9257f5f1 ("Revert "dm: always call blk_queue_split() in dm_process_bio()"")
+Cc: stable@vger.kernel.org # 5.0+, requires custom backport due to 5.9 changes
+Reported-by: Ming Lei
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm.c |   23 ++---------------------
+ 1 file changed, 2 insertions(+), 21 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1720,23 +1720,6 @@ out:
+ 	return ret;
+ }
+ 
+-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
+-{
+-	unsigned len, sector_count;
+-
+-	sector_count = bio_sectors(*bio);
+-	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
+-
+-	if (sector_count > len) {
+-		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
+-
+-		bio_chain(split, *bio);
+-		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
+-		generic_make_request(*bio);
+-		*bio = split;
+-	}
+-}
+-
+ static blk_qc_t dm_process_bio(struct mapped_device *md,
+ 			       struct dm_table *map, struct bio *bio)
+ {
+@@ -1764,14 +1747,12 @@ static blk_qc_t dm_process_bio(struct ma
+ 	if (current->bio_list) {
+ 		if (is_abnormal_io(bio))
+ 			blk_queue_split(md->queue, &bio);
+-		else
+-			dm_queue_split(md, ti, &bio);
++		/* regular IO is split by __split_and_process_bio */
+ 	}
+ 
+ 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+ 		return __process_bio(md, map, bio, ti);
+-	else
+-		return __split_and_process_bio(md, map, bio);
++	return __split_and_process_bio(md, map, bio);
+ }
+ 
+ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
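The ->bio_split deadlock the dm commit message describes can be modelled outside
the kernel. The following is a loose userspace C analogy, not kernel code:
pool_get()/pool_put() are made-up stand-ins for mempool_alloc()/mempool_free()
on a mempool whose guaranteed reserve is a single entry, and sector arithmetic
stands in for real bio splitting. It sketches why holding a pool entry across a
recursive resubmission can stall under memory pressure, while the
split-complete-recycle pattern of __split_and_process_bio() always progresses.

#include <stdbool.h>
#include <stdio.h>

static int slots = 1;	/* the single guaranteed ->bio_split reserve entry */

static bool pool_get(void) { return slots > 0 ? (slots--, true) : false; }
static void pool_put(void) { slots++; }

/* Removed pattern: each recursion level holds its split bio's pool entry
 * while the remainder is resubmitted, so nested submissions need a second
 * entry that may never become available. */
static bool submit_recursive(int sectors, int max_len)
{
	if (sectors <= max_len)
		return true;
	if (!pool_get())
		return false;	/* mempool_alloc() would sleep forever here */
	bool ok = submit_recursive(sectors - max_len, max_len);
	pool_put();		/* entry only returned after the recursion */
	return ok;
}

/* Fixed pattern: split one piece, let it complete, recycle its entry, and
 * only then split the next piece - one entry in flight at a time. */
static bool submit_iterative(int sectors, int max_len)
{
	while (sectors > max_len) {
		if (!pool_get())
			return false;
		sectors -= max_len;	/* piece is submitted and completes */
		pool_put();		/* its pool entry becomes free again */
	}
	return true;
}

int main(void)
{
	printf("recursive: %s\n", submit_recursive(300, 100) ? "ok" : "stuck");
	slots = 1;
	printf("iterative: %s\n", submit_iterative(300, 100) ? "ok" : "stuck");
	return 0;
}

Run as written, the recursive variant reports "stuck" (it wants a second reserve
entry while still holding the first) and the iterative one reports "ok".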
diff --git a/queue-5.4/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch b/queue-5.4/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
new file mode 100644
index 00000000000..ed5aa2a9412
--- /dev/null
+++ b/queue-5.4/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
@@ -0,0 +1,154 @@
+From c4ad98e4b72cb5be30ea282fce935248f2300e62 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier
+Date: Tue, 15 Sep 2020 11:42:17 +0100
+Subject: KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
+
+From: Marc Zyngier
+
+commit c4ad98e4b72cb5be30ea282fce935248f2300e62 upstream.
+
+KVM currently assumes that an instruction abort can never be a write.
+This is in general true, except when the abort is triggered by
+a S1PTW on instruction fetch that tries to update the S1 page tables
+(to set AF, for example).
+
+This can happen if the page tables have been paged out and brought
+back in without seeing a direct write to them (they are thus marked
+read only), and the fault handling code will make the PT executable(!)
+instead of writable. The guest gets stuck forever.
+
+In these conditions, the permission fault must be considered as
+a write so that the Stage-1 update can take place. This is essentially
+the I-side equivalent of the problem fixed by 60e21a0ef54c ("arm64: KVM:
+Take S1 walks into account when determining S2 write faults").
+
+Update kvm_is_write_fault() to return true on IABT+S1PTW, and introduce
+kvm_vcpu_trap_is_exec_fault() that only returns true for instruction
+aborts that are not faults on a S1 page-table walk. Additionally,
+kvm_vcpu_dabt_iss1tw() is renamed to kvm_vcpu_abt_iss1tw(), as the above
+makes it plain that it isn't specific to data aborts.
+
+Signed-off-by: Marc Zyngier
+Reviewed-by: Will Deacon
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200915104218.1284701-2-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/include/asm/kvm_emulate.h   |   11 ++++++++---
+ arch/arm64/include/asm/kvm_emulate.h |   12 ++++++++++--
+ arch/arm64/kvm/hyp/switch.c          |    2 +-
+ virt/kvm/arm/mmio.c                  |    2 +-
+ virt/kvm/arm/mmu.c                   |    2 +-
+ 5 files changed, 21 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -204,7 +204,7 @@ static inline int kvm_vcpu_dabt_get_rd(s
+ 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+@@ -236,16 +236,21 @@ static inline bool kvm_vcpu_trap_il_is32
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+ }
+ 
+-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
++static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -299,7 +299,7 @@ static inline int kvm_vcpu_dabt_get_rd(c
+ 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -307,7 +307,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(
+ static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++		kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+ 
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -336,6 +336,11 @@ static inline bool kvm_vcpu_trap_is_iabt
+ 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+@@ -373,6 +378,9 @@ static inline int kvm_vcpu_sys_get_rt(st
+ 
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
++	if (kvm_vcpu_abt_iss1tw(vcpu))
++		return true;
++
+ 	if (kvm_vcpu_trap_is_iabt(vcpu))
+ 		return false;
+ 
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -496,7 +496,7 @@ static bool __hyp_text fixup_guest_exit(
+ 		kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ 		kvm_vcpu_dabt_isvalid(vcpu) &&
+ 		!kvm_vcpu_dabt_isextabt(vcpu) &&
+-		!kvm_vcpu_dabt_iss1tw(vcpu);
++		!kvm_vcpu_abt_iss1tw(vcpu);
+ 
+ 	if (valid) {
+ 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -130,7 +130,7 @@ static int decode_hsr(struct kvm_vcpu *v
+ 	bool sign_extend;
+ 	bool sixty_four;
+ 
+-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++	if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ 		/* page table accesses IO mem: tell guest to fix its TTBR */
+ 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ 		return 1;
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1690,7 +1690,7 @@ static int user_mem_abort(struct kvm_vcp
+ 	unsigned long vma_pagesize, flags = 0;
+ 
+ 	write_fault = kvm_is_write_fault(vcpu);
+-	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ 	VM_BUG_ON(write_fault && exec_fault);
+ 
+ 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
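The fault classification after the KVM patch reduces to a small truth table over
the syndrome bits. Below is a userspace C illustration, not kernel code: the
helpers merely mirror the logic of kvm_is_write_fault() and
kvm_vcpu_trap_is_exec_fault(), with bit positions following ESR_ELx_S1PTW
(bit 7) and ESR_ELx_WNR (bit 6). It shows the fixed case, where an instruction
abort taken on a stage-1 page-table walk now classifies as a write rather than
an execute fault.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_S1PTW	(1u << 7)	/* abort taken on a S1 page-table walk */
#define ESR_WNR		(1u << 6)	/* write-not-read */

static bool is_write_fault(uint32_t esr, bool iabt)
{
	if (esr & ESR_S1PTW)	/* the walk must update AF/DBM: a write */
		return true;
	if (iabt)		/* a plain instruction abort is never a write */
		return false;
	return esr & ESR_WNR;
}

static bool is_exec_fault(uint32_t esr, bool iabt)
{
	/* an IABT caused by the walk itself is not an execute fault */
	return iabt && !(esr & ESR_S1PTW);
}

int main(void)
{
	/* the case this patch fixes: IABT + S1PTW -> write, not exec */
	printf("iabt+s1ptw: write=%d exec=%d\n",
	       is_write_fault(ESR_S1PTW, true), is_exec_fault(ESR_S1PTW, true));
	/* a plain instruction abort stays an execute fault */
	printf("iabt:       write=%d exec=%d\n",
	       is_write_fault(0, true), is_exec_fault(0, true));
	return 0;
}

Note that write and exec can never both be true, matching the
VM_BUG_ON(write_fault && exec_fault) in user_mem_abort() above.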
diff --git a/queue-5.4/series b/queue-5.4/series
index 874e3a29433..9dcf98e6d9b 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -383,3 +383,5 @@ dmabuf-fix-null-pointer-dereference-in-dma_buf_release.patch
 mm-thp-swap-fix-allocating-cluster-for-swapfile-by-mistake.patch
 mm-gup-fix-gup_fast-with-dynamic-page-table-folding.patch
 s390-zcrypt-fix-zcrypt_perdev_reqcnt-ioctl.patch
+kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
+dm-fix-bio-splitting-and-its-bio-completion-order-for-regular-io.patch