From: Greg Kroah-Hartman Date: Wed, 5 Apr 2023 10:01:20 +0000 (+0200) Subject: drop swiotlb patches from 6.1 and 6.2 queues X-Git-Tag: v6.1.23~4 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=eef2f5c96f582150fe145459d1faaddc19ff682f;p=thirdparty%2Fkernel%2Fstable-queue.git drop swiotlb patches from 6.1 and 6.2 queues --- diff --git a/queue-6.1/series b/queue-6.1/series index 6632fdf9d0d..3f0a1259cdf 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -63,13 +63,11 @@ btrfs-fix-uninitialized-variable-warning-in-btrfs_up.patch btrfs-use-temporary-variable-for-space_info-in-btrfs.patch mtd-rawnand-meson-initialize-struct-with-zeroes.patch mtd-nand-mxic-ecc-fix-mxic_ecc_data_xfer_wait_for_co.patch -swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch ca8210-fix-unsigned-mac_len-comparison-with-zero-in-.patch riscv-kvm-fix-vm-hang-in-case-of-timer-delta-being-z.patch mips-bmips-bcm6358-disable-rac-flush-for-tp1.patch alsa-usb-audio-fix-recursive-locking-at-xrun-during-.patch pci-dwc-fix-port_link_control-update-when-cdm-check-.patch -swiotlb-fix-slot-alignment-checks.patch platform-x86-think-lmi-add-missing-type-attribute.patch platform-x86-think-lmi-use-correct-possible_values-d.patch platform-x86-think-lmi-only-display-possible_values-.patch diff --git a/queue-6.1/swiotlb-fix-slot-alignment-checks.patch b/queue-6.1/swiotlb-fix-slot-alignment-checks.patch deleted file mode 100644 index babfffcaa17..00000000000 --- a/queue-6.1/swiotlb-fix-slot-alignment-checks.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 6b19ccf221a13a2afa961c8ffffd7472127e69bc Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 21 Mar 2023 09:31:27 +0100 -Subject: swiotlb: fix slot alignment checks - -From: Petr Tesarik - -[ Upstream commit 0eee5ae1025699ea93d44fdb6ef2365505082103 ] - -Explicit alignment and page alignment are used only to calculate -the stride, not when checking actual slot physical address. - -Originally, only page alignment was implemented, and that worked, -because the whole SWIOTLB is allocated on a page boundary, so -aligning the start index was sufficient to ensure a page-aligned -slot. - -When commit 1f221a0d0dbf ("swiotlb: respect min_align_mask") added -support for min_align_mask, the index could be incremented in the -search loop, potentially finding an unaligned slot if minimum device -alignment is between IO_TLB_SIZE and PAGE_SIZE. The bug could go -unnoticed, because the slot size is 2 KiB, and the most common page -size is 4 KiB, so there is no alignment value in between. - -IIUC the intention has been to find a slot that conforms to all -alignment constraints: device minimum alignment, an explicit -alignment (given as function parameter) and optionally page -alignment (if allocation size is >= PAGE_SIZE). The most -restrictive mask can be trivially computed with logical AND. The -rest can stay. 
- -Fixes: 1f221a0d0dbf ("swiotlb: respect min_align_mask") -Fixes: e81e99bacc9f ("swiotlb: Support aligned swiotlb buffers") -Signed-off-by: Petr Tesarik -Signed-off-by: Christoph Hellwig -Signed-off-by: Sasha Levin ---- - kernel/dma/swiotlb.c | 16 ++++++++++------ - 1 file changed, 10 insertions(+), 6 deletions(-) - -diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c -index 1ecd0d1f7231a..eeb5695c3f286 100644 ---- a/kernel/dma/swiotlb.c -+++ b/kernel/dma/swiotlb.c -@@ -626,22 +626,26 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - BUG_ON(!nslots); - BUG_ON(area_index >= mem->nareas); - -+ /* -+ * For allocations of PAGE_SIZE or larger only look for page aligned -+ * allocations. -+ */ -+ if (alloc_size >= PAGE_SIZE) -+ iotlb_align_mask &= PAGE_MASK; -+ iotlb_align_mask &= alloc_align_mask; -+ - /* - * For mappings with an alignment requirement don't bother looping to -- * unaligned slots once we found an aligned one. For allocations of -- * PAGE_SIZE or larger only look for page aligned allocations. -+ * unaligned slots once we found an aligned one. - */ - stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1; -- if (alloc_size >= PAGE_SIZE) -- stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); -- stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1); - - spin_lock_irqsave(&area->lock, flags); - if (unlikely(nslots > mem->area_nslabs - area->used)) - goto not_found; - - slot_base = area_index * mem->area_nslabs; -- index = wrap_area_index(mem, ALIGN(area->index, stride)); -+ index = area->index; - - for (slots_checked = 0; slots_checked < mem->area_nslabs; ) { - slot_index = slot_base + index; --- -2.39.2 - diff --git a/queue-6.1/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch b/queue-6.1/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch deleted file mode 100644 index 4fee09e7730..00000000000 --- a/queue-6.1/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 9466c1da4cf8169876da5c26c0e31427150d91cb Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Thu, 23 Feb 2023 00:53:15 +0800 -Subject: swiotlb: fix the deadlock in swiotlb_do_find_slots - -From: GuoRui.Yu - -[ Upstream commit 7c3940bf81e5664cdb50c3fedfec8f0a756a34fb ] - -In general, if swiotlb is sufficient, the logic of index = -wrap_area_index(mem, index + 1) is fine, it will quickly take a slot and -release the area->lock; But if swiotlb is insufficient and the device -has min_align_mask requirements, such as NVME, we may not be able to -satisfy index == wrap and exit the loop properly. In this case, other -kernel threads will not be able to acquire the area->lock and release -the slot, resulting in a deadlock. - -The current implementation of wrap_area_index does not involve a modulo -operation, so adjusting the wrap to ensure the loop ends is not trivial. -Introduce a new variable to record the number of loops and exit the loop -after completing the traversal. - -Backtraces: -Other CPUs are waiting this core to exit the swiotlb_do_find_slots -loop. -[10199.924391] RIP: 0010:swiotlb_do_find_slots+0x1fe/0x3e0 -[10199.924403] Call Trace: -[10199.924404] -[10199.924405] swiotlb_tbl_map_single+0xec/0x1f0 -[10199.924407] swiotlb_map+0x5c/0x260 -[10199.924409] ? nvme_pci_setup_prps+0x1ed/0x340 -[10199.924411] dma_direct_map_page+0x12e/0x1c0 -[10199.924413] nvme_map_data+0x304/0x370 -[10199.924415] nvme_prep_rq.part.0+0x31/0x120 -[10199.924417] nvme_queue_rq+0x77/0x1f0 - -... 
-[ 9639.596311] NMI backtrace for cpu 48 -[ 9639.596336] Call Trace: -[ 9639.596337] -[ 9639.596338] _raw_spin_lock_irqsave+0x37/0x40 -[ 9639.596341] swiotlb_do_find_slots+0xef/0x3e0 -[ 9639.596344] swiotlb_tbl_map_single+0xec/0x1f0 -[ 9639.596347] swiotlb_map+0x5c/0x260 -[ 9639.596349] dma_direct_map_sg+0x7a/0x280 -[ 9639.596352] __dma_map_sg_attrs+0x30/0x70 -[ 9639.596355] dma_map_sgtable+0x1d/0x30 -[ 9639.596356] nvme_map_data+0xce/0x370 - -... -[ 9639.595665] NMI backtrace for cpu 50 -[ 9639.595682] Call Trace: -[ 9639.595682] -[ 9639.595683] _raw_spin_lock_irqsave+0x37/0x40 -[ 9639.595686] swiotlb_release_slots.isra.0+0x86/0x180 -[ 9639.595688] dma_direct_unmap_sg+0xcf/0x1a0 -[ 9639.595690] nvme_unmap_data.part.0+0x43/0xc0 - -Fixes: 1f221a0d0dbf ("swiotlb: respect min_align_mask") -Signed-off-by: GuoRui.Yu -Signed-off-by: Xiaokang Hu -Signed-off-by: Christoph Hellwig -Signed-off-by: Sasha Levin ---- - kernel/dma/swiotlb.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c -index 339a990554e7f..1ecd0d1f7231a 100644 ---- a/kernel/dma/swiotlb.c -+++ b/kernel/dma/swiotlb.c -@@ -617,8 +617,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - unsigned int iotlb_align_mask = - dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); - unsigned int nslots = nr_slots(alloc_size), stride; -- unsigned int index, wrap, count = 0, i; - unsigned int offset = swiotlb_align_offset(dev, orig_addr); -+ unsigned int index, slots_checked, count = 0, i; - unsigned long flags; - unsigned int slot_base; - unsigned int slot_index; -@@ -641,15 +641,16 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - goto not_found; - - slot_base = area_index * mem->area_nslabs; -- index = wrap = wrap_area_index(mem, ALIGN(area->index, stride)); -+ index = wrap_area_index(mem, ALIGN(area->index, stride)); - -- do { -+ for (slots_checked = 0; slots_checked < mem->area_nslabs; ) { - slot_index = slot_base + index; - - if (orig_addr && - (slot_addr(tbl_dma_addr, slot_index) & - iotlb_align_mask) != (orig_addr & iotlb_align_mask)) { - index = wrap_area_index(mem, index + 1); -+ slots_checked++; - continue; - } - -@@ -665,7 +666,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - goto found; - } - index = wrap_area_index(mem, index + stride); -- } while (index != wrap); -+ slots_checked += stride; -+ } - - not_found: - spin_unlock_irqrestore(&area->lock, flags); --- -2.39.2 - diff --git a/queue-6.2/series b/queue-6.2/series index 03fbeb6ef22..2e9a2342541 100644 --- a/queue-6.2/series +++ b/queue-6.2/series @@ -61,13 +61,11 @@ btrfs-fix-uninitialized-variable-warning-in-btrfs_up.patch btrfs-use-temporary-variable-for-space_info-in-btrfs.patch mtd-rawnand-meson-initialize-struct-with-zeroes.patch mtd-nand-mxic-ecc-fix-mxic_ecc_data_xfer_wait_for_co.patch -swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch ca8210-fix-unsigned-mac_len-comparison-with-zero-in-.patch riscv-kvm-fix-vm-hang-in-case-of-timer-delta-being-z.patch mips-bmips-bcm6358-disable-rac-flush-for-tp1.patch alsa-usb-audio-fix-recursive-locking-at-xrun-during-.patch pci-dwc-fix-port_link_control-update-when-cdm-check-.patch -swiotlb-fix-slot-alignment-checks.patch platform-x86-think-lmi-add-missing-type-attribute.patch platform-x86-think-lmi-use-correct-possible_values-d.patch platform-x86-think-lmi-only-display-possible_values-.patch diff --git a/queue-6.2/swiotlb-fix-slot-alignment-checks.patch 
b/queue-6.2/swiotlb-fix-slot-alignment-checks.patch deleted file mode 100644 index 6a47ed9fb68..00000000000 --- a/queue-6.2/swiotlb-fix-slot-alignment-checks.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 5b40da7a8b4802ef6cf63b3c2ed19fb62f374878 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 21 Mar 2023 09:31:27 +0100 -Subject: swiotlb: fix slot alignment checks - -From: Petr Tesarik - -[ Upstream commit 0eee5ae1025699ea93d44fdb6ef2365505082103 ] - -Explicit alignment and page alignment are used only to calculate -the stride, not when checking actual slot physical address. - -Originally, only page alignment was implemented, and that worked, -because the whole SWIOTLB is allocated on a page boundary, so -aligning the start index was sufficient to ensure a page-aligned -slot. - -When commit 1f221a0d0dbf ("swiotlb: respect min_align_mask") added -support for min_align_mask, the index could be incremented in the -search loop, potentially finding an unaligned slot if minimum device -alignment is between IO_TLB_SIZE and PAGE_SIZE. The bug could go -unnoticed, because the slot size is 2 KiB, and the most common page -size is 4 KiB, so there is no alignment value in between. - -IIUC the intention has been to find a slot that conforms to all -alignment constraints: device minimum alignment, an explicit -alignment (given as function parameter) and optionally page -alignment (if allocation size is >= PAGE_SIZE). The most -restrictive mask can be trivially computed with logical AND. The -rest can stay. - -Fixes: 1f221a0d0dbf ("swiotlb: respect min_align_mask") -Fixes: e81e99bacc9f ("swiotlb: Support aligned swiotlb buffers") -Signed-off-by: Petr Tesarik -Signed-off-by: Christoph Hellwig -Signed-off-by: Sasha Levin ---- - kernel/dma/swiotlb.c | 16 ++++++++++------ - 1 file changed, 10 insertions(+), 6 deletions(-) - -diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c -index 869dd6667c464..312458506e6d5 100644 ---- a/kernel/dma/swiotlb.c -+++ b/kernel/dma/swiotlb.c -@@ -642,22 +642,26 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - BUG_ON(!nslots); - BUG_ON(area_index >= mem->nareas); - -+ /* -+ * For allocations of PAGE_SIZE or larger only look for page aligned -+ * allocations. -+ */ -+ if (alloc_size >= PAGE_SIZE) -+ iotlb_align_mask &= PAGE_MASK; -+ iotlb_align_mask &= alloc_align_mask; -+ - /* - * For mappings with an alignment requirement don't bother looping to -- * unaligned slots once we found an aligned one. For allocations of -- * PAGE_SIZE or larger only look for page aligned allocations. -+ * unaligned slots once we found an aligned one. 
- */ - stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1; -- if (alloc_size >= PAGE_SIZE) -- stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); -- stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1); - - spin_lock_irqsave(&area->lock, flags); - if (unlikely(nslots > mem->area_nslabs - area->used)) - goto not_found; - - slot_base = area_index * mem->area_nslabs; -- index = wrap_area_index(mem, ALIGN(area->index, stride)); -+ index = area->index; - - for (slots_checked = 0; slots_checked < mem->area_nslabs; ) { - slot_index = slot_base + index; --- -2.39.2 - diff --git a/queue-6.2/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch b/queue-6.2/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch deleted file mode 100644 index 83d3464a3ad..00000000000 --- a/queue-6.2/swiotlb-fix-the-deadlock-in-swiotlb_do_find_slots.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 1222a2257dd9ea061e9994748c3496703b8a0d4f Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Thu, 23 Feb 2023 00:53:15 +0800 -Subject: swiotlb: fix the deadlock in swiotlb_do_find_slots - -From: GuoRui.Yu - -[ Upstream commit 7c3940bf81e5664cdb50c3fedfec8f0a756a34fb ] - -In general, if swiotlb is sufficient, the logic of index = -wrap_area_index(mem, index + 1) is fine, it will quickly take a slot and -release the area->lock; But if swiotlb is insufficient and the device -has min_align_mask requirements, such as NVME, we may not be able to -satisfy index == wrap and exit the loop properly. In this case, other -kernel threads will not be able to acquire the area->lock and release -the slot, resulting in a deadlock. - -The current implementation of wrap_area_index does not involve a modulo -operation, so adjusting the wrap to ensure the loop ends is not trivial. -Introduce a new variable to record the number of loops and exit the loop -after completing the traversal. - -Backtraces: -Other CPUs are waiting this core to exit the swiotlb_do_find_slots -loop. -[10199.924391] RIP: 0010:swiotlb_do_find_slots+0x1fe/0x3e0 -[10199.924403] Call Trace: -[10199.924404] -[10199.924405] swiotlb_tbl_map_single+0xec/0x1f0 -[10199.924407] swiotlb_map+0x5c/0x260 -[10199.924409] ? nvme_pci_setup_prps+0x1ed/0x340 -[10199.924411] dma_direct_map_page+0x12e/0x1c0 -[10199.924413] nvme_map_data+0x304/0x370 -[10199.924415] nvme_prep_rq.part.0+0x31/0x120 -[10199.924417] nvme_queue_rq+0x77/0x1f0 - -... -[ 9639.596311] NMI backtrace for cpu 48 -[ 9639.596336] Call Trace: -[ 9639.596337] -[ 9639.596338] _raw_spin_lock_irqsave+0x37/0x40 -[ 9639.596341] swiotlb_do_find_slots+0xef/0x3e0 -[ 9639.596344] swiotlb_tbl_map_single+0xec/0x1f0 -[ 9639.596347] swiotlb_map+0x5c/0x260 -[ 9639.596349] dma_direct_map_sg+0x7a/0x280 -[ 9639.596352] __dma_map_sg_attrs+0x30/0x70 -[ 9639.596355] dma_map_sgtable+0x1d/0x30 -[ 9639.596356] nvme_map_data+0xce/0x370 - -... 
-[ 9639.595665] NMI backtrace for cpu 50 -[ 9639.595682] Call Trace: -[ 9639.595682] -[ 9639.595683] _raw_spin_lock_irqsave+0x37/0x40 -[ 9639.595686] swiotlb_release_slots.isra.0+0x86/0x180 -[ 9639.595688] dma_direct_unmap_sg+0xcf/0x1a0 -[ 9639.595690] nvme_unmap_data.part.0+0x43/0xc0 - -Fixes: 1f221a0d0dbf ("swiotlb: respect min_align_mask") -Signed-off-by: GuoRui.Yu -Signed-off-by: Xiaokang Hu -Signed-off-by: Christoph Hellwig -Signed-off-by: Sasha Levin ---- - kernel/dma/swiotlb.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c -index ef3bc3a5bbed3..869dd6667c464 100644 ---- a/kernel/dma/swiotlb.c -+++ b/kernel/dma/swiotlb.c -@@ -633,8 +633,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - unsigned int iotlb_align_mask = - dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); - unsigned int nslots = nr_slots(alloc_size), stride; -- unsigned int index, wrap, count = 0, i; - unsigned int offset = swiotlb_align_offset(dev, orig_addr); -+ unsigned int index, slots_checked, count = 0, i; - unsigned long flags; - unsigned int slot_base; - unsigned int slot_index; -@@ -657,15 +657,16 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - goto not_found; - - slot_base = area_index * mem->area_nslabs; -- index = wrap = wrap_area_index(mem, ALIGN(area->index, stride)); -+ index = wrap_area_index(mem, ALIGN(area->index, stride)); - -- do { -+ for (slots_checked = 0; slots_checked < mem->area_nslabs; ) { - slot_index = slot_base + index; - - if (orig_addr && - (slot_addr(tbl_dma_addr, slot_index) & - iotlb_align_mask) != (orig_addr & iotlb_align_mask)) { - index = wrap_area_index(mem, index + 1); -+ slots_checked++; - continue; - } - -@@ -681,7 +682,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index, - goto found; - } - index = wrap_area_index(mem, index + stride); -- } while (index != wrap); -+ slots_checked += stride; -+ } - - not_found: - spin_unlock_irqrestore(&area->lock, flags); --- -2.39.2 -
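
For readers skimming the two dropped patches above, the core of the quoted deadlock fix is a bounded traversal: the slot search counts how many slots it has examined and stops after one full pass over the area, instead of waiting for the running index to land exactly on a precomputed wrap value (which it can miss when misaligned slots advance the index by 1 but free-slot checks advance it by the stride). The sketch below is a minimal stand-alone C illustration of that loop shape only, not the kernel code: `find_slot`, `AREA_NSLABS`, `slot_free`, and the simplified `align_ok_mask` test are invented stand-ins, the stride and mask computation from the alignment patch is omitted, and only `wrap_area_index` mirrors a helper named in the patches.

```c
/*
 * Minimal user-space sketch (not the kernel sources) of the bounded
 * circular scan used by the dropped deadlock fix: bound the loop by the
 * number of slots checked rather than by a wrap index.
 */
#include <stdbool.h>
#include <stdio.h>

#define AREA_NSLABS 16U	/* toy area size; stand-in for mem->area_nslabs */

/* Stand-in for wrap_area_index(): fold the index back into the area. */
static unsigned int wrap_area_index(unsigned int index)
{
	return index < AREA_NSLABS ? index : index - AREA_NSLABS;
}

/*
 * Scan at most one full pass of the area for a free, aligned slot.
 * align_ok_mask is a simplified stand-in for the kernel's comparison of
 * the slot address against orig_addr under iotlb_align_mask.
 */
static int find_slot(const bool *slot_free, unsigned int start,
		     unsigned int stride, unsigned int align_ok_mask)
{
	unsigned int index = wrap_area_index(start);
	unsigned int slots_checked;

	for (slots_checked = 0; slots_checked < AREA_NSLABS; ) {
		if (index & align_ok_mask) {
			/* Misaligned slot: step by one, but still count it. */
			index = wrap_area_index(index + 1);
			slots_checked++;
			continue;
		}
		if (slot_free[index])
			return (int)index;	/* usable slot found */
		index = wrap_area_index(index + stride);
		slots_checked += stride;
	}
	return -1;	/* bounded exit: no slot, no spinning under the lock */
}

int main(void)
{
	bool slot_free[AREA_NSLABS] = { false };

	/*
	 * Fully busy area: per the quoted commit message, the old
	 * wrap-based loop could fail to terminate in a case like this;
	 * the bounded scan returns -1 after one pass.
	 */
	printf("busy area   -> %d\n", find_slot(slot_free, 5, 4, 0x3));

	slot_free[8] = true;	/* free one aligned slot */
	printf("slot 8 free -> %d\n", find_slot(slot_free, 5, 4, 0x3));
	return 0;
}
```

The design point mirrored here is the one the commit message calls out: because `wrap_area_index` has no modulo, adjusting the wrap value is not trivial, so counting checked slots is the simple way to guarantee the spinlock-protected search terminates.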