5.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 Jun 2025 13:38:48 +0000 (15:38 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 Jun 2025 13:38:48 +0000 (15:38 +0200)
added patches:
xen-swiotlb-relax-alignment-requirements.patch

queue-5.4/series
queue-5.4/xen-swiotlb-relax-alignment-requirements.patch [new file with mode: 0644]

diff --git a/queue-5.4/series b/queue-5.4/series
index d1bf19f50707b21b515f99f8a54781c28bf5a833..302805b4cca62f0553129c74e7c9b0325f1b5d4d 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -201,3 +201,4 @@ um-let-make-clean-properly-clean-underlying-subarch-.patch
 spi-spi-sun4i-fix-early-activation.patch
 platform-x86-fujitsu-laptop-support-lifebook-s2110-h.patch
 platform-x86-thinkpad_acpi-ignore-battery-threshold-.patch
+xen-swiotlb-relax-alignment-requirements.patch
diff --git a/queue-5.4/xen-swiotlb-relax-alignment-requirements.patch b/queue-5.4/xen-swiotlb-relax-alignment-requirements.patch
new file mode 100644
index 0000000..5d61e57
--- /dev/null
+++ b/queue-5.4/xen-swiotlb-relax-alignment-requirements.patch
@@ -0,0 +1,78 @@
+From 85fcb57c983f423180ba6ec5d0034242da05cc54 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 10 Feb 2025 08:43:39 +0100
+Subject: xen/swiotlb: relax alignment requirements
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 85fcb57c983f423180ba6ec5d0034242da05cc54 upstream.
+
+When mapping a buffer for DMA via .map_page or .map_sg DMA operations,
+there is no need to check that the machine frames are aligned according
+to the mapped area's size. All that is needed in these cases is that the
+buffer is contiguous at machine level.
+
+So carve out the alignment check from range_straddles_page_boundary()
+and move it to a helper called by xen_swiotlb_alloc_coherent() and
+xen_swiotlb_free_coherent() directly.
+
+Fixes: 9f40ec84a797 ("xen/swiotlb: add alignment check for dma buffers")
+Reported-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
+Tested-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/swiotlb-xen.c |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -85,19 +85,21 @@ static inline dma_addr_t xen_virt_to_bus
+       return xen_phys_to_bus(virt_to_phys(address));
+ }
+
++static inline bool range_requires_alignment(phys_addr_t p, size_t size)
++{
++      phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
++      phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
++
++      return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
++}
++
+ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+       unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+       unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+-      phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+
+       next_bfn = pfn_to_bfn(xen_pfn);
+
+-      /* If buffer is physically aligned, ensure DMA alignment. */
+-      if (IS_ALIGNED(p, algn) &&
+-          !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+-              return 1;
+-
+       for (i = 1; i < nr_pages; i++)
+               if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+                       return 1;
+@@ -320,7 +322,8 @@ xen_swiotlb_alloc_coherent(struct device
+       phys = *dma_handle;
+       dev_addr = xen_phys_to_bus(phys);
+       if (((dev_addr + size - 1 <= dma_mask)) &&
+-          !range_straddles_page_boundary(phys, size))
++          !range_straddles_page_boundary(phys, size) &&
++          !range_requires_alignment(phys, size))
+               *dma_handle = dev_addr;
+       else {
+               if (xen_create_contiguous_region(phys, order,
+@@ -360,6 +363,7 @@ xen_swiotlb_free_coherent(struct device
+
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)) &&
++          !range_requires_alignment(phys, size) &&
+           TestClearPageXenRemapped(page))
+               xen_destroy_contiguous_region(phys, order);
+
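
For illustration only, not part of the queued patch or the stable-queue tree: a minimal standalone C sketch of the check the new range_requires_alignment() helper performs. get_order(), IS_ALIGNED(), XEN_PFN_DOWN() and pfn_to_bfn() are simplified stand-ins defined here just for the demo, and the machine-frame offset used by the stub pfn_to_bfn() is invented; only the final alignment test mirrors the patch.

/*
 * Illustrative sketch only -- not part of the queued patch. It mirrors the
 * logic of range_requires_alignment() in a standalone program; the helpers
 * below are simplified stand-ins for the kernel/Xen ones.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define XEN_PAGE_SHIFT  12

typedef uint64_t phys_addr_t;

/* Smallest order such that (1 << (order + PAGE_SHIFT)) covers size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while (((size_t)1 << (order + PAGE_SHIFT)) < size)
		order++;
	return order;
}

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define XEN_PFN_DOWN(p)   ((p) >> XEN_PAGE_SHIFT)

/* Stand-in for the Xen pseudo-physical to machine frame translation. */
static uint64_t pfn_to_bfn(uint64_t pfn)
{
	return pfn + 0x1001;	/* arbitrary offset to model a remapped guest */
}

/*
 * A buffer only needs re-mapping for coherent DMA when its physical address
 * is aligned to the allocation order but the resulting machine (bus)
 * address is not -- the condition the new helper reports.
 */
static bool range_requires_alignment(phys_addr_t p, size_t size)
{
	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;

	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
}

int main(void)
{
	/* 64 KiB buffer at a 64 KiB-aligned physical address: the machine
	 * address ends up misaligned, so re-mapping would be required. */
	printf("aligned phys:   %d\n",
	       range_requires_alignment(0x100000, 0x10000));

	/* Physically unaligned buffer: the check does not apply at all. */
	printf("unaligned phys: %d\n",
	       range_requires_alignment(0x101000, 0x10000));
	return 0;
}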