5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 Jun 2025 13:38:58 +0000 (15:38 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 Jun 2025 13:38:58 +0000 (15:38 +0200)
added patches:
perf-arm-cmn-initialise-cmn-cpu-earlier.patch
xen-swiotlb-relax-alignment-requirements.patch

queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/xen-swiotlb-relax-alignment-requirements.patch [new file with mode: 0644]

diff --git a/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch b/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch
new file mode 100644
index 0000000..baaca6e
--- /dev/null
+++ b/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch
@@ -0,0 +1,45 @@
+From 597704e201068db3d104de3c7a4d447ff8209127 Mon Sep 17 00:00:00 2001
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Mon, 12 May 2025 18:11:54 +0100
+Subject: perf/arm-cmn: Initialise cmn->cpu earlier
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+commit 597704e201068db3d104de3c7a4d447ff8209127 upstream.
+
+For all the complexity of handling affinity for CPU hotplug, what we've
+apparently managed to overlook is that arm_cmn_init_irqs() has in fact
+always been setting the *initial* affinity of all IRQs to CPU 0, not the
+CPU we subsequently choose for event scheduling. Oh dear.
+
+Cc: stable@vger.kernel.org
+Fixes: 0ba64770a2f2 ("perf: Add Arm CMN-600 PMU driver")
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>
+Link: https://lore.kernel.org/r/b12fccba6b5b4d2674944f59e4daad91cd63420b.1747069914.git.robin.murphy@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+[ backport past NUMA changes in 5.17 ]
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/perf/arm-cmn.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1512,6 +1512,7 @@ static int arm_cmn_probe(struct platform
+               return -ENOMEM;
+
+       cmn->dev = &pdev->dev;
++      cmn->cpu = raw_smp_processor_id();
+       platform_set_drvdata(pdev, cmn);
+
+       if (has_acpi_companion(cmn->dev))
+@@ -1533,7 +1534,6 @@ static int arm_cmn_probe(struct platform
+       if (err)
+               return err;
+
+-      cmn->cpu = raw_smp_processor_id();
+       cmn->pmu = (struct pmu) {
+               .module = THIS_MODULE,
+               .attr_groups = arm_cmn_attr_groups,
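The one-line move above matters because arm_cmn_init_irqs() applies the IRQ affinity from whatever cmn->cpu holds at the time it runs, and the zeroed allocation leaves that at 0 until the assignment happens. The following stand-alone user-space sketch only illustrates that ordering effect; the mock_* names are invented here and none of this is kernel code.

/* Stand-alone illustration only; mock_* names are invented, not kernel code. */
#include <stdio.h>

struct mock_cmn {
	int cpu;	/* CPU later used for event scheduling */
};

/* Stand-in for arm_cmn_init_irqs(): pins the IRQs to cmn->cpu as it is NOW. */
static void mock_init_irqs(struct mock_cmn *cmn)
{
	printf("IRQ affinity set to CPU %d\n", cmn->cpu);
}

static void probe_before_fix(int probing_cpu)
{
	struct mock_cmn cmn = { 0 };	/* devm_kzalloc()-style zeroed state */

	mock_init_irqs(&cmn);		/* affinity applied while cmn.cpu is still 0 */
	cmn.cpu = probing_cpu;		/* too late for the IRQs */
}

static void probe_after_fix(int probing_cpu)
{
	struct mock_cmn cmn = { 0 };

	cmn.cpu = probing_cpu;		/* set first, as the patch does */
	mock_init_irqs(&cmn);		/* affinity now matches the scheduling CPU */
}

int main(void)
{
	probe_before_fix(3);	/* prints CPU 0 */
	probe_after_fix(3);	/* prints CPU 3 */
	return 0;
}

Compiled as plain C, the first call prints CPU 0 and the second prints CPU 3, which is the whole difference the reordering makes.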
diff --git a/queue-5.10/series b/queue-5.10/series
index 5a2917ea7c0cb062eddb8a3d0f225b1336b87710..ebfea2ae406033b24f56e1f5ab304fe7ac81e182 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -266,3 +266,5 @@ spi-spi-sun4i-fix-early-activation.patch
 tpm-tis-double-the-timeout-b-to-4s.patch
 platform-x86-fujitsu-laptop-support-lifebook-s2110-h.patch
 platform-x86-thinkpad_acpi-ignore-battery-threshold-.patch
+xen-swiotlb-relax-alignment-requirements.patch
+perf-arm-cmn-initialise-cmn-cpu-earlier.patch
diff --git a/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch b/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch
new file mode 100644
index 0000000..4e74790
--- /dev/null
+++ b/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch
@@ -0,0 +1,78 @@
+From 85fcb57c983f423180ba6ec5d0034242da05cc54 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 10 Feb 2025 08:43:39 +0100
+Subject: xen/swiotlb: relax alignment requirements
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 85fcb57c983f423180ba6ec5d0034242da05cc54 upstream.
+
+When mapping a buffer for DMA via the .map_page or .map_sg DMA operations,
+there is no need to check that the machine frames are aligned according
+to the mapped area's size. All that is needed in these cases is that the
+buffer is contiguous at the machine level.
+
+So carve out the alignment check from range_straddles_page_boundary()
+and move it to a helper called by xen_swiotlb_alloc_coherent() and
+xen_swiotlb_free_coherent() directly.
+
+Fixes: 9f40ec84a797 ("xen/swiotlb: add alignment check for dma buffers")
+Reported-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
+Tested-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/swiotlb-xen.c |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -87,19 +87,21 @@ static inline dma_addr_t xen_virt_to_bus
+       return xen_phys_to_dma(dev, virt_to_phys(address));
+ }
+
++static inline bool range_requires_alignment(phys_addr_t p, size_t size)
++{
++      phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
++      phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
++
++      return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
++}
++
+ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+       unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+       unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+-      phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+
+       next_bfn = pfn_to_bfn(xen_pfn);
+
+-      /* If buffer is physically aligned, ensure DMA alignment. */
+-      if (IS_ALIGNED(p, algn) &&
+-          !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+-              return 1;
+-
+       for (i = 1; i < nr_pages; i++)
+               if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+                       return 1;
+@@ -321,7 +323,8 @@ xen_swiotlb_alloc_coherent(struct device
+       phys = dma_to_phys(hwdev, *dma_handle);
+       dev_addr = xen_phys_to_dma(hwdev, phys);
+       if (((dev_addr + size - 1 <= dma_mask)) &&
+-          !range_straddles_page_boundary(phys, size))
++          !range_straddles_page_boundary(phys, size) &&
++          !range_requires_alignment(phys, size))
+               *dma_handle = dev_addr;
+       else {
+               if (xen_create_contiguous_region(phys, order,
+@@ -362,6 +365,7 @@ xen_swiotlb_free_coherent(struct device
+
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)) &&
++          !range_requires_alignment(phys, size) &&
+           TestClearPageXenRemapped(page))
+               xen_destroy_contiguous_region(phys, order);
+
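For readers of the backport above: range_requires_alignment() reports the one case the removed inline check in range_straddles_page_boundary() used to catch, a buffer whose physical address is aligned to its allocation order while its machine (bus) address is not, and after this patch that test is applied only on the coherent alloc/free paths. Below is a stand-alone user-space sketch of the same shape; the *_SIM macros, fake_pfn_to_bfn() and the sample values are invented for illustration, the real helper lives in drivers/xen/swiotlb-xen.c.

/* Stand-alone illustration only; simulated names, not the kernel implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_SIM 12				/* assume 4 KiB pages */
#define IS_ALIGNED_SIM(x, a) (((x) & ((a) - 1)) == 0)

/* Pretend guest-frame to machine-frame lookup: a constant offset is enough
 * to break order-sized alignment while keeping the range contiguous. */
static uint64_t fake_pfn_to_bfn(uint64_t pfn)
{
	return pfn + 5;
}

/* Rough equivalent of get_order(): smallest order whose page block covers size. */
static unsigned int order_of(size_t size)
{
	unsigned int order = 0;

	while (((size_t)1 << (order + PAGE_SHIFT_SIM)) < size)
		order++;
	return order;
}

/* Same shape as range_requires_alignment(): the buffer is aligned to its
 * order in physical space, but its machine (bus) address is not. */
static bool requires_alignment(uint64_t phys, size_t size)
{
	uint64_t algn = (uint64_t)1 << (order_of(size) + PAGE_SHIFT_SIM);
	uint64_t bus = fake_pfn_to_bfn(phys >> PAGE_SHIFT_SIM) << PAGE_SHIFT_SIM;

	return IS_ALIGNED_SIM(phys, algn) && !IS_ALIGNED_SIM(bus, algn);
}

int main(void)
{
	/* 64 KiB buffer, physically 64 KiB-aligned, machine address offset. */
	printf("needs realignment: %d\n", requires_alignment(0x100000, 0x10000));
	return 0;
}

Being contiguous but misaligned at machine level is harmless for the streaming map_page/map_sg paths, which is why the check now runs only where xen_swiotlb_alloc_coherent() and xen_swiotlb_free_coherent() decide whether to create or destroy a contiguous region.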