From: Greg Kroah-Hartman
Date: Mon, 2 Jun 2025 13:38:58 +0000 (+0200)
Subject: 5.10-stable patches
X-Git-Tag: v5.4.294~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=66444e76978a2301bd8e434f049eb3dc78d9a05a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
        perf-arm-cmn-initialise-cmn-cpu-earlier.patch
        xen-swiotlb-relax-alignment-requirements.patch
---

diff --git a/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch b/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch
new file mode 100644
index 0000000000..baaca6ee49
--- /dev/null
+++ b/queue-5.10/perf-arm-cmn-initialise-cmn-cpu-earlier.patch
@@ -0,0 +1,45 @@
+From 597704e201068db3d104de3c7a4d447ff8209127 Mon Sep 17 00:00:00 2001
+From: Robin Murphy
+Date: Mon, 12 May 2025 18:11:54 +0100
+Subject: perf/arm-cmn: Initialise cmn->cpu earlier
+
+From: Robin Murphy
+
+commit 597704e201068db3d104de3c7a4d447ff8209127 upstream.
+
+For all the complexity of handling affinity for CPU hotplug, what we've
+apparently managed to overlook is that arm_cmn_init_irqs() has in fact
+always been setting the *initial* affinity of all IRQs to CPU 0, not the
+CPU we subsequently choose for event scheduling. Oh dear.
+
+Cc: stable@vger.kernel.org
+Fixes: 0ba64770a2f2 ("perf: Add Arm CMN-600 PMU driver")
+Signed-off-by: Robin Murphy
+Reviewed-by: Ilkka Koskinen
+Link: https://lore.kernel.org/r/b12fccba6b5b4d2674944f59e4daad91cd63420b.1747069914.git.robin.murphy@arm.com
+Signed-off-by: Will Deacon
+[ backport past NUMA changes in 5.17 ]
+Signed-off-by: Robin Murphy
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/perf/arm-cmn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1512,6 +1512,7 @@ static int arm_cmn_probe(struct platform
+ 		return -ENOMEM;
+ 
+ 	cmn->dev = &pdev->dev;
++	cmn->cpu = raw_smp_processor_id();
+ 	platform_set_drvdata(pdev, cmn);
+ 
+ 	if (has_acpi_companion(cmn->dev))
+@@ -1533,7 +1534,6 @@ static int arm_cmn_probe(struct platform
+ 	if (err)
+ 		return err;
+ 
+-	cmn->cpu = raw_smp_processor_id();
+ 	cmn->pmu = (struct pmu) {
+ 		.module = THIS_MODULE,
+ 		.attr_groups = arm_cmn_attr_groups,
diff --git a/queue-5.10/series b/queue-5.10/series
index 5a2917ea7c..ebfea2ae40 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -266,3 +266,5 @@ spi-spi-sun4i-fix-early-activation.patch
 tpm-tis-double-the-timeout-b-to-4s.patch
 platform-x86-fujitsu-laptop-support-lifebook-s2110-h.patch
 platform-x86-thinkpad_acpi-ignore-battery-threshold-.patch
+xen-swiotlb-relax-alignment-requirements.patch
+perf-arm-cmn-initialise-cmn-cpu-earlier.patch
diff --git a/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch b/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch
new file mode 100644
index 0000000000..4e747901c3
--- /dev/null
+++ b/queue-5.10/xen-swiotlb-relax-alignment-requirements.patch
@@ -0,0 +1,78 @@
+From 85fcb57c983f423180ba6ec5d0034242da05cc54 Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Mon, 10 Feb 2025 08:43:39 +0100
+Subject: xen/swiotlb: relax alignment requirements
+
+From: Juergen Gross
+
+commit 85fcb57c983f423180ba6ec5d0034242da05cc54 upstream.
+
+When mapping a buffer for DMA via .map_page or .map_sg DMA operations,
+there is no need to check that the machine frames are aligned according
+to the mapped area's size. All that is needed in these cases is that the
+buffer is contiguous at machine level.
+
+So carve out the alignment check from range_straddles_page_boundary()
+and move it to a helper called by xen_swiotlb_alloc_coherent() and
+xen_swiotlb_free_coherent() directly.
+
+Fixes: 9f40ec84a797 ("xen/swiotlb: add alignment check for dma buffers")
+Reported-by: Jan Vejvalka
+Tested-by: Jan Vejvalka
+Signed-off-by: Juergen Gross
+Reviewed-by: Stefano Stabellini
+Signed-off-by: Juergen Gross
+Signed-off-by: Harshvardhan Jha
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/xen/swiotlb-xen.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -87,19 +87,21 @@ static inline dma_addr_t xen_virt_to_bus
+ 	return xen_phys_to_dma(dev, virt_to_phys(address));
+ }
+ 
++static inline bool range_requires_alignment(phys_addr_t p, size_t size)
++{
++	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
++	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
++
++	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
++}
++
+ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+ 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ 
+ 	next_bfn = pfn_to_bfn(xen_pfn);
+ 
+-	/* If buffer is physically aligned, ensure DMA alignment. */
+-	if (IS_ALIGNED(p, algn) &&
+-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+-		return 1;
+-
+ 	for (i = 1; i < nr_pages; i++)
+ 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+ 			return 1;
+@@ -321,7 +323,8 @@ xen_swiotlb_alloc_coherent(struct device
+ 	phys = dma_to_phys(hwdev, *dma_handle);
+ 	dev_addr = xen_phys_to_dma(hwdev, phys);
+ 	if (((dev_addr + size - 1 <= dma_mask)) &&
+-	    !range_straddles_page_boundary(phys, size))
++	    !range_straddles_page_boundary(phys, size) &&
++	    !range_requires_alignment(phys, size))
+ 		*dma_handle = dev_addr;
+ 	else {
+ 		if (xen_create_contiguous_region(phys, order,
+@@ -362,6 +365,7 @@ xen_swiotlb_free_coherent(struct device
+ 
+ 	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ 		     range_straddles_page_boundary(phys, size)) &&
++	    !range_requires_alignment(phys, size) &&
+ 	    TestClearPageXenRemapped(page))
+ 		xen_destroy_contiguous_region(phys, order);
+
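
---

A note on the arm-cmn change, since the quoted hunks only show the two
lines that move: the bug is purely one of ordering. cmn comes from a
zeroing allocation in arm_cmn_probe(), so when arm_cmn_init_irqs() ran
before cmn->cpu was assigned, the initial IRQ affinity it programmed
always followed the stale value 0. The sketch below is a standalone C
model of that ordering hazard, not the driver code; every toy_* name is
invented for illustration, and the position of the init call relative
to the assignment is taken from the commit message rather than from the
quoted context.

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_cmn {
		int cpu;		/* CPU chosen for event scheduling */
	};

	static int toy_irq_affinity;	/* stands in for the IRQ core */

	/* Like arm_cmn_init_irqs(): the affinity target is captured at
	 * setup time, so it follows whatever cmn->cpu holds right now. */
	static void toy_init_irqs(struct toy_cmn *cmn)
	{
		toy_irq_affinity = cmn->cpu;
	}

	int main(void)
	{
		struct toy_cmn *cmn = calloc(1, sizeof(*cmn)); /* cpu == 0 */
		int this_cpu = 3;	/* pretend probe runs on CPU 3 */

		/* Buggy order: IRQs wired up while cmn->cpu is still 0. */
		toy_init_irqs(cmn);
		cmn->cpu = this_cpu;
		printf("buggy: affinity=%d chosen=%d\n",
		       toy_irq_affinity, cmn->cpu);

		/* Fixed order: choose the CPU first, as the patch does. */
		cmn->cpu = this_cpu;
		toy_init_irqs(cmn);
		printf("fixed: affinity=%d chosen=%d\n",
		       toy_irq_affinity, cmn->cpu);

		free(cmn);
		return 0;
	}

Built with any C compiler, the first line prints affinity=0 against
chosen=3, which is exactly the mismatch the patch removes by hoisting
the cmn->cpu assignment above the IRQ setup.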
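
A note on the xen/swiotlb change: range_requires_alignment() returns
true only for the one problematic combination, a buffer whose CPU
physical address is naturally aligned for its allocation order while
the translated machine (bus) address is not, which is precisely when
xen_create_contiguous_region() has to rework the backing memory. The
standalone C model below illustrates the predicate with IS_ALIGNED()
expanded by hand; the 4K page size and the 3-page pfn-to-bfn skew are
assumptions made up for the example, and all toy_* names are invented.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TOY_PAGE_SHIFT 12	/* assume 4K pages */

	/* Arbitrary stand-in for Xen's pfn_to_bfn() translation. */
	static uint64_t toy_pfn_to_bfn(uint64_t pfn)
	{
		return pfn + 3;		/* skew by three pages */
	}

	/* Like get_order(): order n such that (1 << n) pages cover size. */
	static unsigned int toy_get_order(size_t size)
	{
		unsigned int order = 0;

		size = (size - 1) >> TOY_PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	/* Mirrors range_requires_alignment(): phys aligned, bus not. */
	static bool toy_range_requires_alignment(uint64_t p, size_t size)
	{
		uint64_t algn = 1ULL << (toy_get_order(size) + TOY_PAGE_SHIFT);
		uint64_t bus = toy_pfn_to_bfn(p >> TOY_PAGE_SHIFT)
			       << TOY_PAGE_SHIFT;

		return !(p & (algn - 1)) && (bus & (algn - 1));
	}

	int main(void)
	{
		/* 8K buffer at an 8K-aligned physical address: the odd
		 * 3-page skew leaves the machine address only 4K-aligned,
		 * so remapping is required and this prints 1. */
		printf("%d\n", toy_range_requires_alignment(0x10000, 0x2000));

		/* A physically unaligned buffer never trips the check
		 * (prints 0); only the boundary check matters for it. */
		printf("%d\n", toy_range_requires_alignment(0x11000, 0x2000));
		return 0;
	}

The point of carving this predicate out of range_straddles_page_boundary()
is that the map_page/map_sg paths keep calling only the boundary check,
while the coherent-allocation paths call both, as the last two hunks of
the quoted patch show.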