git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for all trees
author Sasha Levin <sashal@kernel.org>
Wed, 10 Sep 2025 11:07:10 +0000 (07:07 -0400)
committer Sasha Levin <sashal@kernel.org>
Wed, 10 Sep 2025 11:07:10 +0000 (07:07 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
26 files changed:
queue-5.15/revert-fbdev-disable-sysfb-device-registration-when-.patch [new file with mode: 0644]
queue-5.15/series [new file with mode: 0644]
queue-5.15/xfs-short-circuit-xfs_growfs_data_private-if-delta-i.patch [new file with mode: 0644]
queue-6.12/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch [new file with mode: 0644]
queue-6.12/dma-debug-store-a-phys_addr_t-in-struct-dma_debug_en.patch [new file with mode: 0644]
queue-6.12/dma-mapping-trace-dma_alloc-free-direction.patch [new file with mode: 0644]
queue-6.12/dma-mapping-trace-more-error-paths.patch [new file with mode: 0644]
queue-6.12/dma-mapping-use-trace_dma_alloc-for-dma_alloc-instea.patch [new file with mode: 0644]
queue-6.12/fhandle-use-more-consistent-rules-for-decoding-file-.patch [new file with mode: 0644]
queue-6.12/series [new file with mode: 0644]
queue-6.16/block-don-t-silently-ignore-metadata-for-sync-read-w.patch [new file with mode: 0644]
queue-6.16/bluetooth-hci_conn-fix-not-cleaning-up-broadcaster-b.patch [new file with mode: 0644]
queue-6.16/bluetooth-hci_conn-fix-running-bis_cleanup-for-hci_c.patch [new file with mode: 0644]
queue-6.16/bluetooth-iso-fix-getname-not-returning-broadcast-fi.patch [new file with mode: 0644]
queue-6.16/coredump-don-t-pointlessly-check-and-spew-warnings.patch [new file with mode: 0644]
queue-6.16/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch [new file with mode: 0644]
queue-6.16/fhandle-use-more-consistent-rules-for-decoding-file-.patch [new file with mode: 0644]
queue-6.16/fs-add-a-fmode_-flag-to-indicate-iocb_has_metadata-a.patch [new file with mode: 0644]
queue-6.16/fuse-block-access-to-folio-overlimit.patch [new file with mode: 0644]
queue-6.16/iommu-vt-d-create-unique-domain-ops-for-each-stage.patch [new file with mode: 0644]
queue-6.16/iommu-vt-d-make-iotlb_sync_map-a-static-property-of-.patch [new file with mode: 0644]
queue-6.16/iommu-vt-d-split-intel_iommu_domain_alloc_paging_fla.patch [new file with mode: 0644]
queue-6.16/iommu-vt-d-split-paging_domain_compatible.patch [new file with mode: 0644]
queue-6.16/irqchip-mvebu-gicp-fix-an-is_err-vs-null-check-in-pr.patch [new file with mode: 0644]
queue-6.16/perf-fix-the-poll_hup-delivery-breakage.patch [new file with mode: 0644]
queue-6.16/series [new file with mode: 0644]

diff --git a/queue-5.15/revert-fbdev-disable-sysfb-device-registration-when-.patch b/queue-5.15/revert-fbdev-disable-sysfb-device-registration-when-.patch
new file mode 100644 (file)
index 0000000..1b35e89
--- /dev/null
@@ -0,0 +1,57 @@
+From 060a2f99afabef85bef383d75595ce64ed53f9a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 09:38:03 +0000
+Subject: Revert "fbdev: Disable sysfb device registration when removing
+ conflicting FBs"
+
+From: Brett A C Sheffield <bacs@librecast.net>
+
+This reverts commit 13d28e0c79cbf69fc6f145767af66905586c1249.
+
+Commit ee7a69aa38d8 ("fbdev: Disable sysfb device registration when
+removing conflicting FBs") was backported to 5.15.y LTS. This causes a
+regression where all virtual consoles stop responding during boot at:
+
+"Populating /dev with existing devices through uevents ..."
+
+Reverting the commit fixes the regression.
+
+Signed-off-by: Brett A C Sheffield <bacs@librecast.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbmem.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index d938c31e8f90a..3b52ddfe03506 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/major.h>
+ #include <linux/slab.h>
+-#include <linux/sysfb.h>
+ #include <linux/mm.h>
+ #include <linux/mman.h>
+ #include <linux/vt.h>
+@@ -1795,17 +1794,6 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
+               do_free = true;
+       }
+-      /*
+-       * If a driver asked to unregister a platform device registered by
+-       * sysfb, then can be assumed that this is a driver for a display
+-       * that is set up by the system firmware and has a generic driver.
+-       *
+-       * Drivers for devices that don't have a generic driver will never
+-       * ask for this, so let's assume that a real driver for the display
+-       * was already probed and prevent sysfb to register devices later.
+-       */
+-      sysfb_disable();
+-
+       mutex_lock(&registration_lock);
+       do_remove_conflicting_framebuffers(a, name, primary);
+       mutex_unlock(&registration_lock);
+-- 
+2.51.0
+
diff --git a/queue-5.15/series b/queue-5.15/series
new file mode 100644 (file)
index 0000000..1bb7e41
--- /dev/null
@@ -0,0 +1,2 @@
+revert-fbdev-disable-sysfb-device-registration-when-.patch
+xfs-short-circuit-xfs_growfs_data_private-if-delta-i.patch
diff --git a/queue-5.15/xfs-short-circuit-xfs_growfs_data_private-if-delta-i.patch b/queue-5.15/xfs-short-circuit-xfs_growfs_data_private-if-delta-i.patch
new file mode 100644 (file)
index 0000000..2b686b1
--- /dev/null
@@ -0,0 +1,52 @@
+From 6f5a0613290ac3fc5cb88af1d86df917651262f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 12:16:21 +0200
+Subject: xfs: short circuit xfs_growfs_data_private() if delta is zero
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+[ Upstream commit 84712492e6dab803bf595fb8494d11098b74a652 ]
+
+Although xfs_growfs_data() doesn't call xfs_growfs_data_private()
+if in->newblocks == mp->m_sb.sb_dblocks, xfs_growfs_data_private()
+further massages the new block count so that we don't, for example,
+try to create a too-small new AG.
+
+This may lead to a delta of "0" in xfs_growfs_data_private(), so
+we end up in the shrink case and emit the EXPERIMENTAL warning
+even if we're not changing anything at all.
+
+Fix this by returning straightaway if the block delta is zero.
+
+(nb: in older kernels, the result of entering the shrink case
+with delta == 0 may actually let an -ENOSPC escape to userspace,
+which is confusing for users.)
+
+Fixes: fb2fc1720185 ("xfs: support shrinking unused space in the last AG")
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
+Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_fsops.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index 5b5b68affe66d..2d7467be2a48c 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -126,6 +126,10 @@ xfs_growfs_data_private(
+       if (delta < 0 && nagcount < 2)
+               return -EINVAL;
++      /* No work to do */
++      if (delta == 0)
++              return 0;
++
+       oagcount = mp->m_sb.sb_agcount;
+       /* allocate the new per-ag structures */
+-- 
+2.51.0
+
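As an illustrative sketch of the user-visible effect (not part of the patch): requesting a grow small enough that xfs_growfs_data_private() trims it back to the current size now returns 0 early instead of falling into the shrink path. A minimal userspace fragment, assuming the xfsprogs <xfs/xfs.h> header and a hypothetical mount point:

        #include <xfs/xfs.h>    /* XFS_IOC_FSGROWFSDATA, struct xfs_growfs_data */
        #include <linux/types.h>
        #include <sys/ioctl.h>
        #include <fcntl.h>
        #include <unistd.h>

        int grow_by_a_little(const char *mntpt, __u64 cur_blocks, __u32 cur_imaxpct)
        {
                struct xfs_growfs_data in = {
                        /* A delta too small for a new AG gets trimmed back
                         * to the current size, i.e. delta == 0 in-kernel. */
                        .newblocks = cur_blocks + 1,
                        .imaxpct   = cur_imaxpct,  /* keep the current value */
                };
                int fd = open(mntpt, O_RDONLY);
                int ret;

                if (fd < 0)
                        return -1;
                /* With this fix: returns 0, with no EXPERIMENTAL shrink
                 * warning and no spurious -ENOSPC as on older kernels. */
                ret = ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
                close(fd);
                return ret;
        }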
diff --git a/queue-6.12/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch b/queue-6.12/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch
new file mode 100644 (file)
index 0000000..7837937
--- /dev/null
@@ -0,0 +1,170 @@
+From 3dbce430c44674aea3079e6b56c85d55e01299c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 16:17:33 +0800
+Subject: dma-debug: don't enforce dma mapping check on noncoherent allocations
+
+From: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+
+[ Upstream commit 7e2368a21741e2db542330b32aa6fdd8908e7cff ]
+
+As discussed in [1], there is no need to enforce the dma mapping check
+on noncoherent allocations; a simple test on the returned CPU address
+is good enough.
+
+Add a new pair of debug helpers and use them for noncoherent alloc/free
+to fix this issue.
+
+Fixes: efa70f2fdc84 ("dma-mapping: add a new dma_alloc_pages API")
+Link: https://lore.kernel.org/all/ff6c1fe6-820f-4e58-8395-df06aa91706c@oss.qualcomm.com # 1
+Signed-off-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Link: https://lore.kernel.org/r/20250828-dma-debug-fix-noncoherent-dma-check-v1-1-76e9be0dd7fc@oss.qualcomm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/debug.c   | 48 +++++++++++++++++++++++++++++++++++++++++++-
+ kernel/dma/debug.h   | 20 ++++++++++++++++++
+ kernel/dma/mapping.c |  4 ++--
+ 3 files changed, 69 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 4e3692afdf0d2..0221023e1120d 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -39,6 +39,7 @@ enum {
+       dma_debug_sg,
+       dma_debug_coherent,
+       dma_debug_resource,
++      dma_debug_noncoherent,
+ };
+ enum map_err_types {
+@@ -141,6 +142,7 @@ static const char *type2name[] = {
+       [dma_debug_sg] = "scatter-gather",
+       [dma_debug_coherent] = "coherent",
+       [dma_debug_resource] = "resource",
++      [dma_debug_noncoherent] = "noncoherent",
+ };
+ static const char *dir2name[] = {
+@@ -993,7 +995,8 @@ static void check_unmap(struct dma_debug_entry *ref)
+                          "[mapped as %s] [unmapped as %s]\n",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type], type2name[ref->type]);
+-      } else if (entry->type == dma_debug_coherent &&
++      } else if ((entry->type == dma_debug_coherent ||
++                  entry->type == dma_debug_noncoherent) &&
+                  ref->paddr != entry->paddr) {
+               err_printk(ref->dev, entry, "device driver frees "
+                          "DMA memory with different CPU address "
+@@ -1573,6 +1576,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+       }
+ }
++void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                         size_t size, int direction,
++                         dma_addr_t dma_addr,
++                         unsigned long attrs)
++{
++      struct dma_debug_entry *entry;
++
++      if (unlikely(dma_debug_disabled()))
++              return;
++
++      entry = dma_entry_alloc();
++      if (!entry)
++              return;
++
++      entry->type      = dma_debug_noncoherent;
++      entry->dev       = dev;
++      entry->paddr     = page_to_phys(page);
++      entry->size      = size;
++      entry->dev_addr  = dma_addr;
++      entry->direction = direction;
++
++      add_dma_entry(entry, attrs);
++}
++
++void debug_dma_free_pages(struct device *dev, struct page *page,
++                        size_t size, int direction,
++                        dma_addr_t dma_addr)
++{
++      struct dma_debug_entry ref = {
++              .type           = dma_debug_noncoherent,
++              .dev            = dev,
++              .paddr          = page_to_phys(page),
++              .dev_addr       = dma_addr,
++              .size           = size,
++              .direction      = direction,
++      };
++
++      if (unlikely(dma_debug_disabled()))
++              return;
++
++      check_unmap(&ref);
++}
++
+ static int __init dma_debug_driver_setup(char *str)
+ {
+       int i;
+diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
+index f525197d3cae6..48757ca13f314 100644
+--- a/kernel/dma/debug.h
++++ b/kernel/dma/debug.h
+@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+ extern void debug_dma_sync_sg_for_device(struct device *dev,
+                                        struct scatterlist *sg,
+                                        int nelems, int direction);
++extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                                size_t size, int direction,
++                                dma_addr_t dma_addr,
++                                unsigned long attrs);
++extern void debug_dma_free_pages(struct device *dev, struct page *page,
++                               size_t size, int direction,
++                               dma_addr_t dma_addr);
+ #else /* CONFIG_DMA_API_DEBUG */
+ static inline void debug_dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
+                                               int nelems, int direction)
+ {
+ }
++
++static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                                       size_t size, int direction,
++                                       dma_addr_t dma_addr,
++                                       unsigned long attrs)
++{
++}
++
++static inline void debug_dma_free_pages(struct device *dev, struct page *page,
++                                      size_t size, int direction,
++                                      dma_addr_t dma_addr)
++{
++}
+ #endif /* CONFIG_DMA_API_DEBUG */
+ #endif /* _KERNEL_DMA_DEBUG_H */
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 32dcf8492bbcd..c12c62ad8a6bf 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -694,7 +694,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
+       if (page) {
+               trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
+                                     size, dir, gfp, 0);
+-              debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
++              debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
+       } else {
+               trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
+       }
+@@ -720,7 +720,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
+               dma_addr_t dma_handle, enum dma_data_direction dir)
+ {
+       trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
+-      debug_dma_unmap_page(dev, dma_handle, size, dir);
++      debug_dma_free_pages(dev, page, size, dir, dma_handle);
+       __dma_free_pages(dev, size, page, dma_handle, dir);
+ }
+ EXPORT_SYMBOL_GPL(dma_free_pages);
+-- 
+2.51.0
+
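For context, a hedged driver-side sketch (hypothetical function, not from the patch) of the API pair that the new debug entries track; the failure test is simply the returned pointer, which is exactly why enforcing a dma_mapping_error() check here was wrong:

        #include <linux/dma-mapping.h>

        static int demo_noncoherent(struct device *dev)
        {
                dma_addr_t dma;
                struct page *page;

                /* Registered as a "noncoherent" dma-debug entry via
                 * debug_dma_alloc_pages(). */
                page = dma_alloc_pages(dev, PAGE_SIZE, &dma,
                                       DMA_BIDIRECTIONAL, GFP_KERNEL);
                if (!page)                      /* CPU-address test suffices */
                        return -ENOMEM;

                /* Checked against the recorded physical address by
                 * debug_dma_free_pages(). */
                dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
                return 0;
        }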
diff --git a/queue-6.12/dma-debug-store-a-phys_addr_t-in-struct-dma_debug_en.patch b/queue-6.12/dma-debug-store-a-phys_addr_t-in-struct-dma_debug_en.patch
new file mode 100644 (file)
index 0000000..8e48ac6
--- /dev/null
@@ -0,0 +1,231 @@
+From 7fbf197949e4595012fcfeecf9b5a865575ecfde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Oct 2024 09:20:01 +0200
+Subject: dma-debug: store a phys_addr_t in struct dma_debug_entry
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 9d4f645a1fd49eea70a21e8671d358ebe1c08d02 ]
+
+dma-debug goes to great lengths to split incoming physical addresses into
+a PFN and offset to store them in struct dma_debug_entry, just to
+recombine those for all meaningful uses.  Just store a phys_addr_t
+instead.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: 7e2368a21741 ("dma-debug: don't enforce dma mapping check on noncoherent allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/debug.c | 79 ++++++++++++++++------------------------------
+ 1 file changed, 28 insertions(+), 51 deletions(-)
+
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index f6f0387761d05..4e3692afdf0d2 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -59,8 +59,7 @@ enum map_err_types {
+  * @direction: enum dma_data_direction
+  * @sg_call_ents: 'nents' from dma_map_sg
+  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+- * @pfn: page frame of the start address
+- * @offset: offset of mapping relative to pfn
++ * @paddr: physical start address of the mapping
+  * @map_err_type: track whether dma_mapping_error() was checked
+  * @stack_len: number of backtrace entries in @stack_entries
+  * @stack_entries: stack of backtrace history
+@@ -74,8 +73,7 @@ struct dma_debug_entry {
+       int              direction;
+       int              sg_call_ents;
+       int              sg_mapped_ents;
+-      unsigned long    pfn;
+-      size_t           offset;
++      phys_addr_t      paddr;
+       enum map_err_types  map_err_type;
+ #ifdef CONFIG_STACKTRACE
+       unsigned int    stack_len;
+@@ -389,14 +387,6 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
+       list_del(&entry->list);
+ }
+-static unsigned long long phys_addr(struct dma_debug_entry *entry)
+-{
+-      if (entry->type == dma_debug_resource)
+-              return __pfn_to_phys(entry->pfn) + entry->offset;
+-
+-      return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+-}
+-
+ /*
+  * For each mapping (initial cacheline in the case of
+  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+@@ -428,8 +418,8 @@ static DEFINE_SPINLOCK(radix_lock);
+ static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+ {
+-      return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+-              (entry->offset >> L1_CACHE_SHIFT);
++      return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
++              (offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
+ }
+ static int active_cacheline_read_overlap(phys_addr_t cln)
+@@ -538,11 +528,11 @@ void debug_dma_dump_mappings(struct device *dev)
+                       if (!dev || dev == entry->dev) {
+                               cln = to_cacheline_number(entry);
+                               dev_info(entry->dev,
+-                                       "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
++                                       "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+                                        type2name[entry->type], idx,
+-                                       phys_addr(entry), entry->pfn,
+-                                       entry->dev_addr, entry->size,
+-                                       &cln, dir2name[entry->direction],
++                                       &entry->paddr, entry->dev_addr,
++                                       entry->size, &cln,
++                                       dir2name[entry->direction],
+                                        maperr2str[entry->map_err_type]);
+                       }
+               }
+@@ -569,13 +559,13 @@ static int dump_show(struct seq_file *seq, void *v)
+               list_for_each_entry(entry, &bucket->list, list) {
+                       cln = to_cacheline_number(entry);
+                       seq_printf(seq,
+-                                 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
++                                 "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+                                  dev_driver_string(entry->dev),
+                                  dev_name(entry->dev),
+                                  type2name[entry->type], idx,
+-                                 phys_addr(entry), entry->pfn,
+-                                 entry->dev_addr, entry->size,
+-                                 &cln, dir2name[entry->direction],
++                                 &entry->paddr, entry->dev_addr,
++                                 entry->size, &cln,
++                                 dir2name[entry->direction],
+                                  maperr2str[entry->map_err_type]);
+               }
+               spin_unlock_irqrestore(&bucket->lock, flags);
+@@ -1003,16 +993,16 @@ static void check_unmap(struct dma_debug_entry *ref)
+                          "[mapped as %s] [unmapped as %s]\n",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type], type2name[ref->type]);
+-      } else if ((entry->type == dma_debug_coherent) &&
+-                 (phys_addr(ref) != phys_addr(entry))) {
++      } else if (entry->type == dma_debug_coherent &&
++                 ref->paddr != entry->paddr) {
+               err_printk(ref->dev, entry, "device driver frees "
+                          "DMA memory with different CPU address "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+-                         "[cpu alloc address=0x%016llx] "
+-                         "[cpu free address=0x%016llx]",
++                         "[cpu alloc address=0x%pa] "
++                         "[cpu free address=0x%pa]",
+                          ref->dev_addr, ref->size,
+-                         phys_addr(entry),
+-                         phys_addr(ref));
++                         &entry->paddr,
++                         &ref->paddr);
+       }
+       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+@@ -1231,8 +1221,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
+       entry->dev       = dev;
+       entry->type      = dma_debug_single;
+-      entry->pfn       = page_to_pfn(page);
+-      entry->offset    = offset;
++      entry->paddr     = page_to_phys(page);
+       entry->dev_addr  = dma_addr;
+       entry->size      = size;
+       entry->direction = direction;
+@@ -1327,8 +1316,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+               entry->type           = dma_debug_sg;
+               entry->dev            = dev;
+-              entry->pfn            = page_to_pfn(sg_page(s));
+-              entry->offset         = s->offset;
++              entry->paddr          = sg_phys(s);
+               entry->size           = sg_dma_len(s);
+               entry->dev_addr       = sg_dma_address(s);
+               entry->direction      = direction;
+@@ -1374,8 +1362,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+-                      .pfn            = page_to_pfn(sg_page(s)),
+-                      .offset         = s->offset,
++                      .paddr          = sg_phys(s),
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = dir,
+@@ -1414,16 +1401,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
+       entry->type      = dma_debug_coherent;
+       entry->dev       = dev;
+-      entry->offset    = offset_in_page(virt);
++      entry->paddr     = page_to_phys((is_vmalloc_addr(virt) ?
++                              vmalloc_to_page(virt) : virt_to_page(virt)));
+       entry->size      = size;
+       entry->dev_addr  = dma_addr;
+       entry->direction = DMA_BIDIRECTIONAL;
+-      if (is_vmalloc_addr(virt))
+-              entry->pfn = vmalloc_to_pfn(virt);
+-      else
+-              entry->pfn = page_to_pfn(virt_to_page(virt));
+-
+       add_dma_entry(entry, attrs);
+ }
+@@ -1433,7 +1416,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_coherent,
+               .dev            = dev,
+-              .offset         = offset_in_page(virt),
+               .dev_addr       = dma_addr,
+               .size           = size,
+               .direction      = DMA_BIDIRECTIONAL,
+@@ -1443,10 +1425,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+               return;
+-      if (is_vmalloc_addr(virt))
+-              ref.pfn = vmalloc_to_pfn(virt);
+-      else
+-              ref.pfn = page_to_pfn(virt_to_page(virt));
++      ref.paddr = page_to_phys((is_vmalloc_addr(virt) ?
++                      vmalloc_to_page(virt) : virt_to_page(virt)));
+       if (unlikely(dma_debug_disabled()))
+               return;
+@@ -1469,8 +1449,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+       entry->type             = dma_debug_resource;
+       entry->dev              = dev;
+-      entry->pfn              = PHYS_PFN(addr);
+-      entry->offset           = offset_in_page(addr);
++      entry->paddr            = addr;
+       entry->size             = size;
+       entry->dev_addr         = dma_addr;
+       entry->direction        = direction;
+@@ -1547,8 +1526,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+-                      .pfn            = page_to_pfn(sg_page(s)),
+-                      .offset         = s->offset,
++                      .paddr          = sg_phys(s),
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+@@ -1579,8 +1557,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+-                      .pfn            = page_to_pfn(sg_page(s)),
+-                      .offset         = s->offset,
+                      .paddr          = sg_phys(s),
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+-- 
+2.51.0
+
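A short equivalence check for the one place that still splits the address, to_cacheline_number() (plain arithmetic, not part of the patch):

        /*
         * Old fields:  pfn, offset       New field:  paddr
         * where        paddr == (pfn << PAGE_SHIFT) + offset
         *
         * old: (pfn << CACHELINE_PER_PAGE_SHIFT) + (offset >> L1_CACHE_SHIFT)
         * new: ((paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
         *      (offset_in_page(paddr) >> L1_CACHE_SHIFT)
         *
         * paddr >> PAGE_SHIFT recovers pfn, and offset_in_page(paddr)
         * recovers offset, so both expressions index the same cacheline.
         */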
diff --git a/queue-6.12/dma-mapping-trace-dma_alloc-free-direction.patch b/queue-6.12/dma-mapping-trace-dma_alloc-free-direction.patch
new file mode 100644 (file)
index 0000000..ca4b055
--- /dev/null
@@ -0,0 +1,118 @@
+From bbc61be546092c4a185b219fd73880d54ce2d3b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Oct 2024 11:00:35 -0400
+Subject: dma-mapping: trace dma_alloc/free direction
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit 3afff779a725cba914e6caba360b696ae6f90249 ]
+
+In preparation for using these tracepoints in a few more places, trace
+the DMA direction as well. For coherent allocations this is always
+bidirectional.
+
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: 7e2368a21741 ("dma-debug: don't enforce dma mapping check on noncoherent allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/dma.h | 18 ++++++++++++------
+ kernel/dma/mapping.c       |  6 ++++--
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
+index b0f41265191c3..012729cc178f0 100644
+--- a/include/trace/events/dma.h
++++ b/include/trace/events/dma.h
+@@ -116,8 +116,9 @@ DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+ TRACE_EVENT(dma_alloc,
+       TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+-               size_t size, gfp_t flags, unsigned long attrs),
+-      TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
++               size_t size, enum dma_data_direction dir, gfp_t flags,
++               unsigned long attrs),
++      TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs),
+       TP_STRUCT__entry(
+               __string(device, dev_name(dev))
+@@ -125,6 +126,7 @@ TRACE_EVENT(dma_alloc,
+               __field(u64, dma_addr)
+               __field(size_t, size)
+               __field(gfp_t, flags)
++              __field(enum dma_data_direction, dir)
+               __field(unsigned long, attrs)
+       ),
+@@ -137,8 +139,9 @@ TRACE_EVENT(dma_alloc,
+               __entry->attrs = attrs;
+       ),
+-      TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
++      TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
+               __get_str(device),
++              decode_dma_data_direction(__entry->dir),
+               __entry->dma_addr,
+               __entry->size,
+               __entry->virt_addr,
+@@ -148,14 +151,15 @@ TRACE_EVENT(dma_alloc,
+ TRACE_EVENT(dma_free,
+       TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+-               size_t size, unsigned long attrs),
+-      TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
++               size_t size, enum dma_data_direction dir, unsigned long attrs),
++      TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
+       TP_STRUCT__entry(
+               __string(device, dev_name(dev))
+               __field(void *, virt_addr)
+               __field(u64, dma_addr)
+               __field(size_t, size)
++              __field(enum dma_data_direction, dir)
+               __field(unsigned long, attrs)
+       ),
+@@ -164,11 +168,13 @@ TRACE_EVENT(dma_free,
+               __entry->virt_addr = virt_addr;
+               __entry->dma_addr = dma_addr;
+               __entry->size = size;
++              __entry->dir = dir;
+               __entry->attrs = attrs;
+       ),
+-      TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
++      TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
+               __get_str(device),
++              decode_dma_data_direction(__entry->dir),
+               __entry->dma_addr,
+               __entry->size,
+               __entry->virt_addr,
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 74d453ec750a1..9720f3c157d9f 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -619,7 +619,8 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+       else
+               return NULL;
+-      trace_dma_alloc(dev, cpu_addr, *dma_handle, size, flag, attrs);
++      trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
++                      flag, attrs);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
+       return cpu_addr;
+ }
+@@ -644,7 +645,8 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+       if (!cpu_addr)
+               return;
+-      trace_dma_free(dev, cpu_addr, dma_handle, size, attrs);
++      trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
++                     attrs);
+       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       if (dma_alloc_direct(dev, ops))
+               dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+-- 
+2.51.0
+
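A hedged sketch of the call-site semantics (hypothetical fragment, not from the patch): coherent allocations have no single transfer direction, so the tracepoint always records DMA_BIDIRECTIONAL for them:

        #include <linux/dma-mapping.h>

        static void *demo_coherent(struct device *dev, dma_addr_t *handle)
        {
                /* The dma_alloc tracepoint fired inside this call now
                 * reports dir=BIDIRECTIONAL; the noncoherent APIs pass
                 * their real direction instead. */
                return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
        }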
diff --git a/queue-6.12/dma-mapping-trace-more-error-paths.patch b/queue-6.12/dma-mapping-trace-more-error-paths.patch
new file mode 100644 (file)
index 0000000..104262e
--- /dev/null
@@ -0,0 +1,158 @@
+From a3d556dc2b010445e1e537b57020dd54ca8efbbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Oct 2024 11:00:37 -0400
+Subject: dma-mapping: trace more error paths
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit 68b6dbf1f441c4eba3b8511728a41cf9b01dca35 ]
+
+It can be surprising to the user if DMA functions are only traced on
+success. On failure, it can be unclear what the source of the problem
+is. Fix this by tracing all functions even when they fail. Cases where
+we BUG/WARN are skipped, since those should be sufficiently noisy
+already.
+
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: 7e2368a21741 ("dma-debug: don't enforce dma mapping check on noncoherent allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/dma.h | 36 ++++++++++++++++++++++++++++++++++++
+ kernel/dma/mapping.c       | 25 ++++++++++++++++++-------
+ 2 files changed, 54 insertions(+), 7 deletions(-)
+
+diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
+index 45cc0ca8287fe..63b55ccc4f00c 100644
+--- a/include/trace/events/dma.h
++++ b/include/trace/events/dma.h
+@@ -158,6 +158,7 @@ DEFINE_EVENT(dma_alloc_class, name, \
+ DEFINE_ALLOC_EVENT(dma_alloc);
+ DEFINE_ALLOC_EVENT(dma_alloc_pages);
++DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
+ TRACE_EVENT(dma_alloc_sgt,
+       TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
+@@ -322,6 +323,41 @@ TRACE_EVENT(dma_map_sg,
+               decode_dma_attrs(__entry->attrs))
+ );
++TRACE_EVENT(dma_map_sg_err,
++      TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
++               int err, enum dma_data_direction dir, unsigned long attrs),
++      TP_ARGS(dev, sgl, nents, err, dir, attrs),
++
++      TP_STRUCT__entry(
++              __string(device, dev_name(dev))
++              __dynamic_array(u64, phys_addrs, nents)
++              __field(int, err)
++              __field(enum dma_data_direction, dir)
++              __field(unsigned long, attrs)
++      ),
++
++      TP_fast_assign(
++              struct scatterlist *sg;
++              int i;
++
++              __assign_str(device);
++              for_each_sg(sgl, sg, nents, i)
++                      ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++              __entry->err = err;
++              __entry->dir = dir;
++              __entry->attrs = attrs;
++      ),
++
++      TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
++              __get_str(device),
++              decode_dma_data_direction(__entry->dir),
++              __print_array(__get_dynamic_array(phys_addrs),
++                            __get_dynamic_array_len(phys_addrs) /
++                              sizeof(u64), sizeof(u64)),
++              __entry->err,
++              decode_dma_attrs(__entry->attrs))
++);
++
+ TRACE_EVENT(dma_unmap_sg,
+       TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+                enum dma_data_direction dir, unsigned long attrs),
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 690aeda8bd7da..32dcf8492bbcd 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+               debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
+       } else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
+                               ents != -EIO && ents != -EREMOTEIO)) {
++              trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
+               return -EIO;
+       }
+@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+       if (WARN_ON_ONCE(flag & __GFP_COMP))
+               return NULL;
+-      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
++      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
++              trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
++                              DMA_BIDIRECTIONAL, flag, attrs);
+               return cpu_addr;
++      }
+       /* let the implementation decide on the zone to allocate from: */
+       flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+-      if (dma_alloc_direct(dev, ops))
++      if (dma_alloc_direct(dev, ops)) {
+               cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+-      else if (use_dma_iommu(dev))
++      } else if (use_dma_iommu(dev)) {
+               cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
+-      else if (ops->alloc)
++      } else if (ops->alloc) {
+               cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+-      else
++      } else {
++              trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
++                              attrs);
+               return NULL;
++      }
+       trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
+                       flag, attrs);
+@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+        */
+       WARN_ON(irqs_disabled());
++      trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
++                     attrs);
+       if (!cpu_addr)
+               return;
+-      trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
+-                     attrs);
+       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       if (dma_alloc_direct(dev, ops))
+               dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
+               trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
+                                     size, dir, gfp, 0);
+               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
++      } else {
++              trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
+       }
+       return page;
+ }
+@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+               sgt->nents = 1;
+               trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
+               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
++      } else {
++              trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
+       }
+       return sgt;
+ }
+-- 
+2.51.0
+
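A hedged illustration of the new failure-path visibility (hypothetical fragment, not from the patch): a failed allocation now emits a trace event with a NULL virtual address instead of leaving no trace at all:

        #include <linux/dma-mapping.h>

        static void *demo_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle)
        {
                void *buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

                /* On failure, trace_dma_alloc() has still fired, now with
                 * virt_addr=NULL, so a trace consumer can spot the failed
                 * attempt rather than an unexplained gap. */
                if (!buf)
                        return NULL;
                return buf;
        }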
diff --git a/queue-6.12/dma-mapping-use-trace_dma_alloc-for-dma_alloc-instea.patch b/queue-6.12/dma-mapping-use-trace_dma_alloc-for-dma_alloc-instea.patch
new file mode 100644 (file)
index 0000000..683e568
--- /dev/null
@@ -0,0 +1,197 @@
+From 554d86f36340031be9bdc0f48fc85570d572f22f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Oct 2024 11:00:36 -0400
+Subject: dma-mapping: use trace_dma_alloc for dma_alloc* instead of using
+ trace_dma_map
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit c4484ab86ee00f2d9236e2851621ea02c105f4cc ]
+
+In some cases, we use trace_dma_map to trace dma_alloc* functions. This
+generally follows dma_debug. However, this does not record all of the
+relevant information for allocations, such as GFP flags. Create new
+dma_alloc tracepoints for these functions. Note that while
+dma_alloc_noncontiguous may allocate discontiguous pages (from the CPU's
+point of view), the device will only see one contiguous mapping.
+Therefore, we just need to trace dma_addr and size.
+
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: 7e2368a21741 ("dma-debug: don't enforce dma mapping check on noncoherent allocations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/dma.h | 99 +++++++++++++++++++++++++++++++++++++-
+ kernel/dma/mapping.c       | 10 ++--
+ 2 files changed, 102 insertions(+), 7 deletions(-)
+
+diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
+index 012729cc178f0..45cc0ca8287fe 100644
+--- a/include/trace/events/dma.h
++++ b/include/trace/events/dma.h
+@@ -114,7 +114,7 @@ DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+                enum dma_data_direction dir, unsigned long attrs),
+       TP_ARGS(dev, addr, size, dir, attrs));
+-TRACE_EVENT(dma_alloc,
++DECLARE_EVENT_CLASS(dma_alloc_class,
+       TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+                size_t size, enum dma_data_direction dir, gfp_t flags,
+                unsigned long attrs),
+@@ -149,7 +149,58 @@ TRACE_EVENT(dma_alloc,
+               decode_dma_attrs(__entry->attrs))
+ );
+-TRACE_EVENT(dma_free,
++#define DEFINE_ALLOC_EVENT(name) \
++DEFINE_EVENT(dma_alloc_class, name, \
++      TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
++               size_t size, enum dma_data_direction dir, gfp_t flags, \
++               unsigned long attrs), \
++      TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
++
++DEFINE_ALLOC_EVENT(dma_alloc);
++DEFINE_ALLOC_EVENT(dma_alloc_pages);
++
++TRACE_EVENT(dma_alloc_sgt,
++      TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
++               enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
++      TP_ARGS(dev, sgt, size, dir, flags, attrs),
++
++      TP_STRUCT__entry(
++              __string(device, dev_name(dev))
++              __dynamic_array(u64, phys_addrs, sgt->orig_nents)
++              __field(u64, dma_addr)
++              __field(size_t, size)
++              __field(enum dma_data_direction, dir)
++              __field(gfp_t, flags)
++              __field(unsigned long, attrs)
++      ),
++
++      TP_fast_assign(
++              struct scatterlist *sg;
++              int i;
++
++              __assign_str(device);
++              for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
++                      ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++              __entry->dma_addr = sg_dma_address(sgt->sgl);
++              __entry->size = size;
++              __entry->dir = dir;
++              __entry->flags = flags;
++              __entry->attrs = attrs;
++      ),
++
++      TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
++              __get_str(device),
++              decode_dma_data_direction(__entry->dir),
++              __entry->dma_addr,
++              __entry->size,
++              __print_array(__get_dynamic_array(phys_addrs),
++                            __get_dynamic_array_len(phys_addrs) /
++                              sizeof(u64), sizeof(u64)),
++              show_gfp_flags(__entry->flags),
++              decode_dma_attrs(__entry->attrs))
++);
++
++DECLARE_EVENT_CLASS(dma_free_class,
+       TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs),
+       TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
+@@ -181,6 +232,50 @@ TRACE_EVENT(dma_free,
+               decode_dma_attrs(__entry->attrs))
+ );
++#define DEFINE_FREE_EVENT(name) \
++DEFINE_EVENT(dma_free_class, name, \
++      TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
++               size_t size, enum dma_data_direction dir, unsigned long attrs), \
++      TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
++
++DEFINE_FREE_EVENT(dma_free);
++DEFINE_FREE_EVENT(dma_free_pages);
++
++TRACE_EVENT(dma_free_sgt,
++      TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
++               enum dma_data_direction dir),
++      TP_ARGS(dev, sgt, size, dir),
++
++      TP_STRUCT__entry(
++              __string(device, dev_name(dev))
++              __dynamic_array(u64, phys_addrs, sgt->orig_nents)
++              __field(u64, dma_addr)
++              __field(size_t, size)
++              __field(enum dma_data_direction, dir)
++      ),
++
++      TP_fast_assign(
++              struct scatterlist *sg;
++              int i;
++
++              __assign_str(device);
++              for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
++                      ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++              __entry->dma_addr = sg_dma_address(sgt->sgl);
++              __entry->size = size;
++              __entry->dir = dir;
++      ),
++
++      TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
++              __get_str(device),
++              decode_dma_data_direction(__entry->dir),
++              __entry->dma_addr,
++              __entry->size,
++              __print_array(__get_dynamic_array(phys_addrs),
++                            __get_dynamic_array_len(phys_addrs) /
++                              sizeof(u64), sizeof(u64)))
++);
++
+ TRACE_EVENT(dma_map_sg,
+       TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+                int ents, enum dma_data_direction dir, unsigned long attrs),
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 9720f3c157d9f..690aeda8bd7da 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -685,8 +685,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
+       struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+       if (page) {
+-              trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
+-                                 dir, 0);
++              trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
++                                    size, dir, gfp, 0);
+               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+       }
+       return page;
+@@ -710,7 +710,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
+ void dma_free_pages(struct device *dev, size_t size, struct page *page,
+               dma_addr_t dma_handle, enum dma_data_direction dir)
+ {
+-      trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
++      trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
+       debug_dma_unmap_page(dev, dma_handle, size, dir);
+       __dma_free_pages(dev, size, page, dma_handle, dir);
+ }
+@@ -770,7 +770,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+       if (sgt) {
+               sgt->nents = 1;
+-              trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
++              trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
+               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+       }
+       return sgt;
+@@ -789,7 +789,7 @@ static void free_single_sgt(struct device *dev, size_t size,
+ void dma_free_noncontiguous(struct device *dev, size_t size,
+               struct sg_table *sgt, enum dma_data_direction dir)
+ {
+-      trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
++      trace_dma_free_sgt(dev, sgt, size, dir);
+       debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+       if (use_dma_iommu(dev))
+-- 
+2.51.0
+
diff --git a/queue-6.12/fhandle-use-more-consistent-rules-for-decoding-file-.patch b/queue-6.12/fhandle-use-more-consistent-rules-for-decoding-file-.patch
new file mode 100644 (file)
index 0000000..1503c19
--- /dev/null
@@ -0,0 +1,65 @@
+From e406c186bb497c28c86de1093bb4bc15ffa67077 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Aug 2025 21:43:09 +0200
+Subject: fhandle: use more consistent rules for decoding file handle from
+ userns
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+[ Upstream commit bb585591ebf00fb1f6a1fdd1ea96b5848bd9112d ]
+
+Commit 620c266f39493 ("fhandle: relax open_by_handle_at() permission
+checks") relaxed the coditions for decoding a file handle from non init
+userns.
+
+The conditions are that the decoded dentry is accessible from the
+user-provided mountfd (or from the fs root) and that all the ancestors
+along the path have a valid id mapping in the userns.
+
+These conditions are intentionally more strict than the condition that
+the decoded dentry should be "lookable" by path from the mountfd.
+
+For example, the path /home/amir/dir/subdir is lookable by path from
+the unprivileged userns of user amir, because the permissions on /home
+are 755, but the owner of /home does not have a valid id mapping in
+that userns.
+
+The current code did not check that the decoded dentry itself has a
+valid id mapping in the userns.  There is no security risk in that,
+because that final open still performs the needed permission checks,
+but this is inconsistent with the checks performed on the ancestors,
+so the behavior can be a bit confusing.
+
+Add the check for the decoded dentry itself, so that the entire path,
+including the last component has a valid id mapping in the userns.
+
+Fixes: 620c266f39493 ("fhandle: relax open_by_handle_at() permission checks")
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Link: https://lore.kernel.org/20250827194309.1259650-1-amir73il@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fhandle.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index 82df28d45cd70..ff90f8203015e 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -176,6 +176,14 @@ static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
+       if (!ctx->flags)
+               return 1;
++      /*
++       * Verify that the decoded dentry itself has a valid id mapping.
++       * In case the decoded dentry is the mountfd root itself, this
++       * verifies that the mountfd inode itself has a valid id mapping.
++       */
++      if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
++              return 0;
++
+       /*
+        * It's racy as we're not taking rename_lock but we're able to ignore
+        * permissions and we just need an approximation whether we were able
+-- 
+2.51.0
+
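For reference, a hedged userspace sketch of the affected call sequence (hypothetical helper names; MAX_HANDLE_SZ mirrors the kernel's 128-byte limit, which glibc does not export). With this change, open_by_handle_at() from inside a user namespace succeeds only if every component of the decoded path, including the final dentry, has a valid id mapping in that userns; the permission checks of the final open are unchanged:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdlib.h>

        #define MAX_HANDLE_SZ 128  /* kernel limit, not exported by glibc */

        static struct file_handle *get_handle(const char *path, int *mount_id)
        {
                struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);

                if (!fh)
                        return NULL;
                fh->handle_bytes = MAX_HANDLE_SZ;
                if (name_to_handle_at(AT_FDCWD, path, fh, mount_id, 0) < 0) {
                        free(fh);
                        return NULL;
                }
                return fh;
        }

        static int reopen(int mount_fd, struct file_handle *fh)
        {
                /* Subject to the stricter per-component id-mapping checks
                 * when called from a non-init userns. */
                return open_by_handle_at(mount_fd, fh, O_RDONLY);
        }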
diff --git a/queue-6.12/series b/queue-6.12/series
new file mode 100644 (file)
index 0000000..7730352
--- /dev/null
@@ -0,0 +1,6 @@
+fhandle-use-more-consistent-rules-for-decoding-file-.patch
+dma-debug-store-a-phys_addr_t-in-struct-dma_debug_en.patch
+dma-mapping-trace-dma_alloc-free-direction.patch
+dma-mapping-use-trace_dma_alloc-for-dma_alloc-instea.patch
+dma-mapping-trace-more-error-paths.patch
+dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch
diff --git a/queue-6.16/block-don-t-silently-ignore-metadata-for-sync-read-w.patch b/queue-6.16/block-don-t-silently-ignore-metadata-for-sync-read-w.patch
new file mode 100644 (file)
index 0000000..f090625
--- /dev/null
@@ -0,0 +1,84 @@
+From 087a28e6a7b1bc39fed41d5d37b20d8524aeaf22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 10:25:01 +0200
+Subject: block: don't silently ignore metadata for sync read/write
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 2729a60bbfb9215997f25372ebe9b7964f038296 ]
+
+The block fops don't try to handle metadata for synchronous requests,
+probably because the completion handler looks at dio->iocb which is not
+valid for synchronous requests.
+
+But silently ignoring metadata (or warning in case of
+__blkdev_direct_IO_simple) is a really bad idea as that can cause
+silent data corruption if a user ever shows up.
+
+Instead, handle metadata for synchronous requests as well: the
+completion handler can simply check bio_integrity(), since the block
+layer's default integrity has already been freed at this point, and
+thus bio_integrity() only returns true for user-mapped integrity.
+
+Fixes: 3d8b5a22d404 ("block: add support to pass user meta buffer")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/20250819082517.2038819-3-hch@lst.de
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/fops.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/block/fops.c b/block/fops.c
+index e7bfe65c57f22..d62fbefb2e671 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -55,7 +55,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
+       struct bio bio;
+       ssize_t ret;
+-      WARN_ON_ONCE(iocb->ki_flags & IOCB_HAS_METADATA);
+       if (nr_pages <= DIO_INLINE_BIO_VECS)
+               vecs = inline_vecs;
+       else {
+@@ -132,7 +131,7 @@ static void blkdev_bio_end_io(struct bio *bio)
+       if (bio->bi_status && !dio->bio.bi_status)
+               dio->bio.bi_status = bio->bi_status;
+-      if (!is_sync && (dio->iocb->ki_flags & IOCB_HAS_METADATA))
++      if (bio_integrity(bio))
+               bio_integrity_unmap_user(bio);
+       if (atomic_dec_and_test(&dio->ref)) {
+@@ -234,7 +233,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                       }
+                       bio->bi_opf |= REQ_NOWAIT;
+               }
+-              if (!is_sync && (iocb->ki_flags & IOCB_HAS_METADATA)) {
++              if (iocb->ki_flags & IOCB_HAS_METADATA) {
+                       ret = bio_integrity_map_iter(bio, iocb->private);
+                       if (unlikely(ret))
+                               goto fail;
+@@ -302,7 +301,7 @@ static void blkdev_bio_end_io_async(struct bio *bio)
+               ret = blk_status_to_errno(bio->bi_status);
+       }
+-      if (iocb->ki_flags & IOCB_HAS_METADATA)
++      if (bio_integrity(bio))
+               bio_integrity_unmap_user(bio);
+       iocb->ki_complete(iocb, ret);
+@@ -423,7 +422,8 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+       }
+       nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
+-      if (likely(nr_pages <= BIO_MAX_VECS)) {
++      if (likely(nr_pages <= BIO_MAX_VECS &&
++                 !(iocb->ki_flags & IOCB_HAS_METADATA))) {
+               if (is_sync_kiocb(iocb))
+                       return __blkdev_direct_IO_simple(iocb, iter, bdev,
+                                                       nr_pages);
+-- 
+2.51.0
+
diff --git a/queue-6.16/bluetooth-hci_conn-fix-not-cleaning-up-broadcaster-b.patch b/queue-6.16/bluetooth-hci_conn-fix-not-cleaning-up-broadcaster-b.patch
new file mode 100644 (file)
index 0000000..d86fbbc
--- /dev/null
@@ -0,0 +1,35 @@
+From 914ac91f013f29422ff362719cbc3d7be58b49a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 12:11:09 -0400
+Subject: Bluetooth: hci_conn: Fix not cleaning up Broadcaster/Broadcast Source
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 3ba486c5f3ce2c22ffd29c0103404cdbe21912b3 ]
+
+This fixes the Broadcaster/Broadcast Source not sending HCI_OP_LE_TERM_BIG
+because HCI_CONN_PER_ADV was not being set.
+
+Fixes: a7bcffc673de ("Bluetooth: Add PA_LINK to distinguish BIG sync and PA sync connections")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ad5574e9a93ee..dc4f23ceff2a6 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2274,7 +2274,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+        * the start periodic advertising and create BIG commands have
+        * been queued
+        */
+-      hci_conn_hash_list_state(hdev, bis_mark_per_adv, PA_LINK,
++      hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
+                                BT_BOUND, &data);
+       /* Queue start periodic advertising and create BIG */
+-- 
+2.51.0
+
diff --git a/queue-6.16/bluetooth-hci_conn-fix-running-bis_cleanup-for-hci_c.patch b/queue-6.16/bluetooth-hci_conn-fix-running-bis_cleanup-for-hci_c.patch
new file mode 100644 (file)
index 0000000..3c446b0
--- /dev/null
@@ -0,0 +1,70 @@
+From 98a9180ee981fe887c910c5e568c80cf3e82efd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 13:51:01 -0400
+Subject: Bluetooth: hci_conn: Fix running bis_cleanup for hci_conn->type
+ PA_LINK
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit d36349ea73d805bb72cbc24ab90cb1da4ad5c379 ]
+
+Connections with a type of PA_LINK shall be considered temporary, just
+to track the lifetime of the PA Sync setup. Once the BIG Sync is
+established and connections are created with BIS_LINK, the existing
+PA_LINK connection shall no longer use bis_cleanup; otherwise it
+terminates the PA Sync when that shall be left to the BIS_LINK
+connection to do.
+
+Fixes: a7bcffc673de ("Bluetooth: Add PA_LINK to distinguish BIG sync and PA sync connections")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c  | 12 +++++++++++-
+ net/bluetooth/hci_event.c |  7 ++++++-
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index dc4f23ceff2a6..ce17e489c67c3 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -829,7 +829,17 @@ static void bis_cleanup(struct hci_conn *conn)
+               /* Check if ISO connection is a BIS and terminate advertising
+                * set and BIG if there are no other connections using it.
+                */
+-              bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
++              bis = hci_conn_hash_lookup_big_state(hdev,
++                                                   conn->iso_qos.bcast.big,
++                                                   BT_CONNECTED,
++                                                   HCI_ROLE_MASTER);
++              if (bis)
++                      return;
++
++              bis = hci_conn_hash_lookup_big_state(hdev,
++                                                   conn->iso_qos.bcast.big,
++                                                   BT_CONNECT,
++                                                   HCI_ROLE_MASTER);
+               if (bis)
+                       return;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 0ffdbe249f5d3..090c7ffa51525 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6973,9 +6973,14 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+                               continue;
+               }
+-              if (ev->status != 0x42)
++              if (ev->status != 0x42) {
+                       /* Mark PA sync as established */
+                       set_bit(HCI_CONN_PA_SYNC, &bis->flags);
++                      /* Reset cleanup callback of PA Sync so it doesn't
++                       * terminate the sync when deleting the connection.
++                       */
++                      conn->cleanup = NULL;
++              }
+               bis->sync_handle = conn->sync_handle;
+               bis->iso_qos.bcast.big = ev->handle;
+-- 
+2.51.0
+
diff --git a/queue-6.16/bluetooth-iso-fix-getname-not-returning-broadcast-fi.patch b/queue-6.16/bluetooth-iso-fix-getname-not-returning-broadcast-fi.patch
new file mode 100644 (file)
index 0000000..66436f7
--- /dev/null
@@ -0,0 +1,36 @@
+From dc0f8ddeb25aa914cf510b3473c1af141618571d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 16:36:27 -0400
+Subject: Bluetooth: ISO: Fix getname not returning broadcast fields
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit aee29c18a38d479c2f058c9b6a39b0527cf81d10 ]
+
+getname shall return the iso_bc fields for both BIS_LINK and PA_LINK
+since the likes of bluetoothd use getpeername to retrieve the SID both
+when enumerating broadcasters and when synchronizing.
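+
+A hypothetical user-space sketch of that usage (struct and field names
+as used by the kernel's ISO sockets; allocation and error handling
+elided where obvious):
+
+	struct sockaddr_iso *sa;
+	socklen_t len = sizeof(*sa) + sizeof(struct sockaddr_iso_bc);
+	uint8_t sid;
+
+	sa = malloc(len);
+	if (sa && getpeername(fd, (struct sockaddr *)sa, &len) == 0)
+		sid = sa->iso_bc->bc_sid;	/* now set for PA_LINK too */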
+
+Fixes: a7bcffc673de ("Bluetooth: Add PA_LINK to distinguish BIG sync and PA sync connections")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/iso.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 14a4215352d5f..c21566e1494a9 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1347,7 +1347,7 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr,
+               bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst);
+               sa->iso_bdaddr_type = iso_pi(sk)->dst_type;
+-              if (hcon && hcon->type == BIS_LINK) {
++              if (hcon && (hcon->type == BIS_LINK || hcon->type == PA_LINK)) {
+                       sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid;
+                       sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis;
+                       memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis,
+-- 
+2.51.0
+
diff --git a/queue-6.16/coredump-don-t-pointlessly-check-and-spew-warnings.patch b/queue-6.16/coredump-don-t-pointlessly-check-and-spew-warnings.patch
new file mode 100644 (file)
index 0000000..25abc75
--- /dev/null
@@ -0,0 +1,61 @@
+From a14fc19651556fc1d6e3b2c4d9e2d9155a2aeb42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 13:50:47 +0200
+Subject: coredump: don't pointlessly check and spew warnings
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit be1e0283021ec73c2eb92839db9a471a068709d9 ]
+
+When a write happens it doesn't make sense to perform checks on the
+input. Skip them.
+
+Whether a fixes tag is warranted is a bit of a gray area here but I'll
+add one for the socket validation part I added recently.
+
+Link: https://lore.kernel.org/20250821-moosbedeckt-denunziant-7908663f3563@brauner
+Fixes: 16195d2c7dd2 ("coredump: validate socket name as it is written")
+Reported-by: Brad Spengler <brad.spengler@opensrcsec.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/coredump.c | 4 ++++
+ fs/exec.c     | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/coredump.c b/fs/coredump.c
+index f217ebf2b3b68..012915262d11b 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -1263,11 +1263,15 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write,
+       ssize_t retval;
+       char old_core_pattern[CORENAME_MAX_SIZE];
++      if (write)
++              return proc_dostring(table, write, buffer, lenp, ppos);
++
+       retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);
+       error = proc_dostring(table, write, buffer, lenp, ppos);
+       if (error)
+               return error;
++
+       if (!check_coredump_socket()) {
+               strscpy(core_pattern, old_core_pattern, retval + 1);
+               return -EINVAL;
+diff --git a/fs/exec.c b/fs/exec.c
+index ba400aafd6406..551e1cc5bf1e3 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -2048,7 +2048,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ
+ {
+       int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-      if (!error)
++      if (!error && !write)
+               validate_coredump_safety();
+       return error;
+ }
+-- 
+2.51.0
+
diff --git a/queue-6.16/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch b/queue-6.16/dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch
new file mode 100644 (file)
index 0000000..f5235d7
--- /dev/null
@@ -0,0 +1,170 @@
+From ac871ba873aeed3bcccf79654b1d627e839dd268 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 16:17:33 +0800
+Subject: dma-debug: don't enforce dma mapping check on noncoherent allocations
+
+From: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+
+[ Upstream commit 7e2368a21741e2db542330b32aa6fdd8908e7cff ]
+
+As discussed in [1], there is no need to enforce the DMA mapping check
+on noncoherent allocations; a simple test on the returned CPU address is
+good enough.
+
+Add a new pair of debug helpers and use them for noncoherent alloc/free
+to fix this issue.
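+
+For reference, a minimal sketch of the noncoherent alloc/free pair these
+helpers now track (hypothetical driver code; direction and size are
+arbitrary):
+
+	struct page *page;
+	dma_addr_t dma;
+
+	page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+	/* ... noncoherent use, with explicit dma_sync_*() as needed ... */
+	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);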
+
+Fixes: efa70f2fdc84 ("dma-mapping: add a new dma_alloc_pages API")
+Link: https://lore.kernel.org/all/ff6c1fe6-820f-4e58-8395-df06aa91706c@oss.qualcomm.com # 1
+Signed-off-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Link: https://lore.kernel.org/r/20250828-dma-debug-fix-noncoherent-dma-check-v1-1-76e9be0dd7fc@oss.qualcomm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/debug.c   | 48 +++++++++++++++++++++++++++++++++++++++++++-
+ kernel/dma/debug.h   | 20 ++++++++++++++++++
+ kernel/dma/mapping.c |  4 ++--
+ 3 files changed, 69 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index e43c6de2bce4e..b82399437db03 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -39,6 +39,7 @@ enum {
+       dma_debug_sg,
+       dma_debug_coherent,
+       dma_debug_resource,
++      dma_debug_noncoherent,
+ };
+ enum map_err_types {
+@@ -141,6 +142,7 @@ static const char *type2name[] = {
+       [dma_debug_sg] = "scatter-gather",
+       [dma_debug_coherent] = "coherent",
+       [dma_debug_resource] = "resource",
++      [dma_debug_noncoherent] = "noncoherent",
+ };
+ static const char *dir2name[] = {
+@@ -993,7 +995,8 @@ static void check_unmap(struct dma_debug_entry *ref)
+                          "[mapped as %s] [unmapped as %s]\n",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type], type2name[ref->type]);
+-      } else if (entry->type == dma_debug_coherent &&
++      } else if ((entry->type == dma_debug_coherent ||
++                  entry->type == dma_debug_noncoherent) &&
+                  ref->paddr != entry->paddr) {
+               err_printk(ref->dev, entry, "device driver frees "
+                          "DMA memory with different CPU address "
+@@ -1581,6 +1584,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+       }
+ }
++void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                         size_t size, int direction,
++                         dma_addr_t dma_addr,
++                         unsigned long attrs)
++{
++      struct dma_debug_entry *entry;
++
++      if (unlikely(dma_debug_disabled()))
++              return;
++
++      entry = dma_entry_alloc();
++      if (!entry)
++              return;
++
++      entry->type      = dma_debug_noncoherent;
++      entry->dev       = dev;
++      entry->paddr     = page_to_phys(page);
++      entry->size      = size;
++      entry->dev_addr  = dma_addr;
++      entry->direction = direction;
++
++      add_dma_entry(entry, attrs);
++}
++
++void debug_dma_free_pages(struct device *dev, struct page *page,
++                        size_t size, int direction,
++                        dma_addr_t dma_addr)
++{
++      struct dma_debug_entry ref = {
++              .type           = dma_debug_noncoherent,
++              .dev            = dev,
++              .paddr          = page_to_phys(page),
++              .dev_addr       = dma_addr,
++              .size           = size,
++              .direction      = direction,
++      };
++
++      if (unlikely(dma_debug_disabled()))
++              return;
++
++      check_unmap(&ref);
++}
++
+ static int __init dma_debug_driver_setup(char *str)
+ {
+       int i;
+diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
+index f525197d3cae6..48757ca13f314 100644
+--- a/kernel/dma/debug.h
++++ b/kernel/dma/debug.h
+@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+ extern void debug_dma_sync_sg_for_device(struct device *dev,
+                                        struct scatterlist *sg,
+                                        int nelems, int direction);
++extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                                size_t size, int direction,
++                                dma_addr_t dma_addr,
++                                unsigned long attrs);
++extern void debug_dma_free_pages(struct device *dev, struct page *page,
++                               size_t size, int direction,
++                               dma_addr_t dma_addr);
+ #else /* CONFIG_DMA_API_DEBUG */
+ static inline void debug_dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
+                                               int nelems, int direction)
+ {
+ }
++
++static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
++                                       size_t size, int direction,
++                                       dma_addr_t dma_addr,
++                                       unsigned long attrs)
++{
++}
++
++static inline void debug_dma_free_pages(struct device *dev, struct page *page,
++                                      size_t size, int direction,
++                                      dma_addr_t dma_addr)
++{
++}
+ #endif /* CONFIG_DMA_API_DEBUG */
+ #endif /* _KERNEL_DMA_DEBUG_H */
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 107e4a4d251df..56de28a3b1799 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -712,7 +712,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
+       if (page) {
+               trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
+                                     size, dir, gfp, 0);
+-              debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
++              debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
+       } else {
+               trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
+       }
+@@ -738,7 +738,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
+               dma_addr_t dma_handle, enum dma_data_direction dir)
+ {
+       trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
+-      debug_dma_unmap_page(dev, dma_handle, size, dir);
++      debug_dma_free_pages(dev, page, size, dir, dma_handle);
+       __dma_free_pages(dev, size, page, dma_handle, dir);
+ }
+ EXPORT_SYMBOL_GPL(dma_free_pages);
+-- 
+2.51.0
+
diff --git a/queue-6.16/fhandle-use-more-consistent-rules-for-decoding-file-.patch b/queue-6.16/fhandle-use-more-consistent-rules-for-decoding-file-.patch
new file mode 100644 (file)
index 0000000..53640f5
--- /dev/null
@@ -0,0 +1,65 @@
+From ace5aeb560c33543aa4c4e30389b0f4e946b7400 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Aug 2025 21:43:09 +0200
+Subject: fhandle: use more consistent rules for decoding file handle from
+ userns
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+[ Upstream commit bb585591ebf00fb1f6a1fdd1ea96b5848bd9112d ]
+
+Commit 620c266f39493 ("fhandle: relax open_by_handle_at() permission
+checks") relaxed the conditions for decoding a file handle from a
+non-init userns.
+
+The conditions are that the decoded dentry is accessible from the
+user-provided mountfd (or from the fs root) and that all the ancestors
+along the path have a valid id mapping in the userns.
+
+These conditions are intentionally more strict than the condition that
+the decoded dentry should be "lookable" by path from the mountfd.
+
+For example, the path /home/amir/dir/subdir is lookable by path from the
+unprivileged userns of user amir, because the permissions on /home are
+755, but the owner of /home does not have a valid id mapping in that
+userns.
+
+The current code did not check that the decoded dentry itself has a
+valid id mapping in the userns.  There is no security risk in that,
+because the final open still performs the needed permission checks, but
+it is inconsistent with the checks performed on the ancestors, so the
+behavior can be a bit confusing.
+
+Add the check for the decoded dentry itself, so that the entire path,
+including the last component, has a valid id mapping in the userns.
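+
+A hypothetical user-space sequence this affects (standard fhandle API;
+error handling elided):
+
+	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
+	int mount_id, fd;
+
+	fh->handle_bytes = MAX_HANDLE_SZ;
+	name_to_handle_at(AT_FDCWD, "/home/amir/dir/subdir", fh, &mount_id, 0);
+	/* from a userns, decoding now also requires a valid id mapping
+	 * for subdir itself, not only for its ancestors */
+	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);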
+
+Fixes: 620c266f39493 ("fhandle: relax open_by_handle_at() permission checks")
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Link: https://lore.kernel.org/20250827194309.1259650-1-amir73il@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fhandle.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index e21ec857f2abc..52c72896e1c16 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -202,6 +202,14 @@ static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
+       if (!ctx->flags)
+               return 1;
++      /*
++       * Verify that the decoded dentry itself has a valid id mapping.
++       * In case the decoded dentry is the mountfd root itself, this
++       * verifies that the mountfd inode itself has a valid id mapping.
++       */
++      if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
++              return 0;
++
+       /*
+        * It's racy as we're not taking rename_lock but we're able to ignore
+        * permissions and we just need an approximation whether we were able
+-- 
+2.51.0
+
diff --git a/queue-6.16/fs-add-a-fmode_-flag-to-indicate-iocb_has_metadata-a.patch b/queue-6.16/fs-add-a-fmode_-flag-to-indicate-iocb_has_metadata-a.patch
new file mode 100644 (file)
index 0000000..b3214b2
--- /dev/null
@@ -0,0 +1,76 @@
+From caf6f62a049bb1ae10aa9933de31e7438c10a8c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 10:25:00 +0200
+Subject: fs: add a FMODE_ flag to indicate IOCB_HAS_METADATA availability
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit d072148a8631f102de60ed5a3a827e85d09d24f0 ]
+
+Currently the kernel will happily route io_uring requests with metadata
+to file operations that don't support it.  Add a FMODE_ flag to guard
+that.
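+
+A minimal sketch of how an implementation opts in (hypothetical driver;
+the block layer hunk below does the same for disks with an integrity
+profile):
+
+	static int foo_open(struct inode *inode, struct file *filp)
+	{
+		if (foo_has_integrity(inode))	/* hypothetical helper */
+			filp->f_mode |= FMODE_HAS_METADATA;
+		return 0;
+	}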
+
+Fixes: 4de2ce04c862 ("fs: introduce IOCB_HAS_METADATA for metadata")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/20250819082517.2038819-2-hch@lst.de
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/fops.c       | 3 +++
+ include/linux/fs.h | 3 ++-
+ io_uring/rw.c      | 3 +++
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/block/fops.c b/block/fops.c
+index 1309861d4c2c4..e7bfe65c57f22 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -7,6 +7,7 @@
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/blkdev.h>
++#include <linux/blk-integrity.h>
+ #include <linux/buffer_head.h>
+ #include <linux/mpage.h>
+ #include <linux/uio.h>
+@@ -672,6 +673,8 @@ static int blkdev_open(struct inode *inode, struct file *filp)
+       if (bdev_can_atomic_write(bdev))
+               filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
++      if (blk_get_integrity(bdev->bd_disk))
++              filp->f_mode |= FMODE_HAS_METADATA;
+       ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
+       if (ret)
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 040c0036320fd..d6716ff498a7a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -149,7 +149,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ /* Expect random access pattern */
+ #define FMODE_RANDOM          ((__force fmode_t)(1 << 12))
+-/* FMODE_* bit 13 */
++/* Supports IOCB_HAS_METADATA */
++#define FMODE_HAS_METADATA    ((__force fmode_t)(1 << 13))
+ /* File is opened with O_PATH; almost nothing can be done with it */
+ #define FMODE_PATH            ((__force fmode_t)(1 << 14))
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 52a5b950b2e5e..af5a54b5db123 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -886,6 +886,9 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
+       if (req->flags & REQ_F_HAS_METADATA) {
+               struct io_async_rw *io = req->async_data;
++              if (!(file->f_mode & FMODE_HAS_METADATA))
++                      return -EINVAL;
++
+               /*
+                * We have a union of meta fields with wpq used for buffered-io
+                * in io_async_rw, so fail it here.
+-- 
+2.51.0
+
diff --git a/queue-6.16/fuse-block-access-to-folio-overlimit.patch b/queue-6.16/fuse-block-access-to-folio-overlimit.patch
new file mode 100644 (file)
index 0000000..4f43d8f
--- /dev/null
@@ -0,0 +1,44 @@
+From 5063c752c1abbfb328a59b3099f39544390f0ed8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Aug 2025 09:45:55 +0800
+Subject: fuse: Block access to folio overlimit
+
+From: Edward Adam Davis <eadavis@qq.com>
+
+[ Upstream commit 9d81ba6d49a7457784f0b6a71046818b86ec7e44 ]
+
+syz reported a slab-out-of-bounds Write in fuse_dev_do_write.
+
+When the number of bytes to be retrieved is truncated to the upper limit
+imposed by fc->max_pages and there is an offset, an out-of-bounds write
+is triggered.
+
+Add a loop termination condition to prevent overruns.
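+
+Illustrative numbers for the failure mode (PAGE_SIZE of 4096 assumed;
+the folio array is sized for num_pages entries):
+
+	/* fc->max_pages = 2, offset = 512, num capped at 2 * 4096 */
+	/* folio 0 carries 4096 - 512 bytes, folio 1 carries 4096, so
+	 * num is still 512 after num_pages folios and the unbounded
+	 * loop wrote a third, out-of-bounds entry; bounding on
+	 * ap->num_folios < num_pages stops it in time */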
+
+Fixes: 3568a9569326 ("fuse: support large folios for retrieves")
+Reported-by: syzbot+2d215d165f9354b9c4ea@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2d215d165f9354b9c4ea
+Tested-by: syzbot+2d215d165f9354b9c4ea@syzkaller.appspotmail.com
+Signed-off-by: Edward Adam Davis <eadavis@qq.com>
+Reviewed-by: Joanne Koong <joannelkoong@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index e80cd8f2c049f..5150aa25e64be 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1893,7 +1893,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
+       index = outarg->offset >> PAGE_SHIFT;
+-      while (num) {
++      while (num && ap->num_folios < num_pages) {
+               struct folio *folio;
+               unsigned int folio_offset;
+               unsigned int nr_bytes;
+-- 
+2.51.0
+
diff --git a/queue-6.16/iommu-vt-d-create-unique-domain-ops-for-each-stage.patch b/queue-6.16/iommu-vt-d-create-unique-domain-ops-for-each-stage.patch
new file mode 100644 (file)
index 0000000..3c802cb
--- /dev/null
@@ -0,0 +1,262 @@
+From 0d2cf9fda7809e37bc0e406e49be4450ee7cf0cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 12:50:24 +0800
+Subject: iommu/vt-d: Create unique domain ops for each stage
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit b33125296b5047115469b8a3b74c0fdbf4976548 ]
+
+Use the domain ops pointer to tell what kind of domain it is instead of
+the internal use_first_level indication. This also protects against
+wrongly using an SVA/nested/IDENTITY/BLOCKED domain type in places where
+it should not be used.
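+
+Condensed from the patch below, the stage check becomes a simple ops
+pointer comparison:
+
+	static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
+	{
+		return domain->domain.ops == &intel_fs_paging_domain_ops;
+	}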
+
+The only remaining uses of use_first_level outside the paging domain are in
+paging_domain_compatible() and intel_iommu_enforce_cache_coherency().
+
+Thus, remove the useless sets of use_first_level in
+intel_svm_domain_alloc() and intel_iommu_domain_alloc_nested(). None of
+the unique ops for these domain types ever reference it on their call
+chains.
+
+Add a WARN_ON() check in domain_context_mapping_one() as it only works
+with second stage.
+
+This is preparation for iommupt which will have different ops for each of
+the stages.
+
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/5-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250714045028.958850-8-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: cee686775f9c ("iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/cache.c  |  5 +--
+ drivers/iommu/intel/iommu.c  | 60 +++++++++++++++++++++++++-----------
+ drivers/iommu/intel/iommu.h  | 12 ++++++++
+ drivers/iommu/intel/nested.c |  4 +--
+ drivers/iommu/intel/svm.c    |  1 -
+ 5 files changed, 58 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
+index c8b79de84d3fb..071f78e67fcba 100644
+--- a/drivers/iommu/intel/cache.c
++++ b/drivers/iommu/intel/cache.c
+@@ -370,7 +370,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
+       struct intel_iommu *iommu = tag->iommu;
+       u64 type = DMA_TLB_PSI_FLUSH;
+-      if (domain->use_first_level) {
++      if (intel_domain_is_fs_paging(domain)) {
+               qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
+                                   pages, ih, domain->qi_batch);
+               return;
+@@ -529,7 +529,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
+                       qi_batch_flush_descs(iommu, domain->qi_batch);
+               iommu = tag->iommu;
+-              if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
++              if (!cap_caching_mode(iommu->cap) ||
++                  intel_domain_is_fs_paging(domain)) {
+                       iommu_flush_write_buffer(iommu);
+                       continue;
+               }
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index f9f16d9bbf0bc..207f87eeb47a2 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1479,6 +1479,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+       struct context_entry *context;
+       int ret;
++      if (WARN_ON(!intel_domain_is_ss_paging(domain)))
++              return -EINVAL;
++
+       pr_debug("Set context mapping for %02x:%02x.%d\n",
+               bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+@@ -1798,7 +1801,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+ static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
+                                      struct intel_iommu *iommu)
+ {
+-      if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
++      if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
+               return true;
+       if (rwbf_quirk || cap_rwbf(iommu->cap))
+@@ -1830,12 +1833,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+       if (!sm_supported(iommu))
+               ret = domain_context_mapping(domain, dev);
+-      else if (domain->use_first_level)
++      else if (intel_domain_is_fs_paging(domain))
+               ret = domain_setup_first_level(iommu, domain, dev,
+                                              IOMMU_NO_PASID, NULL);
+-      else
++      else if (intel_domain_is_ss_paging(domain))
+               ret = domain_setup_second_level(iommu, domain, dev,
+                                               IOMMU_NO_PASID, NULL);
++      else if (WARN_ON(true))
++              ret = -EINVAL;
+       if (ret)
+               goto out_block_translation;
+@@ -3306,7 +3311,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
+       domain->use_first_level = first_stage;
+       domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+-      domain->domain.ops = intel_iommu_ops.default_domain_ops;
+       /* calculate the address width */
+       addr_width = agaw_to_width(iommu->agaw);
+@@ -3364,6 +3368,8 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
+       dmar_domain = paging_domain_alloc(dev, true);
+       if (IS_ERR(dmar_domain))
+               return ERR_CAST(dmar_domain);
++
++      dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
+       return &dmar_domain->domain;
+ }
+@@ -3392,6 +3398,7 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
+       if (IS_ERR(dmar_domain))
+               return ERR_CAST(dmar_domain);
++      dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
+       dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
+       if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+@@ -4110,12 +4117,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+       if (ret)
+               goto out_remove_dev_pasid;
+-      if (dmar_domain->use_first_level)
++      if (intel_domain_is_fs_paging(dmar_domain))
+               ret = domain_setup_first_level(iommu, dmar_domain,
+                                              dev, pasid, old);
+-      else
++      else if (intel_domain_is_ss_paging(dmar_domain))
+               ret = domain_setup_second_level(iommu, dmar_domain,
+                                               dev, pasid, old);
++      else if (WARN_ON(true))
++              ret = -EINVAL;
++
+       if (ret)
+               goto out_unwind_iopf;
+@@ -4390,6 +4400,32 @@ static struct iommu_domain identity_domain = {
+       },
+ };
++const struct iommu_domain_ops intel_fs_paging_domain_ops = {
++      .attach_dev = intel_iommu_attach_device,
++      .set_dev_pasid = intel_iommu_set_dev_pasid,
++      .map_pages = intel_iommu_map_pages,
++      .unmap_pages = intel_iommu_unmap_pages,
++      .iotlb_sync_map = intel_iommu_iotlb_sync_map,
++      .flush_iotlb_all = intel_flush_iotlb_all,
++      .iotlb_sync = intel_iommu_tlb_sync,
++      .iova_to_phys = intel_iommu_iova_to_phys,
++      .free = intel_iommu_domain_free,
++      .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
++};
++
++const struct iommu_domain_ops intel_ss_paging_domain_ops = {
++      .attach_dev = intel_iommu_attach_device,
++      .set_dev_pasid = intel_iommu_set_dev_pasid,
++      .map_pages = intel_iommu_map_pages,
++      .unmap_pages = intel_iommu_unmap_pages,
++      .iotlb_sync_map = intel_iommu_iotlb_sync_map,
++      .flush_iotlb_all = intel_flush_iotlb_all,
++      .iotlb_sync = intel_iommu_tlb_sync,
++      .iova_to_phys = intel_iommu_iova_to_phys,
++      .free = intel_iommu_domain_free,
++      .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
++};
++
+ const struct iommu_ops intel_iommu_ops = {
+       .blocked_domain         = &blocking_domain,
+       .release_domain         = &blocking_domain,
+@@ -4407,18 +4443,6 @@ const struct iommu_ops intel_iommu_ops = {
+       .is_attach_deferred     = intel_iommu_is_attach_deferred,
+       .def_domain_type        = device_def_domain_type,
+       .page_response          = intel_iommu_page_response,
+-      .default_domain_ops = &(const struct iommu_domain_ops) {
+-              .attach_dev             = intel_iommu_attach_device,
+-              .set_dev_pasid          = intel_iommu_set_dev_pasid,
+-              .map_pages              = intel_iommu_map_pages,
+-              .unmap_pages            = intel_iommu_unmap_pages,
+-              .iotlb_sync_map         = intel_iommu_iotlb_sync_map,
+-              .flush_iotlb_all        = intel_flush_iotlb_all,
+-              .iotlb_sync             = intel_iommu_tlb_sync,
+-              .iova_to_phys           = intel_iommu_iova_to_phys,
+-              .free                   = intel_iommu_domain_free,
+-              .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+-      }
+ };
+ static void quirk_iommu_igfx(struct pci_dev *dev)
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index 61f42802fe9e9..c699ed8810f23 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -1381,6 +1381,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+                                        u8 devfn, int alloc);
+ extern const struct iommu_ops intel_iommu_ops;
++extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
++extern const struct iommu_domain_ops intel_ss_paging_domain_ops;
++
++static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
++{
++      return domain->domain.ops == &intel_fs_paging_domain_ops;
++}
++
++static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
++{
++      return domain->domain.ops == &intel_ss_paging_domain_ops;
++}
+ #ifdef CONFIG_INTEL_IOMMU
+ extern int intel_iommu_sm;
+diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
+index fc312f649f9ef..1b6ad9c900a5a 100644
+--- a/drivers/iommu/intel/nested.c
++++ b/drivers/iommu/intel/nested.c
+@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
+       /* Must be nested domain */
+       if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
+               return ERR_PTR(-EOPNOTSUPP);
+-      if (parent->ops != intel_iommu_ops.default_domain_ops ||
+-          !s2_domain->nested_parent)
++      if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
+               return ERR_PTR(-EINVAL);
+       ret = iommu_copy_struct_from_user(&vtd, user_data,
+@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
+       if (!domain)
+               return ERR_PTR(-ENOMEM);
+-      domain->use_first_level = true;
+       domain->s2_domain = s2_domain;
+       domain->s1_cfg = vtd;
+       domain->domain.ops = &intel_nested_domain_ops;
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index f3da596410b5e..3994521f6ea48 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
+               return ERR_PTR(-ENOMEM);
+       domain->domain.ops = &intel_svm_domain_ops;
+-      domain->use_first_level = true;
+       INIT_LIST_HEAD(&domain->dev_pasids);
+       INIT_LIST_HEAD(&domain->cache_tags);
+       spin_lock_init(&domain->cache_lock);
+-- 
+2.51.0
+
diff --git a/queue-6.16/iommu-vt-d-make-iotlb_sync_map-a-static-property-of-.patch b/queue-6.16/iommu-vt-d-make-iotlb_sync_map-a-static-property-of-.patch
new file mode 100644 (file)
index 0000000..8a76462
--- /dev/null
@@ -0,0 +1,132 @@
+From 2000e22d4ec6cdfa5bb9f9a88e44ba911c5b4759 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Jul 2025 13:16:57 +0800
+Subject: iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit cee686775f9cd4eae31f3c1f7ec24b2048082667 ]
+
+Commit 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for
+non-caching/non-RWBF modes") dynamically set iotlb_sync_map. This causes
+synchronization issues due to lack of locking on map and attach paths,
+racing iommufd userspace operations.
+
+Invalidation changes must precede device attachment to ensure all flushes
+complete before hardware walks page tables, preventing coherence issues.
+
+Make domain->iotlb_sync_map static, set once during domain allocation. If
+an IOMMU requires iotlb_sync_map but the domain lacks it, attach is
+rejected. This won't reduce domain sharing: RWBF and shadowing page table
+caching are legacy uses with legacy hardware. Mixed configs (some IOMMUs
+in caching mode, others not) are unlikely in real-world scenarios.
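+
+Condensed from the hunks below, the property is now derived once at
+allocation time rather than at attach:
+
+	/* first stage: only the write buffer flush quirk matters */
+	if (rwbf_required(iommu))
+		dmar_domain->iotlb_sync_map = true;
+
+	/* second stage: caching mode (shadow page tables) also counts */
+	if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
+		dmar_domain->iotlb_sync_map = true;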
+
+Fixes: 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for non-caching/non-RWBF modes")
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250721051657.1695788-1-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 43 +++++++++++++++++++++++++------------
+ 1 file changed, 29 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index a718f0bc14cdf..34dd175a331dc 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -57,6 +57,8 @@
+ static void __init check_tylersburg_isoch(void);
+ static int rwbf_quirk;
++#define rwbf_required(iommu)  (rwbf_quirk || cap_rwbf((iommu)->cap))
++
+ /*
+  * set to 1 to panic kernel if can't successfully enable VT-d
+  * (used when kernel is launched w/ TXT)
+@@ -1798,18 +1800,6 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+                                         (pgd_t *)pgd, flags, old);
+ }
+-static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
+-                                     struct intel_iommu *iommu)
+-{
+-      if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
+-              return true;
+-
+-      if (rwbf_quirk || cap_rwbf(iommu->cap))
+-              return true;
+-
+-      return false;
+-}
+-
+ static int dmar_domain_attach_device(struct dmar_domain *domain,
+                                    struct device *dev)
+ {
+@@ -1849,8 +1839,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+       if (ret)
+               goto out_block_translation;
+-      domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
+-
+       return 0;
+ out_block_translation:
+@@ -3370,6 +3358,14 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
+               return ERR_CAST(dmar_domain);
+       dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
++      /*
++       * iotlb sync for map is only needed for legacy implementations that
++       * explicitly require flushing internal write buffers to ensure memory
++       * coherence.
++       */
++      if (rwbf_required(iommu))
++              dmar_domain->iotlb_sync_map = true;
++
+       return &dmar_domain->domain;
+ }
+@@ -3404,6 +3400,14 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
+       if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+               dmar_domain->domain.dirty_ops = &intel_dirty_ops;
++      /*
++       * Besides the internal write buffer flush, the caching mode used for
++       * legacy nested translation (which utilizes shadowing page tables)
++       * also requires iotlb sync on map.
++       */
++      if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
++              dmar_domain->iotlb_sync_map = true;
++
+       return &dmar_domain->domain;
+ }
+@@ -3449,6 +3453,11 @@ static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
+       if (!cap_fl1gp_support(iommu->cap) &&
+           (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+               return -EINVAL;
++
++      /* iotlb sync on map requirement */
++      if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map)
++              return -EINVAL;
++
+       return 0;
+ }
+@@ -3472,6 +3481,12 @@ paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
+               return -EINVAL;
+       if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+               return -EINVAL;
++
++      /* iotlb sync on map requirement */
++      if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
++          !dmar_domain->iotlb_sync_map)
++              return -EINVAL;
++
+       return 0;
+ }
+-- 
+2.51.0
+
diff --git a/queue-6.16/iommu-vt-d-split-intel_iommu_domain_alloc_paging_fla.patch b/queue-6.16/iommu-vt-d-split-intel_iommu_domain_alloc_paging_fla.patch
new file mode 100644 (file)
index 0000000..3340a15
--- /dev/null
@@ -0,0 +1,174 @@
+From 44b89905ddce4bce7c5ed001476c471de84a0c80 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 12:50:23 +0800
+Subject: iommu/vt-d: Split intel_iommu_domain_alloc_paging_flags()
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit b9434ba97c44f5744ea537adfd1f9f3fe102681c ]
+
+Create stage specific functions that check the stage specific conditions
+if each stage can be supported.
+
+Have intel_iommu_domain_alloc_paging_flags() call both stages in sequence
+until one does not return EOPNOTSUPP and prefer to use the first stage if
+available and suitable for the requested flags.
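+
+Condensed from the patch below, the resulting dispatch is:
+
+	/* Prefer first stage if possible by default. */
+	domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
+	if (domain != ERR_PTR(-EOPNOTSUPP))
+		return domain;
+	return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);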
+
+Move second-stage-only operations like nested_parent and dirty_tracking
+into the second stage function for clarity.
+
+Move initialization of the iommu_domain members into paging_domain_alloc().
+
+Drop initialization of domain->owner as the callers all do it.
+
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/4-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250714045028.958850-7-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: cee686775f9c ("iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 100 +++++++++++++++++++++---------------
+ 1 file changed, 58 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index c239e280e43d9..f9f16d9bbf0bc 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3299,10 +3299,15 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
+       spin_lock_init(&domain->lock);
+       spin_lock_init(&domain->cache_lock);
+       xa_init(&domain->iommu_array);
++      INIT_LIST_HEAD(&domain->s1_domains);
++      spin_lock_init(&domain->s1_lock);
+       domain->nid = dev_to_node(dev);
+       domain->use_first_level = first_stage;
++      domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
++      domain->domain.ops = intel_iommu_ops.default_domain_ops;
++
+       /* calculate the address width */
+       addr_width = agaw_to_width(iommu->agaw);
+       if (addr_width > cap_mgaw(iommu->cap))
+@@ -3344,62 +3349,73 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
+ }
+ static struct iommu_domain *
+-intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+-                                    const struct iommu_user_data *user_data)
++intel_iommu_domain_alloc_first_stage(struct device *dev,
++                                   struct intel_iommu *iommu, u32 flags)
++{
++      struct dmar_domain *dmar_domain;
++
++      if (flags & ~IOMMU_HWPT_ALLOC_PASID)
++              return ERR_PTR(-EOPNOTSUPP);
++
++      /* Only SL is available in legacy mode */
++      if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
++              return ERR_PTR(-EOPNOTSUPP);
++
++      dmar_domain = paging_domain_alloc(dev, true);
++      if (IS_ERR(dmar_domain))
++              return ERR_CAST(dmar_domain);
++      return &dmar_domain->domain;
++}
++
++static struct iommu_domain *
++intel_iommu_domain_alloc_second_stage(struct device *dev,
++                                    struct intel_iommu *iommu, u32 flags)
+ {
+-      struct device_domain_info *info = dev_iommu_priv_get(dev);
+-      bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+-      bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
+-      struct intel_iommu *iommu = info->iommu;
+       struct dmar_domain *dmar_domain;
+-      struct iommu_domain *domain;
+-      bool first_stage;
+       if (flags &
+           (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+              IOMMU_HWPT_ALLOC_PASID)))
+               return ERR_PTR(-EOPNOTSUPP);
+-      if (nested_parent && !nested_supported(iommu))
+-              return ERR_PTR(-EOPNOTSUPP);
+-      if (user_data || (dirty_tracking && !ssads_supported(iommu)))
++
++      if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
++           !nested_supported(iommu)) ||
++          ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
++           !ssads_supported(iommu)))
+               return ERR_PTR(-EOPNOTSUPP);
+-      /*
+-       * Always allocate the guest compatible page table unless
+-       * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING
+-       * is specified.
+-       */
+-      if (nested_parent || dirty_tracking) {
+-              if (!sm_supported(iommu) || !ecap_slts(iommu->ecap))
+-                      return ERR_PTR(-EOPNOTSUPP);
+-              first_stage = false;
+-      } else {
+-              first_stage = first_level_by_default(iommu);
+-      }
++      /* Legacy mode always supports second stage */
++      if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
++              return ERR_PTR(-EOPNOTSUPP);
+-      dmar_domain = paging_domain_alloc(dev, first_stage);
++      dmar_domain = paging_domain_alloc(dev, false);
+       if (IS_ERR(dmar_domain))
+               return ERR_CAST(dmar_domain);
+-      domain = &dmar_domain->domain;
+-      domain->type = IOMMU_DOMAIN_UNMANAGED;
+-      domain->owner = &intel_iommu_ops;
+-      domain->ops = intel_iommu_ops.default_domain_ops;
+-
+-      if (nested_parent) {
+-              dmar_domain->nested_parent = true;
+-              INIT_LIST_HEAD(&dmar_domain->s1_domains);
+-              spin_lock_init(&dmar_domain->s1_lock);
+-      }
+-      if (dirty_tracking) {
+-              if (dmar_domain->use_first_level) {
+-                      iommu_domain_free(domain);
+-                      return ERR_PTR(-EOPNOTSUPP);
+-              }
+-              domain->dirty_ops = &intel_dirty_ops;
+-      }
++      dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
+-      return domain;
++      if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
++              dmar_domain->domain.dirty_ops = &intel_dirty_ops;
++
++      return &dmar_domain->domain;
++}
++
++static struct iommu_domain *
++intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
++                                    const struct iommu_user_data *user_data)
++{
++      struct device_domain_info *info = dev_iommu_priv_get(dev);
++      struct intel_iommu *iommu = info->iommu;
++      struct iommu_domain *domain;
++
++      if (user_data)
++              return ERR_PTR(-EOPNOTSUPP);
++
++      /* Prefer first stage if possible by default. */
++      domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
++      if (domain != ERR_PTR(-EOPNOTSUPP))
++              return domain;
++      return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
+ }
+ static void intel_iommu_domain_free(struct iommu_domain *domain)
+-- 
+2.51.0
+
diff --git a/queue-6.16/iommu-vt-d-split-paging_domain_compatible.patch b/queue-6.16/iommu-vt-d-split-paging_domain_compatible.patch
new file mode 100644 (file)
index 0000000..f3d4447
--- /dev/null
@@ -0,0 +1,128 @@
+From a677c18ebcd8eeb03622fe4678b37da7b96ea7dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 12:50:26 +0800
+Subject: iommu/vt-d: Split paging_domain_compatible()
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit 85cfaacc99377a63e47412eeef66eff77197acea ]
+
+Make first/second stage specific functions that follow the same pattern
+as intel_iommu_domain_alloc_first/second_stage() for computing
+EOPNOTSUPP. This makes the code easier to understand: if we couldn't
+create a domain with the given parameters for this IOMMU instance, then
+we certainly are not compatible with it.
+
+Check superpage support directly against the per-stage cap bits and the
+pgsize_bitmap.
+
+Add a note that force_snooping is read without locking. The locking
+needs to cover the compatibility check and the addition of the device to
+the list.
+
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/7-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250714045028.958850-10-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Stable-dep-of: cee686775f9c ("iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 66 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 54 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 207f87eeb47a2..a718f0bc14cdf 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3434,33 +3434,75 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
+       domain_exit(dmar_domain);
+ }
++static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
++                                              struct intel_iommu *iommu)
++{
++      if (WARN_ON(dmar_domain->domain.dirty_ops ||
++                  dmar_domain->nested_parent))
++              return -EINVAL;
++
++      /* Only SL is available in legacy mode */
++      if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
++              return -EINVAL;
++
++      /* Same page size support */
++      if (!cap_fl1gp_support(iommu->cap) &&
++          (dmar_domain->domain.pgsize_bitmap & SZ_1G))
++              return -EINVAL;
++      return 0;
++}
++
++static int
++paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
++                                    struct intel_iommu *iommu)
++{
++      unsigned int sslps = cap_super_page_val(iommu->cap);
++
++      if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu))
++              return -EINVAL;
++      if (dmar_domain->nested_parent && !nested_supported(iommu))
++              return -EINVAL;
++
++      /* Legacy mode always supports second stage */
++      if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
++              return -EINVAL;
++
++      /* Same page size support */
++      if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
++              return -EINVAL;
++      if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
++              return -EINVAL;
++      return 0;
++}
++
+ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
+ {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct intel_iommu *iommu = info->iommu;
++      int ret = -EINVAL;
+       int addr_width;
+-      if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+-              return -EPERM;
++      if (intel_domain_is_fs_paging(dmar_domain))
++              ret = paging_domain_compatible_first_stage(dmar_domain, iommu);
++      else if (intel_domain_is_ss_paging(dmar_domain))
++              ret = paging_domain_compatible_second_stage(dmar_domain, iommu);
++      else if (WARN_ON(true))
++              ret = -EINVAL;
++      if (ret)
++              return ret;
++      /*
++       * FIXME this is locked wrong, it needs to be under the
++       * dmar_domain->lock
++       */
+       if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
+               return -EINVAL;
+-      if (domain->dirty_ops && !ssads_supported(iommu))
+-              return -EINVAL;
+-
+       if (dmar_domain->iommu_coherency !=
+                       iommu_paging_structure_coherency(iommu))
+               return -EINVAL;
+-      if (dmar_domain->iommu_superpage !=
+-                      iommu_superpage_capability(iommu, dmar_domain->use_first_level))
+-              return -EINVAL;
+-
+-      if (dmar_domain->use_first_level &&
+-          (!sm_supported(iommu) || !ecap_flts(iommu->ecap)))
+-              return -EINVAL;
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+-- 
+2.51.0
+
diff --git a/queue-6.16/irqchip-mvebu-gicp-fix-an-is_err-vs-null-check-in-pr.patch b/queue-6.16/irqchip-mvebu-gicp-fix-an-is_err-vs-null-check-in-pr.patch
new file mode 100644 (file)
index 0000000..9fec491
--- /dev/null
@@ -0,0 +1,37 @@
+From 728f6064d8b834ac62cf508de5f526d518032c3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 12:40:02 +0300
+Subject: irqchip/mvebu-gicp: Fix an IS_ERR() vs NULL check in probe()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit c8bb0f00a4886b24d933ffaabcdc09bf9a370dca ]
+
+ioremap() never returns error pointers; it returns NULL on error.  Fix
+the check to match.
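+
+A minimal sketch of the corrected pattern (hypothetical driver code):
+
+	void __iomem *base = ioremap(res->start, resource_size(res));
+
+	if (!base)		/* NULL on failure, never an ERR_PTR */
+		return -ENOMEM;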
+
+Fixes: 3c3d7dbab2c7 ("irqchip/mvebu-gicp: Clear pending interrupts on init")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/aKRGcgMeaXm2TMIC@stanley.mountain
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-mvebu-gicp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
+index 54833717f8a70..667bde3c651ff 100644
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -238,7 +238,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
+       }
+       base = ioremap(gicp->res->start, resource_size(gicp->res));
+-      if (IS_ERR(base)) {
++      if (!base) {
+               dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n");
+       } else {
+               for (i = 0; i < 64; i++)
+-- 
+2.51.0
+
diff --git a/queue-6.16/perf-fix-the-poll_hup-delivery-breakage.patch b/queue-6.16/perf-fix-the-poll_hup-delivery-breakage.patch
new file mode 100644 (file)
index 0000000..2be51d6
--- /dev/null
@@ -0,0 +1,48 @@
+From 60a657f6a14d5f648c363ca3283106784e8bcce5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 11:26:44 -0700
+Subject: perf: Fix the POLL_HUP delivery breakage
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit 18dbcbfabfffc4a5d3ea10290c5ad27f22b0d240 ]
+
+The event_limit can be set by the PERF_EVENT_IOC_REFRESH ioctl to limit
+the number of events. When the event_limit reaches 0, the POLL_HUP
+signal should be sent, but it is missed.
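+
+A hypothetical user-space sketch of the affected flow (the perf fd must
+be set up for SIGIO delivery for the signal to be observed):
+
+	fcntl(perf_fd, F_SETOWN, getpid());
+	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
+	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 3);
+	/* after three overflows the event stops and SIGIO arrives with
+	 * si_code == POLL_HUP */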
+
+The corresponding counter should be stopped when the event_limit reaches
+0. This was implemented in the ARCH-specific code. However, since
+commit 9734e25fbf5a ("perf: Fix the throttle logic for a group"), all
+the ARCH-specific code has been moved to the generic code, and the code
+to handle the event_limit was lost.
+
+Add the event->pmu->stop(event, 0); back.
+
+Fixes: 9734e25fbf5a ("perf: Fix the throttle logic for a group")
+Closes: https://lore.kernel.org/lkml/aICYAqM5EQUlTqtX@li-2b55cdcc-350b-11b2-a85c-a78bff51fc11.ibm.com/
+Reported-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Link: https://lkml.kernel.org/r/20250811182644.1305952-1-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 872122e074e5f..820127536e62b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10330,6 +10330,7 @@ static int __perf_event_overflow(struct perf_event *event,
+               ret = 1;
+               event->pending_kill = POLL_HUP;
+               perf_event_disable_inatomic(event);
++              event->pmu->stop(event, 0);
+       }
+       if (event->attr.sigtrap) {
+-- 
+2.51.0
+
diff --git a/queue-6.16/series b/queue-6.16/series
new file mode 100644 (file)
index 0000000..abe5ec0
--- /dev/null
@@ -0,0 +1,15 @@
+fs-add-a-fmode_-flag-to-indicate-iocb_has_metadata-a.patch
+block-don-t-silently-ignore-metadata-for-sync-read-w.patch
+coredump-don-t-pointlessly-check-and-spew-warnings.patch
+fuse-block-access-to-folio-overlimit.patch
+fhandle-use-more-consistent-rules-for-decoding-file-.patch
+dma-debug-don-t-enforce-dma-mapping-check-on-noncohe.patch
+perf-fix-the-poll_hup-delivery-breakage.patch
+irqchip-mvebu-gicp-fix-an-is_err-vs-null-check-in-pr.patch
+bluetooth-hci_conn-fix-not-cleaning-up-broadcaster-b.patch
+iommu-vt-d-split-intel_iommu_domain_alloc_paging_fla.patch
+iommu-vt-d-create-unique-domain-ops-for-each-stage.patch
+iommu-vt-d-split-paging_domain_compatible.patch
+iommu-vt-d-make-iotlb_sync_map-a-static-property-of-.patch
+bluetooth-hci_conn-fix-running-bis_cleanup-for-hci_c.patch
+bluetooth-iso-fix-getname-not-returning-broadcast-fi.patch