6.12-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 18 Aug 2025 10:07:49 +0000 (12:07 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 18 Aug 2025 10:07:49 +0000 (12:07 +0200)
added patches:
block-introduce-bio_needs_zone_write_plugging.patch
cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch
dm-always-split-write-bios-to-zoned-device-limits.patch
ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch
iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch
iommufd-prevent-align-overflow.patch
iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
mm-damon-core-commit-damos-target_nid.patch
usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
usb-typec-ucsi-update-power_supply-on-power-role-change.patch

16 files changed:
queue-6.12/block-introduce-bio_needs_zone_write_plugging.patch [new file with mode: 0644]
queue-6.12/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch [new file with mode: 0644]
queue-6.12/clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch [new file with mode: 0644]
queue-6.12/dm-always-split-write-bios-to-zoned-device-limits.patch [new file with mode: 0644]
queue-6.12/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch [new file with mode: 0644]
queue-6.12/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch [new file with mode: 0644]
queue-6.12/ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch [new file with mode: 0644]
queue-6.12/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch [new file with mode: 0644]
queue-6.12/iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch [new file with mode: 0644]
queue-6.12/iommufd-prevent-align-overflow.patch [new file with mode: 0644]
queue-6.12/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch [new file with mode: 0644]
queue-6.12/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch [new file with mode: 0644]
queue-6.12/mm-damon-core-commit-damos-target_nid.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch [new file with mode: 0644]
queue-6.12/usb-typec-ucsi-update-power_supply-on-power-role-change.patch [new file with mode: 0644]

diff --git a/queue-6.12/block-introduce-bio_needs_zone_write_plugging.patch b/queue-6.12/block-introduce-bio_needs_zone_write_plugging.patch
new file mode 100644 (file)
index 0000000..b723bce
--- /dev/null
@@ -0,0 +1,161 @@
+From f70291411ba20d50008db90a6f0731efac27872c Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Wed, 25 Jun 2025 18:33:24 +0900
+Subject: block: Introduce bio_needs_zone_write_plugging()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit f70291411ba20d50008db90a6f0731efac27872c upstream.
+
+In preparation for fixing device mapper zone write handling, introduce
+the inline helper function bio_needs_zone_write_plugging() to test if a
+BIO requires handling through zone write plugging using the function
+blk_zone_plug_bio(). This function returns true for any write
+(op_is_write(bio) == true) operation directed at a zoned block device
+using zone write plugging, that is, a block device with a disk that has
+a zone write plug hash table.
+
+This helper simplifies the check on entry to blk_zone_plug_bio() and is
+used to protect calls to it for blk-mq devices and DM devices.
+
+Fixes: f211268ed1f9 ("dm: Use the block layer zone append emulation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20250625093327.548866-3-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c         |    6 +++--
+ block/blk-zoned.c      |   20 -----------------
+ drivers/md/dm.c        |    4 ++-
+ include/linux/blkdev.h |   55 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 63 insertions(+), 22 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3114,8 +3114,10 @@ void blk_mq_submit_bio(struct bio *bio)
+       if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+               goto queue_exit;
+-      if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))
+-              goto queue_exit;
++      if (bio_needs_zone_write_plugging(bio)) {
++              if (blk_zone_plug_bio(bio, nr_segs))
++                      goto queue_exit;
++      }
+ new_request:
+       if (!rq) {
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1131,25 +1131,7 @@ bool blk_zone_plug_bio(struct bio *bio,
+ {
+       struct block_device *bdev = bio->bi_bdev;
+-      if (!bdev->bd_disk->zone_wplugs_hash)
+-              return false;
+-
+-      /*
+-       * If the BIO already has the plugging flag set, then it was already
+-       * handled through this path and this is a submission from the zone
+-       * plug bio submit work.
+-       */
+-      if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+-              return false;
+-
+-      /*
+-       * We do not need to do anything special for empty flush BIOs, e.g
+-       * BIOs such as issued by blkdev_issue_flush(). The is because it is
+-       * the responsibility of the user to first wait for the completion of
+-       * write operations for flush to have any effect on the persistence of
+-       * the written data.
+-       */
+-      if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++      if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash))
+               return false;
+       /*
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1800,7 +1800,9 @@ static inline bool dm_zone_bio_needs_spl
+ }
+ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+ {
+-      return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
++      if (!bio_needs_zone_write_plugging(bio))
++              return false;
++      return blk_zone_plug_bio(bio, 0);
+ }
+ static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -682,12 +682,67 @@ static inline unsigned int disk_nr_zones
+ {
+       return disk->nr_zones;
+ }
++
++/**
++ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
++ *                               write plugging
++ * @bio: The BIO being submitted
++ *
++ * Return true whenever @bio execution needs to be handled through zone
++ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
++ */
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++      enum req_op op = bio_op(bio);
++
++      /*
++       * Only zoned block devices have a zone write plug hash table. But not
++       * all of them have one (e.g. DM devices may not need one).
++       */
++      if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
++              return false;
++
++      /* Only write operations need zone write plugging. */
++      if (!op_is_write(op))
++              return false;
++
++      /* Ignore empty flush */
++      if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++              return false;
++
++      /* Ignore BIOs that already have been handled by zone write plugging. */
++      if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
++              return false;
++
++      /*
++       * All zone write operations must be handled through zone write plugging
++       * using blk_zone_plug_bio().
++       */
++      switch (op) {
++      case REQ_OP_ZONE_APPEND:
++      case REQ_OP_WRITE:
++      case REQ_OP_WRITE_ZEROES:
++      case REQ_OP_ZONE_FINISH:
++      case REQ_OP_ZONE_RESET:
++      case REQ_OP_ZONE_RESET_ALL:
++              return true;
++      default:
++              return false;
++      }
++}
++
+ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+ #else /* CONFIG_BLK_DEV_ZONED */
+ static inline unsigned int disk_nr_zones(struct gendisk *disk)
+ {
+       return 0;
+ }
++
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++      return false;
++}
++
+ static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ {
+       return false;
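
The decision order of the new helper can be modeled in a few lines. Below is a minimal, compilable userspace sketch of that predicate, using mock stand-ins for struct bio and the request ops (the op_is_write() pre-check is folded into the switch, which only matches write operations anyway); this is an illustration, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for the kernel types; names mirror the patch but this
 * is not the kernel API. */
enum req_op {
    REQ_OP_READ, REQ_OP_WRITE, REQ_OP_WRITE_ZEROES, REQ_OP_ZONE_APPEND,
    REQ_OP_ZONE_FINISH, REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
};

struct bio {
    enum req_op op;
    bool has_zone_wplugs_hash; /* disk has a zone write plug hash table */
    bool is_empty_flush;       /* flush BIO with no data sectors */
    bool zone_write_plugging;  /* BIO_ZONE_WRITE_PLUGGING already set */
};

/* Same decision order as the new inline helper in blkdev.h. */
static bool bio_needs_zone_write_plugging(const struct bio *bio)
{
    if (!bio->has_zone_wplugs_hash)
        return false;
    if (bio->is_empty_flush)
        return false;
    if (bio->zone_write_plugging)
        return false;
    switch (bio->op) {
    case REQ_OP_WRITE:
    case REQ_OP_WRITE_ZEROES:
    case REQ_OP_ZONE_APPEND:
    case REQ_OP_ZONE_FINISH:
    case REQ_OP_ZONE_RESET:
    case REQ_OP_ZONE_RESET_ALL:
        return true;
    default:
        return false;
    }
}

int main(void)
{
    struct bio w = { .op = REQ_OP_WRITE, .has_zone_wplugs_hash = true };
    struct bio r = { .op = REQ_OP_READ,  .has_zone_wplugs_hash = true };

    printf("write on zoned disk: %d\n", bio_needs_zone_write_plugging(&w)); /* 1 */
    printf("read on zoned disk:  %d\n", bio_needs_zone_write_plugging(&r)); /* 0 */
    return 0;
}
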
diff --git a/queue-6.12/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch b/queue-6.12/cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
new file mode 100644 (file)
index 0000000..e4d152c
--- /dev/null
@@ -0,0 +1,61 @@
+From 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 Mon Sep 17 00:00:00 2001
+From: Shyam Prasad N <sprasad@microsoft.com>
+Date: Thu, 17 Jul 2025 17:36:13 +0530
+Subject: cifs: reset iface weights when we cannot find a candidate
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+commit 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 upstream.
+
+We now do a weighted selection of server interfaces when allocating
+new channels. The weights are decided based on the speed advertised.
+The fulfilled weight for an interface is a counter that is used to
+track the interface selection. It should be reset back to zero once
+all interfaces have fulfilled their weights.
+
+In cifs_chan_update_iface, this reset logic was missing. As a result,
+when the server interface list changes, the client may not be able
+to find a new candidate for other channels after all interfaces have
+been fulfilled.
+
+Fixes: a6d8fb54a515 ("cifs: distribute channels across interfaces based on speed")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/sess.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -360,6 +360,7 @@ cifs_chan_update_iface(struct cifs_ses *
+       struct cifs_server_iface *old_iface = NULL;
+       struct cifs_server_iface *last_iface = NULL;
+       struct sockaddr_storage ss;
++      int retry = 0;
+       spin_lock(&ses->chan_lock);
+       chan_index = cifs_ses_get_chan_index(ses, server);
+@@ -388,6 +389,7 @@ cifs_chan_update_iface(struct cifs_ses *
+               return;
+       }
++try_again:
+       last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
+                                    iface_head);
+       iface_min_speed = last_iface->speed;
+@@ -425,6 +427,13 @@ cifs_chan_update_iface(struct cifs_ses *
+       }
+       if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++              list_for_each_entry(iface, &ses->iface_list, iface_head)
++                      iface->weight_fulfilled = 0;
++
++              /* see if it can be satisfied in second attempt */
++              if (!retry++)
++                      goto try_again;
++
+               iface = NULL;
+               cifs_dbg(FYI, "unable to find a suitable iface\n");
+       }
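
The reset-and-retry flow the fix adds can be illustrated standalone. This is a hypothetical userspace model of the weighted selection, with a simplified struct iface in place of cifs_server_iface; only the weight_fulfilled reset and the single try_again pass mirror the patch.

#include <stdio.h>

/* Hypothetical model; fields loosely mirror cifs_server_iface but this
 * is not the kernel structure. */
struct iface {
    int weight;           /* derived from the advertised speed */
    int weight_fulfilled; /* channels already placed on this iface */
};

static struct iface *pick_candidate(struct iface *list, int n)
{
    for (int i = 0; i < n; i++)
        if (list[i].weight_fulfilled < list[i].weight)
            return &list[i];
    return NULL; /* every iface has fulfilled its weight */
}

int main(void)
{
    struct iface list[] = { { 2, 2 }, { 1, 1 } }; /* all weights fulfilled */
    int n = 2, retry = 0;
    struct iface *iface;

try_again:
    iface = pick_candidate(list, n);
    if (!iface) {
        /* The fix: reset the fulfilled weights and retry exactly once. */
        for (int i = 0; i < n; i++)
            list[i].weight_fulfilled = 0;
        if (!retry++)
            goto try_again;
    }
    printf(iface ? "candidate found\n" : "unable to find a suitable iface\n");
    return 0;
}
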
diff --git a/queue-6.12/clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch b/queue-6.12/clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch
new file mode 100644 (file)
index 0000000..ad0d2fa
--- /dev/null
@@ -0,0 +1,56 @@
+From 077ec7bcec9a8987d2a133afb7e13011878c7576 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Thu, 22 May 2025 22:25:55 +0200
+Subject: clk: qcom: gcc-ipq8074: fix broken freq table for nss_port6_tx_clk_src
+
+From: Christian Marangi <ansuelsmth@gmail.com>
+
+commit 077ec7bcec9a8987d2a133afb7e13011878c7576 upstream.
+
+With the conversion done by commit e88f03230dc0 ("clk: qcom: gcc-ipq8074:
+rework nss_port5/6 clock to multiple conf"), a copy-paste error was made
+for the nss_port6_tx_clk_src frequency table.
+
+This was caused by the wrong setting of the parent in
+ftbl_nss_port6_tx_clk_src that was wrongly set to P_UNIPHY1_RX instead
+of P_UNIPHY2_TX.
+
+This causes the UNIPHY2 port to malfunction when it needs to be scaled
+to a higher clock. The malfunction was observed, for example, with an
+Aquantia 10G PHY connected at a speed higher than 1G (e.g. 2.5G).
+
+Fix the broken frequency table to restore original functionality.
+
+Cc: stable@vger.kernel.org
+Fixes: e88f03230dc0 ("clk: qcom: gcc-ipq8074: rework nss_port5/6 clock to multiple conf")
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Tested-by: Robert Marko <robimarko@gmail.com>
+Link: https://lore.kernel.org/r/20250522202600.4028-1-ansuelsmth@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/gcc-ipq8074.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 7258ba5c0900..1329ea28d703 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
+ static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
+       FMS(19200000, P_XO, 1, 0, 0),
+       FM(25000000, ftbl_nss_port6_tx_clk_src_25),
+-      FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
++      FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
+       FM(125000000, ftbl_nss_port6_tx_clk_src_125),
+-      FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+-      FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
++      FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
++      FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
+       { }
+ };
+-- 
+2.50.1
+
diff --git a/queue-6.12/dm-always-split-write-bios-to-zoned-device-limits.patch b/queue-6.12/dm-always-split-write-bios-to-zoned-device-limits.patch
new file mode 100644 (file)
index 0000000..6c096d7
--- /dev/null
@@ -0,0 +1,101 @@
+From 2df7168717b7d2d32bcf017c68be16e4aae9dd13 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Wed, 25 Jun 2025 18:33:25 +0900
+Subject: dm: Always split write BIOs to zoned device limits
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit 2df7168717b7d2d32bcf017c68be16e4aae9dd13 upstream.
+
+Any zoned DM target that requires zone append emulation will use the
+block layer zone write plugging. In such case, DM target drivers must
+not split BIOs using dm_accept_partial_bio() as doing so can potentially
+lead to deadlocks with queue freeze operations. Regular write operations
+used to emulate zone append operations also cannot be split by the
+target driver, as that would result in an invalid written sector value
+being returned through the BIO sector.
+
+In order for zoned DM target drivers to avoid such incorrect BIO
+splitting, we must ensure that large BIOs are split before being passed
+to the map() function of the target, thus guaranteeing that the
+limits for the mapped device are not exceeded.
+
+dm-crypt and dm-flakey are the only target drivers supporting zoned
+devices and using dm_accept_partial_bio().
+
+In the case of dm-crypt, this function is used to split BIOs to the
+internal max_write_size limit (which will be suppressed in a different
+patch). However, crypt_alloc_buffer() uses a bioset allowing only up to
+BIO_MAX_VECS (256) vectors in a BIO, so the dm-crypt device max_segments
+limit, which is not set and so defaults to BLK_MAX_SEGMENTS (128), must
+be respected and write BIOs split accordingly.
+
+In the case of dm-flakey, since zone append emulation is not required,
+the block layer zone write plugging is not used and no splitting of
+BIOs is required.
+
+Modify the function dm_zone_bio_needs_split() to use the block layer
+helper function bio_needs_zone_write_plugging() to force a call to
+bio_split_to_limits() in dm_split_and_process_bio(). This allows DM
+target drivers to avoid using dm_accept_partial_bio() for write
+operations on zoned DM devices.
+
+Fixes: f211268ed1f9 ("dm: Use the block layer zone append emulation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Link: https://lore.kernel.org/r/20250625093327.548866-4-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm.c |   29 ++++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1792,12 +1792,29 @@ static inline bool dm_zone_bio_needs_spl
+                                          struct bio *bio)
+ {
+       /*
+-       * For mapped device that need zone append emulation, we must
+-       * split any large BIO that straddles zone boundaries.
++       * Special case the zone operations that cannot or should not be split.
+        */
+-      return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
+-              !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
++      switch (bio_op(bio)) {
++      case REQ_OP_ZONE_APPEND:
++      case REQ_OP_ZONE_FINISH:
++      case REQ_OP_ZONE_RESET:
++      case REQ_OP_ZONE_RESET_ALL:
++              return false;
++      default:
++              break;
++      }
++
++      /*
++       * Mapped devices that require zone append emulation will use the block
++       * layer zone write plugging. In such case, we must split any large BIO
++       * to the mapped device limits to avoid potential deadlocks with queue
++       * freeze operations.
++       */
++      if (!dm_emulate_zone_append(md))
++              return false;
++      return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
+ }
++
+ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+ {
+       if (!bio_needs_zone_write_plugging(bio))
+@@ -1946,9 +1963,7 @@ static void dm_split_and_process_bio(str
+       is_abnormal = is_abnormal_io(bio);
+       if (static_branch_unlikely(&zoned_enabled)) {
+-              /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
+-              need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
+-                      (is_abnormal || dm_zone_bio_needs_split(md, bio));
++              need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+       } else {
+               need_split = is_abnormal;
+       }
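
A compact model of the new split decision, assuming mock bio predicates in place of bio_needs_zone_write_plugging() and bio_straddles_zones():

#include <stdbool.h>
#include <stdio.h>

/* Mock predicates and types; not the kernel helpers. */
enum op { OP_WRITE, OP_ZONE_APPEND, OP_ZONE_FINISH, OP_ZONE_RESET,
          OP_ZONE_RESET_ALL };

struct bio { enum op op; bool straddles_zones; bool needs_zone_write_plugging; };
struct mapped_device { bool emulate_zone_append; };

static bool dm_zone_bio_needs_split(struct mapped_device *md, struct bio *bio)
{
    /* Zone management operations can never be split. */
    switch (bio->op) {
    case OP_ZONE_APPEND:
    case OP_ZONE_FINISH:
    case OP_ZONE_RESET:
    case OP_ZONE_RESET_ALL:
        return false;
    default:
        break;
    }
    /* Only devices using zone append emulation need the early split. */
    if (!md->emulate_zone_append)
        return false;
    return bio->needs_zone_write_plugging || bio->straddles_zones;
}

int main(void)
{
    struct mapped_device md = { .emulate_zone_append = true };
    struct bio w = { .op = OP_WRITE, .straddles_zones = true };
    struct bio z = { .op = OP_ZONE_RESET };

    printf("straddling write: %d\n", dm_zone_bio_needs_split(&md, &w)); /* 1 */
    printf("zone reset:       %d\n", dm_zone_bio_needs_split(&md, &z)); /* 0 */
    return 0;
}
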
diff --git a/queue-6.12/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch b/queue-6.12/ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
new file mode 100644 (file)
index 0000000..6489b9e
--- /dev/null
@@ -0,0 +1,92 @@
+From 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:21 +0800
+Subject: ext4: fix largest free orders lists corruption on mb_optimize_scan switch
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 upstream.
+
+The grp->bb_largest_free_order is updated regardless of whether
+mb_optimize_scan is enabled. This can lead to inconsistencies between
+grp->bb_largest_free_order and the actual s_mb_largest_free_orders list
+index when mb_optimize_scan is repeatedly enabled and disabled via remount.
+
+For example, if mb_optimize_scan is initially enabled, largest free
+order is 3, and the group is in s_mb_largest_free_orders[3]. Then,
+mb_optimize_scan is disabled via remount, block allocations occur,
+updating largest free order to 2. Finally, mb_optimize_scan is re-enabled
+via remount, more block allocations update largest free order to 1.
+
+At this point, the group would be removed from s_mb_largest_free_orders[3]
+under the protection of s_mb_largest_free_orders_locks[2]. This lock
+mismatch can lead to list corruption.
+
+To fix this, whenever grp->bb_largest_free_order changes, we now always
+attempt to remove the group from its old order list. However, we only
+insert the group into the new order list if `mb_optimize_scan` is enabled.
+This approach helps prevent lock inconsistencies and ensures the data in
+the order lists remains reliable.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-12-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   33 ++++++++++++++-------------------
+ 1 file changed, 14 insertions(+), 19 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1150,33 +1150,28 @@ static void
+ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      int i;
++      int new, old = grp->bb_largest_free_order;
+-      for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+-              if (grp->bb_counters[i] > 0)
++      for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
++              if (grp->bb_counters[new] > 0)
+                       break;
++
+       /* No need to move between order lists? */
+-      if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+-          i == grp->bb_largest_free_order) {
+-              grp->bb_largest_free_order = i;
++      if (new == old)
+               return;
+-      }
+-      if (grp->bb_largest_free_order >= 0) {
+-              write_lock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++      if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) {
++              write_lock(&sbi->s_mb_largest_free_orders_locks[old]);
+               list_del_init(&grp->bb_largest_free_order_node);
+-              write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++              write_unlock(&sbi->s_mb_largest_free_orders_locks[old]);
+       }
+-      grp->bb_largest_free_order = i;
+-      if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+-              write_lock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++
++      grp->bb_largest_free_order = new;
++      if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
++              write_lock(&sbi->s_mb_largest_free_orders_locks[new]);
+               list_add_tail(&grp->bb_largest_free_order_node,
+-                    &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
+-              write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-                                            grp->bb_largest_free_order]);
++                            &sbi->s_mb_largest_free_orders[new]);
++              write_unlock(&sbi->s_mb_largest_free_orders_locks[new]);
+       }
+ }
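
The locking discipline behind the fix (always take the lock matching the list actually touched) can be sketched with pthread rwlocks standing in for s_mb_largest_free_orders_locks[] and counters standing in for the lists; an illustrative model, not the ext4 code (compile with -lpthread).

#include <pthread.h>
#include <stdio.h>

#define NUM_ORDERS 4

/* Stand-ins: one rwlock and one element count per order list. */
static pthread_rwlock_t order_locks[NUM_ORDERS];
static int order_count[NUM_ORDERS];

struct group { int order; int on_list; };

static void set_largest_free_order(struct group *grp, int new, int optimize_scan)
{
    int old = grp->order;

    if (new == old)
        return;
    /* Always remove from the old list, under the *old* order's lock. */
    if (old >= 0 && grp->on_list) {
        pthread_rwlock_wrlock(&order_locks[old]);
        order_count[old]--;
        grp->on_list = 0;
        pthread_rwlock_unlock(&order_locks[old]);
    }
    grp->order = new;
    /* Insert into the new list only while mb_optimize_scan is enabled. */
    if (optimize_scan && new >= 0) {
        pthread_rwlock_wrlock(&order_locks[new]);
        order_count[new]++;
        grp->on_list = 1;
        pthread_rwlock_unlock(&order_locks[new]);
    }
}

int main(void)
{
    struct group g = { .order = 3, .on_list = 1 };

    for (int i = 0; i < NUM_ORDERS; i++)
        pthread_rwlock_init(&order_locks[i], NULL);
    order_count[3] = 1;
    set_largest_free_order(&g, 1, 1); /* removed under locks[3], added under locks[1] */
    printf("order 3 count: %d, order 1 count: %d\n", order_count[3], order_count[1]);
    return 0;
}
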
diff --git a/queue-6.12/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch b/queue-6.12/ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
new file mode 100644 (file)
index 0000000..4c44084
--- /dev/null
@@ -0,0 +1,85 @@
+From 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:20 +0800
+Subject: ext4: fix zombie groups in average fragment size lists
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 upstream.
+
+Groups with no free blocks shouldn't be in any average fragment size list.
+However, when all blocks in a group are allocated (i.e., bb_fragments or
+bb_free is 0), we currently skip updating the average fragment size, which
+means the group isn't removed from its previous s_mb_avg_fragment_size[old]
+list.
+
+This created "zombie" groups that were always skipped during traversal as
+they couldn't satisfy any block allocation requests, negatively impacting
+traversal efficiency.
+
+Therefore, when a group becomes completely full, bb_avg_fragment_size_order
+is now set to -1. If the old order was not -1, a removal operation is
+performed; if the new order is not -1, an insertion is performed.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-11-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c |   34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -841,30 +841,30 @@ static void
+ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      int new_order;
++      int new, old;
+-      if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
++      if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
+               return;
+-      new_order = mb_avg_fragment_size_order(sb,
+-                                      grp->bb_free / grp->bb_fragments);
+-      if (new_order == grp->bb_avg_fragment_size_order)
++      old = grp->bb_avg_fragment_size_order;
++      new = grp->bb_fragments == 0 ? -1 :
++            mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
++      if (new == old)
+               return;
+-      if (grp->bb_avg_fragment_size_order != -1) {
+-              write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
++      if (old >= 0) {
++              write_lock(&sbi->s_mb_avg_fragment_size_locks[old]);
+               list_del(&grp->bb_avg_fragment_size_node);
+-              write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
++              write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]);
++      }
++
++      grp->bb_avg_fragment_size_order = new;
++      if (new >= 0) {
++              write_lock(&sbi->s_mb_avg_fragment_size_locks[new]);
++              list_add_tail(&grp->bb_avg_fragment_size_node,
++                              &sbi->s_mb_avg_fragment_size[new]);
++              write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]);
+       }
+-      grp->bb_avg_fragment_size_order = new_order;
+-      write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
+-      list_add_tail(&grp->bb_avg_fragment_size_node,
+-              &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+-      write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-                                      grp->bb_avg_fragment_size_order]);
+ }
+ /*
diff --git a/queue-6.12/ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch b/queue-6.12/ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch
new file mode 100644 (file)
index 0000000..2df7067
--- /dev/null
@@ -0,0 +1,67 @@
+From 82e6381e23f1ea7a14f418215068aaa2ca046c84 Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Fri, 25 Jul 2025 10:15:50 +0800
+Subject: ext4: initialize superblock fields in the kballoc-test.c kunit tests
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 82e6381e23f1ea7a14f418215068aaa2ca046c84 upstream.
+
+Various changes in the "ext4: better scalability for ext4 block
+allocation" patch series have resulted in kunit test failures, most
+notably in the test_new_blocks_simple and the test_mb_mark_used tests.
+The root cause of these failures is that various in-memory ext4 data
+structures were not getting initialized, and while previous versions
+of the functions exercised by the unit tests didn't use these
+structure members, this was arguably a test bug.
+
+Since one of the patches in the block allocation scalability series
+is a fix which has a cc:stable tag, this commit also has a
+cc:stable tag.
+
+CC: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250714130327.1830534-1-libaokun1@huawei.com
+Link: https://patch.msgid.link/20250725021550.3177573-1-yi.zhang@huaweicloud.com
+Link: https://patch.msgid.link/20250725021654.3188798-1-yi.zhang@huaweicloud.com
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Closes: https://lore.kernel.org/linux-ext4/b0635ad0-7ebf-4152-a69b-58e7e87d5085@roeck-us.net/
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc-test.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/ext4/mballoc-test.c
++++ b/fs/ext4/mballoc-test.c
+@@ -155,6 +155,7 @@ static struct super_block *mbt_ext4_allo
+       bgl_lock_init(sbi->s_blockgroup_lock);
+       sbi->s_es = &fsb->es;
++      sbi->s_sb = sb;
+       sb->s_fs_info = sbi;
+       up_write(&sb->s_umount);
+@@ -801,6 +802,10 @@ static void test_mb_mark_used(struct kun
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
++      grp->bb_largest_free_order = -1;
++      grp->bb_avg_fragment_size_order = -1;
++      INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++      INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+       mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+       for (i = 0; i < TEST_RANGE_COUNT; i++)
+               test_mb_mark_used_range(test, &e4b, ranges[i].start,
+@@ -873,6 +878,10 @@ static void test_mb_free_blocks(struct k
+       ext4_unlock_group(sb, TEST_GOAL_GROUP);
+       grp->bb_free = 0;
++      grp->bb_largest_free_order = -1;
++      grp->bb_avg_fragment_size_order = -1;
++      INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++      INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+       memset(bitmap, 0xff, sb->s_blocksize);
+       mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
diff --git a/queue-6.12/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch b/queue-6.12/iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
new file mode 100644 (file)
index 0000000..76e8f16
--- /dev/null
@@ -0,0 +1,111 @@
+From f7fa8520f30373ce99c436c4d57c76befdacbef3 Mon Sep 17 00:00:00 2001
+From: Alexey Klimov <alexey.klimov@linaro.org>
+Date: Fri, 13 Jun 2025 18:32:38 +0100
+Subject: iommu/arm-smmu-qcom: Add SM6115 MDSS compatible
+
+From: Alexey Klimov <alexey.klimov@linaro.org>
+
+commit f7fa8520f30373ce99c436c4d57c76befdacbef3 upstream.
+
+Add the SM6115 MDSS compatible to the clients compatible list, as it
+also needs that workaround.
+Without this workaround, for example, the QRB4210 RB2, which is based
+on SM4250/SM6115, generates a lot of unhandled SMMU context faults
+during boot:
+
+arm_smmu_context_fault: 116854 callbacks suppressed
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0ec600, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR    = 00000402 [Format=2 TF], SID=0x420
+arm-smmu c600000.iommu: FSYNR0 = 00320021 [S1CBNDX=50 PNU PLVL=1]
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0d7800, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR    = 00000402 [Format=2 TF], SID=0x420
+
+Failed initialisation of the lontium lt9611uxc, the GPU and the DPU is
+also observed (binding of the MDSS components triggered by lt9611uxc
+fails):
+
+ ------------[ cut here ]------------
+ !aspace
+ WARNING: CPU: 6 PID: 324 at drivers/gpu/drm/msm/msm_gem_vma.c:130 msm_gem_vma_init+0x150/0x18c [msm]
+ Modules linked in: ... (long list of modules)
+ CPU: 6 UID: 0 PID: 324 Comm: (udev-worker) Not tainted 6.15.0-03037-gaacc73ceeb8b #4 PREEMPT
+ Hardware name: Qualcomm Technologies, Inc. QRB4210 RB2 (DT)
+ pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : msm_gem_vma_init+0x150/0x18c [msm]
+ lr : msm_gem_vma_init+0x150/0x18c [msm]
+ sp : ffff80008144b280
+               ...
+ Call trace:
+  msm_gem_vma_init+0x150/0x18c [msm] (P)
+  get_vma_locked+0xc0/0x194 [msm]
+  msm_gem_get_and_pin_iova_range+0x4c/0xdc [msm]
+  msm_gem_kernel_new+0x48/0x160 [msm]
+  msm_gpu_init+0x34c/0x53c [msm]
+  adreno_gpu_init+0x1b0/0x2d8 [msm]
+  a6xx_gpu_init+0x1e8/0x9e0 [msm]
+  adreno_bind+0x2b8/0x348 [msm]
+  component_bind_all+0x100/0x230
+  msm_drm_bind+0x13c/0x3d0 [msm]
+  try_to_bring_up_aggregate_device+0x164/0x1d0
+  __component_add+0xa4/0x174
+  component_add+0x14/0x20
+  dsi_dev_attach+0x20/0x34 [msm]
+  dsi_host_attach+0x58/0x98 [msm]
+  devm_mipi_dsi_attach+0x34/0x90
+  lt9611uxc_attach_dsi.isra.0+0x94/0x124 [lontium_lt9611uxc]
+  lt9611uxc_probe+0x540/0x5fc [lontium_lt9611uxc]
+  i2c_device_probe+0x148/0x2a8
+  really_probe+0xbc/0x2c0
+  __driver_probe_device+0x78/0x120
+  driver_probe_device+0x3c/0x154
+  __driver_attach+0x90/0x1a0
+  bus_for_each_dev+0x68/0xb8
+  driver_attach+0x24/0x30
+  bus_add_driver+0xe4/0x208
+  driver_register+0x68/0x124
+  i2c_register_driver+0x48/0xcc
+  lt9611uxc_driver_init+0x20/0x1000 [lontium_lt9611uxc]
+  do_one_initcall+0x60/0x1d4
+  do_init_module+0x54/0x1fc
+  load_module+0x1748/0x1c8c
+  init_module_from_file+0x74/0xa0
+  __arm64_sys_finit_module+0x130/0x2f8
+  invoke_syscall+0x48/0x104
+  el0_svc_common.constprop.0+0xc0/0xe0
+  do_el0_svc+0x1c/0x28
+  el0_svc+0x2c/0x80
+  el0t_64_sync_handler+0x10c/0x138
+  el0t_64_sync+0x198/0x19c
+ ---[ end trace 0000000000000000 ]---
+ msm_dpu 5e01000.display-controller: [drm:msm_gpu_init [msm]] *ERROR* could not allocate memptrs: -22
+ msm_dpu 5e01000.display-controller: failed to load adreno gpu
+ platform a400000.remoteproc:glink-edge:apr:service@7:dais: Adding to iommu group 19
+ msm_dpu 5e01000.display-controller: failed to bind 5900000.gpu (ops a3xx_ops [msm]): -22
+ msm_dpu 5e01000.display-controller: adev bind failed: -22
+ lt9611uxc 0-002b: failed to attach dsi to host
+ lt9611uxc 0-002b: probe with driver lt9611uxc failed with error -22
+
+Suggested-by: Bjorn Andersson <andersson@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Fixes: 3581b7062cec ("drm/msm/disp/dpu1: add support for display on SM6115")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexey Klimov <alexey.klimov@linaro.org>
+Link: https://lore.kernel.org/r/20250613173238.15061-1-alexey.klimov@linaro.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -258,6 +258,7 @@ static const struct of_device_id qcom_sm
+       { .compatible = "qcom,sdm670-mdss" },
+       { .compatible = "qcom,sdm845-mdss" },
+       { .compatible = "qcom,sdm845-mss-pil" },
++      { .compatible = "qcom,sm6115-mdss" },
+       { .compatible = "qcom,sm6350-mdss" },
+       { .compatible = "qcom,sm6375-mdss" },
+       { .compatible = "qcom,sm8150-mdss" },
diff --git a/queue-6.12/iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch b/queue-6.12/iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch
new file mode 100644 (file)
index 0000000..5807b32
--- /dev/null
@@ -0,0 +1,107 @@
+From 12724ce3fe1a3d8f30d56e48b4f272d8860d1970 Mon Sep 17 00:00:00 2001
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Mon, 14 Jul 2025 12:50:19 +0800
+Subject: iommu/vt-d: Optimize iotlb_sync_map for non-caching/non-RWBF modes
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+commit 12724ce3fe1a3d8f30d56e48b4f272d8860d1970 upstream.
+
+The iotlb_sync_map iommu ops allows drivers to perform necessary cache
+flushes when new mappings are established. For the Intel iommu driver,
+this callback specifically serves two purposes:
+
+- To flush caches when a second-stage page table is attached to a device
+  whose iommu is operating in caching mode (CAP_REG.CM==1).
+- To explicitly flush internal write buffers to ensure updates to memory-
+  resident remapping structures are visible to hardware (CAP_REG.RWBF==1).
+
+However, in scenarios where neither caching mode nor the RWBF flag is
+active, the cache_tag_flush_range_np() helper, which is called in the
+iotlb_sync_map path, effectively becomes a no-op.
+
+Despite being a no-op, cache_tag_flush_range_np() involves iterating
+through all cache tags of the IOMMUs attached to the domain, protected
+by a spinlock. This unnecessary execution path introduces overhead,
+leading to a measurable I/O performance regression. On systems with NVMes
+under the same bridge, performance was observed to drop from approximately
+~6150 MiB/s down to ~4985 MiB/s.
+
+Introduce a flag in the dmar_domain structure. This flag will only be set
+when iotlb_sync_map is required (i.e., when CM or RWBF is set). The
+cache_tag_flush_range_np() is called only for domains where this flag is
+set. This flag, once set, is immutable, given that there won't be mixed
+configurations in real-world scenarios where some IOMMUs in a system
+operate in caching mode while others do not. Theoretically, the
+immutability of this flag does not impact functionality.
+
+Reported-by: Ioanna Alifieraki <ioanna-maria.alifieraki@canonical.com>
+Closes: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2115738
+Link: https://lore.kernel.org/r/20250701171154.52435-1-ioanna-maria.alifieraki@canonical.com
+Fixes: 129dab6e1286 ("iommu/vt-d: Use cache_tag_flush_range_np() in iotlb_sync_map")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/20250703031545.3378602-1-baolu.lu@linux.intel.com
+Link: https://lore.kernel.org/r/20250714045028.958850-3-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c |   19 ++++++++++++++++++-
+ drivers/iommu/intel/iommu.h |    3 +++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1957,6 +1957,18 @@ static bool dev_is_real_dma_subdevice(st
+              pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+ }
++static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
++                                     struct intel_iommu *iommu)
++{
++      if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
++              return true;
++
++      if (rwbf_quirk || cap_rwbf(iommu->cap))
++              return true;
++
++      return false;
++}
++
+ static int dmar_domain_attach_device(struct dmar_domain *domain,
+                                    struct device *dev)
+ {
+@@ -1994,6 +2006,8 @@ static int dmar_domain_attach_device(str
+       if (ret)
+               goto out_block_translation;
++      domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
++
+       return 0;
+ out_block_translation:
+@@ -4278,7 +4292,10 @@ static bool risky_device(struct pci_dev
+ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+                                     unsigned long iova, size_t size)
+ {
+-      cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
++      struct dmar_domain *dmar_domain = to_dmar_domain(domain);
++
++      if (dmar_domain->iotlb_sync_map)
++              cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);
+       return 0;
+ }
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -614,6 +614,9 @@ struct dmar_domain {
+       u8 has_mappings:1;              /* Has mappings configured through
+                                        * iommu_map() interface.
+                                        */
++      u8 iotlb_sync_map:1;            /* Need to flush IOTLB cache or write
++                                       * buffer when creating mappings.
++                                       */
+       spinlock_t lock;                /* Protect device tracking lists */
+       struct list_head devices;       /* all devices' list */
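
The shape of the optimization — evaluate the capability test once at attach time, then test a single immutable bit on the map path — in a minimal mock with stand-in structures (field names follow the patch, but this is only a model):

#include <stdbool.h>
#include <stdio.h>

struct iommu_caps { bool caching_mode; bool rwbf; };
struct dmar_domain { bool use_first_level; unsigned iotlb_sync_map : 1; };

static bool domain_need_iotlb_sync_map(const struct dmar_domain *d,
                                       const struct iommu_caps *c)
{
    if (c->caching_mode && !d->use_first_level)
        return true;
    return c->rwbf;
}

static void iotlb_sync_map(struct dmar_domain *d)
{
    if (!d->iotlb_sync_map)
        return; /* fast path: skip the per-cache-tag walk and its spinlock */
    puts("cache_tag_flush_range_np()");
}

int main(void)
{
    struct iommu_caps caps = { .caching_mode = false, .rwbf = false };
    struct dmar_domain dom = { 0 };

    /* Attach time: the flag is set once and never cleared. */
    dom.iotlb_sync_map |= domain_need_iotlb_sync_map(&dom, &caps);
    /* Map time: a no-op here because neither CM nor RWBF is set. */
    iotlb_sync_map(&dom);
    return 0;
}
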
diff --git a/queue-6.12/iommufd-prevent-align-overflow.patch b/queue-6.12/iommufd-prevent-align-overflow.patch
new file mode 100644 (file)
index 0000000..24b90f1
--- /dev/null
@@ -0,0 +1,99 @@
+From b42497e3c0e74db061eafad41c0cd7243c46436b Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Thu, 17 Jul 2025 11:46:55 -0300
+Subject: iommufd: Prevent ALIGN() overflow
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit b42497e3c0e74db061eafad41c0cd7243c46436b upstream.
+
+When allocating IOVA the candidate range gets aligned to the target
+alignment. If the range is close to ULONG_MAX then the ALIGN() can
+wrap, resulting in a corrupted iova.
+
+Open code the ALIGN() using check_add_overflow() to prevent this.
+This simplifies the checks as we don't need to check for length earlier
+either.
+
+Consolidate the two copies of this code under a single helper.
+
+This bug would allow userspace to create a mapping that overlaps with some
+other mapping or a reserved range.
+
+Cc: stable@vger.kernel.org
+Fixes: 51fe6141f0f6 ("iommufd: Data structure to provide IOVA to PFN mapping")
+Reported-by: syzbot+c2f65e2801743ca64e08@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com
+Reviewed-by: Yi Liu <yi.l.liu@intel.com>
+Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
+Link: https://patch.msgid.link/all/1-v1-7b4a16fc390b+10f4-iommufd_alloc_overflow_jgg@nvidia.com/
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c |   41 +++++++++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -70,36 +70,45 @@ struct iopt_area *iopt_area_contig_next(
+       return iter->area;
+ }
+-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+-                                  unsigned long length,
+-                                  unsigned long iova_alignment,
+-                                  unsigned long page_offset)
++static bool __alloc_iova_check_range(unsigned long *start, unsigned long last,
++                                   unsigned long length,
++                                   unsigned long iova_alignment,
++                                   unsigned long page_offset)
+ {
+-      if (span->is_used || span->last_hole - span->start_hole < length - 1)
++      unsigned long aligned_start;
++
++      /* ALIGN_UP() */
++      if (check_add_overflow(*start, iova_alignment - 1, &aligned_start))
+               return false;
++      aligned_start &= ~(iova_alignment - 1);
++      aligned_start |= page_offset;
+-      span->start_hole = ALIGN(span->start_hole, iova_alignment) |
+-                         page_offset;
+-      if (span->start_hole > span->last_hole ||
+-          span->last_hole - span->start_hole < length - 1)
++      if (aligned_start >= last || last - aligned_start < length - 1)
+               return false;
++      *start = aligned_start;
+       return true;
+ }
+-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+                                   unsigned long length,
+                                   unsigned long iova_alignment,
+                                   unsigned long page_offset)
+ {
+-      if (span->is_hole || span->last_used - span->start_used < length - 1)
++      if (span->is_used)
+               return false;
++      return __alloc_iova_check_range(&span->start_hole, span->last_hole,
++                                      length, iova_alignment, page_offset);
++}
+-      span->start_used = ALIGN(span->start_used, iova_alignment) |
+-                         page_offset;
+-      if (span->start_used > span->last_used ||
+-          span->last_used - span->start_used < length - 1)
++static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++                                  unsigned long length,
++                                  unsigned long iova_alignment,
++                                  unsigned long page_offset)
++{
++      if (span->is_hole)
+               return false;
+-      return true;
++      return __alloc_iova_check_range(&span->start_used, span->last_used,
++                                      length, iova_alignment, page_offset);
+ }
+ /*
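
The wraparound the patch guards against is easy to demonstrate. The sketch below contrasts a classic ALIGN() macro with an overflow-checked variant; the kernel's check_add_overflow() wraps the same compiler builtin used here (GCC/Clang assumed).

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Classic ALIGN(): silently wraps when x is near ULONG_MAX. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Overflow-checked variant mirroring the open-coded ALIGN in the fix. */
static bool align_up_safe(unsigned long x, unsigned long a, unsigned long *out)
{
    unsigned long tmp;

    if (__builtin_add_overflow(x, a - 1, &tmp))
        return false; /* would wrap: no valid aligned address exists */
    *out = tmp & ~(a - 1);
    return true;
}

int main(void)
{
    unsigned long start = ULONG_MAX - 2, aligned;

    /* Wraps around to 0: a corrupted, overlapping iova candidate. */
    printf("ALIGN_UP(%#lx, 4096) = %#lx\n", start, ALIGN_UP(start, 4096UL));
    if (!align_up_safe(start, 4096UL, &aligned))
        puts("align_up_safe: overflow detected, candidate range rejected");
    return 0;
}
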
diff --git a/queue-6.12/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch b/queue-6.12/iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
new file mode 100644 (file)
index 0000000..2659086
--- /dev/null
@@ -0,0 +1,47 @@
+From b23e09f9997771b4b739c1c694fa832b5fa2de02 Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Wed, 9 Jul 2025 22:58:53 -0700
+Subject: iommufd: Report unmapped bytes in the error path of iopt_unmap_iova_range
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit b23e09f9997771b4b739c1c694fa832b5fa2de02 upstream.
+
+There are callers that read the unmapped bytes even when rc != 0. Thus, do
+not forget to report it in the error path too.
+
+Fixes: 8d40205f6093 ("iommufd: Add kAPI toward external drivers for kernel access")
+Link: https://patch.msgid.link/r/e2b61303bbc008ba1a4e2d7c2a2894749b59fdac.1752126748.git.nicolinc@nvidia.com
+Cc: stable@vger.kernel.org
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -696,8 +696,10 @@ again:
+                       iommufd_access_notify_unmap(iopt, area_first, length);
+                       /* Something is not responding to unmap requests. */
+                       tries++;
+-                      if (WARN_ON(tries > 100))
+-                              return -EDEADLOCK;
++                      if (WARN_ON(tries > 100)) {
++                              rc = -EDEADLOCK;
++                              goto out_unmapped;
++                      }
+                       goto again;
+               }
+@@ -719,6 +721,7 @@ again:
+ out_unlock_iova:
+       up_write(&iopt->iova_rwsem);
+       up_read(&iopt->domains_rwsem);
++out_unmapped:
+       if (unmapped)
+               *unmapped = unmapped_bytes;
+       return rc;
diff --git a/queue-6.12/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch b/queue-6.12/misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
new file mode 100644 (file)
index 0000000..39fa304
--- /dev/null
@@ -0,0 +1,69 @@
+From 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 Mon Sep 17 00:00:00 2001
+From: Ricky Wu <ricky_wu@realtek.com>
+Date: Fri, 11 Jul 2025 22:01:43 +0800
+Subject: misc: rtsx: usb: Ensure mmc child device is active when card is present
+
+From: Ricky Wu <ricky_wu@realtek.com>
+
+commit 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 upstream.
+
+When a card is present in the reader, the driver currently defers
+autosuspend by returning -EAGAIN during the suspend callback to
+trigger USB remote wakeup signaling. However, this does not guarantee
+that the mmc child device has been resumed, which may cause issues if
+it remains suspended while the card is accessible.
+This patch ensures that all child devices, including the mmc host
+controller, are explicitly resumed before returning -EAGAIN. This
+fixes a corner case introduced by earlier remote wakeup handling,
+improving reliability of runtime PM when a card is inserted.
+
+Fixes: 883a87ddf2f1 ("misc: rtsx_usb: Use USB remote wakeup signaling for card insertion detection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricky Wu <ricky_wu@realtek.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Link: https://lore.kernel.org/r/20250711140143.2105224-1-ricky_wu@realtek.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cardreader/rtsx_usb.c |   16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct u
+ }
+ #ifdef CONFIG_PM
++static int rtsx_usb_resume_child(struct device *dev, void *data)
++{
++      pm_request_resume(dev);
++      return 0;
++}
++
+ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+       struct rtsx_ucr *ucr =
+@@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_i
+                       mutex_unlock(&ucr->dev_mutex);
+                       /* Defer the autosuspend if card exists */
+-                      if (val & (SD_CD | MS_CD))
++                      if (val & (SD_CD | MS_CD)) {
++                              device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
+                               return -EAGAIN;
++                      }
+               } else {
+                       /* There is an ongoing operation*/
+                       return -EAGAIN;
+@@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_i
+       return 0;
+ }
+-static int rtsx_usb_resume_child(struct device *dev, void *data)
+-{
+-      pm_request_resume(dev);
+-      return 0;
+-}
+-
+ static int rtsx_usb_resume(struct usb_interface *intf)
+ {
+       device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
diff --git a/queue-6.12/mm-damon-core-commit-damos-target_nid.patch b/queue-6.12/mm-damon-core-commit-damos-target_nid.patch
new file mode 100644 (file)
index 0000000..9e44cf4
--- /dev/null
@@ -0,0 +1,39 @@
+From 579bd5006fe7f4a7abb32da0160d376476cab67d Mon Sep 17 00:00:00 2001
+From: Bijan Tabatabai <bijantabatab@micron.com>
+Date: Tue, 8 Jul 2025 19:47:29 -0500
+Subject: mm/damon/core: commit damos->target_nid
+
+From: Bijan Tabatabai <bijantabatab@micron.com>
+
+commit 579bd5006fe7f4a7abb32da0160d376476cab67d upstream.
+
+When committing new scheme parameters from the sysfs, the target_nid field
+of the damos struct would not be copied.  This would result in the
+target_nid field retaining its original value, despite being updated in
+the sysfs interface.
+
+This patch fixes this issue by copying target_nid in damos_commit().
+
+Link: https://lkml.kernel.org/r/20250709004729.17252-1-bijan311@gmail.com
+Fixes: 83dc7bbaecae ("mm/damon/sysfs: use damon_commit_ctx()")
+Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -837,6 +837,7 @@ static int damos_commit(struct damos *ds
+               return err;
+       dst->wmarks = src->wmarks;
++      dst->target_nid = src->target_nid;
+       err = damos_commit_filters(dst, src);
+       return err;
diff --git a/queue-6.12/series b/queue-6.12/series
index ce0645222466d6a68f30567404dc06ada14c108f..726cfe4e80cc3f643c201da9c1c0bd23b8045ba5 100644 (file)
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -370,3 +370,18 @@ asoc-fsl_sai-replace-regmap_write-with-regmap_update.patch
 cifs-fix-collect_sample-to-handle-any-iterator-type.patch
 drm-amdgpu-fix-vram-reservation-issue.patch
 drm-amdgpu-fix-incorrect-vm-flags-to-map-bo.patch
+mm-damon-core-commit-damos-target_nid.patch
+block-introduce-bio_needs_zone_write_plugging.patch
+dm-always-split-write-bios-to-zoned-device-limits.patch
+clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch
+cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
+iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch
+iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
+iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
+iommufd-prevent-align-overflow.patch
+ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
+ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
+ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch
+usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
+misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
+usb-typec-ucsi-update-power_supply-on-power-role-change.patch
diff --git a/queue-6.12/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch b/queue-6.12/usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
new file mode 100644 (file)
index 0000000..9359627
--- /dev/null
@@ -0,0 +1,41 @@
+From cf16f408364efd8a68f39011a3b073c83a03612d Mon Sep 17 00:00:00 2001
+From: Xinyu Liu <katieeliu@tencent.com>
+Date: Mon, 30 Jun 2025 10:02:56 +0800
+Subject: usb: core: config: Prevent OOB read in SS endpoint companion parsing
+
+From: Xinyu Liu <katieeliu@tencent.com>
+
+commit cf16f408364efd8a68f39011a3b073c83a03612d upstream.
+
+usb_parse_ss_endpoint_companion() checks descriptor type before length,
+enabling a potential out-of-bounds read past the end of the buffer.
+
+Fix this up by checking the size first before looking at any of the
+fields in the descriptor.
+
+Signed-off-by: Xinyu Liu <katieeliu@tencent.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/config.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_compan
+        */
+       desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+-      if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+-                      size < USB_DT_SS_EP_COMP_SIZE) {
++      if (size < USB_DT_SS_EP_COMP_SIZE) {
++              dev_notice(ddev,
++                         "invalid SuperSpeed endpoint companion descriptor "
++                         "of length %d, skipping\n", size);
++              return;
++      }
++
++      if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+               dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+                               " interface %d altsetting %d ep %d: "
+                               "using minimum values\n",
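
The underlying rule — validate the buffer length before reading any field of the descriptor — in a self-contained sketch; the struct layout here is a simplified stand-in for usb_ss_ep_comp_descriptor, not the uapi definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ss_ep_comp {
    uint8_t  bLength;
    uint8_t  bDescriptorType;
    uint8_t  bMaxBurst;
    uint8_t  bmAttributes;
    uint16_t wBytesPerInterval;
};

#define DT_SS_EP_COMP      0x30
#define DT_SS_EP_COMP_SIZE 6

/* Check the buffer size first, as the fix does: reading
 * ->bDescriptorType before the length check could touch bytes past the
 * end of a short buffer. */
static int parse_ss_ep_comp(const uint8_t *buf, size_t size)
{
    struct ss_ep_comp desc;

    if (size < DT_SS_EP_COMP_SIZE)
        return -1;                 /* too short: don't even look at it */
    memcpy(&desc, buf, sizeof(desc));
    if (desc.bDescriptorType != DT_SS_EP_COMP)
        return -2;                 /* wrong type: use minimum values */
    return 0;
}

int main(void)
{
    uint8_t short_buf[2] = { 2, DT_SS_EP_COMP };
    uint8_t good_buf[6]  = { 6, DT_SS_EP_COMP, 0, 0, 0, 0 };

    printf("short buffer: %d\n", parse_ss_ep_comp(short_buf, sizeof(short_buf)));
    printf("good buffer:  %d\n", parse_ss_ep_comp(good_buf, sizeof(good_buf)));
    return 0;
}
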
diff --git a/queue-6.12/usb-typec-ucsi-update-power_supply-on-power-role-change.patch b/queue-6.12/usb-typec-ucsi-update-power_supply-on-power-role-change.patch
new file mode 100644 (file)
index 0000000..cf85dc4
--- /dev/null
@@ -0,0 +1,37 @@
+From 7616f006db07017ef5d4ae410fca99279aaca7aa Mon Sep 17 00:00:00 2001
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Date: Mon, 21 Jul 2025 13:32:51 +0700
+Subject: usb: typec: ucsi: Update power_supply on power role change
+
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+
+commit 7616f006db07017ef5d4ae410fca99279aaca7aa upstream.
+
+The current power direction of a USB-C port also influences the
+power_supply's online status, so a power role change should also update
+the power_supply.
+
+This fixes an issue on some systems where plugging in a normal USB
+device for the first time after a reboot causes upower to erroneously
+consider the system to be connected to AC power.
+
+Cc: stable <stable@kernel.org>
+Fixes: 0e6371fbfba3 ("usb: typec: ucsi: Report power supply changes")
+Signed-off-by: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20250721-fix-ucsi-pwr-dir-notify-v1-1-e53d5340cb38@qtmlabs.xyz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1225,6 +1225,7 @@ static void ucsi_handle_connector_change
+       if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+               typec_set_pwr_role(con->port, role);
++              ucsi_port_psy_changed(con);
+               /* Complete pending power role swap */
+               if (!completion_done(&con->complete))