--- /dev/null
+From f70291411ba20d50008db90a6f0731efac27872c Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Wed, 25 Jun 2025 18:33:24 +0900
+Subject: block: Introduce bio_needs_zone_write_plugging()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit f70291411ba20d50008db90a6f0731efac27872c upstream.
+
+In preparation for fixing device mapper zone write handling, introduce
+the inline helper function bio_needs_zone_write_plugging() to test if a
+BIO requires handling through zone write plugging using the function
+blk_zone_plug_bio(). This function returns true for any write
+(op_is_write(bio) == true) operation directed at a zoned block device
+using zone write plugging, that is, a block device with a disk that has
+a zone write plug hash table.
+
+This helper allows simplifying the check on entry to blk_zone_plug_bio()
+and is used to protect calls to it for blk-mq devices and DM devices.
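+
+For example, a submission path now follows this pattern (a minimal
+sketch; the blk-mq hunk below does exactly this):
+
+  if (bio_needs_zone_write_plugging(bio)) {
+          if (blk_zone_plug_bio(bio, nr_segs))
+                  return; /* BIO was handled by a zone write plug */
+  }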
+
+Fixes: f211268ed1f9 ("dm: Use the block layer zone append emulation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20250625093327.548866-3-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 6 +++--
+ block/blk-zoned.c | 20 -----------------
+ drivers/md/dm.c | 4 ++-
+ include/linux/blkdev.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 63 insertions(+), 22 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3169,8 +3169,10 @@ void blk_mq_submit_bio(struct bio *bio)
+ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ goto queue_exit;
+
+- if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))
+- goto queue_exit;
++ if (bio_needs_zone_write_plugging(bio)) {
++ if (blk_zone_plug_bio(bio, nr_segs))
++ goto queue_exit;
++ }
+
+ new_request:
+ if (rq) {
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1116,25 +1116,7 @@ bool blk_zone_plug_bio(struct bio *bio,
+ {
+ struct block_device *bdev = bio->bi_bdev;
+
+- if (!bdev->bd_disk->zone_wplugs_hash)
+- return false;
+-
+- /*
+- * If the BIO already has the plugging flag set, then it was already
+- * handled through this path and this is a submission from the zone
+- * plug bio submit work.
+- */
+- if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+- return false;
+-
+- /*
+- * We do not need to do anything special for empty flush BIOs, e.g
+- * BIOs such as issued by blkdev_issue_flush(). The is because it is
+- * the responsibility of the user to first wait for the completion of
+- * write operations for flush to have any effect on the persistence of
+- * the written data.
+- */
+- if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++ if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash))
+ return false;
+
+ /*
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1788,7 +1788,9 @@ static inline bool dm_zone_bio_needs_spl
+ }
+ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+ {
+- return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
++ if (!bio_needs_zone_write_plugging(bio))
++ return false;
++ return blk_zone_plug_bio(bio, 0);
+ }
+
+ static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -842,6 +842,55 @@ static inline unsigned int disk_nr_zones
+ {
+ return disk->nr_zones;
+ }
++
++/**
++ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
++ * write plugging
++ * @bio: The BIO being submitted
++ *
++ * Return true whenever @bio execution needs to be handled through zone
++ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
++ */
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++ enum req_op op = bio_op(bio);
++
++ /*
++ * Only zoned block devices have a zone write plug hash table. But not
++ * all of them have one (e.g. DM devices may not need one).
++ */
++ if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
++ return false;
++
++ /* Only write operations need zone write plugging. */
++ if (!op_is_write(op))
++ return false;
++
++ /* Ignore empty flush */
++ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++ return false;
++
++ /* Ignore BIOs that already have been handled by zone write plugging. */
++ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
++ return false;
++
++ /*
++ * All zone write operations must be handled through zone write plugging
++ * using blk_zone_plug_bio().
++ */
++ switch (op) {
++ case REQ_OP_ZONE_APPEND:
++ case REQ_OP_WRITE:
++ case REQ_OP_WRITE_ZEROES:
++ case REQ_OP_ZONE_FINISH:
++ case REQ_OP_ZONE_RESET:
++ case REQ_OP_ZONE_RESET_ALL:
++ return true;
++ default:
++ return false;
++ }
++}
++
+ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+ /**
+@@ -871,6 +920,12 @@ static inline unsigned int disk_nr_zones
+ {
+ return 0;
+ }
++
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++ return false;
++}
++
+ static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ {
+ return false;
--- /dev/null
+From 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 Mon Sep 17 00:00:00 2001
+From: Shyam Prasad N <sprasad@microsoft.com>
+Date: Thu, 17 Jul 2025 17:36:13 +0530
+Subject: cifs: reset iface weights when we cannot find a candidate
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+commit 9d5eff7821f6d70f7d1b4d8a60680fba4de868a7 upstream.
+
+We now do a weighted selection of server interfaces when allocating
+new channels. The weights are decided based on the speed advertised.
+The fulfilled weight for an interface is a counter that is used to
+track the interface selection. It should be reset back to zero once
+all interfaces have fulfilled their weights.
+
+In cifs_chan_update_iface, this reset logic was missing. As a result
+when the server interface list changes, the client may not be able
+to find a new candidate for other channels after all interfaces have
+been fulfilled.
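+
+Conceptually, the selection now behaves like this (a minimal sketch;
+other selection criteria are omitted, and the weight is derived from
+iface->speed / iface_min_speed as in the real code):
+
+  for (pass = 0; pass < 2; pass++) {
+          list_for_each_entry(iface, &ses->iface_list, iface_head) {
+                  if (iface->speed / iface_min_speed > iface->weight_fulfilled)
+                          return iface; /* weight not yet fulfilled */
+          }
+          /* all weights fulfilled: reset counters and retry once */
+          list_for_each_entry(iface, &ses->iface_list, iface_head)
+                  iface->weight_fulfilled = 0;
+  }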
+
+Fixes: a6d8fb54a515 ("cifs: distribute channels across interfaces based on speed")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/sess.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -332,6 +332,7 @@ cifs_chan_update_iface(struct cifs_ses *
+ struct cifs_server_iface *old_iface = NULL;
+ struct cifs_server_iface *last_iface = NULL;
+ struct sockaddr_storage ss;
++ int retry = 0;
+
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+@@ -360,6 +361,7 @@ cifs_chan_update_iface(struct cifs_ses *
+ return;
+ }
+
++try_again:
+ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
+ iface_head);
+ iface_min_speed = last_iface->speed;
+@@ -397,6 +399,13 @@ cifs_chan_update_iface(struct cifs_ses *
+ }
+
+ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++ list_for_each_entry(iface, &ses->iface_list, iface_head)
++ iface->weight_fulfilled = 0;
++
++ /* see if it can be satisfied in second attempt */
++ if (!retry++)
++ goto try_again;
++
+ iface = NULL;
+ cifs_dbg(FYI, "unable to find a suitable iface\n");
+ }
--- /dev/null
+From 0acf9e65a47d1e489c8b24c45a64436e30bcccf4 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Tue, 20 May 2025 11:07:42 +0200
+Subject: clk: qcom: dispcc-sm8750: Fix setting rate byte and pixel clocks
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit 0acf9e65a47d1e489c8b24c45a64436e30bcccf4 upstream.
+
+On SM8750, setting the rate of the pixel and byte clocks while the
+parent DSI PHY PLL is not enabled fails with:
+
+ disp_cc_mdss_byte0_clk_src: rcg didn't update its configuration.
+
+At that point the DSI PHY PLL is unprepared and its "PLL Power Down"
+bits in CMN_CTRL_0 are asserted, so the RCG update fails.
+
+Mark these clocks with CLK_OPS_PARENT_ENABLE to ensure the parent is
+enabled during rate changes.
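+
+With that flag set, the clock framework turns the parent on around the
+rate change, roughly like this (a simplified sketch of the behaviour in
+drivers/clk/clk.c):
+
+  if (core->flags & CLK_OPS_PARENT_ENABLE)
+          clk_core_prepare_enable(parent);
+  /* ... apply the new rate through the clk ops ... */
+  if (core->flags & CLK_OPS_PARENT_ENABLE)
+          clk_core_disable_unprepare(parent);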
+
+Cc: stable@vger.kernel.org
+Fixes: f1080d8dab0f ("clk: qcom: dispcc-sm8750: Add SM8750 Display clock controller")
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250520090741.45820-2-krzysztof.kozlowski@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/dispcc-sm8750.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/clk/qcom/dispcc-sm8750.c
++++ b/drivers/clk/qcom/dispcc-sm8750.c
+@@ -393,7 +393,7 @@ static struct clk_rcg2 disp_cc_mdss_byte
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_byte2_ops,
+ },
+ };
+@@ -408,7 +408,7 @@ static struct clk_rcg2 disp_cc_mdss_byte
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_byte2_ops,
+ },
+ };
+@@ -712,7 +712,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+ };
+@@ -727,7 +727,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+ };
+@@ -742,7 +742,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_pixel_ops,
+ },
+ };
--- /dev/null
+From 077ec7bcec9a8987d2a133afb7e13011878c7576 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Thu, 22 May 2025 22:25:55 +0200
+Subject: clk: qcom: gcc-ipq8074: fix broken freq table for nss_port6_tx_clk_src
+
+From: Christian Marangi <ansuelsmth@gmail.com>
+
+commit 077ec7bcec9a8987d2a133afb7e13011878c7576 upstream.
+
+With the conversion done by commit e88f03230dc0 ("clk: qcom: gcc-ipq8074:
+rework nss_port5/6 clock to multiple conf") a Copy-Paste error was made
+for the nss_port6_tx_clk_src frequency table.
+
+This was caused by the wrong setting of the parent in
+ftbl_nss_port6_tx_clk_src that was wrongly set to P_UNIPHY1_RX instead
+of P_UNIPHY2_TX.
+
+This causes the UNIPHY2 port to malfunction when its clock needs to be
+scaled to a higher rate. The malfunction was observed, for example,
+with an Aquantia 10G PHY connected at a speed higher than 1G (e.g.
+2.5G).
+
+Fix the broken frequency table to restore original functionality.
+
+Cc: stable@vger.kernel.org
+Fixes: e88f03230dc0 ("clk: qcom: gcc-ipq8074: rework nss_port5/6 clock to multiple conf")
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Tested-by: Robert Marko <robimarko@gmail.com>
+Link: https://lore.kernel.org/r/20250522202600.4028-1-ansuelsmth@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/gcc-ipq8074.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_p
+ static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
+ FMS(19200000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port6_tx_clk_src_25),
+- FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
++ FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port6_tx_clk_src_125),
+- FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+- FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
++ FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
++ FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
+ { }
+ };
+
--- /dev/null
+From 2df7168717b7d2d32bcf017c68be16e4aae9dd13 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Wed, 25 Jun 2025 18:33:25 +0900
+Subject: dm: Always split write BIOs to zoned device limits
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit 2df7168717b7d2d32bcf017c68be16e4aae9dd13 upstream.
+
+Any zoned DM target that requires zone append emulation will use the
+block layer zone write plugging. In such case, DM target drivers must
+not split BIOs using dm_accept_partial_bio() as doing so can potentially
+lead to deadlocks with queue freeze operations. Regular write operations
+used to emulate zone append operations also cannot be split by the
+target driver as that would result in an invalid written sector value
+being returned through the BIO sector.
+
+In order for zoned DM target drivers to avoid such incorrect BIO
+splitting, we must ensure that large BIOs are split before being passed
+to the map() function of the target, thus guaranteeing that the
+limits for the mapped device are not exceeded.
+
+dm-crypt and dm-flakey are the only target drivers supporting zoned
+devices and using dm_accept_partial_bio().
+
+In the case of dm-crypt, this function is used to split BIOs to the
+internal max_write_size limit (which will be suppressed in a different
+patch). However, since crypt_alloc_buffer() uses a bioset allowing only
+up to BIO_MAX_VECS (256) vectors in a BIO, the dm-crypt device
+max_segments limit, which is not set and so defaults to BLK_MAX_SEGMENTS
+(128), must thus be respected and write BIOs split accordingly.
+
+In the case of dm-flakey, since zone append emulation is not required,
+the block layer zone write plugging is not used and no splitting of BIOs
+is required.
+
+Modify the function dm_zone_bio_needs_split() to use the block layer
+helper function bio_needs_zone_write_plugging() to force a call to
+bio_split_to_limits() in dm_split_and_process_bio(). This allows DM
+target drivers to avoid using dm_accept_partial_bio() for write
+operations on zoned DM devices.
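+
+The splitting itself then happens early in dm_split_and_process_bio(),
+roughly (a sketch of the existing logic there):
+
+  if (need_split) {
+          bio = bio_split_to_limits(bio);
+          if (!bio)
+                  return; /* nothing left to process here */
+  }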
+
+Fixes: f211268ed1f9 ("dm: Use the block layer zone append emulation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Link: https://lore.kernel.org/r/20250625093327.548866-4-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm.c | 29 ++++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1780,12 +1780,29 @@ static inline bool dm_zone_bio_needs_spl
+ struct bio *bio)
+ {
+ /*
+- * For mapped device that need zone append emulation, we must
+- * split any large BIO that straddles zone boundaries.
++ * Special case the zone operations that cannot or should not be split.
+ */
+- return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
+- !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
++ switch (bio_op(bio)) {
++ case REQ_OP_ZONE_APPEND:
++ case REQ_OP_ZONE_FINISH:
++ case REQ_OP_ZONE_RESET:
++ case REQ_OP_ZONE_RESET_ALL:
++ return false;
++ default:
++ break;
++ }
++
++ /*
++ * Mapped devices that require zone append emulation will use the block
++ * layer zone write plugging. In such case, we must split any large BIO
++ * to the mapped device limits to avoid potential deadlocks with queue
++ * freeze operations.
++ */
++ if (!dm_emulate_zone_append(md))
++ return false;
++ return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
+ }
++
+ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+ {
+ if (!bio_needs_zone_write_plugging(bio))
+@@ -1934,9 +1951,7 @@ static void dm_split_and_process_bio(str
+
+ is_abnormal = is_abnormal_io(bio);
+ if (static_branch_unlikely(&zoned_enabled)) {
+- /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
+- need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
+- (is_abnormal || dm_zone_bio_needs_split(md, bio));
++ need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+ } else {
+ need_split = is_abnormal;
+ }
--- /dev/null
+From 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:21 +0800
+Subject: ext4: fix largest free orders lists corruption on mb_optimize_scan switch
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 7d345aa1fac4c2ec9584fbd6f389f2c2368671d5 upstream.
+
+The grp->bb_largest_free_order is updated regardless of whether
+mb_optimize_scan is enabled. This can lead to inconsistencies between
+grp->bb_largest_free_order and the actual s_mb_largest_free_orders list
+index when mb_optimize_scan is repeatedly enabled and disabled via remount.
+
+For example, if mb_optimize_scan is initially enabled, largest free
+order is 3, and the group is in s_mb_largest_free_orders[3]. Then,
+mb_optimize_scan is disabled via remount, block allocations occur,
+updating largest free order to 2. Finally, mb_optimize_scan is re-enabled
+via remount, more block allocations update largest free order to 1.
+
+At this point, the group would be removed from s_mb_largest_free_orders[3]
+under the protection of s_mb_largest_free_orders_locks[2]. This lock
+mismatch can lead to list corruption.
+
+To fix this, whenever grp->bb_largest_free_order changes, we now always
+attempt to remove the group from its old order list. However, we only
+insert the group into the new order list if `mb_optimize_scan` is enabled.
+This approach helps prevent lock inconsistencies and ensures the data in
+the order lists remains reliable.
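+
+In the buggy flow described above, the mismatch looks like this (a
+sketch; the group still sits on s_mb_largest_free_orders[3]):
+
+  write_lock(&sbi->s_mb_largest_free_orders_locks[2]); /* wrong lock */
+  list_del_init(&grp->bb_largest_free_order_node);     /* list [3] */
+  write_unlock(&sbi->s_mb_largest_free_orders_locks[2]);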
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-12-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 33 ++++++++++++++-------------------
+ 1 file changed, 14 insertions(+), 19 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1150,33 +1150,28 @@ static void
+ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- int i;
++ int new, old = grp->bb_largest_free_order;
+
+- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+- if (grp->bb_counters[i] > 0)
++ for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
++ if (grp->bb_counters[new] > 0)
+ break;
++
+ /* No need to move between order lists? */
+- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+- i == grp->bb_largest_free_order) {
+- grp->bb_largest_free_order = i;
++ if (new == old)
+ return;
+- }
+
+- if (grp->bb_largest_free_order >= 0) {
+- write_lock(&sbi->s_mb_largest_free_orders_locks[
+- grp->bb_largest_free_order]);
++ if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) {
++ write_lock(&sbi->s_mb_largest_free_orders_locks[old]);
+ list_del_init(&grp->bb_largest_free_order_node);
+- write_unlock(&sbi->s_mb_largest_free_orders_locks[
+- grp->bb_largest_free_order]);
++ write_unlock(&sbi->s_mb_largest_free_orders_locks[old]);
+ }
+- grp->bb_largest_free_order = i;
+- if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+- write_lock(&sbi->s_mb_largest_free_orders_locks[
+- grp->bb_largest_free_order]);
++
++ grp->bb_largest_free_order = new;
++ if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
++ write_lock(&sbi->s_mb_largest_free_orders_locks[new]);
+ list_add_tail(&grp->bb_largest_free_order_node,
+- &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
+- write_unlock(&sbi->s_mb_largest_free_orders_locks[
+- grp->bb_largest_free_order]);
++ &sbi->s_mb_largest_free_orders[new]);
++ write_unlock(&sbi->s_mb_largest_free_orders_locks[new]);
+ }
+ }
+
--- /dev/null
+From 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 14 Jul 2025 21:03:20 +0800
+Subject: ext4: fix zombie groups in average fragment size lists
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 1c320d8e92925bb7615f83a7b6e3f402a5c2ca63 upstream.
+
+Groups with no free blocks shouldn't be in any average fragment size list.
+However, when all blocks in a group are allocated (i.e., bb_fragments or
+bb_free is 0), we currently skip updating the average fragment size, which
+means the group isn't removed from its previous s_mb_avg_fragment_size[old]
+list.
+
+This created "zombie" groups that were always skipped during traversal as
+they couldn't satisfy any block allocation requests, negatively impacting
+traversal efficiency.
+
+Therefore, when a group becomes completely full, bb_avg_fragment_size_order
+is now set to -1. If the old order was not -1, a removal operation is
+performed; if the new order is not -1, an insertion is performed.
+
+Fixes: 196e402adf2e ("ext4: improve cr 0 / cr 1 group scanning")
+CC: stable@vger.kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-11-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -841,30 +841,30 @@ static void
+ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- int new_order;
++ int new, old;
+
+- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
+ return;
+
+- new_order = mb_avg_fragment_size_order(sb,
+- grp->bb_free / grp->bb_fragments);
+- if (new_order == grp->bb_avg_fragment_size_order)
++ old = grp->bb_avg_fragment_size_order;
++ new = grp->bb_fragments == 0 ? -1 :
++ mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
++ if (new == old)
+ return;
+
+- if (grp->bb_avg_fragment_size_order != -1) {
+- write_lock(&sbi->s_mb_avg_fragment_size_locks[
+- grp->bb_avg_fragment_size_order]);
++ if (old >= 0) {
++ write_lock(&sbi->s_mb_avg_fragment_size_locks[old]);
+ list_del(&grp->bb_avg_fragment_size_node);
+- write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+- grp->bb_avg_fragment_size_order]);
++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]);
++ }
++
++ grp->bb_avg_fragment_size_order = new;
++ if (new >= 0) {
++ write_lock(&sbi->s_mb_avg_fragment_size_locks[new]);
++ list_add_tail(&grp->bb_avg_fragment_size_node,
++ &sbi->s_mb_avg_fragment_size[new]);
++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]);
+ }
+- grp->bb_avg_fragment_size_order = new_order;
+- write_lock(&sbi->s_mb_avg_fragment_size_locks[
+- grp->bb_avg_fragment_size_order]);
+- list_add_tail(&grp->bb_avg_fragment_size_node,
+- &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+- write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+- grp->bb_avg_fragment_size_order]);
+ }
+
+ /*
--- /dev/null
+From 82e6381e23f1ea7a14f418215068aaa2ca046c84 Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Fri, 25 Jul 2025 10:15:50 +0800
+Subject: ext4: initialize superblock fields in the kballoc-test.c kunit tests
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 82e6381e23f1ea7a14f418215068aaa2ca046c84 upstream.
+
+Various changes in the "ext4: better scalability for ext4 block
+allocation" patch series have resulted in kunit test failures, most
+notably in the test_new_blocks_simple and the test_mb_mark_used tests.
+The root cause of these failures is that various in-memory ext4 data
+structures were not getting initialized, and while previous versions
+of the functions exercised by the unit tests didn't use these
+structure members, this was arguably a test bug.
+
+Since one of the patches in the block allocation scalability patches
+is a fix which is has a cc:stable tag, this commit also has a
+cc:stable tag.
+
+CC: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250714130327.1830534-1-libaokun1@huawei.com
+Link: https://patch.msgid.link/20250725021550.3177573-1-yi.zhang@huaweicloud.com
+Link: https://patch.msgid.link/20250725021654.3188798-1-yi.zhang@huaweicloud.com
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Closes: https://lore.kernel.org/linux-ext4/b0635ad0-7ebf-4152-a69b-58e7e87d5085@roeck-us.net/
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc-test.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/ext4/mballoc-test.c
++++ b/fs/ext4/mballoc-test.c
+@@ -155,6 +155,7 @@ static struct super_block *mbt_ext4_allo
+ bgl_lock_init(sbi->s_blockgroup_lock);
+
+ sbi->s_es = &fsb->es;
++ sbi->s_sb = sb;
+ sb->s_fs_info = sbi;
+
+ up_write(&sb->s_umount);
+@@ -802,6 +803,10 @@ static void test_mb_mark_used(struct kun
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
++ grp->bb_largest_free_order = -1;
++ grp->bb_avg_fragment_size_order = -1;
++ INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++ INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_mb_mark_used_range(test, &e4b, ranges[i].start,
+@@ -875,6 +880,10 @@ static void test_mb_free_blocks(struct k
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+
+ grp->bb_free = 0;
++ grp->bb_largest_free_order = -1;
++ grp->bb_avg_fragment_size_order = -1;
++ INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++ INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ memset(bitmap, 0xff, sb->s_blocksize);
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
--- /dev/null
+From dfb36e4a8db0cd56f92d4cb445f54e85a9b40897 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Mon, 11 Aug 2025 10:11:47 -0400
+Subject: futex: Use user_write_access_begin/_end() in futex_put_value()
+
+From: Waiman Long <longman@redhat.com>
+
+commit dfb36e4a8db0cd56f92d4cb445f54e85a9b40897 upstream.
+
+Commit cec199c5e39b ("futex: Implement FUTEX2_NUMA") introduced the
+futex_put_value() helper to write a value to the given user
+address.
+
+However, it uses user_read_access_begin() before the write. For
+architectures that differentiate between read and write accesses, like
+PowerPC, futex_put_value() fails with -EFAULT.
+
+Fix that by using the user_write_access_begin/user_write_access_end() pair
+instead.
+
+Fixes: cec199c5e39b ("futex: Implement FUTEX2_NUMA")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20250811141147.322261-1-longman@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex/futex.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/futex/futex.h
++++ b/kernel/futex/futex.h
+@@ -321,13 +321,13 @@ static __always_inline int futex_put_val
+ {
+ if (can_do_masked_user_access())
+ to = masked_user_access_begin(to);
+- else if (!user_read_access_begin(to, sizeof(*to)))
++ else if (!user_write_access_begin(to, sizeof(*to)))
+ return -EFAULT;
+ unsafe_put_user(val, to, Efault);
+- user_read_access_end();
++ user_write_access_end();
+ return 0;
+ Efault:
+- user_read_access_end();
++ user_write_access_end();
+ return -EFAULT;
+ }
+
--- /dev/null
+From f7fa8520f30373ce99c436c4d57c76befdacbef3 Mon Sep 17 00:00:00 2001
+From: Alexey Klimov <alexey.klimov@linaro.org>
+Date: Fri, 13 Jun 2025 18:32:38 +0100
+Subject: iommu/arm-smmu-qcom: Add SM6115 MDSS compatible
+
+From: Alexey Klimov <alexey.klimov@linaro.org>
+
+commit f7fa8520f30373ce99c436c4d57c76befdacbef3 upstream.
+
+Add the SM6115 MDSS compatible to the clients compatible list, as it also
+needs that workaround.
+Without this workaround, for example, QRB4210 RB2 which is based on
+SM4250/SM6115 generates a lot of smmu unhandled context faults during
+boot:
+
+arm_smmu_context_fault: 116854 callbacks suppressed
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0ec600, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR = 00000402 [Format=2 TF], SID=0x420
+arm-smmu c600000.iommu: FSYNR0 = 00320021 [S1CBNDX=50 PNU PLVL=1]
+arm-smmu c600000.iommu: Unhandled context fault: fsr=0x402,
+iova=0x5c0d7800, fsynr=0x320021, cbfrsynra=0x420, cb=5
+arm-smmu c600000.iommu: FSR = 00000402 [Format=2 TF], SID=0x420
+
+Failed initialisation of the lontium lt9611uxc, gpu and dpu is also
+observed (binding of the MDSS components triggered by lt9611uxc has
+failed):
+
+ ------------[ cut here ]------------
+ !aspace
+ WARNING: CPU: 6 PID: 324 at drivers/gpu/drm/msm/msm_gem_vma.c:130 msm_gem_vma_init+0x150/0x18c [msm]
+ Modules linked in: ... (long list of modules)
+ CPU: 6 UID: 0 PID: 324 Comm: (udev-worker) Not tainted 6.15.0-03037-gaacc73ceeb8b #4 PREEMPT
+ Hardware name: Qualcomm Technologies, Inc. QRB4210 RB2 (DT)
+ pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : msm_gem_vma_init+0x150/0x18c [msm]
+ lr : msm_gem_vma_init+0x150/0x18c [msm]
+ sp : ffff80008144b280
+ ...
+ Call trace:
+ msm_gem_vma_init+0x150/0x18c [msm] (P)
+ get_vma_locked+0xc0/0x194 [msm]
+ msm_gem_get_and_pin_iova_range+0x4c/0xdc [msm]
+ msm_gem_kernel_new+0x48/0x160 [msm]
+ msm_gpu_init+0x34c/0x53c [msm]
+ adreno_gpu_init+0x1b0/0x2d8 [msm]
+ a6xx_gpu_init+0x1e8/0x9e0 [msm]
+ adreno_bind+0x2b8/0x348 [msm]
+ component_bind_all+0x100/0x230
+ msm_drm_bind+0x13c/0x3d0 [msm]
+ try_to_bring_up_aggregate_device+0x164/0x1d0
+ __component_add+0xa4/0x174
+ component_add+0x14/0x20
+ dsi_dev_attach+0x20/0x34 [msm]
+ dsi_host_attach+0x58/0x98 [msm]
+ devm_mipi_dsi_attach+0x34/0x90
+ lt9611uxc_attach_dsi.isra.0+0x94/0x124 [lontium_lt9611uxc]
+ lt9611uxc_probe+0x540/0x5fc [lontium_lt9611uxc]
+ i2c_device_probe+0x148/0x2a8
+ really_probe+0xbc/0x2c0
+ __driver_probe_device+0x78/0x120
+ driver_probe_device+0x3c/0x154
+ __driver_attach+0x90/0x1a0
+ bus_for_each_dev+0x68/0xb8
+ driver_attach+0x24/0x30
+ bus_add_driver+0xe4/0x208
+ driver_register+0x68/0x124
+ i2c_register_driver+0x48/0xcc
+ lt9611uxc_driver_init+0x20/0x1000 [lontium_lt9611uxc]
+ do_one_initcall+0x60/0x1d4
+ do_init_module+0x54/0x1fc
+ load_module+0x1748/0x1c8c
+ init_module_from_file+0x74/0xa0
+ __arm64_sys_finit_module+0x130/0x2f8
+ invoke_syscall+0x48/0x104
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x2c/0x80
+ el0t_64_sync_handler+0x10c/0x138
+ el0t_64_sync+0x198/0x19c
+ ---[ end trace 0000000000000000 ]---
+ msm_dpu 5e01000.display-controller: [drm:msm_gpu_init [msm]] *ERROR* could not allocate memptrs: -22
+ msm_dpu 5e01000.display-controller: failed to load adreno gpu
+ platform a400000.remoteproc:glink-edge:apr:service@7:dais: Adding to iommu group 19
+ msm_dpu 5e01000.display-controller: failed to bind 5900000.gpu (ops a3xx_ops [msm]): -22
+ msm_dpu 5e01000.display-controller: adev bind failed: -22
+ lt9611uxc 0-002b: failed to attach dsi to host
+ lt9611uxc 0-002b: probe with driver lt9611uxc failed with error -22
+
+Suggested-by: Bjorn Andersson <andersson@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Fixes: 3581b7062cec ("drm/msm/disp/dpu1: add support for display on SM6115")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexey Klimov <alexey.klimov@linaro.org>
+Link: https://lore.kernel.org/r/20250613173238.15061-1-alexey.klimov@linaro.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -380,6 +380,7 @@ static const struct of_device_id qcom_sm
+ { .compatible = "qcom,sdm670-mdss" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sdm845-mss-pil" },
++ { .compatible = "qcom,sm6115-mdss" },
+ { .compatible = "qcom,sm6350-mdss" },
+ { .compatible = "qcom,sm6375-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
--- /dev/null
+From 49f42634e8054e57d09c7f9ef5e4527e116059cb Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Fri, 11 Jul 2025 13:40:20 -0700
+Subject: iommu/arm-smmu-v3: Revert vmaster in the error path
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit 49f42634e8054e57d09c7f9ef5e4527e116059cb upstream.
+
+The error path for err_free_master_domain leaks the vmaster. Move all
+the kfrees for vmaster into the goto error section.
+
+Fixes: cfea71aea921 ("iommu/arm-smmu-v3: Put iopf enablement in the domain attach path")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Pranjal Shrivastava <praan@google.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Link: https://lore.kernel.org/r/20250711204020.1677884-1-nicolinc@nvidia.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 10cc6dc26b7b..dacaa78f69aa 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2906,8 +2906,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
+
+ master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
+ if (!master_domain) {
+- kfree(state->vmaster);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_free_vmaster;
+ }
+ master_domain->domain = new_domain;
+ master_domain->master = master;
+@@ -2941,7 +2941,6 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
+ !arm_smmu_master_canwbs(master)) {
+ spin_unlock_irqrestore(&smmu_domain->devices_lock,
+ flags);
+- kfree(state->vmaster);
+ ret = -EINVAL;
+ goto err_iopf;
+ }
+@@ -2967,6 +2966,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
+ arm_smmu_disable_iopf(master, master_domain);
+ err_free_master_domain:
+ kfree(master_domain);
++err_free_vmaster:
++ kfree(state->vmaster);
+ return ret;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 12724ce3fe1a3d8f30d56e48b4f272d8860d1970 Mon Sep 17 00:00:00 2001
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Mon, 14 Jul 2025 12:50:19 +0800
+Subject: iommu/vt-d: Optimize iotlb_sync_map for non-caching/non-RWBF modes
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+commit 12724ce3fe1a3d8f30d56e48b4f272d8860d1970 upstream.
+
+The iotlb_sync_map iommu op allows drivers to perform necessary cache
+flushes when new mappings are established. For the Intel iommu driver,
+this callback specifically serves two purposes:
+
+- To flush caches when a second-stage page table is attached to a device
+ whose iommu is operating in caching mode (CAP_REG.CM==1).
+- To explicitly flush internal write buffers to ensure updates to memory-
+ resident remapping structures are visible to hardware (CAP_REG.RWBF==1).
+
+However, in scenarios where neither caching mode nor the RWBF flag is
+active, the cache_tag_flush_range_np() helper, which is called in the
+iotlb_sync_map path, effectively becomes a no-op.
+
+Despite being a no-op, cache_tag_flush_range_np() involves iterating
+through all cache tags of the IOMMUs attached to the domain, protected
+by a spinlock. This unnecessary execution path introduces overhead,
+leading to a measurable I/O performance regression. On systems with NVMes
+under the same bridge, performance was observed to drop from approximately
+~6150 MiB/s down to ~4985 MiB/s.
+
+Introduce a flag in the dmar_domain structure. This flag will only be set
+when iotlb_sync_map is required (i.e., when CM or RWBF is set). The
+cache_tag_flush_range_np() is called only for domains where this flag is
+set. This flag, once set, is immutable, given that there won't be mixed
+configurations in real-world scenarios where some IOMMUs in a system
+operate in caching mode while others do not. Theoretically, the
+immutability of this flag does not impact functionality.
+
+Reported-by: Ioanna Alifieraki <ioanna-maria.alifieraki@canonical.com>
+Closes: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2115738
+Link: https://lore.kernel.org/r/20250701171154.52435-1-ioanna-maria.alifieraki@canonical.com
+Fixes: 129dab6e1286 ("iommu/vt-d: Use cache_tag_flush_range_np() in iotlb_sync_map")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/20250703031545.3378602-1-baolu.lu@linux.intel.com
+Link: https://lore.kernel.org/r/20250714045028.958850-3-baolu.lu@linux.intel.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c | 19 ++++++++++++++++++-
+ drivers/iommu/intel/iommu.h | 3 +++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1795,6 +1795,18 @@ static int domain_setup_first_level(stru
+ (pgd_t *)pgd, flags, old);
+ }
+
++static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
++ struct intel_iommu *iommu)
++{
++ if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
++ return true;
++
++ if (rwbf_quirk || cap_rwbf(iommu->cap))
++ return true;
++
++ return false;
++}
++
+ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ struct device *dev)
+ {
+@@ -1832,6 +1844,8 @@ static int dmar_domain_attach_device(str
+ if (ret)
+ goto out_block_translation;
+
++ domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
++
+ return 0;
+
+ out_block_translation:
+@@ -3953,7 +3967,10 @@ static bool risky_device(struct pci_dev
+ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+ {
+- cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
++ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
++
++ if (dmar_domain->iotlb_sync_map)
++ cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);
+
+ return 0;
+ }
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -614,6 +614,9 @@ struct dmar_domain {
+ u8 has_mappings:1; /* Has mappings configured through
+ * iommu_map() interface.
+ */
++ u8 iotlb_sync_map:1; /* Need to flush IOTLB cache or write
++ * buffer when creating mappings.
++ */
+
+ spinlock_t lock; /* Protect device tracking lists */
+ struct list_head devices; /* all devices' list */
--- /dev/null
+From b42497e3c0e74db061eafad41c0cd7243c46436b Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Thu, 17 Jul 2025 11:46:55 -0300
+Subject: iommufd: Prevent ALIGN() overflow
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit b42497e3c0e74db061eafad41c0cd7243c46436b upstream.
+
+When allocating IOVA the candidate range gets aligned to the target
+alignment. If the range is close to ULONG_MAX then the ALIGN() can
+wrap resulting in a corrupted iova.
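+
+For example, with a 4KiB alignment (sketch):
+
+  unsigned long iova = ULONG_MAX - 100;
+
+  ALIGN(iova, SZ_4K); /* (iova + 4095) wraps around, yielding 0 */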
+
+Open code the ALIGN() using check_add_overflow() to prevent this.
+This simplifies the checks as we don't need to check for length earlier
+either.
+
+Consolidate the two copies of this code under a single helper.
+
+This bug would allow userspace to create a mapping that overlaps with some
+other mapping or a reserved range.
+
+Cc: stable@vger.kernel.org
+Fixes: 51fe6141f0f6 ("iommufd: Data structure to provide IOVA to PFN mapping")
+Reported-by: syzbot+c2f65e2801743ca64e08@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com
+Reviewed-by: Yi Liu <yi.l.liu@intel.com>
+Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
+Link: https://patch.msgid.link/all/1-v1-7b4a16fc390b+10f4-iommufd_alloc_overflow_jgg@nvidia.com/
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c | 41 +++++++++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -70,36 +70,45 @@ struct iopt_area *iopt_area_contig_next(
+ return iter->area;
+ }
+
+-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+- unsigned long length,
+- unsigned long iova_alignment,
+- unsigned long page_offset)
++static bool __alloc_iova_check_range(unsigned long *start, unsigned long last,
++ unsigned long length,
++ unsigned long iova_alignment,
++ unsigned long page_offset)
+ {
+- if (span->is_used || span->last_hole - span->start_hole < length - 1)
++ unsigned long aligned_start;
++
++ /* ALIGN_UP() */
++ if (check_add_overflow(*start, iova_alignment - 1, &aligned_start))
+ return false;
++ aligned_start &= ~(iova_alignment - 1);
++ aligned_start |= page_offset;
+
+- span->start_hole = ALIGN(span->start_hole, iova_alignment) |
+- page_offset;
+- if (span->start_hole > span->last_hole ||
+- span->last_hole - span->start_hole < length - 1)
++ if (aligned_start >= last || last - aligned_start < length - 1)
+ return false;
++ *start = aligned_start;
+ return true;
+ }
+
+-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+ unsigned long length,
+ unsigned long iova_alignment,
+ unsigned long page_offset)
+ {
+- if (span->is_hole || span->last_used - span->start_used < length - 1)
++ if (span->is_used)
+ return false;
++ return __alloc_iova_check_range(&span->start_hole, span->last_hole,
++ length, iova_alignment, page_offset);
++}
+
+- span->start_used = ALIGN(span->start_used, iova_alignment) |
+- page_offset;
+- if (span->start_used > span->last_used ||
+- span->last_used - span->start_used < length - 1)
++static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++ unsigned long length,
++ unsigned long iova_alignment,
++ unsigned long page_offset)
++{
++ if (span->is_hole)
+ return false;
+- return true;
++ return __alloc_iova_check_range(&span->start_used, span->last_used,
++ length, iova_alignment, page_offset);
+ }
+
+ /*
--- /dev/null
+From b23e09f9997771b4b739c1c694fa832b5fa2de02 Mon Sep 17 00:00:00 2001
+From: Nicolin Chen <nicolinc@nvidia.com>
+Date: Wed, 9 Jul 2025 22:58:53 -0700
+Subject: iommufd: Report unmapped bytes in the error path of iopt_unmap_iova_range
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+commit b23e09f9997771b4b739c1c694fa832b5fa2de02 upstream.
+
+There are callers that read the unmapped bytes even when rc != 0. Thus, do
+not forget to report it in the error path too.
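+
+Callers can then rely on the out parameter even on failure (a sketch of
+the calling convention; 'unmapped' is the caller's local variable):
+
+  rc = iopt_unmap_iova_range(iopt, start, last, &unmapped);
+  /* 'unmapped' is valid even when rc != 0, e.g. on -EDEADLOCK */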
+
+Fixes: 8d40205f6093 ("iommufd: Add kAPI toward external drivers for kernel access")
+Link: https://patch.msgid.link/r/e2b61303bbc008ba1a4e2d7c2a2894749b59fdac.1752126748.git.nicolinc@nvidia.com
+Cc: stable@vger.kernel.org
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/iommufd/io_pagetable.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -743,8 +743,10 @@ again:
+ iommufd_access_notify_unmap(iopt, area_first, length);
+ /* Something is not responding to unmap requests. */
+ tries++;
+- if (WARN_ON(tries > 100))
+- return -EDEADLOCK;
++ if (WARN_ON(tries > 100)) {
++ rc = -EDEADLOCK;
++ goto out_unmapped;
++ }
+ goto again;
+ }
+
+@@ -766,6 +768,7 @@ again:
+ out_unlock_iova:
+ up_write(&iopt->iova_rwsem);
+ up_read(&iopt->domains_rwsem);
++out_unmapped:
+ if (unmapped)
+ *unmapped = unmapped_bytes;
+ return rc;
--- /dev/null
+From 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 Mon Sep 17 00:00:00 2001
+From: Ricky Wu <ricky_wu@realtek.com>
+Date: Fri, 11 Jul 2025 22:01:43 +0800
+Subject: misc: rtsx: usb: Ensure mmc child device is active when card is present
+
+From: Ricky Wu <ricky_wu@realtek.com>
+
+commit 966c5cd72be8989c8a559ddef8e8ff07a37c5eb0 upstream.
+
+When a card is present in the reader, the driver currently defers
+autosuspend by returning -EAGAIN during the suspend callback to
+trigger USB remote wakeup signaling. However, this does not guarantee
+that the mmc child device has been resumed, which may cause issues if
+it remains suspended while the card is accessible.
+This patch ensures that all child devices, including the mmc host
+controller, are explicitly resumed before returning -EAGAIN. This
+fixes a corner case introduced by earlier remote wakeup handling,
+improving reliability of runtime PM when a card is inserted.
+
+Fixes: 883a87ddf2f1 ("misc: rtsx_usb: Use USB remote wakeup signaling for card insertion detection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricky Wu <ricky_wu@realtek.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Link: https://lore.kernel.org/r/20250711140143.2105224-1-ricky_wu@realtek.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cardreader/rtsx_usb.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct u
+ }
+
+ #ifdef CONFIG_PM
++static int rtsx_usb_resume_child(struct device *dev, void *data)
++{
++ pm_request_resume(dev);
++ return 0;
++}
++
+ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+ struct rtsx_ucr *ucr =
+@@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_i
+ mutex_unlock(&ucr->dev_mutex);
+
+ /* Defer the autosuspend if card exists */
+- if (val & (SD_CD | MS_CD))
++ if (val & (SD_CD | MS_CD)) {
++ device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
+ return -EAGAIN;
++ }
+ } else {
+ /* There is an ongoing operation*/
+ return -EAGAIN;
+@@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_i
+ return 0;
+ }
+
+-static int rtsx_usb_resume_child(struct device *dev, void *data)
+-{
+- pm_request_resume(dev);
+- return 0;
+-}
+-
+ static int rtsx_usb_resume(struct usb_interface *intf)
+ {
+ device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
--- /dev/null
+From 579bd5006fe7f4a7abb32da0160d376476cab67d Mon Sep 17 00:00:00 2001
+From: Bijan Tabatabai <bijantabatab@micron.com>
+Date: Tue, 8 Jul 2025 19:47:29 -0500
+Subject: mm/damon/core: commit damos->target_nid
+
+From: Bijan Tabatabai <bijantabatab@micron.com>
+
+commit 579bd5006fe7f4a7abb32da0160d376476cab67d upstream.
+
+When committing new scheme parameters from the sysfs, the target_nid field
+of the damos struct would not be copied. This would result in the
+target_nid field retaining its original value, despite being updated in
+the sysfs interface.
+
+This patch fixes this issue by copying target_nid in damos_commit().
+
+Link: https://lkml.kernel.org/r/20250709004729.17252-1-bijan311@gmail.com
+Fixes: 83dc7bbaecae ("mm/damon/sysfs: use damon_commit_ctx()")
+Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -993,6 +993,7 @@ static int damos_commit(struct damos *ds
+ return err;
+
+ dst->wmarks = src->wmarks;
++ dst->target_nid = src->target_nid;
+
+ err = damos_commit_filters(dst, src);
+ return err;
--- /dev/null
+From 252fea131e15aba2cd487119d1a8f546471199e2 Mon Sep 17 00:00:00 2001
+From: Miguel Ojeda <ojeda@kernel.org>
+Date: Sat, 26 Jul 2025 15:34:35 +0200
+Subject: rust: kbuild: clean output before running `rustdoc`
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+commit 252fea131e15aba2cd487119d1a8f546471199e2 upstream.
+
+`rustdoc` can get confused when generating documentation into a folder
+that contains generated files from other `rustdoc` versions.
+
+For instance, running something like:
+
+ rustup default 1.78.0
+ make LLVM=1 rustdoc
+ rustup default 1.88.0
+ make LLVM=1 rustdoc
+
+may generate errors like:
+
+ error: couldn't generate documentation: invalid template: last line expected to start with a comment
+ |
+ = note: failed to create or modify "./Documentation/output/rust/rustdoc/src-files.js"
+
+Thus just always clean the output folder before generating the
+documentation -- we are anyway regenerating it every time the `rustdoc`
+target gets called, at least for the time being.
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Reported-by: Daniel Almeida <daniel.almeida@collabora.com>
+Closes: https://rust-for-linux.zulipchat.com/#narrow/channel/288089/topic/x/near/527201113
+Reviewed-by: Tamir Duberstein <tamird@kernel.org>
+Link: https://lore.kernel.org/r/20250726133435.2460085-1-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/Makefile | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -103,14 +103,14 @@ rustdoc: rustdoc-core rustdoc-macros rus
+ rustdoc-macros: private rustdoc_host = yes
+ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+ --extern proc_macro
+-rustdoc-macros: $(src)/macros/lib.rs FORCE
++rustdoc-macros: $(src)/macros/lib.rs rustdoc-clean FORCE
+ +$(call if_changed,rustdoc)
+
+ # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should
+ # not be needed -- see https://github.com/rust-lang/rust/pull/128307.
+ rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks
+ rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs)
+-rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
++rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE
+ +$(call if_changed,rustdoc)
+
+ rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+@@ -122,7 +122,8 @@ rustdoc-ffi: $(src)/ffi.rs rustdoc-core
+ rustdoc-pin_init_internal: private rustdoc_host = yes
+ rustdoc-pin_init_internal: private rustc_target_flags = --cfg kernel \
+ --extern proc_macro --crate-type proc-macro
+-rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs FORCE
++rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs \
++ rustdoc-clean FORCE
+ +$(call if_changed,rustdoc)
+
+ rustdoc-pin_init: private rustdoc_host = yes
+@@ -140,6 +141,9 @@ rustdoc-kernel: $(src)/kernel/lib.rs rus
+ $(obj)/bindings.o FORCE
+ +$(call if_changed,rustdoc)
+
++rustdoc-clean: FORCE
++ $(Q)rm -rf $(rustdoc_output)
++
+ quiet_cmd_rustc_test_library = $(RUSTC_OR_CLIPPY_QUIET) TL $<
+ cmd_rustc_test_library = \
+ OBJTREE=$(abspath $(objtree)) \
--- /dev/null
+From abbf9a44944171ca99c150adad9361a2f517d3b6 Mon Sep 17 00:00:00 2001
+From: Miguel Ojeda <ojeda@kernel.org>
+Date: Sun, 27 Jul 2025 11:23:17 +0200
+Subject: rust: workaround `rustdoc` target modifiers bug
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+commit abbf9a44944171ca99c150adad9361a2f517d3b6 upstream.
+
+Starting with Rust 1.88.0 (released 2025-06-26), `rustdoc` complains
+about a target modifier mismatch in configurations where `-Zfixed-x18`
+is passed:
+
+ error: mixing `-Zfixed-x18` will cause an ABI mismatch in crate `rust_out`
+ |
+ = help: the `-Zfixed-x18` flag modifies the ABI so Rust crates compiled with different values of this flag cannot be used together safely
+ = note: unset `-Zfixed-x18` in this crate is incompatible with `-Zfixed-x18=` in dependency `core`
+ = help: set `-Zfixed-x18=` in this crate or unset `-Zfixed-x18` in `core`
+ = help: if you are sure this will not cause problems, you may use `-Cunsafe-allow-abi-mismatch=fixed-x18` to silence this error
+
+The reason is that `rustdoc` was not passing the target modifiers when
+configuring the session options, and thus it would report a mismatch
+that did not exist as soon as a target modifier is used in a dependency.
+
+We did not notice it in the kernel until now because `-Zfixed-x18` has
+been a target modifier only since 1.88.0 (and it is the only one we use
+so far).
+
+The issue has been reported upstream [1] and a fix has been submitted
+[2], including a test similar to the kernel case.
+
+ [ This is now fixed upstream (thanks Guillaume for the quick review),
+ so it will be fixed in Rust 1.90.0 (expected 2025-09-18).
+
+ - Miguel ]
+
+Meanwhile, conditionally pass `-Cunsafe-allow-abi-mismatch=fixed-x18`
+to workaround the issue on our side.
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Reported-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Closes: https://lore.kernel.org/rust-for-linux/36cdc798-524f-4910-8b77-d7b9fac08d77@oss.qualcomm.com/
+Link: https://github.com/rust-lang/rust/issues/144521 [1]
+Link: https://github.com/rust-lang/rust/pull/144523 [2]
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20250727092317.2930617-1-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/Makefile | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -62,6 +62,10 @@ core-cfgs = \
+
+ core-edition := $(if $(call rustc-min-version,108700),2024,2021)
+
++# `rustdoc` did not save the target modifiers, thus workaround for
++# the time being (https://github.com/rust-lang/rust/issues/144521).
++rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18)
++
+ # `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only
+ # since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust
+ # 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus workaround both
+@@ -74,6 +78,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustd
+ -Zunstable-options --generate-link-to-definition \
+ --output $(rustdoc_output) \
+ --crate-name $(subst rustdoc-,,$@) \
++ $(rustdoc_modifiers_workaround) \
+ $(if $(rustdoc_host),,--sysroot=/dev/null) \
+ @$(objtree)/include/generated/rustc_cfg $<
+
+@@ -216,6 +221,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC
+ --extern bindings --extern uapi \
+ --no-run --crate-name kernel -Zunstable-options \
+ --sysroot=/dev/null \
++ $(rustdoc_modifiers_workaround) \
+ --test-builder $(objtree)/scripts/rustdoc_test_builder \
+ $< $(rustdoc_test_kernel_quiet); \
+ $(objtree)/scripts/rustdoc_test_gen
--- /dev/null
+From 964314344eab7bc43e38a32be281c5ea0609773b Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 6 Jul 2025 12:32:04 -0700
+Subject: samples/damon/mtier: support boot time enable setup
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 964314344eab7bc43e38a32be281c5ea0609773b upstream.
+
+If 'enable' parameter of the 'mtier' DAMON sample module is set at boot
+time via the kernel command line, memory allocation is tried before the
+slab is initialized. As a result, a kernel NULL pointer dereference BUG
+can happen. Fix it by checking the initialization status.
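+
+A minimal sketch of the guard in the parameter store callback (the
+deferred start is then done from the module init function below):
+
+  if (!init_called)
+          return 0; /* handle boot-time 'enable' from module_init() */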
+
+Link: https://lkml.kernel.org/r/20250706193207.39810-4-sj@kernel.org
+Fixes: 82a08bde3cf7 ("samples/damon: implement a DAMON module for memory tiering")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ samples/damon/mtier.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/samples/damon/mtier.c
++++ b/samples/damon/mtier.c
+@@ -151,6 +151,8 @@ static void damon_sample_mtier_stop(void
+ damon_destroy_ctx(ctxs[1]);
+ }
+
++static bool init_called;
++
+ static int damon_sample_mtier_enable_store(
+ const char *val, const struct kernel_param *kp)
+ {
+@@ -176,6 +178,14 @@ static int damon_sample_mtier_enable_sto
+
+ static int __init damon_sample_mtier_init(void)
+ {
++ int err = 0;
++
++ init_called = true;
++ if (enable) {
++ err = damon_sample_mtier_start();
++ if (err)
++ enable = false;
++ }
+ return 0;
+ }
+
--- /dev/null
+From 0ed1165c37277822b519f519d0982d36efc30006 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 6 Jul 2025 12:32:02 -0700
+Subject: samples/damon/wsse: fix boot time enable handling
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 0ed1165c37277822b519f519d0982d36efc30006 upstream.
+
+Patch series "mm/damon: fix misc bugs in DAMON modules".
+
+From manual code review, I found the bugs below in DAMON modules.
+
+DAMON sample modules crash if those are enabled at boot time, via kernel
+command line. A similar issue was found and fixed on DAMON non-sample
+modules in the past, but we didn't check that for sample modules.
+
+DAMON non-sample modules are not setting their 'enabled' parameters
+accordingly when the real enabling fails. Honggyu found and fixed[1]
+this type of bug in the DAMON sample modules, and my inspection was
+motivated by that great work. Kudos to Honggyu.
+
+Finally, DAMON_RECLAIM is mistakenly losing scheme internal status due
+to a misuse of damon_commit_ctx(). DAMON_LRU_SORT has a similar misuse,
+but fortunately it does not cause real status loss.
+
+Fix the bugs. Since these are bug patterns similar to ones found in
+the past, it would be better to add tests or refactor the code in the
+future.
+
+
+This patch (of 6):
+
+If the 'enable' parameter of the 'wsse' DAMON sample module is set at
+boot time via the kernel command line, memory allocation is attempted
+before the slab allocator is initialized. As a result, a kernel NULL
+pointer dereference BUG can happen. Fix it by checking the
+initialization status.
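+
+For example, the crash could be triggered by enabling the built-in
+sample from the kernel command line (parameter prefix assuming the
+sample's object name, wsse.o):
+
+	wsse.enable=Y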
+
+Link: https://lkml.kernel.org/r/20250706193207.39810-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20250706193207.39810-2-sj@kernel.org
+Link: https://lore.kernel.org/20250702000205.1921-1-honggyu.kim@sk.com [1]
+Fixes: b757c6cfc696 ("samples/damon/wsse: start and stop DAMON as the user requests")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ samples/damon/wsse.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/samples/damon/wsse.c
++++ b/samples/damon/wsse.c
+@@ -89,6 +89,8 @@ static void damon_sample_wsse_stop(void)
+ put_pid(target_pidp);
+ }
+
++static bool init_called;
++
+ static int damon_sample_wsse_enable_store(
+ const char *val, const struct kernel_param *kp)
+ {
+@@ -114,7 +116,15 @@ static int damon_sample_wsse_enable_stor
+
+ static int __init damon_sample_wsse_init(void)
+ {
+- return 0;
++ int err = 0;
++
++ init_called = true;
++ if (enable) {
++ err = damon_sample_wsse_start();
++ if (err)
++ enable = false;
++ }
++ return err;
+ }
+
+ module_init(damon_sample_wsse_init);
drm-amdgpu-fix-vram-reservation-issue.patch
drm-amdgpu-fix-incorrect-vm-flags-to-map-bo.patch
x86-sev-improve-handling-of-writes-to-intercepted-ts.patch
+x86-fpu-fix-null-dereference-in-avx512_status.patch
+x86-sev-ensure-svsm-reserved-fields-in-a-page-validation-entry-are-initialized-to-zero.patch
+futex-use-user_write_access_begin-_end-in-futex_put_value.patch
+rust-kbuild-clean-output-before-running-rustdoc.patch
+rust-workaround-rustdoc-target-modifiers-bug.patch
+samples-damon-wsse-fix-boot-time-enable-handling.patch
+samples-damon-mtier-support-boot-time-enable-setup.patch
+mm-damon-core-commit-damos-target_nid.patch
+block-introduce-bio_needs_zone_write_plugging.patch
+dm-always-split-write-bios-to-zoned-device-limits.patch
+clk-qcom-gcc-ipq8074-fix-broken-freq-table-for-nss_port6_tx_clk_src.patch
+clk-qcom-dispcc-sm8750-fix-setting-rate-byte-and-pixel-clocks.patch
+cifs-reset-iface-weights-when-we-cannot-find-a-candidate.patch
+iommu-vt-d-optimize-iotlb_sync_map-for-non-caching-non-rwbf-modes.patch
+iommu-arm-smmu-v3-revert-vmaster-in-the-error-path.patch
+iommu-arm-smmu-qcom-add-sm6115-mdss-compatible.patch
+iommufd-report-unmapped-bytes-in-the-error-path-of-iopt_unmap_iova_range.patch
+iommufd-prevent-align-overflow.patch
+ext4-fix-zombie-groups-in-average-fragment-size-lists.patch
+ext4-fix-largest-free-orders-lists-corruption-on-mb_optimize_scan-switch.patch
+ext4-initialize-superblock-fields-in-the-kballoc-test.c-kunit-tests.patch
+usb-core-config-prevent-oob-read-in-ss-endpoint-companion-parsing.patch
+misc-rtsx-usb-ensure-mmc-child-device-is-active-when-card-is-present.patch
+usb-typec-ucsi-update-power_supply-on-power-role-change.patch
--- /dev/null
+From cf16f408364efd8a68f39011a3b073c83a03612d Mon Sep 17 00:00:00 2001
+From: Xinyu Liu <katieeliu@tencent.com>
+Date: Mon, 30 Jun 2025 10:02:56 +0800
+Subject: usb: core: config: Prevent OOB read in SS endpoint companion parsing
+
+From: Xinyu Liu <katieeliu@tencent.com>
+
+commit cf16f408364efd8a68f39011a3b073c83a03612d upstream.
+
+usb_parse_ss_endpoint_companion() checks the descriptor type before the
+length, enabling a potential out-of-bounds read past the end of the
+buffer.
+
+Fix this up by checking the size before looking at any of the fields in
+the descriptor.
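+
+For reference, the descriptor layout (per include/uapi/linux/usb/ch9.h,
+reproduced here only to illustrate the out-of-bounds access):
+
+	struct usb_ss_ep_comp_descriptor {
+		__u8  bLength;		/* offset 0 */
+		__u8  bDescriptorType;	/* offset 1 */
+		__u8  bMaxBurst;
+		__u8  bmAttributes;
+		__le16 wBytesPerInterval;
+	} __attribute__ ((packed));
+
+With the old ordering, any buffer with size < 2 made the
+desc->bDescriptorType access read past the end of the buffer; checking
+size against USB_DT_SS_EP_COMP_SIZE (6) first rules that out.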
+
+Signed-off-by: Xinyu Liu <katieeliu@tencent.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/config.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -107,8 +107,14 @@ static void usb_parse_ss_endpoint_compan
+ */
+ desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+
+- if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+- size < USB_DT_SS_EP_COMP_SIZE) {
++ if (size < USB_DT_SS_EP_COMP_SIZE) {
++ dev_notice(ddev,
++ "invalid SuperSpeed endpoint companion descriptor "
++ "of length %d, skipping\n", size);
++ return;
++ }
++
++ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+ dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+ " interface %d altsetting %d ep %d: "
+ "using minimum values\n",
--- /dev/null
+From 7616f006db07017ef5d4ae410fca99279aaca7aa Mon Sep 17 00:00:00 2001
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Date: Mon, 21 Jul 2025 13:32:51 +0700
+Subject: usb: typec: ucsi: Update power_supply on power role change
+
+From: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+
+commit 7616f006db07017ef5d4ae410fca99279aaca7aa upstream.
+
+The current power direction of a USB-C port also influences the
+power_supply's online status, so a power role change should also update
+the power_supply.
+
+This fixes an issue on some systems where plugging in a normal USB
+device for the first time after a reboot causes upower to erroneously
+consider the system to be connected to AC power.
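+
+The added call simply forwards the event to the power_supply core,
+which re-emits it to userspace. A simplified sketch of the helper in
+drivers/usb/typec/ucsi/psy.c:
+
+	void ucsi_port_psy_changed(struct ucsi_connector *con)
+	{
+		if (IS_ERR_OR_NULL(con->psy))
+			return;
+
+		/* Generates the change event that upower reacts to. */
+		power_supply_changed(con->psy);
+	}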
+
+Cc: stable <stable@kernel.org>
+Fixes: 0e6371fbfba3 ("usb: typec: ucsi: Report power supply changes")
+Signed-off-by: Myrrh Periwinkle <myrrhperiwinkle@qtmlabs.xyz>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20250721-fix-ucsi-pwr-dir-notify-v1-1-e53d5340cb38@qtmlabs.xyz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1246,6 +1246,7 @@ static void ucsi_handle_connector_change
+
+ if (change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+ typec_set_pwr_role(con->port, role);
++ ucsi_port_psy_changed(con);
+
+ /* Complete pending power role swap */
+ if (!completion_done(&con->complete))
--- /dev/null
+From 31cd31c9e17ece125aad27259501a2af69ccb020 Mon Sep 17 00:00:00 2001
+From: Fushuai Wang <wangfushuai@baidu.com>
+Date: Mon, 11 Aug 2025 11:50:44 -0700
+Subject: x86/fpu: Fix NULL dereference in avx512_status()
+
+From: Fushuai Wang <wangfushuai@baidu.com>
+
+commit 31cd31c9e17ece125aad27259501a2af69ccb020 upstream.
+
+Problem
+-------
+With CONFIG_X86_DEBUG_FPU enabled, reading /proc/[kthread]/arch_status
+causes a warning and a NULL pointer dereference.
+
+This is because the AVX-512 timestamp code uses x86_task_fpu() but
+doesn't check it for NULL. CONFIG_X86_DEBUG_FPU addles that function
+for kernel threads (PF_KTHREAD specifically), making it return NULL.
+
+The point of the warning was to ensure that kernel threads only access
+task->fpu after going through kernel_fpu_begin()/_end(). Note: all
+kernel tasks exposed in /proc have a valid task->fpu.
+
+Solution
+--------
+One option is to silence the warning and check for NULL from
+x86_task_fpu(). However, that warning is fairly fresh and seems like a
+defense against misuse of the FPU state in kernel threads.
+
+Instead, stop outputting AVX512_elapsed_ms for kernel threads
+altogether. The data was garbage anyway, because avx512_timestamp is
+only updated for user threads, not kernel threads.
+
+If anyone ever wants to track kernel thread AVX-512 use, they can come
+back later and do it properly, separate from this bug fix.
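+
+Illustration (shape of the output only; pids and values will differ):
+after the fix, the field disappears for kernel threads while user tasks
+still report it:
+
+	$ grep AVX512 /proc/$(pgrep -o kswapd0)/arch_status
+	$ grep AVX512 /proc/$$/arch_status
+	AVX512_elapsed_ms:	-1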
+
+[ dhansen: mostly rewrite changelog ]
+
+Fixes: 22aafe3bcb67 ("x86/fpu: Remove init_task FPU state dependencies, add debugging warning for PF_KTHREAD tasks")
+Co-developed-by: Sohil Mehta <sohil.mehta@intel.com>
+Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
+Signed-off-by: Fushuai Wang <wangfushuai@baidu.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20250811185044.2227268-1-sohil.mehta%40intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/xstate.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1855,19 +1855,20 @@ long fpu_xstate_prctl(int option, unsign
+ #ifdef CONFIG_PROC_PID_ARCH_STATUS
+ /*
+ * Report the amount of time elapsed in millisecond since last AVX512
+- * use in the task.
++ * use in the task. Report -1 if no AVX-512 usage.
+ */
+ static void avx512_status(struct seq_file *m, struct task_struct *task)
+ {
+- unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
+- long delta;
++ unsigned long timestamp;
++ long delta = -1;
+
+- if (!timestamp) {
+- /*
+- * Report -1 if no AVX512 usage
+- */
+- delta = -1;
+- } else {
++ /* AVX-512 usage is not tracked for kernel threads. Don't report anything. */
++ if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
++ return;
++
++ timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
++
++ if (timestamp) {
+ delta = (long)(jiffies - timestamp);
+ /*
+ * Cap to LONG_MAX if time difference > LONG_MAX
--- /dev/null
+From 3ee9cebd0a5e7ea47eb35cec95eaa1a866af982d Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 13 Aug 2025 10:26:59 -0500
+Subject: x86/sev: Ensure SVSM reserved fields in a page validation entry are initialized to zero
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 3ee9cebd0a5e7ea47eb35cec95eaa1a866af982d upstream.
+
+To support future versions of the SVSM_CORE_PVALIDATE call, all
+reserved fields within a PVALIDATE entry must be set to zero: an SVSM
+should ensure that reserved fields are zero, so that reserved areas can
+be used in future versions of the protocol.
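+
+For context, each PVALIDATE entry is a single 64-bit bitfield, so stale
+data in the reserved bits persists unless it is cleared explicitly. The
+layout below is shown for illustration, following the SVSM spec:
+
+	struct svsm_pvalidate_entry {
+		u64 page_size	: 2,
+		    action	: 1,
+		    ignore_cf	: 1,
+		    rsvd	: 8,
+		    pfn		: 52;
+	};
+
+The entries live in the SVSM calling area, which is reused across
+calls, so the reserved bits cannot be assumed to start out as zero.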
+
+Fixes: fcd042e86422 ("x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Joerg Roedel <joerg.roedel@amd.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/7cde412f8b057ea13a646fb166b1ca023f6a5031.1755098819.git.thomas.lendacky@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/startup/sev-shared.c | 1 +
+ arch/x86/coco/sev/core.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/x86/boot/startup/sev-shared.c
++++ b/arch/x86/boot/startup/sev-shared.c
+@@ -785,6 +785,7 @@ static void __head svsm_pval_4k_page(uns
+ pc->entry[0].page_size = RMP_PG_SIZE_4K;
+ pc->entry[0].action = validate;
+ pc->entry[0].ignore_cf = 0;
++ pc->entry[0].rsvd = 0;
+ pc->entry[0].pfn = paddr >> PAGE_SHIFT;
+
+ /* Protocol 0, Call ID 1 */
+--- a/arch/x86/coco/sev/core.c
++++ b/arch/x86/coco/sev/core.c
+@@ -227,6 +227,7 @@ static u64 svsm_build_ca_from_pfn_range(
+ pe->page_size = RMP_PG_SIZE_4K;
+ pe->action = action;
+ pe->ignore_cf = 0;
++ pe->rsvd = 0;
+ pe->pfn = pfn;
+
+ pe++;
+@@ -257,6 +258,7 @@ static int svsm_build_ca_from_psc_desc(s
+ pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
+ pe->ignore_cf = 0;
++ pe->rsvd = 0;
+ pe->pfn = e->gfn;
+
+ pe++;