]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for all trees
authorSasha Levin <sashal@kernel.org>
Tue, 31 Mar 2026 14:24:43 +0000 (10:24 -0400)
committerSasha Levin <sashal@kernel.org>
Tue, 31 Mar 2026 14:24:43 +0000 (10:24 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
131 files changed:
queue-5.10/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-5.10/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-5.10/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-5.10/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-5.10/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-5.10/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-5.10/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch [new file with mode: 0644]
queue-5.10/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-5.10/series
queue-5.15/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-5.15/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-5.15/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-5.15/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-5.15/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch [new file with mode: 0644]
queue-5.15/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-5.15/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-5.15/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-5.15/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-5.15/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch [new file with mode: 0644]
queue-5.15/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-5.15/series
queue-6.1/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch [new file with mode: 0644]
queue-6.1/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-6.1/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-6.1/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-6.1/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch [new file with mode: 0644]
queue-6.1/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-6.1/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch [new file with mode: 0644]
queue-6.1/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-6.1/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-6.1/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-6.1/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-6.1/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch [new file with mode: 0644]
queue-6.1/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-6.1/series
queue-6.12/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch [new file with mode: 0644]
queue-6.12/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-6.12/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-6.12/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-add-idxd_device_config_save-and-idxd_.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-add-idxd_pci_probe_alloc-helper.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-binding-and-unbinding-idxd-device-and.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-delete-unnecessary-null-check.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-leaking-event-log-memory.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-6.12/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch [new file with mode: 0644]
queue-6.12/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch [new file with mode: 0644]
queue-6.12/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-6.12/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-6.12/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-6.12/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-6.12/futex-require-sys_futex_requeue-to-have-identical-fl.patch [new file with mode: 0644]
queue-6.12/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch [new file with mode: 0644]
queue-6.12/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch [new file with mode: 0644]
queue-6.18/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch [new file with mode: 0644]
queue-6.18/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-6.18/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-6.18/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-leaking-event-log-memory.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch [new file with mode: 0644]
queue-6.18/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch [new file with mode: 0644]
queue-6.18/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch [new file with mode: 0644]
queue-6.18/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-6.18/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-6.18/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-6.18/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-6.18/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch [new file with mode: 0644]
queue-6.18/futex-require-sys_futex_requeue-to-have-identical-fl.patch [new file with mode: 0644]
queue-6.18/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch [new file with mode: 0644]
queue-6.18/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch [new file with mode: 0644]
queue-6.18/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch [new file with mode: 0644]
queue-6.18/netfs-fix-read-abandonment-during-retry.patch [new file with mode: 0644]
queue-6.18/netfs-fix-the-handling-of-stream-front-by-removing-i.patch [new file with mode: 0644]
queue-6.18/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-6.18/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch [new file with mode: 0644]
queue-6.18/series
queue-6.18/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch [new file with mode: 0644]
queue-6.19/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch [new file with mode: 0644]
queue-6.19/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-6.19/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-6.19/bug-avoid-format-attribute-warning-for-clang-as-well.patch [new file with mode: 0644]
queue-6.19/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-leaking-event-log-memory.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch [new file with mode: 0644]
queue-6.19/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch [new file with mode: 0644]
queue-6.19/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch [new file with mode: 0644]
queue-6.19/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-6.19/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-6.19/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-6.19/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-6.19/ext4-fix-mballoc-test.c-is-not-compiled-when-ext4_ku.patch [new file with mode: 0644]
queue-6.19/ext4-introduce-export_symbol_for_ext4_test-helper.patch [new file with mode: 0644]
queue-6.19/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch [new file with mode: 0644]
queue-6.19/futex-require-sys_futex_requeue-to-have-identical-fl.patch [new file with mode: 0644]
queue-6.19/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch [new file with mode: 0644]
queue-6.19/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch [new file with mode: 0644]
queue-6.19/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch [new file with mode: 0644]
queue-6.19/netfs-fix-read-abandonment-during-retry.patch [new file with mode: 0644]
queue-6.19/netfs-fix-the-handling-of-stream-front-by-removing-i.patch [new file with mode: 0644]
queue-6.19/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-6.19/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch [new file with mode: 0644]
queue-6.19/series
queue-6.19/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch [new file with mode: 0644]
queue-6.6/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch [new file with mode: 0644]
queue-6.6/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch [new file with mode: 0644]
queue-6.6/btrfs-fix-super-block-offset-in-error-message-in-btr.patch [new file with mode: 0644]
queue-6.6/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch [new file with mode: 0644]
queue-6.6/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch [new file with mode: 0644]
queue-6.6/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch [new file with mode: 0644]
queue-6.6/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch [new file with mode: 0644]
queue-6.6/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch [new file with mode: 0644]
queue-6.6/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch [new file with mode: 0644]
queue-6.6/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch [new file with mode: 0644]
queue-6.6/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch [new file with mode: 0644]
queue-6.6/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch [new file with mode: 0644]
queue-6.6/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch [new file with mode: 0644]
queue-6.6/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-5.10/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-5.10/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..c460538
--- /dev/null
@@ -0,0 +1,48 @@
+From 39a66d78e9218c6d56aadb0d58442e2fd8e872c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 2bba6e8d43740..da77493f4c17d 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7553,8 +7553,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-5.10/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-5.10/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..e0170a7
--- /dev/null
@@ -0,0 +1,46 @@
+From d9fd67dd327e1708b1db997346d7210e2f558238 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 29f0ba4adfbce..7a2b91f6cf14e 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2548,8 +2548,8 @@ static int validate_super(struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..6d3d44c
--- /dev/null
@@ -0,0 +1,38 @@
+From cbdc8879d7d49ce07309b85ecd7840a83d89aa5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 12e9ba5b114db..0d0f3df71245d 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2845,7 +2845,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = irq_of_parse_and_map(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..42d1daa
--- /dev/null
@@ -0,0 +1,75 @@
+From 15be882a60352263bb0288f116a7f0f47041f750 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 0d0f3df71245d..ca80a1dee8489 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1514,8 +1514,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-5.10/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..c3953cd
--- /dev/null
@@ -0,0 +1,62 @@
+From af6a146bfbc99799173f2f8f69e18a1890b7cef6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ca80a1dee8489..a89a150be3284 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -964,16 +964,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -981,8 +981,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-5.10/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-5.10/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..58b4454
--- /dev/null
@@ -0,0 +1,98 @@
+From 0c11cd2551f60ac7298d732f01f207ceafb3420f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead  to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 7b3c47e442c15..ff7fda42b5ca2 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1190,14 +1190,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1566,6 +1558,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-5.10/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch b/queue-5.10/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
new file mode 100644 (file)
index 0000000..50ec58e
--- /dev/null
@@ -0,0 +1,97 @@
+From 98802d4d39cb7ea6de952afbbde99fb9d8340116 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Aug 2023 11:21:46 +0530
+Subject: dmaengine: xilinx_dma: Program interrupt delay timeout
+
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+
+[ Upstream commit 84b798fedf3fa8f0ab0c096593ba817abc454fe5 ]
+
+Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
+the DMA engine to generate an interrupt after the delay time period
+has expired. It enables dmaengine to respond in real-time even though
+interrupt coalescing is configured. It also remove the placeholder
+for delay interrupt and merge it with frame completion interrupt.
+Since by default interrupt delay timeout is disabled this feature
+addition has no functional impact on VDMA, MCDMA and CDMA IP's.
+
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://lore.kernel.org/r/1691387509-2113129-8-git-send-email-radhey.shyam.pandey@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: a17ce4bc6f4f ("dmaengine: xilinx_dma: Fix reset related timeout with two-channel AXIDMA")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index a89a150be3284..7b3c47e442c15 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -174,8 +174,10 @@
+ #define XILINX_DMA_MAX_TRANS_LEN_MAX  23
+ #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX       26
+ #define XILINX_DMA_CR_COALESCE_MAX    GENMASK(23, 16)
++#define XILINX_DMA_CR_DELAY_MAX               GENMASK(31, 24)
+ #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK       BIT(4)
+ #define XILINX_DMA_CR_COALESCE_SHIFT  16
++#define XILINX_DMA_CR_DELAY_SHIFT     24
+ #define XILINX_DMA_BD_SOP             BIT(27)
+ #define XILINX_DMA_BD_EOP             BIT(26)
+ #define XILINX_DMA_COALESCE_MAX               255
+@@ -411,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
+  * @stop_transfer: Differentiate b/w DMA IP's quiesce
+  * @tdest: TDEST value for mcdma
+  * @has_vflip: S2MM vertical flip
++ * @irq_delay: Interrupt delay timeout
+  */
+ struct xilinx_dma_chan {
+       struct xilinx_dma_device *xdev;
+@@ -449,6 +452,7 @@ struct xilinx_dma_chan {
+       int (*stop_transfer)(struct xilinx_dma_chan *chan);
+       u16 tdest;
+       bool has_vflip;
++      u8 irq_delay;
+ };
+ /**
+@@ -1560,6 +1564,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->has_sg)
+               xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+                            head_desc->async_tx.phys);
++      reg  &= ~XILINX_DMA_CR_DELAY_MAX;
++      reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+@@ -1876,15 +1883,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+               }
+       }
+-      if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+-              /*
+-               * Device takes too long to do the transfer when user requires
+-               * responsiveness.
+-               */
+-              dev_dbg(chan->dev, "Inter-packet latency too long\n");
+-      }
+-
+-      if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
++      if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
++                    XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
+               spin_lock(&chan->lock);
+               xilinx_dma_complete_descriptor(chan);
+               chan->idle = true;
+@@ -2801,6 +2801,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+       /* Retrieve the channel properties from the device tree */
+       has_dre = of_property_read_bool(node, "xlnx,include-dre");
++      of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
++
+       chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+       err = of_property_read_u32(node, "xlnx,datawidth", &value);
+-- 
+2.53.0
+
diff --git a/queue-5.10/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-5.10/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..acc4d4e
--- /dev/null
@@ -0,0 +1,51 @@
+From 9ad26701a5faaa45ce9c8c730c32c2b4e4f03151 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index 5536b8f4bfd13..3c0aec368ea9f 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -799,6 +799,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -811,6 +812,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       wiz->lane_phy_type[i] = phy_type;
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
index b8f86d4d2e6a2db323aca32198e98f552e001f65..c81b754cf1e3eaf46ac6b2e64f6851dbb3611ff0 100644 (file)
@@ -307,3 +307,11 @@ ext4-convert-inline-data-to-extents-when-truncate-exceeds-inline-size.patch
 ext4-make-recently_deleted-properly-work-with-lazy-itable-initialization.patch
 ext4-avoid-allocate-block-from-corrupted-group-in-ext4_mb_find_by_goal.patch
 ext4-reject-mount-if-bigalloc-with-s_first_data_block-0.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
diff --git a/queue-5.15/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-5.15/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..1d3172c
--- /dev/null
@@ -0,0 +1,48 @@
+From b18f603e11f49c8fcd19c4dff1675c1720cd35c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 839ee01827b26..9ab226814cfde 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -8016,8 +8016,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-5.15/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-5.15/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..4bb49fa
--- /dev/null
@@ -0,0 +1,46 @@
+From a4712dcf45f01f1df28f34bbd18f96ad73aa7bf4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 136902f27e441..41cc27ba4355a 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2657,8 +2657,8 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-5.15/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..3c69f33
--- /dev/null
@@ -0,0 +1,60 @@
+From 927c615565c6709a5bec8816e9b5bdb92c930d03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 8172c3f1f782e..005eef4df2162 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -42,11 +42,7 @@ struct idxd_user_context {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -295,11 +291,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-5.15/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..3269dc1
--- /dev/null
@@ -0,0 +1,37 @@
+From 642d51d755da3cbcbebc40cb74c8a25c91f9d076 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with an DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 489a9d8850764..ee208dfdd0cb5 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1271,6 +1271,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       kfree(idxd->wqs);
+       kfree(idxd->engines);
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch b/queue-5.15/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
new file mode 100644 (file)
index 0000000..304d61f
--- /dev/null
@@ -0,0 +1,57 @@
+From 231453a130d5ffc28b6a21b4174701226da77136 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 20:33:50 +0100
+Subject: dmaengine: idxd: Remove usage of the deprecated ida_simple_xx() API
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 1075ee66a8c19bfa375b19c236fd6a22a867f138 ]
+
+ida_alloc() and ida_free() should be preferred to the deprecated
+ida_simple_get() and ida_simple_remove().
+
+This is less verbose.
+
+Note that the upper limit of ida_simple_get() is exclusive, but the one of
+ida_alloc_range() is inclusive. Sothis change allows one more device.
+
+MINORMASK is ((1U << MINORBITS) - 1), so allowing MINORMASK as a maximum value
+makes sense. It is also consistent with other "ida_.*MINORMASK" and
+"ida_*MINOR()" usages.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
+Acked-by: Lijun Pan <lijun.pan@intel.com>
+Link: https://lore.kernel.org/r/ac991f5f42112fa782a881d391d447529cbc4a23.1702967302.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: c311f5e92484 ("dmaengine: idxd: Fix freeing the allocated ida too late")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 033df43db0cec..8172c3f1f782e 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -46,7 +46,7 @@ static void idxd_cdev_dev_release(struct device *dev)
+       struct idxd_wq *wq = idxd_cdev->wq;
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -260,7 +260,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+       cdev = &idxd_cdev->cdev;
+       dev = cdev_dev(idxd_cdev);
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
++      minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
+       if (minor < 0) {
+               kfree(idxd_cdev);
+               return minor;
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..8b0038b
--- /dev/null
@@ -0,0 +1,38 @@
+From 7fca72ebc696209136ab18f862710d7f3aba62bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ba5850ca39ddd..e2adc33631ca3 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2862,7 +2862,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = irq_of_parse_and_map(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..3500edc
--- /dev/null
@@ -0,0 +1,75 @@
+From 561b09f674330399473432d80a51a4e13926b17f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index e2adc33631ca3..948093a47d9f9 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1513,8 +1513,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-5.15/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..7dea8d4
--- /dev/null
@@ -0,0 +1,62 @@
+From 75423ccb3aab01d0df3ece93132d47d00718b180 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 948093a47d9f9..00971b867ade2 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -964,16 +964,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -981,8 +981,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-5.15/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..0e28460
--- /dev/null
@@ -0,0 +1,98 @@
+From db7ee5906467c32430f9aa557f4f8cfa45a07a48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead  to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index f1162f7c9a52c..7a596eaba4660 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1190,14 +1190,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1565,6 +1557,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-5.15/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch b/queue-5.15/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
new file mode 100644 (file)
index 0000000..c17af32
--- /dev/null
@@ -0,0 +1,97 @@
+From 3c590577aa163ab3e4f027211f11949e93fa3e9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Aug 2023 11:21:46 +0530
+Subject: dmaengine: xilinx_dma: Program interrupt delay timeout
+
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+
+[ Upstream commit 84b798fedf3fa8f0ab0c096593ba817abc454fe5 ]
+
+Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
+the DMA engine to generate an interrupt after the delay time period
+has expired. It enables dmaengine to respond in real-time even though
+interrupt coalescing is configured. It also removes the placeholder
+for the delay interrupt and merges it with the frame completion interrupt.
+Since by default interrupt delay timeout is disabled this feature
+addition has no functional impact on VDMA, MCDMA and CDMA IP's.
+
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://lore.kernel.org/r/1691387509-2113129-8-git-send-email-radhey.shyam.pandey@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: a17ce4bc6f4f ("dmaengine: xilinx_dma: Fix reset related timeout with two-channel AXIDMA")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 00971b867ade2..f1162f7c9a52c 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -174,8 +174,10 @@
+ #define XILINX_DMA_MAX_TRANS_LEN_MAX  23
+ #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX       26
+ #define XILINX_DMA_CR_COALESCE_MAX    GENMASK(23, 16)
++#define XILINX_DMA_CR_DELAY_MAX               GENMASK(31, 24)
+ #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK       BIT(4)
+ #define XILINX_DMA_CR_COALESCE_SHIFT  16
++#define XILINX_DMA_CR_DELAY_SHIFT     24
+ #define XILINX_DMA_BD_SOP             BIT(27)
+ #define XILINX_DMA_BD_EOP             BIT(26)
+ #define XILINX_DMA_COALESCE_MAX               255
+@@ -411,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
+  * @stop_transfer: Differentiate b/w DMA IP's quiesce
+  * @tdest: TDEST value for mcdma
+  * @has_vflip: S2MM vertical flip
++ * @irq_delay: Interrupt delay timeout
+  */
+ struct xilinx_dma_chan {
+       struct xilinx_dma_device *xdev;
+@@ -449,6 +452,7 @@ struct xilinx_dma_chan {
+       int (*stop_transfer)(struct xilinx_dma_chan *chan);
+       u16 tdest;
+       bool has_vflip;
++      u8 irq_delay;
+ };
+ /**
+@@ -1559,6 +1563,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->has_sg)
+               xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+                            head_desc->async_tx.phys);
++      reg  &= ~XILINX_DMA_CR_DELAY_MAX;
++      reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+@@ -1886,15 +1893,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+               }
+       }
+-      if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+-              /*
+-               * Device takes too long to do the transfer when user requires
+-               * responsiveness.
+-               */
+-              dev_dbg(chan->dev, "Inter-packet latency too long\n");
+-      }
+-
+-      if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
++      if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
++                    XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
+               spin_lock(&chan->lock);
+               xilinx_dma_complete_descriptor(chan);
+               chan->idle = true;
+@@ -2818,6 +2818,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+       /* Retrieve the channel properties from the device tree */
+       has_dre = of_property_read_bool(node, "xlnx,include-dre");
++      of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
++
+       chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+       err = of_property_read_u32(node, "xlnx,datawidth", &value);
+-- 
+2.53.0
+
diff --git a/queue-5.15/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-5.15/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..16da276
--- /dev/null
@@ -0,0 +1,51 @@
+From 042fa71d26d00c9e6c1c7af0e78199088cebb8c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index 8963fbf7aa73b..a3908a579115c 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1116,6 +1116,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1128,6 +1129,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       wiz->lane_phy_type[i] = phy_type;
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
index 93c85b56e15c446904fefc16d9e3eb00c7cb7180..397576735d7986b6d34fb1b580812b497cba01cf 100644 (file)
@@ -383,3 +383,14 @@ ext4-avoid-allocate-block-from-corrupted-group-in-ext4_mb_find_by_goal.patch
 ext4-reject-mount-if-bigalloc-with-s_first_data_block-0.patch
 ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
 ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
+dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
diff --git a/queue-6.1/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch b/queue-6.1/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
new file mode 100644 (file)
index 0000000..40ab6bc
--- /dev/null
@@ -0,0 +1,70 @@
+From 350f351697407240165834c942394f449c0ca0b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 21:17:04 +0900
+Subject: btrfs: fix leak of kobject name for sub-group space_info
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit a4376d9a5d4c9610e69def3fc0b32c86a7ab7a41 ]
+
+When create_space_info_sub_group() allocates elements of
+space_info->sub_group[], kobject_init_and_add() is called for each
+element via btrfs_sysfs_add_space_info_type(). However, when
+check_removing_space_info() frees these elements, it does not call
+btrfs_sysfs_remove_space_info() on them. As a result, kobject_put() is
+not called and the associated kobj->name objects are leaked.
+
+This memory leak is reproduced by running the blktests test case
+zbd/009 on kernels built with CONFIG_DEBUG_KMEMLEAK. The kmemleak
+feature reports the following error:
+
+unreferenced object 0xffff888112877d40 (size 16):
+  comm "mount", pid 1244, jiffies 4294996972
+  hex dump (first 16 bytes):
+    64 61 74 61 2d 72 65 6c 6f 63 00 c4 c6 a7 cb 7f  data-reloc......
+  backtrace (crc 53ffde4d):
+    __kmalloc_node_track_caller_noprof+0x619/0x870
+    kstrdup+0x42/0xc0
+    kobject_set_name_vargs+0x44/0x110
+    kobject_init_and_add+0xcf/0x150
+    btrfs_sysfs_add_space_info_type+0xfc/0x210 [btrfs]
+    create_space_info_sub_group.constprop.0+0xfb/0x1b0 [btrfs]
+    create_space_info+0x211/0x320 [btrfs]
+    btrfs_init_space_info+0x15a/0x1b0 [btrfs]
+    open_ctree+0x33c7/0x4a50 [btrfs]
+    btrfs_get_tree.cold+0x9f/0x1ee [btrfs]
+    vfs_get_tree+0x87/0x2f0
+    vfs_cmd_create+0xbd/0x280
+    __do_sys_fsconfig+0x3df/0x990
+    do_syscall_64+0x136/0x1540
+    entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+To avoid the leak, call btrfs_sysfs_remove_space_info() instead of
+kfree() for the elements.
+
+Fixes: f92ee31e031c ("btrfs: introduce btrfs_space_info sub-group")
+Link: https://lore.kernel.org/linux-block/b9488881-f18d-4f47-91a5-3c9bf63955a5@wdc.com/
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 880288d7358e6..1f9fbec887c03 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -4154,7 +4154,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info)
+               for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+                       if (space_info->sub_group[i]) {
+                               check_removing_space_info(space_info->sub_group[i]);
+-                              kfree(space_info->sub_group[i]);
++                              btrfs_sysfs_remove_space_info(space_info->sub_group[i]);
+                               space_info->sub_group[i] = NULL;
+                       }
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.1/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-6.1/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..7c5a400
--- /dev/null
@@ -0,0 +1,48 @@
+From 66bec256f331778122f9ad4f680a8d2f7aca4e73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index d06709ced0f36..9f5d5f5c53131 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -8089,8 +8089,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-6.1/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-6.1/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..0f31511
--- /dev/null
@@ -0,0 +1,46 @@
+From 721051858722648dd2c11447e4eae3d3b69d9b84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index cf124944302f1..203ff9bbad431 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2759,8 +2759,8 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-6.1/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..c709fc1
--- /dev/null
@@ -0,0 +1,60 @@
+From 4db534911eb62cb369ffd4fe0e09c92e318f08c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index e42c9a9f3c238..622cc47c6a182 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -45,11 +45,7 @@ struct idxd_user_context {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -410,11 +406,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch b/queue-6.1/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
new file mode 100644 (file)
index 0000000..51b0cf7
--- /dev/null
@@ -0,0 +1,56 @@
+From 3b8cf25e4e435bbe7b866d438bf9c78ee2cd2130 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:34 -0800
+Subject: dmaengine: idxd: Fix memory leak when a wq is reset
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d9cfb5193a047a92a4d3c0e91ea4cc87c8f7c478 ]
+
+idxd_wq_disable_cleanup() which is called from the reset path for a
+workqueue, sets the wq type to NONE, which for other parts of the
+driver mean that the wq is empty (all its resources were released).
+
+Only set the wq type to NONE after its resources are released.
+
+Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-8-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 188f6b8625f78..8b72e26640084 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -174,6 +174,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_queue_free(&wq->sbq);
++      wq->type = IDXD_WQT_NONE;
+ }
+ int idxd_wq_enable(struct idxd_wq *wq)
+@@ -381,7 +382,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       lockdep_assert_held(&wq->wq_lock);
+       wq->state = IDXD_WQ_DISABLED;
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+-      wq->type = IDXD_WQT_NONE;
+       wq->threshold = 0;
+       wq->priority = 0;
+       wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+@@ -1426,7 +1426,6 @@ void drv_disable_wq(struct idxd_wq *wq)
+       idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
+       percpu_ref_exit(&wq->wq_active);
+-      wq->type = IDXD_WQT_NONE;
+       wq->client_count = 0;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-6.1/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..5d4ad98
--- /dev/null
@@ -0,0 +1,37 @@
+From 9ac153ab08a085604188d81fc7300001b4ac7bcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with an DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 0689464c4816a..ea222e1654ab9 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1663,6 +1663,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       bitmap_free(idxd->wq_enable_map);
+       kfree(idxd->wqs);
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch b/queue-6.1/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
new file mode 100644 (file)
index 0000000..80d83ac
--- /dev/null
@@ -0,0 +1,57 @@
+From 6131e975adcce32f454601ac2d90fe7271923cb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 20:33:50 +0100
+Subject: dmaengine: idxd: Remove usage of the deprecated ida_simple_xx() API
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 1075ee66a8c19bfa375b19c236fd6a22a867f138 ]
+
+ida_alloc() and ida_free() should be preferred to the deprecated
+ida_simple_get() and ida_simple_remove().
+
+This is less verbose.
+
+Note that the upper limit of ida_simple_get() is exclusive, but the one of
+ida_alloc_range() is inclusive. So this change allows one more device.
+
+MINORMASK is ((1U << MINORBITS) - 1), so allowing MINORMASK as a maximum value
+makes sense. It is also consistent with other "ida_.*MINORMASK" and
+"ida_*MINOR()" usages.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
+Acked-by: Lijun Pan <lijun.pan@intel.com>
+Link: https://lore.kernel.org/r/ac991f5f42112fa782a881d391d447529cbc4a23.1702967302.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: c311f5e92484 ("dmaengine: idxd: Fix freeing the allocated ida too late")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 9b07474f450b5..e42c9a9f3c238 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -49,7 +49,7 @@ static void idxd_cdev_dev_release(struct device *dev)
+       struct idxd_wq *wq = idxd_cdev->wq;
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -375,7 +375,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+       cdev = &idxd_cdev->cdev;
+       dev = cdev_dev(idxd_cdev);
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
++      minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
+       if (minor < 0) {
+               kfree(idxd_cdev);
+               return minor;
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..2e9de33
--- /dev/null
@@ -0,0 +1,38 @@
+From 4e2404faa2296985db7237215f66535d7a811936 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 8402dc3d3a352..ce5f4bedf059d 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2860,7 +2860,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = of_irq_get(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..a40207d
--- /dev/null
@@ -0,0 +1,75 @@
+From 8f9886ea3f313ae3f3e18a320fe78dafe0a3b649 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ce5f4bedf059d..2d734fea053d9 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1511,8 +1511,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-6.1/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..2dc20b0
--- /dev/null
@@ -0,0 +1,62 @@
+From 4d65ff2687b7cc77a6469e250cfc5dc1d5158047 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 2d734fea053d9..405638f5fc62c 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -964,16 +964,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -981,8 +981,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-6.1/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..17e783e
--- /dev/null
@@ -0,0 +1,98 @@
+From 6325d838f51e23f13be9c8174473f04224e7ab44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead  to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 7f6090d5126aa..79a12e248cbd4 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1188,14 +1188,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1563,6 +1555,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-6.1/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch b/queue-6.1/dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
new file mode 100644 (file)
index 0000000..d38128c
--- /dev/null
@@ -0,0 +1,97 @@
+From d3a68364151928c48629e639536e28770e8d96a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Aug 2023 11:21:46 +0530
+Subject: dmaengine: xilinx_dma: Program interrupt delay timeout
+
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+
+[ Upstream commit 84b798fedf3fa8f0ab0c096593ba817abc454fe5 ]
+
+Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
+the DMA engine to generate an interrupt after the delay time period
+has expired. It enables dmaengine to respond in real-time even though
+interrupt coalescing is configured. It also removes the placeholder
+for delay interrupt and merges it with frame completion interrupt.
+Since by default interrupt delay timeout is disabled this feature
+addition has no functional impact on VDMA, MCDMA and CDMA IP's.
+
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://lore.kernel.org/r/1691387509-2113129-8-git-send-email-radhey.shyam.pandey@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: a17ce4bc6f4f ("dmaengine: xilinx_dma: Fix reset related timeout with two-channel AXIDMA")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 405638f5fc62c..7f6090d5126aa 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -174,8 +174,10 @@
+ #define XILINX_DMA_MAX_TRANS_LEN_MAX  23
+ #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX       26
+ #define XILINX_DMA_CR_COALESCE_MAX    GENMASK(23, 16)
++#define XILINX_DMA_CR_DELAY_MAX               GENMASK(31, 24)
+ #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK       BIT(4)
+ #define XILINX_DMA_CR_COALESCE_SHIFT  16
++#define XILINX_DMA_CR_DELAY_SHIFT     24
+ #define XILINX_DMA_BD_SOP             BIT(27)
+ #define XILINX_DMA_BD_EOP             BIT(26)
+ #define XILINX_DMA_COALESCE_MAX               255
+@@ -411,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
+  * @stop_transfer: Differentiate b/w DMA IP's quiesce
+  * @tdest: TDEST value for mcdma
+  * @has_vflip: S2MM vertical flip
++ * @irq_delay: Interrupt delay timeout
+  */
+ struct xilinx_dma_chan {
+       struct xilinx_dma_device *xdev;
+@@ -449,6 +452,7 @@ struct xilinx_dma_chan {
+       int (*stop_transfer)(struct xilinx_dma_chan *chan);
+       u16 tdest;
+       bool has_vflip;
++      u8 irq_delay;
+ };
+ /**
+@@ -1557,6 +1561,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->has_sg)
+               xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+                            head_desc->async_tx.phys);
++      reg  &= ~XILINX_DMA_CR_DELAY_MAX;
++      reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+@@ -1884,15 +1891,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+               }
+       }
+-      if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+-              /*
+-               * Device takes too long to do the transfer when user requires
+-               * responsiveness.
+-               */
+-              dev_dbg(chan->dev, "Inter-packet latency too long\n");
+-      }
+-
+-      if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
++      if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
++                    XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
+               spin_lock(&chan->lock);
+               xilinx_dma_complete_descriptor(chan);
+               chan->idle = true;
+@@ -2816,6 +2816,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+       /* Retrieve the channel properties from the device tree */
+       has_dre = of_property_read_bool(node, "xlnx,include-dre");
++      of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
++
+       chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+       err = of_property_read_u32(node, "xlnx,datawidth", &value);
+-- 
+2.53.0
+
diff --git a/queue-6.1/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-6.1/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..c71fb67
--- /dev/null
@@ -0,0 +1,51 @@
+From 893c6dbdbd0cd615f9ead8be1dcb535b05702544 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index 6a63380f6a71f..c4ff31d0df192 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1339,6 +1339,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1351,6 +1352,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       wiz->lane_phy_type[i] = phy_type;
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
index c5b7a5fc6d28124136f23f8a2dd514edb4646fd5..1e49e3a72d0c28294dc6213c78c62de427d592ad 100644 (file)
@@ -119,3 +119,16 @@ ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
 ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
 arm64-dts-imx8mn-tqma8mqnl-fix-ldo5-power-off.patch
 powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
+dmaengine-xilinx_dma-program-interrupt-delay-timeout.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
diff --git a/queue-6.12/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch b/queue-6.12/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
new file mode 100644 (file)
index 0000000..82dfe1a
--- /dev/null
@@ -0,0 +1,70 @@
+From 611acbb3455ca3ec7c15c566a48d2c50fb12df7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 21:17:04 +0900
+Subject: btrfs: fix leak of kobject name for sub-group space_info
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit a4376d9a5d4c9610e69def3fc0b32c86a7ab7a41 ]
+
+When create_space_info_sub_group() allocates elements of
+space_info->sub_group[], kobject_init_and_add() is called for each
+element via btrfs_sysfs_add_space_info_type(). However, when
+check_removing_space_info() frees these elements, it does not call
+btrfs_sysfs_remove_space_info() on them. As a result, kobject_put() is
+not called and the associated kobj->name objects are leaked.
+
+This memory leak is reproduced by running the blktests test case
+zbd/009 on kernels built with CONFIG_DEBUG_KMEMLEAK. The kmemleak
+feature reports the following error:
+
+unreferenced object 0xffff888112877d40 (size 16):
+  comm "mount", pid 1244, jiffies 4294996972
+  hex dump (first 16 bytes):
+    64 61 74 61 2d 72 65 6c 6f 63 00 c4 c6 a7 cb 7f  data-reloc......
+  backtrace (crc 53ffde4d):
+    __kmalloc_node_track_caller_noprof+0x619/0x870
+    kstrdup+0x42/0xc0
+    kobject_set_name_vargs+0x44/0x110
+    kobject_init_and_add+0xcf/0x150
+    btrfs_sysfs_add_space_info_type+0xfc/0x210 [btrfs]
+    create_space_info_sub_group.constprop.0+0xfb/0x1b0 [btrfs]
+    create_space_info+0x211/0x320 [btrfs]
+    btrfs_init_space_info+0x15a/0x1b0 [btrfs]
+    open_ctree+0x33c7/0x4a50 [btrfs]
+    btrfs_get_tree.cold+0x9f/0x1ee [btrfs]
+    vfs_get_tree+0x87/0x2f0
+    vfs_cmd_create+0xbd/0x280
+    __do_sys_fsconfig+0x3df/0x990
+    do_syscall_64+0x136/0x1540
+    entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+To avoid the leak, call btrfs_sysfs_remove_space_info() instead of
+kfree() for the elements.
+
+Fixes: f92ee31e031c ("btrfs: introduce btrfs_space_info sub-group")
+Link: https://lore.kernel.org/linux-block/b9488881-f18d-4f47-91a5-3c9bf63955a5@wdc.com/
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index c579713e9899c..3ca24a0845cbe 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -4452,7 +4452,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info)
+               for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+                       if (space_info->sub_group[i]) {
+                               check_removing_space_info(space_info->sub_group[i]);
+-                              kfree(space_info->sub_group[i]);
++                              btrfs_sysfs_remove_space_info(space_info->sub_group[i]);
+                               space_info->sub_group[i] = NULL;
+                       }
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.12/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-6.12/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..ebc7f93
--- /dev/null
@@ -0,0 +1,48 @@
+From bcabbd175fd6f6e7975bcdc172e58f9a045244bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index b723e860e4e9e..c53e7e5c9d426 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7738,8 +7738,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-6.12/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-6.12/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..ac50e03
--- /dev/null
@@ -0,0 +1,46 @@
+From 146db9c6b45b945579eca87c5acb8ffb758be4cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index dea64839d2cad..05e91ed0af197 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2468,8 +2468,8 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch b/queue-6.12/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
new file mode 100644 (file)
index 0000000..91f6539
--- /dev/null
@@ -0,0 +1,70 @@
+From 19c89c91d8229db8a097cce1fc9f8243af0929f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 14:45:09 +0800
+Subject: dmaengine: dw-edma: Fix multiple times setting of the CYCLE_STATE and
+ CYCLE_BIT bits for HDMA.
+
+From: LUO Haowen <luo-hw@foxmail.com>
+
+[ Upstream commit 3f63297ff61a994b99d710dcb6dbde41c4003233 ]
+
+Others have submitted this issue (https://lore.kernel.org/dmaengine/
+20240722030405.3385-1-zhengdongxiong@gxmicro.cn/),
+but it has not been fixed yet. Therefore, more supplementary information
+is provided here.
+
+As mentioned in the "PCS-CCS-CB-TCB" Producer-Consumer Synchronization of
+"DesignWare Cores PCI Express Controller Databook, version 6.00a":
+
+1. The Consumer CYCLE_STATE (CCS) bit in the register only needs to be
+initialized once; the value will update automatically to be
+~CYCLE_BIT (CB) in the next chunk.
+2. The Consumer CYCLE_BIT bit in the register is loaded from the LL
+element and tested against CCS. When CB = CCS, the data transfer is
+executed. Otherwise not.
+
+The current logic sets consumer (HDMA) CS and CB bits to 1 in each chunk
+while setting the producer (software) CB of odd chunks to 0 and even
+chunks to 1 in the linked list. This is leading to a mismatch between
+the producer CB and consumer CS bits.
+
+This issue can be reproduced by setting the transmission data size to
+exceed one chunk. By the way, in the EDMA using the same "PCS-CCS-CB-TCB"
+mechanism, the CS bit is only initialized once and this issue was not
+found. Refer to
+drivers/dma/dw-edma/dw-edma-v0-core.c:dw_edma_v0_core_start.
+
+So fix this issue by initializing the CYCLE_STATE and CYCLE_BIT bits
+only once.
+
+Fixes: e74c39573d35 ("dmaengine: dw-edma: Add support for native HDMA")
+Signed-off-by: LUO Haowen <luo-hw@foxmail.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/tencent_CB11AA9F3920C1911AF7477A9BD8EFE0AD05@qq.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw-edma/dw-hdma-v0-core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index e3f8db4fe909a..ce8f7254bab21 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -252,10 +252,10 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
++              /* Set consumer cycle */
++              SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
++                      HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       }
+-      /* Set consumer cycle */
+-      SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+-                HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       dw_hdma_v0_sync_ll_data(chunk);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-add-idxd_device_config_save-and-idxd_.patch b/queue-6.12/dmaengine-idxd-add-idxd_device_config_save-and-idxd_.patch
new file mode 100644 (file)
index 0000000..3806a61
--- /dev/null
@@ -0,0 +1,286 @@
+From 41f6bf84ed6284cd845a30ca86dd991cc20e21a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 15:30:26 -0800
+Subject: dmaengine: idxd: Add idxd_device_config_save() and
+ idxd_device_config_restore() helpers
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+[ Upstream commit 6078a315aec15e0776fa90347cf4eba7478cdbd7 ]
+
+Add the helpers to save and restore IDXD device configurations.
+
+These helpers will be called during Function Level Reset (FLR) processing.
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20241122233028.2762809-4-fenghua.yu@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: ee66bc295783 ("dmaengine: idxd: Fix leaking event log memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/idxd.h |  11 ++
+ drivers/dma/idxd/init.c | 225 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 236 insertions(+)
+
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 1f93dd6db28f0..8b381a1fb2595 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -374,6 +374,17 @@ struct idxd_device {
+       struct dentry *dbgfs_evl_file;
+       bool user_submission_safe;
++
++      struct idxd_saved_states *idxd_saved;
++};
++
++struct idxd_saved_states {
++      struct idxd_device saved_idxd;
++      struct idxd_evl saved_evl;
++      struct idxd_engine **saved_engines;
++      struct idxd_wq **saved_wqs;
++      struct idxd_group **saved_groups;
++      unsigned long *saved_wq_enable_map;
+ };
+ static inline unsigned int evl_ent_size(struct idxd_device *idxd)
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 795e408ba2d50..f6bbc95eeb925 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -833,6 +833,231 @@ static void idxd_unbind(struct device_driver *drv, const char *buf)
+       put_device(dev);
+ }
++#define idxd_free_saved_configs(saved_configs, count) \
++      do {                                            \
++              int i;                                  \
++                                                      \
++              for (i = 0; i < (count); i++)           \
++                      kfree(saved_configs[i]);        \
++      } while (0)
++
++static void idxd_free_saved(struct idxd_group **saved_groups,
++                          struct idxd_engine **saved_engines,
++                          struct idxd_wq **saved_wqs,
++                          struct idxd_device *idxd)
++{
++      if (saved_groups)
++              idxd_free_saved_configs(saved_groups, idxd->max_groups);
++      if (saved_engines)
++              idxd_free_saved_configs(saved_engines, idxd->max_engines);
++      if (saved_wqs)
++              idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
++}
++
++/*
++ * Save IDXD device configurations including engines, groups, wqs etc.
++ * The saved configurations can be restored when needed.
++ */
++static int idxd_device_config_save(struct idxd_device *idxd,
++                                 struct idxd_saved_states *idxd_saved)
++{
++      struct device *dev = &idxd->pdev->dev;
++      int i;
++
++      memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
++
++      if (idxd->evl) {
++              memcpy(&idxd_saved->saved_evl, idxd->evl,
++                     sizeof(struct idxd_evl));
++      }
++
++      struct idxd_group **saved_groups __free(kfree) =
++                      kcalloc_node(idxd->max_groups,
++                                   sizeof(struct idxd_group *),
++                                   GFP_KERNEL, dev_to_node(dev));
++      if (!saved_groups)
++              return -ENOMEM;
++
++      for (i = 0; i < idxd->max_groups; i++) {
++              struct idxd_group *saved_group __free(kfree) =
++                      kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
++                                   dev_to_node(dev));
++
++              if (!saved_group) {
++                      /* Free saved groups */
++                      idxd_free_saved(saved_groups, NULL, NULL, idxd);
++
++                      return -ENOMEM;
++              }
++
++              memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
++              saved_groups[i] = no_free_ptr(saved_group);
++      }
++
++      struct idxd_engine **saved_engines =
++                      kcalloc_node(idxd->max_engines,
++                                   sizeof(struct idxd_engine *),
++                                   GFP_KERNEL, dev_to_node(dev));
++      if (!saved_engines) {
++              /* Free saved groups */
++              idxd_free_saved(saved_groups, NULL, NULL, idxd);
++
++              return -ENOMEM;
++      }
++      for (i = 0; i < idxd->max_engines; i++) {
++              struct idxd_engine *saved_engine __free(kfree) =
++                              kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
++                                           dev_to_node(dev));
++              if (!saved_engine) {
++                      /* Free saved groups and engines */
++                      idxd_free_saved(saved_groups, saved_engines, NULL,
++                                      idxd);
++
++                      return -ENOMEM;
++              }
++
++              memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
++              saved_engines[i] = no_free_ptr(saved_engine);
++      }
++
++      unsigned long *saved_wq_enable_map __free(bitmap) =
++                      bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
++                                         dev_to_node(dev));
++      if (!saved_wq_enable_map) {
++              /* Free saved groups and engines */
++              idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
++
++              return -ENOMEM;
++      }
++
++      bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
++
++      struct idxd_wq **saved_wqs __free(kfree) =
++                      kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
++                                   GFP_KERNEL, dev_to_node(dev));
++      if (!saved_wqs) {
++              /* Free saved groups and engines */
++              idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
++
++              return -ENOMEM;
++      }
++
++      for (i = 0; i < idxd->max_wqs; i++) {
++              struct idxd_wq *saved_wq __free(kfree) =
++                      kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
++                                   dev_to_node(dev));
++              struct idxd_wq *wq;
++
++              if (!saved_wq) {
++                      /* Free saved groups, engines, and wqs */
++                      idxd_free_saved(saved_groups, saved_engines, saved_wqs,
++                                      idxd);
++
++                      return -ENOMEM;
++              }
++
++              if (!test_bit(i, saved_wq_enable_map))
++                      continue;
++
++              wq = idxd->wqs[i];
++              mutex_lock(&wq->wq_lock);
++              memcpy(saved_wq, wq, sizeof(*saved_wq));
++              saved_wqs[i] = no_free_ptr(saved_wq);
++              mutex_unlock(&wq->wq_lock);
++      }
++
++      /* Save configurations */
++      idxd_saved->saved_groups = no_free_ptr(saved_groups);
++      idxd_saved->saved_engines = no_free_ptr(saved_engines);
++      idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
++      idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
++
++      return 0;
++}
++
++/*
++ * Restore IDXD device configurations including engines, groups, wqs etc
++ * that were saved before.
++ */
++static void idxd_device_config_restore(struct idxd_device *idxd,
++                                     struct idxd_saved_states *idxd_saved)
++{
++      struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
++      int i;
++
++      idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
++
++      if (saved_evl)
++              idxd->evl->size = saved_evl->size;
++
++      for (i = 0; i < idxd->max_groups; i++) {
++              struct idxd_group *saved_group, *group;
++
++              saved_group = idxd_saved->saved_groups[i];
++              group = idxd->groups[i];
++
++              group->rdbufs_allowed = saved_group->rdbufs_allowed;
++              group->rdbufs_reserved = saved_group->rdbufs_reserved;
++              group->tc_a = saved_group->tc_a;
++              group->tc_b = saved_group->tc_b;
++              group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
++
++              kfree(saved_group);
++      }
++      kfree(idxd_saved->saved_groups);
++
++      for (i = 0; i < idxd->max_engines; i++) {
++              struct idxd_engine *saved_engine, *engine;
++
++              saved_engine = idxd_saved->saved_engines[i];
++              engine = idxd->engines[i];
++
++              engine->group = saved_engine->group;
++
++              kfree(saved_engine);
++      }
++      kfree(idxd_saved->saved_engines);
++
++      bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
++                  idxd->max_wqs);
++      bitmap_free(idxd_saved->saved_wq_enable_map);
++
++      for (i = 0; i < idxd->max_wqs; i++) {
++              struct idxd_wq *saved_wq, *wq;
++              size_t len;
++
++              if (!test_bit(i, idxd->wq_enable_map))
++                      continue;
++
++              saved_wq = idxd_saved->saved_wqs[i];
++              wq = idxd->wqs[i];
++
++              mutex_lock(&wq->wq_lock);
++
++              wq->group = saved_wq->group;
++              wq->flags = saved_wq->flags;
++              wq->threshold = saved_wq->threshold;
++              wq->size = saved_wq->size;
++              wq->priority = saved_wq->priority;
++              wq->type = saved_wq->type;
++              len = strlen(saved_wq->name) + 1;
++              strscpy(wq->name, saved_wq->name, len);
++              wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
++              wq->max_batch_size = saved_wq->max_batch_size;
++              wq->enqcmds_retries = saved_wq->enqcmds_retries;
++              wq->descs = saved_wq->descs;
++              wq->idxd_chan = saved_wq->idxd_chan;
++              len = strlen(saved_wq->driver_name) + 1;
++              strscpy(wq->driver_name, saved_wq->driver_name, len);
++
++              mutex_unlock(&wq->wq_lock);
++
++              kfree(saved_wq);
++      }
++
++      kfree(idxd_saved->saved_wqs);
++}
++
+ /*
+  * Probe idxd PCI device.
+  * If idxd is not given, need to allocate idxd and set up its data.
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-add-idxd_pci_probe_alloc-helper.patch b/queue-6.12/dmaengine-idxd-add-idxd_pci_probe_alloc-helper.patch
new file mode 100644 (file)
index 0000000..5f9f680
--- /dev/null
@@ -0,0 +1,187 @@
+From 0af25541e1ad7a10527ef8dbdd461d4b91753a64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 15:30:24 -0800
+Subject: dmaengine: idxd: Add idxd_pci_probe_alloc() helper
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+[ Upstream commit 087e89b69b5fe5529a8809a06b4b4680e54f87e2 ]
+
+Add the idxd_pci_probe_alloc() helper to probe IDXD PCI device with or
+without allocating and setting idxd software values.
+
+The idxd_pci_probe() function is refactored to call this helper and
+always probe the IDXD device with allocating and setting the software
+values.
+
+This helper will be called later in the Function Level Reset (FLR)
+process without modifying the idxd software data.
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20241122233028.2762809-2-fenghua.yu@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: ee66bc295783 ("dmaengine: idxd: Fix leaking event log memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/idxd.h |   2 +
+ drivers/dma/idxd/init.c | 102 ++++++++++++++++++++++++----------------
+ 2 files changed, 64 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index d84e21daa9912..1f93dd6db28f0 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -742,6 +742,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+ /* device control */
+ int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
++int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
++                       const struct pci_device_id *id);
+ void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
+ int idxd_drv_enable_wq(struct idxd_wq *wq);
+ void idxd_drv_disable_wq(struct idxd_wq *wq);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index e55136bb525e2..c3073518d1db4 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -800,67 +800,84 @@ static void idxd_cleanup(struct idxd_device *idxd)
+               idxd_disable_sva(idxd->pdev);
+ }
+-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++/*
++ * Probe idxd PCI device.
++ * If idxd is not given, need to allocate idxd and set up its data.
++ *
++ * If idxd is given, idxd was allocated and setup already. Just need to
++ * configure device without re-allocating and re-configuring idxd data.
++ * This is useful for recovering from FLR.
++ */
++int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
++                       const struct pci_device_id *id)
+ {
+-      struct device *dev = &pdev->dev;
+-      struct idxd_device *idxd;
+-      struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
++      bool alloc_idxd = idxd ? false : true;
++      struct idxd_driver_data *data;
++      struct device *dev;
+       int rc;
++      pdev = idxd ? idxd->pdev : pdev;
++      dev = &pdev->dev;
++      data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+       rc = pci_enable_device(pdev);
+       if (rc)
+               return rc;
+-      dev_dbg(dev, "Alloc IDXD context\n");
+-      idxd = idxd_alloc(pdev, data);
+-      if (!idxd) {
+-              rc = -ENOMEM;
+-              goto err_idxd_alloc;
+-      }
++      if (alloc_idxd) {
++              dev_dbg(dev, "Alloc IDXD context\n");
++              idxd = idxd_alloc(pdev, data);
++              if (!idxd) {
++                      rc = -ENOMEM;
++                      goto err_idxd_alloc;
++              }
+-      dev_dbg(dev, "Mapping BARs\n");
+-      idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+-      if (!idxd->reg_base) {
+-              rc = -ENOMEM;
+-              goto err_iomap;
+-      }
++              dev_dbg(dev, "Mapping BARs\n");
++              idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
++              if (!idxd->reg_base) {
++                      rc = -ENOMEM;
++                      goto err_iomap;
++              }
+-      dev_dbg(dev, "Set DMA masks\n");
+-      rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+-      if (rc)
+-              goto err;
++              dev_dbg(dev, "Set DMA masks\n");
++              rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++              if (rc)
++                      goto err;
++      }
+       dev_dbg(dev, "Set PCI master\n");
+       pci_set_master(pdev);
+       pci_set_drvdata(pdev, idxd);
+-      idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+-      rc = idxd_probe(idxd);
+-      if (rc) {
+-              dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+-              goto err;
+-      }
++      if (alloc_idxd) {
++              idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
++              rc = idxd_probe(idxd);
++              if (rc) {
++                      dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
++                      goto err;
++              }
+-      if (data->load_device_defaults) {
+-              rc = data->load_device_defaults(idxd);
+-              if (rc)
+-                      dev_warn(dev, "IDXD loading device defaults failed\n");
+-      }
++              if (data->load_device_defaults) {
++                      rc = data->load_device_defaults(idxd);
++                      if (rc)
++                              dev_warn(dev, "IDXD loading device defaults failed\n");
++              }
+-      rc = idxd_register_devices(idxd);
+-      if (rc) {
+-              dev_err(dev, "IDXD sysfs setup failed\n");
+-              goto err_dev_register;
+-      }
++              rc = idxd_register_devices(idxd);
++              if (rc) {
++                      dev_err(dev, "IDXD sysfs setup failed\n");
++                      goto err_dev_register;
++              }
+-      rc = idxd_device_init_debugfs(idxd);
+-      if (rc)
+-              dev_warn(dev, "IDXD debugfs failed to setup\n");
++              rc = idxd_device_init_debugfs(idxd);
++              if (rc)
++                      dev_warn(dev, "IDXD debugfs failed to setup\n");
++      }
+       dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+                idxd->hw.version);
+-      idxd->user_submission_safe = data->user_submission_safe;
++      if (data)
++              idxd->user_submission_safe = data->user_submission_safe;
+       return 0;
+@@ -875,6 +892,11 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       return rc;
+ }
++static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++      return idxd_pci_probe_alloc(NULL, pdev, id);
++}
++
+ void idxd_wqs_quiesce(struct idxd_device *idxd)
+ {
+       struct idxd_wq *wq;
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-binding-and-unbinding-idxd-device-and.patch b/queue-6.12/dmaengine-idxd-binding-and-unbinding-idxd-device-and.patch
new file mode 100644 (file)
index 0000000..408436f
--- /dev/null
@@ -0,0 +1,71 @@
+From b57b47af4c4f5b2b73d018b29edb9752eeaf18d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 15:30:25 -0800
+Subject: dmaengine: idxd: Binding and unbinding IDXD device and driver
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+[ Upstream commit 3ab45516772b813315324dc63a900703144e80c4 ]
+
+Add idxd_bind() and idxd_unbind() helpers to bind and unbind the IDXD
+device and driver.
+
+These helpers will be called during Function Level Reset (FLR) processing.
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20241122233028.2762809-3-fenghua.yu@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: ee66bc295783 ("dmaengine: idxd: Fix leaking event log memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index c3073518d1db4..795e408ba2d50 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -800,6 +800,39 @@ static void idxd_cleanup(struct idxd_device *idxd)
+               idxd_disable_sva(idxd->pdev);
+ }
++/*
++ * Attach IDXD device to IDXD driver.
++ */
++static int idxd_bind(struct device_driver *drv, const char *buf)
++{
++      const struct bus_type *bus = drv->bus;
++      struct device *dev;
++      int err = -ENODEV;
++
++      dev = bus_find_device_by_name(bus, NULL, buf);
++      if (dev)
++              err = device_driver_attach(drv, dev);
++
++      put_device(dev);
++
++      return err;
++}
++
++/*
++ * Detach IDXD device from driver.
++ */
++static void idxd_unbind(struct device_driver *drv, const char *buf)
++{
++      const struct bus_type *bus = drv->bus;
++      struct device *dev;
++
++      dev = bus_find_device_by_name(bus, NULL, buf);
++      if (dev && dev->driver == drv)
++              device_release_driver(dev);
++
++      put_device(dev);
++}
++
+ /*
+  * Probe idxd PCI device.
+  * If idxd is not given, need to allocate idxd and set up its data.
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-delete-unnecessary-null-check.patch b/queue-6.12/dmaengine-idxd-delete-unnecessary-null-check.patch
new file mode 100644 (file)
index 0000000..4a7b54f
--- /dev/null
@@ -0,0 +1,39 @@
+From 42f72dfb2eed1032744fed2f8b8c42fac03a05da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 12:13:20 +0300
+Subject: dmaengine: idxd: Delete unnecessary NULL check
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 2c17e9ea0caa5555e31e154fa1b06260b816f5cc ]
+
+The "saved_evl" pointer is a offset into the middle of a non-NULL struct.
+It can't be NULL and the check is slightly confusing.  Delete the check.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
+Link: https://lore.kernel.org/r/ec38214e-0bbb-4c5a-94ff-b2b2d4c3f245@stanley.mountain
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: ee66bc295783 ("dmaengine: idxd: Fix leaking event log memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index f6bbc95eeb925..0f9003dd342d6 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -987,8 +987,7 @@ static void idxd_device_config_restore(struct idxd_device *idxd,
+       idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+-      if (saved_evl)
+-              idxd->evl->size = saved_evl->size;
++      idxd->evl->size = saved_evl->size;
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *saved_group, *group;
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch b/queue-6.12/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
new file mode 100644 (file)
index 0000000..0e9b4ef
--- /dev/null
@@ -0,0 +1,58 @@
+From 782eee4212c814e2146563aadb10f1b7c03e8002 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:28 -0800
+Subject: dmaengine: idxd: Fix crash when the event log is disabled
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 52d2edea0d63c935e82631e4b9e4a94eccf97b5b ]
+
+If reporting errors to the event log is not supported by the hardware,
+and an error that causes Function Level Reset (FLR) is received, the
+driver will try to restore the event log even if it was not allocated.
+
+Also, only try to free the event log if it was properly allocated.
+
+Fixes: 6078a315aec1 ("dmaengine: idxd: Add idxd_device_config_save() and idxd_device_config_restore() helpers")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-2-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: ee66bc295783 ("dmaengine: idxd: Fix leaking event log memory")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +++
+ drivers/dma/idxd/init.c   | 3 ++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index d8e0a12f62ace..4b32f7890e02a 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -815,6 +815,9 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl = idxd->evl;
++      if (!evl)
++              return;
++
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       if (!gencfg.evl_en)
+               return;
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 0f9003dd342d6..3655f340876c4 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -987,7 +987,8 @@ static void idxd_device_config_restore(struct idxd_device *idxd,
+       idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+-      idxd->evl->size = saved_evl->size;
++      if (idxd->evl)
++              idxd->evl->size = saved_evl->size;
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *saved_group, *group;
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-6.12/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..b13e0af
--- /dev/null
@@ -0,0 +1,60 @@
+From 4550c4428296d571617ffbaa4dab0a757ad731b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 8b27bd545685a..8dcd2331bb1ac 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -161,11 +161,7 @@ static const struct device_type idxd_cdev_file_type = {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -585,11 +581,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-leaking-event-log-memory.patch b/queue-6.12/dmaengine-idxd-fix-leaking-event-log-memory.patch
new file mode 100644 (file)
index 0000000..01ab47b
--- /dev/null
@@ -0,0 +1,46 @@
+From 0af56cb7c64e5a207f01cf04e50fb74c0851b49c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:36 -0800
+Subject: dmaengine: idxd: Fix leaking event log memory
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit ee66bc29578391c9b48523dc9119af67bd5c7c0f ]
+
+During the device remove process, the device is reset, causing the
+configuration registers to go back to their default state, which is
+zero. As the driver is checking if the event log support was enabled
+before deallocating, it will fail if a reset happened before.
+
+Do not check if the support was enabled, the check for 'idxd->evl'
+being valid (only allocated if the HW capability is available) is
+enough.
+
+Fixes: 244da66cda35 ("dmaengine: idxd: setup event log configuration")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-10-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 4b32f7890e02a..c967782c0ebb1 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -818,10 +818,6 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       if (!evl)
+               return;
+-      gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+-      if (!gencfg.evl_en)
+-              return;
+-
+       mutex_lock(&evl->lock);
+       gencfg.evl_en = 0;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch b/queue-6.12/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
new file mode 100644 (file)
index 0000000..0a45c9a
--- /dev/null
@@ -0,0 +1,56 @@
+From 386fdc4a07e4fb37ecd2d8fa06f3e999bca2b1a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:34 -0800
+Subject: dmaengine: idxd: Fix memory leak when a wq is reset
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d9cfb5193a047a92a4d3c0e91ea4cc87c8f7c478 ]
+
+idxd_wq_disable_cleanup() which is called from the reset path for a
+workqueue, sets the wq type to NONE, which for other parts of the
+driver mean that the wq is empty (all its resources were released).
+
+Only set the wq type to NONE after its resources are released.
+
+Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-8-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index c41ef195eeb9f..d8e0a12f62ace 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -174,6 +174,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_queue_free(&wq->sbq);
++      wq->type = IDXD_WQT_NONE;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
+@@ -367,7 +368,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       lockdep_assert_held(&wq->wq_lock);
+       wq->state = IDXD_WQ_DISABLED;
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+-      wq->type = IDXD_WQT_NONE;
+       wq->threshold = 0;
+       wq->priority = 0;
+       wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+@@ -1513,7 +1513,6 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
+       idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
+       percpu_ref_exit(&wq->wq_active);
+-      wq->type = IDXD_WQT_NONE;
+       wq->client_count = 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-6.12/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..73c1028
--- /dev/null
@@ -0,0 +1,37 @@
+From 5019a5734356161ec529096674ce6a126cc15c91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with an DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index f706eae0e76b1..154d754db339c 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1810,6 +1810,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       bitmap_free(idxd->wq_enable_map);
+       kfree(idxd->wqs);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch b/queue-6.12/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
new file mode 100644 (file)
index 0000000..1a6e4e3
--- /dev/null
@@ -0,0 +1,43 @@
+From 94b4c339392ba85863c07e2e7268d3aba398b894 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 11:24:28 +0800
+Subject: dmaengine: idxd: fix possible wrong descriptor completion in
+ llist_abort_desc()
+
+From: Tuo Li <islituo@gmail.com>
+
+[ Upstream commit e1c9866173c5f8521f2d0768547a01508cb9ff27 ]
+
+At the end of this function, d is the traversal cursor of flist, but the
+code completes found instead. This can lead to issues such as NULL pointer
+dereferences, double completion, or descriptor leaks.
+
+Fix this by completing d instead of found in the final
+list_for_each_entry_safe() loop.
+
+Fixes: aa8d18becc0c ("dmaengine: idxd: add callback support for iaa crypto")
+Signed-off-by: Tuo Li <islituo@gmail.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://patch.msgid.link/20260106032428.162445-1-islituo@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/submit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index 94eca25ae9b90..b246da3cfb55d 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -138,7 +138,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+        */
+       list_for_each_entry_safe(d, t, &flist, list) {
+               list_del_init(&d->list);
+-              idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
++              idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true,
+                                     NULL, NULL);
+       }
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch b/queue-6.12/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
new file mode 100644 (file)
index 0000000..1d17343
--- /dev/null
@@ -0,0 +1,41 @@
+From 26ad3bdd13b0d8f0709b667a5cd0db30627b7795 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 08:13:08 +0200
+Subject: dmaengine: xilinx: xdma: Fix regmap init error handling
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit e0adbf74e2a0455a6bc9628726ba87bcd0b42bf8 ]
+
+devm_regmap_init_mmio returns an ERR_PTR() upon error, not NULL.
+Fix the error check and also fix the error message. Use the error code
+from ERR_PTR() instead of the wrong value in ret.
+
+Fixes: 17ce252266c7 ("dmaengine: xilinx: xdma: Add xilinx xdma driver")
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20251014061309.283468-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index 2726c7154fcef..6781c6754e65a 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -1240,8 +1240,8 @@ static int xdma_probe(struct platform_device *pdev)
+       xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+                                          &xdma_regmap_config);
+-      if (!xdev->rmap) {
+-              xdma_err(xdev, "config regmap failed: %d", ret);
++      if (IS_ERR(xdev->rmap)) {
++              xdma_err(xdev, "config regmap failed: %pe", xdev->rmap);
+               goto failed;
+       }
+       INIT_LIST_HEAD(&xdev->dma_dev.channels);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..60dd063
--- /dev/null
@@ -0,0 +1,38 @@
+From 0652d47ef89e60ac0de5996a9f122c1ad18bb87f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 3ad37e9b924a7..7d49b8978aa57 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2907,7 +2907,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = of_irq_get(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..f634ff7
--- /dev/null
@@ -0,0 +1,75 @@
+From 6671bba94fc19caa519c9db1a8a98d27eabb8b5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 7d49b8978aa57..fff51a00fb9fb 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1543,8 +1543,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-6.12/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..3635a81
--- /dev/null
@@ -0,0 +1,62 @@
+From ff1d7cd8ff476a4065517ffc7c035cda04df6a1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index fff51a00fb9fb..b1c540fbcd716 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -996,16 +996,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -1013,8 +1013,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.12/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-6.12/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..c5cab8a
--- /dev/null
@@ -0,0 +1,98 @@
+From 93a77f6476c2c49b664b269ba6d1506b68ea1af3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index b1c540fbcd716..a0361bcee1201 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1216,14 +1216,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1591,6 +1583,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-6.12/futex-require-sys_futex_requeue-to-have-identical-fl.patch b/queue-6.12/futex-require-sys_futex_requeue-to-have-identical-fl.patch
new file mode 100644 (file)
index 0000000..2bc20b5
--- /dev/null
@@ -0,0 +1,46 @@
+From 89cc7eba6b2ec23bef40b82cbdd05c876ed4ac96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Mar 2026 13:35:53 +0100
+Subject: futex: Require sys_futex_requeue() to have identical flags
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 19f94b39058681dec64a10ebeb6f23fe7fc3f77a ]
+
+Nicholas reported that his LLM found it was possible to create a UaF
+when sys_futex_requeue() is used with different flags. The initial
+motivation for allowing different flags was the variable sized futex,
+but since that hasn't been merged (yet), simply mandate the flags are
+identical, as is the case for the old style sys_futex() requeue
+operations.
+
+Fixes: 0f4b5f972216 ("futex: Add sys_futex_requeue()")
+Reported-by: Nicholas Carlini <npc@anthropic.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/futex/syscalls.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
+index 880c9bf2f3150..99723189c8cf7 100644
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -459,6 +459,14 @@ SYSCALL_DEFINE4(futex_requeue,
+       if (ret)
+               return ret;
++      /*
++       * For now mandate both flags are identical, like the sys_futex()
++       * interface has. If/when we merge the variable sized futex support,
++       * that patch can modify this test to allow a difference in size.
++       */
++      if (futexes[0].w.flags != futexes[1].w.flags)
++              return -EINVAL;
++
+       cmpval = futexes[0].w.val;
+       return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags,
+-- 
+2.53.0
+
diff --git a/queue-6.12/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch b/queue-6.12/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
new file mode 100644 (file)
index 0000000..f2d8a28
--- /dev/null
@@ -0,0 +1,96 @@
+From 7e552626ddf9ed734eb4932aa60e9f66c48bc92e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Mar 2026 14:30:41 +0530
+Subject: netfs: Fix kernel BUG in netfs_limit_iter() for ITER_KVEC iterators
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit 67e467a11f62ff64ad219dc6aa5459e132c79d14 ]
+
+When a process crashes and the kernel writes a core dump to a 9P
+filesystem, __kernel_write() creates an ITER_KVEC iterator. This
+iterator reaches netfs_limit_iter() via netfs_unbuffered_write(), which
+only handles ITER_FOLIOQ, ITER_BVEC and ITER_XARRAY iterator types,
+hitting the BUG() for any other type.
+
+Fix this by adding netfs_limit_kvec() following the same pattern as
+netfs_limit_bvec(), since both kvec and bvec are simple segment arrays
+with pointer and length fields. Dispatch it from netfs_limit_iter() when
+the iterator type is ITER_KVEC.
+
+Fixes: cae932d3aee5 ("netfs: Add func to calculate pagecount/size-limited span of an iterator")
+Reported-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=9c058f0d63475adc97fd
+Tested-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Signed-off-by: Deepanshu Kartikey <Kartikey406@gmail.com>
+Link: https://patch.msgid.link/20260307090041.359870-1-kartikey406@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/iterator.c | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
+index 72a435e5fc6da..154a14bb2d7f7 100644
+--- a/fs/netfs/iterator.c
++++ b/fs/netfs/iterator.c
+@@ -142,6 +142,47 @@ static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+       return min(span, max_size);
+ }
++/*
++ * Select the span of a kvec iterator we're going to use.  Limit it by both
++ * maximum size and maximum number of segments.  Returns the size of the span
++ * in bytes.
++ */
++static size_t netfs_limit_kvec(const struct iov_iter *iter, size_t start_offset,
++                             size_t max_size, size_t max_segs)
++{
++      const struct kvec *kvecs = iter->kvec;
++      unsigned int nkv = iter->nr_segs, ix = 0, nsegs = 0;
++      size_t len, span = 0, n = iter->count;
++      size_t skip = iter->iov_offset + start_offset;
++
++      if (WARN_ON(!iov_iter_is_kvec(iter)) ||
++          WARN_ON(start_offset > n) ||
++          n == 0)
++              return 0;
++
++      while (n && ix < nkv && skip) {
++              len = kvecs[ix].iov_len;
++              if (skip < len)
++                      break;
++              skip -= len;
++              n -= len;
++              ix++;
++      }
++
++      while (n && ix < nkv) {
++              len = min3(n, kvecs[ix].iov_len - skip, max_size);
++              span += len;
++              nsegs++;
++              ix++;
++              if (span >= max_size || nsegs >= max_segs)
++                      break;
++              skip = 0;
++              n -= len;
++      }
++
++      return min(span, max_size);
++}
++
+ /*
+  * Select the span of an xarray iterator we're going to use.  Limit it by both
+  * maximum size and maximum number of segments.  It is assumed that segments
+@@ -245,6 +286,8 @@ size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+               return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+       if (iov_iter_is_xarray(iter))
+               return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
++      if (iov_iter_is_kvec(iter))
++              return netfs_limit_kvec(iter, start_offset, max_size, max_segs);
+       BUG();
+ }
+ EXPORT_SYMBOL(netfs_limit_iter);
+-- 
+2.53.0
+
diff --git a/queue-6.12/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-6.12/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..bf23192
--- /dev/null
@@ -0,0 +1,51 @@
+From 6934a9c1767d8fbc3c62f55d8747ee44d88aaaeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index cbcc7bd5dde0a..84c655b427a00 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1426,6 +1426,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1440,6 +1441,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+               }
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
index f7e1ef82de0f759eff761198acf6be5818e43a81..ace7e91ced87cf482a75f518eb4490a3476b0d39 100644 (file)
@@ -221,3 +221,26 @@ idpf-detach-and-close-netdevs-while-handling-a-reset.patch
 idpf-fix-rss-lut-null-pointer-crash-on-early-ethtool-operations.patch
 idpf-fix-rss-lut-null-ptr-issue-after-soft-reset.patch
 asoc-ak4458-convert-to-runtime_pm_ops-co.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
+dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
+netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
+dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
+futex-require-sys_futex_requeue-to-have-identical-fl.patch
+dmaengine-idxd-add-idxd_pci_probe_alloc-helper.patch
+dmaengine-idxd-binding-and-unbinding-idxd-device-and.patch
+dmaengine-idxd-add-idxd_device_config_save-and-idxd_.patch
+dmaengine-idxd-delete-unnecessary-null-check.patch
+dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
+dmaengine-idxd-fix-leaking-event-log-memory.patch
diff --git a/queue-6.12/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch b/queue-6.12/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
new file mode 100644 (file)
index 0000000..80145d4
--- /dev/null
@@ -0,0 +1,47 @@
+From eec3bd678044b25921d4ee6efc7fafbc8e1613e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 20:02:46 +0800
+Subject: xen/privcmd: unregister xenstore notifier on module exit
+
+From: GuoHan Zhao <zhaoguohan@kylinos.cn>
+
+[ Upstream commit cd7e1fef5a1ca1c4fcd232211962ac2395601636 ]
+
+Commit 453b8fb68f36 ("xen/privcmd: restrict usage in
+unprivileged domU") added a xenstore notifier to defer setting the
+restriction target until Xenstore is ready.
+
+XEN_PRIVCMD can be built as a module, but privcmd_exit() leaves that
+notifier behind. Balance the notifier lifecycle by unregistering it on
+module exit.
+
+This is harmless even if xenstore was already ready at registration
+time and the notifier was never queued on the chain.
+
+Fixes: 453b8fb68f3641fe ("xen/privcmd: restrict usage in unprivileged domU")
+Signed-off-by: GuoHan Zhao <zhaoguohan@kylinos.cn>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260325120246.252899-1-zhaoguohan@kylinos.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/privcmd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index b366192c77cf1..d7d9d427e51a1 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -1784,6 +1784,9 @@ static int __init privcmd_init(void)
+ static void __exit privcmd_exit(void)
+ {
++      if (!xen_initial_domain())
++              unregister_xenstore_notifier(&xenstore_notifier);
++
+       privcmd_ioeventfd_exit();
+       privcmd_irqfd_exit();
+       misc_deregister(&privcmd_dev);
+-- 
+2.53.0
+
diff --git a/queue-6.18/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch b/queue-6.18/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
new file mode 100644 (file)
index 0000000..604d611
--- /dev/null
@@ -0,0 +1,70 @@
+From 8b102621c7536a7ce3efb95ac1f6c18d3bc965dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 21:17:04 +0900
+Subject: btrfs: fix leak of kobject name for sub-group space_info
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit a4376d9a5d4c9610e69def3fc0b32c86a7ab7a41 ]
+
+When create_space_info_sub_group() allocates elements of
+space_info->sub_group[], kobject_init_and_add() is called for each
+element via btrfs_sysfs_add_space_info_type(). However, when
+check_removing_space_info() frees these elements, it does not call
+btrfs_sysfs_remove_space_info() on them. As a result, kobject_put() is
+not called and the associated kobj->name objects are leaked.
+
+This memory leak is reproduced by running the blktests test case
+zbd/009 on kernels built with CONFIG_DEBUG_KMEMLEAK. The kmemleak
+feature reports the following error:
+
+unreferenced object 0xffff888112877d40 (size 16):
+  comm "mount", pid 1244, jiffies 4294996972
+  hex dump (first 16 bytes):
+    64 61 74 61 2d 72 65 6c 6f 63 00 c4 c6 a7 cb 7f  data-reloc......
+  backtrace (crc 53ffde4d):
+    __kmalloc_node_track_caller_noprof+0x619/0x870
+    kstrdup+0x42/0xc0
+    kobject_set_name_vargs+0x44/0x110
+    kobject_init_and_add+0xcf/0x150
+    btrfs_sysfs_add_space_info_type+0xfc/0x210 [btrfs]
+    create_space_info_sub_group.constprop.0+0xfb/0x1b0 [btrfs]
+    create_space_info+0x211/0x320 [btrfs]
+    btrfs_init_space_info+0x15a/0x1b0 [btrfs]
+    open_ctree+0x33c7/0x4a50 [btrfs]
+    btrfs_get_tree.cold+0x9f/0x1ee [btrfs]
+    vfs_get_tree+0x87/0x2f0
+    vfs_cmd_create+0xbd/0x280
+    __do_sys_fsconfig+0x3df/0x990
+    do_syscall_64+0x136/0x1540
+    entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+To avoid the leak, call btrfs_sysfs_remove_space_info() instead of
+kfree() for the elements.
+
+Fixes: f92ee31e031c ("btrfs: introduce btrfs_space_info sub-group")
+Link: https://lore.kernel.org/linux-block/b9488881-f18d-4f47-91a5-3c9bf63955a5@wdc.com/
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 4689ef206d0ee..a277c8cc91661 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -4464,7 +4464,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info)
+               for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+                       if (space_info->sub_group[i]) {
+                               check_removing_space_info(space_info->sub_group[i]);
+-                              kfree(space_info->sub_group[i]);
++                              btrfs_sysfs_remove_space_info(space_info->sub_group[i]);
+                               space_info->sub_group[i] = NULL;
+                       }
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.18/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-6.18/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..82b225c
--- /dev/null
@@ -0,0 +1,48 @@
+From f0f605be34c653bac9cf20d50fd59b8dcab42178 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 3fe3a6c7da4e9..ef9f24076ccae 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7929,8 +7929,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-6.18/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-6.18/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..9cad40e
--- /dev/null
@@ -0,0 +1,46 @@
+From 3a2bc17c4d42b7b6932bf58c55d00a58b05616f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 9c3a944cbc24a..0f87f30c8dd27 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2513,8 +2513,8 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch b/queue-6.18/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
new file mode 100644 (file)
index 0000000..b0967bc
--- /dev/null
@@ -0,0 +1,70 @@
+From 3f8c3ae7f8ff8f0c8e474877f159d59ae9548554 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 14:45:09 +0800
+Subject: dmaengine: dw-edma: Fix multiple times setting of the CYCLE_STATE and
+ CYCLE_BIT bits for HDMA.
+
+From: LUO Haowen <luo-hw@foxmail.com>
+
+[ Upstream commit 3f63297ff61a994b99d710dcb6dbde41c4003233 ]
+
+Others have submitted this issue (https://lore.kernel.org/dmaengine/
+20240722030405.3385-1-zhengdongxiong@gxmicro.cn/),
+but it has not been fixed yet. Therefore, more supplementary information
+is provided here.
+
+As mentioned in the "PCS-CCS-CB-TCB" Producer-Consumer Synchronization of
+"DesignWare Cores PCI Express Controller Databook, version 6.00a":
+
+1. The Consumer CYCLE_STATE (CCS) bit in the register only needs to be
+initialized once; the value will update automatically to be
+~CYCLE_BIT (CB) in the next chunk.
+2. The Consumer CYCLE_BIT bit in the register is loaded from the LL
+element and tested against CCS. When CB = CCS, the data transfer is
+executed. Otherwise not.
+
+The current logic sets consumer (HDMA) CS and CB bits to 1 in each chunk
+while setting the producer (software) CB of odd chunks to 0 and even
+chunks to 1 in the linked list. This is leading to a mismatch between
+the producer CB and consumer CS bits.
+
+This issue can be reproduced by setting the transmission data size to
+exceed one chunk. By the way, in the EDMA using the same "PCS-CCS-CB-TCB"
+mechanism, the CS bit is only initialized once and this issue was not
+found. Refer to
+drivers/dma/dw-edma/dw-edma-v0-core.c:dw_edma_v0_core_start.
+
+So fix this issue by initializing the CYCLE_STATE and CYCLE_BIT bits
+only once.
+
+Fixes: e74c39573d35 ("dmaengine: dw-edma: Add support for native HDMA")
+Signed-off-by: LUO Haowen <luo-hw@foxmail.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/tencent_CB11AA9F3920C1911AF7477A9BD8EFE0AD05@qq.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw-edma/dw-hdma-v0-core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index e3f8db4fe909a..ce8f7254bab21 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -252,10 +252,10 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
++              /* Set consumer cycle */
++              SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
++                      HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       }
+-      /* Set consumer cycle */
+-      SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+-                HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       dw_hdma_v0_sync_ll_data(chunk);
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch b/queue-6.18/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
new file mode 100644 (file)
index 0000000..68cadbd
--- /dev/null
@@ -0,0 +1,57 @@
+From bd747aba7be312f7c0496f01578fb80a68fda2fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:28 -0800
+Subject: dmaengine: idxd: Fix crash when the event log is disabled
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 52d2edea0d63c935e82631e4b9e4a94eccf97b5b ]
+
+If reporting errors to the event log is not supported by the hardware,
+and an error that causes Function Level Reset (FLR) is received, the
+driver will try to restore the event log even if it was not allocated.
+
+Also, only try to free the event log if it was properly allocated.
+
+Fixes: 6078a315aec1 ("dmaengine: idxd: Add idxd_device_config_save() and idxd_device_config_restore() helpers")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-2-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +++
+ drivers/dma/idxd/init.c   | 3 ++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 5cf419fe6b464..c599a902767ee 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -815,6 +815,9 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl = idxd->evl;
++      if (!evl)
++              return;
++
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       if (!gencfg.evl_en)
+               return;
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 2acc34b3daff8..449424242631d 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -962,7 +962,8 @@ static void idxd_device_config_restore(struct idxd_device *idxd,
+       idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+-      idxd->evl->size = saved_evl->size;
++      if (idxd->evl)
++              idxd->evl->size = saved_evl->size;
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *saved_group, *group;
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-6.18/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..709cf85
--- /dev/null
@@ -0,0 +1,60 @@
+From 9b96537b7538069be334cfc4edcd1a13c0c37307 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see this with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 7e4715f927732..4105688cf3f06 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -158,11 +158,7 @@ static const struct device_type idxd_cdev_file_type = {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -582,11 +578,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-leaking-event-log-memory.patch b/queue-6.18/dmaengine-idxd-fix-leaking-event-log-memory.patch
new file mode 100644 (file)
index 0000000..6c09d44
--- /dev/null
@@ -0,0 +1,46 @@
+From 1743c0d3a4d14469672a808f327936f5ec1d496b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:36 -0800
+Subject: dmaengine: idxd: Fix leaking event log memory
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit ee66bc29578391c9b48523dc9119af67bd5c7c0f ]
+
+During the device remove process, the device is reset, causing the
+configuration registers to go back to their default state, which is
+zero. As the driver is checking if the event log support was enabled
+before deallocating, it will fail if a reset happened before.
+
+Do not check if the support was enabled, the check for 'idxd->evl'
+being valid (only allocated if the HW capability is available) is
+enough.
+
+Fixes: 244da66cda35 ("dmaengine: idxd: setup event log configuration")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-10-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index c43547c40ee34..646d7f767afa3 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -818,10 +818,6 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       if (!evl)
+               return;
+-      gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+-      if (!gencfg.evl_en)
+-              return;
+-
+       mutex_lock(&evl->lock);
+       gencfg.evl_en = 0;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch b/queue-6.18/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
new file mode 100644 (file)
index 0000000..a26725f
--- /dev/null
@@ -0,0 +1,56 @@
+From 077db8e320ac43538aedc6602047c8ba947e251f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:34 -0800
+Subject: dmaengine: idxd: Fix memory leak when a wq is reset
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d9cfb5193a047a92a4d3c0e91ea4cc87c8f7c478 ]
+
+idxd_wq_disable_cleanup() which is called from the reset path for a
+workqueue, sets the wq type to NONE, which for other parts of the
+driver mean that the wq is empty (all its resources were released).
+
+Only set the wq type to NONE after its resources are released.
+
+Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-8-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index c599a902767ee..c43547c40ee34 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -174,6 +174,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_queue_free(&wq->sbq);
++      wq->type = IDXD_WQT_NONE;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
+@@ -367,7 +368,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       lockdep_assert_held(&wq->wq_lock);
+       wq->state = IDXD_WQ_DISABLED;
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+-      wq->type = IDXD_WQT_NONE;
+       wq->threshold = 0;
+       wq->priority = 0;
+       wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+@@ -1516,7 +1516,6 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
+       idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
+       percpu_ref_exit(&wq->wq_active);
+-      wq->type = IDXD_WQT_NONE;
+       wq->client_count = 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-6.18/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..237de12
--- /dev/null
@@ -0,0 +1,37 @@
+From 7a7a49d1144c0f5523f3dd01ea8792f2cac6ff48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with a DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 9f0701021af0e..cdd7a59140d90 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1812,6 +1812,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       bitmap_free(idxd->wq_enable_map);
+       kfree(idxd->wqs);
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch b/queue-6.18/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch
new file mode 100644 (file)
index 0000000..ce408eb
--- /dev/null
@@ -0,0 +1,41 @@
+From 57422e7de940369bf7b12c9326ddc3c1ec0f421f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:29 -0800
+Subject: dmaengine: idxd: Fix possible invalid memory access after FLR
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d6077df7b75d26e4edf98983836c05d00ebabd8d ]
+
+In the case that the first Function Level Reset (FLR) concludes
+correctly, but in the second FLR the scratch area for the saved
+configuration cannot be allocated, it's possible for an invalid memory
+access to happen.
+
+Always set the deallocated scratch area to NULL after FLR completes.
+
+Fixes: 98d187a98903 ("dmaengine: idxd: Enable Function Level Reset (FLR) for halt")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-3-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 449424242631d..f2b37c63a964c 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -1137,6 +1137,7 @@ static void idxd_reset_done(struct pci_dev *pdev)
+       }
+ out:
+       kfree(idxd->idxd_saved);
++      idxd->idxd_saved = NULL;
+ }
+ static const struct pci_error_handlers idxd_error_handler = {
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch b/queue-6.18/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
new file mode 100644 (file)
index 0000000..d494e22
--- /dev/null
@@ -0,0 +1,43 @@
+From b92bfabdad30d8efb90ecc9621309cf9c33f09ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 11:24:28 +0800
+Subject: dmaengine: idxd: fix possible wrong descriptor completion in
+ llist_abort_desc()
+
+From: Tuo Li <islituo@gmail.com>
+
+[ Upstream commit e1c9866173c5f8521f2d0768547a01508cb9ff27 ]
+
+At the end of this function, d is the traversal cursor of flist, but the
+code completes found instead. This can lead to issues such as NULL pointer
+dereferences, double completion, or descriptor leaks.
+
+Fix this by completing d instead of found in the final
+list_for_each_entry_safe() loop.
+
+Fixes: aa8d18becc0c ("dmaengine: idxd: add callback support for iaa crypto")
+Signed-off-by: Tuo Li <islituo@gmail.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://patch.msgid.link/20260106032428.162445-1-islituo@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/submit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index 6db1c5fcedc58..03217041b8b3e 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -138,7 +138,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+        */
+       list_for_each_entry_safe(d, t, &flist, list) {
+               list_del_init(&d->list);
+-              idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
++              idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true,
+                                     NULL, NULL);
+       }
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch b/queue-6.18/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
new file mode 100644 (file)
index 0000000..a59b383
--- /dev/null
@@ -0,0 +1,41 @@
+From 8c8aaf1f0705d2edc8c95973fe5447e74d0caf17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 08:13:08 +0200
+Subject: dmaengine: xilinx: xdma: Fix regmap init error handling
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit e0adbf74e2a0455a6bc9628726ba87bcd0b42bf8 ]
+
+devm_regmap_init_mmio returns an ERR_PTR() upon error, not NULL.
+Fix the error check and also fix the error message. Use the error code
+from ERR_PTR() instead of the wrong value in ret.
+
+Fixes: 17ce252266c7 ("dmaengine: xilinx: xdma: Add xilinx xdma driver")
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20251014061309.283468-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index 5ecf8223c112e..58e01e22b9765 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -1236,8 +1236,8 @@ static int xdma_probe(struct platform_device *pdev)
+       xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+                                          &xdma_regmap_config);
+-      if (!xdev->rmap) {
+-              xdma_err(xdev, "config regmap failed: %d", ret);
++      if (IS_ERR(xdev->rmap)) {
++              xdma_err(xdev, "config regmap failed: %pe", xdev->rmap);
+               goto failed;
+       }
+       INIT_LIST_HEAD(&xdev->dma_dev.channels);
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..7bdf032
--- /dev/null
@@ -0,0 +1,38 @@
+From d111e1b8fce6fc25265337bf8904c149598bb37f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 89a8254d9cdc6..e6d10079ec670 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -3003,7 +3003,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = of_irq_get(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..cc1dabf
--- /dev/null
@@ -0,0 +1,75 @@
+From 791722ff9ce68b64d002706ddcc5ef9b660d2d43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index e6d10079ec670..ccfcc2b801f82 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1546,8 +1546,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-6.18/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..ffa7a10
--- /dev/null
@@ -0,0 +1,62 @@
+From 1acb62c3afaa3549d56cb9fc045e5fa10f5ebf49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ccfcc2b801f82..7b24d0a18ea53 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -997,16 +997,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -1014,8 +1014,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.18/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-6.18/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..ed98e30
--- /dev/null
@@ -0,0 +1,98 @@
+From 41ef9c7a68e96c933e9be926194edf82ae886690 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead  to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 7b24d0a18ea53..7dec5e6babe14 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1217,14 +1217,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1594,6 +1586,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-6.18/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch b/queue-6.18/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch
new file mode 100644 (file)
index 0000000..0581e77
--- /dev/null
@@ -0,0 +1,100 @@
+From 2515972a71ae52668366eef782e28a3cdb8cc90e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Mar 2026 20:47:56 +0800
+Subject: futex: Fix UaF between futex_key_to_node_opt() and
+ vma_replace_policy()
+
+From: Hao-Yu Yang <naup96721@gmail.com>
+
+[ Upstream commit 190a8c48ff623c3d67cb295b4536a660db2012aa ]
+
+During futex_key_to_node_opt() execution, vma->vm_policy is read under
+speculative mmap lock and RCU. Concurrently, mbind() may call
+vma_replace_policy() which frees the old mempolicy immediately via
+kmem_cache_free().
+
+This creates a race where __futex_key_to_node() dereferences a freed
+mempolicy pointer, causing a use-after-free read of mpol->mode.
+
+[  151.412631] BUG: KASAN: slab-use-after-free in __futex_key_to_node (kernel/futex/core.c:349)
+[  151.414046] Read of size 2 at addr ffff888001c49634 by task e/87
+
+[  151.415969] Call Trace:
+
+[  151.416732]  __asan_load2 (mm/kasan/generic.c:271)
+[  151.416777]  __futex_key_to_node (kernel/futex/core.c:349)
+[  151.416822]  get_futex_key (kernel/futex/core.c:374 kernel/futex/core.c:386 kernel/futex/core.c:593)
+
+Fix by adding rcu to __mpol_put().
+
+Fixes: c042c505210d ("futex: Implement FUTEX2_MPOL")
+Reported-by: Hao-Yu Yang <naup96721@gmail.com>
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Hao-Yu Yang <naup96721@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Link: https://patch.msgid.link/20260324174418.GB1850007@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mempolicy.h |  1 +
+ kernel/futex/core.c       |  2 +-
+ mm/mempolicy.c            | 10 ++++++++--
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 0fe96f3ab3ef0..65c732d440d2f 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -55,6 +55,7 @@ struct mempolicy {
+               nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+               nodemask_t user_nodemask;       /* nodemask passed by user */
+       } w;
++      struct rcu_head rcu;
+ };
+ /*
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index 2e77a6e5c8657..9e7dea6fc0ccd 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -342,7 +342,7 @@ static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
+       if (!vma)
+               return FUTEX_NO_NODE;
+-      mpol = vma_policy(vma);
++      mpol = READ_ONCE(vma->vm_policy);
+       if (!mpol)
+               return FUTEX_NO_NODE;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index eb83cff7db8c3..94327574fbbbb 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -485,7 +485,13 @@ void __mpol_put(struct mempolicy *pol)
+ {
+       if (!atomic_dec_and_test(&pol->refcnt))
+               return;
+-      kmem_cache_free(policy_cache, pol);
++      /*
++       * Required to allow mmap_lock_speculative*() access, see for example
++       * futex_key_to_node_opt(). All accesses are serialized by mmap_lock,
++       * however the speculative lock section unbound by the normal lock
++       * boundaries, requiring RCU freeing.
++       */
++      kfree_rcu(pol, rcu);
+ }
+ static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
+@@ -951,7 +957,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
+       }
+       old = vma->vm_policy;
+-      vma->vm_policy = new; /* protected by mmap_lock */
++      WRITE_ONCE(vma->vm_policy, new); /* protected by mmap_lock */
+       mpol_put(old);
+       return 0;
+-- 
+2.53.0
+
diff --git a/queue-6.18/futex-require-sys_futex_requeue-to-have-identical-fl.patch b/queue-6.18/futex-require-sys_futex_requeue-to-have-identical-fl.patch
new file mode 100644 (file)
index 0000000..8da8421
--- /dev/null
@@ -0,0 +1,46 @@
+From a845ee0494ddc30fca9063f4294453ca39c480e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Mar 2026 13:35:53 +0100
+Subject: futex: Require sys_futex_requeue() to have identical flags
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 19f94b39058681dec64a10ebeb6f23fe7fc3f77a ]
+
+Nicholas reported that his LLM found it was possible to create a UaF
+when sys_futex_requeue() is used with different flags. The initial
+motivation for allowing different flags was the variable sized futex,
+but since that hasn't been merged (yet), simply mandate the flags are
+identical, as is the case for the old style sys_futex() requeue
+operations.
+
+Fixes: 0f4b5f972216 ("futex: Add sys_futex_requeue()")
+Reported-by: Nicholas Carlini <npc@anthropic.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/futex/syscalls.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
+index 880c9bf2f3150..99723189c8cf7 100644
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -459,6 +459,14 @@ SYSCALL_DEFINE4(futex_requeue,
+       if (ret)
+               return ret;
++      /*
++       * For now mandate both flags are identical, like the sys_futex()
++       * interface has. If/when we merge the variable sized futex support,
++       * that patch can modify this test to allow a difference in size.
++       */
++      if (futexes[0].w.flags != futexes[1].w.flags)
++              return -EINVAL;
++
+       cmpval = futexes[0].w.val;
+       return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags,
+-- 
+2.53.0
+
diff --git a/queue-6.18/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch b/queue-6.18/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch
new file mode 100644 (file)
index 0000000..eb54244
--- /dev/null
@@ -0,0 +1,39 @@
+From 79d918b7c36909a2d5984e774b4e5652384b0075 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Mar 2026 12:49:14 +0000
+Subject: irqchip/renesas-rzv2h: Fix error path in rzv2h_icu_probe_common()
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 897cf98926429c8671a9009442883c2f62deae96 ]
+
+Replace pm_runtime_put() with pm_runtime_put_sync() when
+irq_domain_create_hierarchy() fails to ensure the device suspends
+synchronously before devres cleanup disables runtime PM via
+pm_runtime_disable().
+
+Fixes: 5ec8cabc3b86 ("irqchip/renesas-rzv2h: Use devm_pm_runtime_enable()")
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260323124917.41602-1-biju.das.jz@bp.renesas.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzv2h.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
+index 3dab62ededec9..c9f9099a51294 100644
+--- a/drivers/irqchip/irq-renesas-rzv2h.c
++++ b/drivers/irqchip/irq-renesas-rzv2h.c
+@@ -569,7 +569,7 @@ static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_no
+       return 0;
+ pm_put:
+-      pm_runtime_put(&pdev->dev);
++      pm_runtime_put_sync(&pdev->dev);
+       return ret;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.18/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch b/queue-6.18/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
new file mode 100644 (file)
index 0000000..982d471
--- /dev/null
@@ -0,0 +1,96 @@
+From ce6422a16e56a83a18e4d4a47eb77384b9531d12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Mar 2026 14:30:41 +0530
+Subject: netfs: Fix kernel BUG in netfs_limit_iter() for ITER_KVEC iterators
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit 67e467a11f62ff64ad219dc6aa5459e132c79d14 ]
+
+When a process crashes and the kernel writes a core dump to a 9P
+filesystem, __kernel_write() creates an ITER_KVEC iterator. This
+iterator reaches netfs_limit_iter() via netfs_unbuffered_write(), which
+only handles ITER_FOLIOQ, ITER_BVEC and ITER_XARRAY iterator types,
+hitting the BUG() for any other type.
+
+Fix this by adding netfs_limit_kvec() following the same pattern as
+netfs_limit_bvec(), since both kvec and bvec are simple segment arrays
+with pointer and length fields. Dispatch it from netfs_limit_iter() when
+the iterator type is ITER_KVEC.
+
+Fixes: cae932d3aee5 ("netfs: Add func to calculate pagecount/size-limited span of an iterator")
+Reported-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=9c058f0d63475adc97fd
+Tested-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Signed-off-by: Deepanshu Kartikey <Kartikey406@gmail.com>
+Link: https://patch.msgid.link/20260307090041.359870-1-kartikey406@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/iterator.c | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
+index 72a435e5fc6da..154a14bb2d7f7 100644
+--- a/fs/netfs/iterator.c
++++ b/fs/netfs/iterator.c
+@@ -142,6 +142,47 @@ static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+       return min(span, max_size);
+ }
++/*
++ * Select the span of a kvec iterator we're going to use.  Limit it by both
++ * maximum size and maximum number of segments.  Returns the size of the span
++ * in bytes.
++ */
++static size_t netfs_limit_kvec(const struct iov_iter *iter, size_t start_offset,
++                             size_t max_size, size_t max_segs)
++{
++      const struct kvec *kvecs = iter->kvec;
++      unsigned int nkv = iter->nr_segs, ix = 0, nsegs = 0;
++      size_t len, span = 0, n = iter->count;
++      size_t skip = iter->iov_offset + start_offset;
++
++      if (WARN_ON(!iov_iter_is_kvec(iter)) ||
++          WARN_ON(start_offset > n) ||
++          n == 0)
++              return 0;
++
++      while (n && ix < nkv && skip) {
++              len = kvecs[ix].iov_len;
++              if (skip < len)
++                      break;
++              skip -= len;
++              n -= len;
++              ix++;
++      }
++
++      while (n && ix < nkv) {
++              len = min3(n, kvecs[ix].iov_len - skip, max_size);
++              span += len;
++              nsegs++;
++              ix++;
++              if (span >= max_size || nsegs >= max_segs)
++                      break;
++              skip = 0;
++              n -= len;
++      }
++
++      return min(span, max_size);
++}
++
+ /*
+  * Select the span of an xarray iterator we're going to use.  Limit it by both
+  * maximum size and maximum number of segments.  It is assumed that segments
+@@ -245,6 +286,8 @@ size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+               return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+       if (iov_iter_is_xarray(iter))
+               return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
++      if (iov_iter_is_kvec(iter))
++              return netfs_limit_kvec(iter, start_offset, max_size, max_segs);
+       BUG();
+ }
+ EXPORT_SYMBOL(netfs_limit_iter);
+-- 
+2.53.0
+
diff --git a/queue-6.18/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch b/queue-6.18/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch
new file mode 100644 (file)
index 0000000..4bf3157
--- /dev/null
@@ -0,0 +1,65 @@
+From d5513e1d2c08d879ccea2771ec23b70740295fde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Mar 2026 10:09:47 +0530
+Subject: netfs: Fix NULL pointer dereference in netfs_unbuffered_write() on
+ retry
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit e9075e420a1eb3b52c60f3b95893a55e77419ce8 ]
+
+When a write subrequest is marked NETFS_SREQ_NEED_RETRY, the retry path
+in netfs_unbuffered_write() unconditionally calls stream->prepare_write()
+without checking if it is NULL.
+
+Filesystems such as 9P do not set the prepare_write operation, so
+stream->prepare_write remains NULL. When get_user_pages() fails with
+-EFAULT and the subrequest is flagged for retry, this results in a NULL
+pointer dereference at fs/netfs/direct_write.c:189.
+
+Fix this by mirroring the pattern already used in write_retry.c: if
+stream->prepare_write is NULL, skip renegotiation and directly reissue
+the subrequest via netfs_reissue_write(), which handles iterator reset,
+IN_PROGRESS flag, stats update and reissue internally.
+
+Fixes: a0b4c7a49137 ("netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence")
+Reported-by: syzbot+7227db0fbac9f348dba0@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=7227db0fbac9f348dba0
+Signed-off-by: Deepanshu Kartikey <Kartikey406@gmail.com>
+Link: https://patch.msgid.link/20260307043947.347092-1-kartikey406@gmail.com
+Tested-by: syzbot+7227db0fbac9f348dba0@syzkaller.appspotmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/direct_write.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index dd1451bf7543d..4d9760e36c119 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -186,10 +186,18 @@ static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+               stream->sreq_max_segs   = INT_MAX;
+               netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+-              stream->prepare_write(subreq);
+-              __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+-              netfs_stat(&netfs_n_wh_retry_write_subreq);
++              if (stream->prepare_write) {
++                      stream->prepare_write(subreq);
++                      __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
++                      netfs_stat(&netfs_n_wh_retry_write_subreq);
++              } else {
++                      struct iov_iter source;
++
++                      netfs_reset_iter(subreq);
++                      source = subreq->io_iter;
++                      netfs_reissue_write(stream, subreq, &source);
++              }
+       }
+       netfs_unbuffered_write_done(wreq);
+-- 
+2.53.0
+
diff --git a/queue-6.18/netfs-fix-read-abandonment-during-retry.patch b/queue-6.18/netfs-fix-read-abandonment-during-retry.patch
new file mode 100644 (file)
index 0000000..35c3650
--- /dev/null
@@ -0,0 +1,63 @@
+From 72d533b3d457285afcc3700a762c36ddf712d8cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 15:38:58 +0000
+Subject: netfs: Fix read abandonment during retry
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 7e57523490cd2efb52b1ea97f2e0a74c0fb634cd ]
+
+Under certain circumstances, all the remaining subrequests from a read
+request will get abandoned during retry.  The abandonment process expects
+the 'subreq' variable to be set to the place to start abandonment from, but
+it doesn't always have a useful value (it will be uninitialised on the
+first pass through the loop and it may point to a deleted subrequest on
+later passes).
+
+Fix the first jump to "abandon:" to set subreq to the start of the first
+subrequest expected to need retry (which, in this abandonment case, turned
+out unexpectedly to no longer have NEED_RETRY set).
+
+Also clear the subreq pointer after discarding superfluous retryable
+subrequests to cause an oops if we do try to access it.
+
+Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/3775287.1773848338@warthog.procyon.org.uk
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: Paulo Alcantara <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/read_retry.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
+index 7793ba5e3e8fc..cca9ac43c0773 100644
+--- a/fs/netfs/read_retry.c
++++ b/fs/netfs/read_retry.c
+@@ -93,8 +93,10 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+                      from->start, from->transferred, from->len);
+               if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+-                  !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
++                  !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) {
++                      subreq = from;
+                       goto abandon;
++              }
+               list_for_each_continue(next, &stream->subrequests) {
+                       subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+@@ -178,6 +180,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+                               if (subreq == to)
+                                       break;
+                       }
++                      subreq = NULL;
+                       continue;
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.18/netfs-fix-the-handling-of-stream-front-by-removing-i.patch b/queue-6.18/netfs-fix-the-handling-of-stream-front-by-removing-i.patch
new file mode 100644 (file)
index 0000000..39041a5
--- /dev/null
@@ -0,0 +1,196 @@
+From 20f8cc01f080d3f9ca6d8c7cdf3b31174ad12476 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 08:20:17 +0000
+Subject: netfs: Fix the handling of stream->front by removing it
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0e764b9d46071668969410ec5429be0e2f38c6d3 ]
+
+The netfs_io_stream::front member is meant to point to the subrequest
+currently being collected on a stream, but it isn't actually used this way
+by direct write (which mostly ignores it).  However, there's a tracepoint
+which looks at it.  Further, stream->front is actually redundant with
+stream->subrequests.next.
+
+Fix the potential problem in the direct code by just removing the member
+and using stream->subrequests.next instead, thereby also simplifying the
+code.
+
+Fixes: a0b4c7a49137 ("netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence")
+Reported-by: Paulo Alcantara <pc@manguebit.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/4158599.1774426817@warthog.procyon.org.uk
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/buffered_read.c     | 3 +--
+ fs/netfs/direct_read.c       | 3 +--
+ fs/netfs/direct_write.c      | 1 -
+ fs/netfs/read_collect.c      | 4 ++--
+ fs/netfs/read_single.c       | 1 -
+ fs/netfs/write_collect.c     | 4 ++--
+ fs/netfs/write_issue.c       | 3 +--
+ include/linux/netfs.h        | 1 -
+ include/trace/events/netfs.h | 8 ++++----
+ 9 files changed, 11 insertions(+), 17 deletions(-)
+
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 37ab6f28b5ad0..88361e8c70961 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -171,9 +171,8 @@ static void netfs_queue_read(struct netfs_io_request *rreq,
+       spin_lock(&rreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-              stream->front = subreq;
+               if (!stream->active) {
+-                      stream->collected_to = stream->front->start;
++                      stream->collected_to = subreq->start;
+                       /* Store list pointers before active flag */
+                       smp_store_release(&stream->active, true);
+               }
+diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
+index a498ee8d66745..f72e6da88cca7 100644
+--- a/fs/netfs/direct_read.c
++++ b/fs/netfs/direct_read.c
+@@ -71,9 +71,8 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
+               spin_lock(&rreq->lock);
+               list_add_tail(&subreq->rreq_link, &stream->subrequests);
+               if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-                      stream->front = subreq;
+                       if (!stream->active) {
+-                              stream->collected_to = stream->front->start;
++                              stream->collected_to = subreq->start;
+                               /* Store list pointers before active flag */
+                               smp_store_release(&stream->active, true);
+                       }
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index 4d9760e36c119..f9ab69de3e298 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -111,7 +111,6 @@ static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+                       netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
+                       subreq = stream->construct;
+                       stream->construct = NULL;
+-                      stream->front = NULL;
+               }
+               /* Check if (re-)preparation failed. */
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index 137f0e28a44c5..e5f6665b3341e 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -205,7 +205,8 @@ static void netfs_collect_read_results(struct netfs_io_request *rreq)
+        * in progress.  The issuer thread may be adding stuff to the tail
+        * whilst we're doing this.
+        */
+-      front = READ_ONCE(stream->front);
++      front = list_first_entry_or_null(&stream->subrequests,
++                                       struct netfs_io_subrequest, rreq_link);
+       while (front) {
+               size_t transferred;
+@@ -301,7 +302,6 @@ static void netfs_collect_read_results(struct netfs_io_request *rreq)
+               list_del_init(&front->rreq_link);
+               front = list_first_entry_or_null(&stream->subrequests,
+                                                struct netfs_io_subrequest, rreq_link);
+-              stream->front = front;
+               spin_unlock(&rreq->lock);
+               netfs_put_subrequest(remove,
+                                    notes & ABANDON_SREQ ?
+diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
+index 5c0dc4efc7922..9d48ced80d1fa 100644
+--- a/fs/netfs/read_single.c
++++ b/fs/netfs/read_single.c
+@@ -107,7 +107,6 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
+       spin_lock(&rreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+-      stream->front = subreq;
+       /* Store list pointers before active flag */
+       smp_store_release(&stream->active, true);
+       spin_unlock(&rreq->lock);
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 83eb3dc1adf8a..b194447f4b111 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -228,7 +228,8 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+               if (!smp_load_acquire(&stream->active))
+                       continue;
+-              front = stream->front;
++              front = list_first_entry_or_null(&stream->subrequests,
++                                               struct netfs_io_subrequest, rreq_link);
+               while (front) {
+                       trace_netfs_collect_sreq(wreq, front);
+                       //_debug("sreq [%x] %llx %zx/%zx",
+@@ -279,7 +280,6 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+                       list_del_init(&front->rreq_link);
+                       front = list_first_entry_or_null(&stream->subrequests,
+                                                        struct netfs_io_subrequest, rreq_link);
+-                      stream->front = front;
+                       spin_unlock(&wreq->lock);
+                       netfs_put_subrequest(remove,
+                                            notes & SAW_FAILURE ?
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index 437268f656409..2db688f941251 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -206,9 +206,8 @@ void netfs_prepare_write(struct netfs_io_request *wreq,
+       spin_lock(&wreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-              stream->front = subreq;
+               if (!stream->active) {
+-                      stream->collected_to = stream->front->start;
++                      stream->collected_to = subreq->start;
+                       /* Write list pointers before active flag */
+                       smp_store_release(&stream->active, true);
+               }
+diff --git a/include/linux/netfs.h b/include/linux/netfs.h
+index 72ee7d210a744..ba17ac5bf356a 100644
+--- a/include/linux/netfs.h
++++ b/include/linux/netfs.h
+@@ -140,7 +140,6 @@ struct netfs_io_stream {
+       void (*issue_write)(struct netfs_io_subrequest *subreq);
+       /* Collection tracking */
+       struct list_head        subrequests;    /* Contributory I/O operations */
+-      struct netfs_io_subrequest *front;      /* Op being collected */
+       unsigned long long      collected_to;   /* Position we've collected results to */
+       size_t                  transferred;    /* The amount transferred from this stream */
+       unsigned short          error;          /* Aggregate error for the stream */
+diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
+index 2d366be46a1c3..cbe28211106c5 100644
+--- a/include/trace/events/netfs.h
++++ b/include/trace/events/netfs.h
+@@ -740,19 +740,19 @@ TRACE_EVENT(netfs_collect_stream,
+                   __field(unsigned int,       wreq)
+                   __field(unsigned char,      stream)
+                   __field(unsigned long long, collected_to)
+-                  __field(unsigned long long, front)
++                  __field(unsigned long long, issued_to)
+                            ),
+           TP_fast_assign(
+                   __entry->wreq       = wreq->debug_id;
+                   __entry->stream     = stream->stream_nr;
+                   __entry->collected_to = stream->collected_to;
+-                  __entry->front      = stream->front ? stream->front->start : UINT_MAX;
++                  __entry->issued_to  = atomic64_read(&wreq->issued_to);
+                          ),
+-          TP_printk("R=%08x[%x:] cto=%llx frn=%llx",
++          TP_printk("R=%08x[%x:] cto=%llx ito=%llx",
+                     __entry->wreq, __entry->stream,
+-                    __entry->collected_to, __entry->front)
++                    __entry->collected_to, __entry->issued_to)
+           );
+ TRACE_EVENT(netfs_folioq,
+-- 
+2.53.0
+
diff --git a/queue-6.18/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-6.18/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..6709b32
--- /dev/null
@@ -0,0 +1,51 @@
+From 0663ef5332b09d0cdf6b4e679e5f7135d0b4e714 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index ba31b0a1f7f79..77f18de6fdf62 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1425,6 +1425,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1439,6 +1440,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+               }
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.18/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch b/queue-6.18/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch
new file mode 100644 (file)
index 0000000..02ea4dc
--- /dev/null
@@ -0,0 +1,45 @@
+From abb6bd55cdef04a8a02ff28e3328b9e7086a4971 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 16:59:45 +0100
+Subject: selftests/mount_setattr: increase tmpfs size for idmapped mount tests
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit c465f5591aa84a6f85d66d152e28b92844a45d4f ]
+
+The mount_setattr_idmapped fixture mounts a 2 MB tmpfs at /mnt and then
+creates a 2 GB sparse ext4 image at /mnt/C/ext4.img. While ftruncate()
+succeeds (sparse file), mkfs.ext4 needs to write actual metadata blocks
+(inode tables, journal, bitmaps) which easily exceeds the 2 MB tmpfs
+limit, causing ENOSPC and failing the fixture setup for all
+mount_setattr_idmapped tests.
+
+This was introduced by commit d37d4720c3e7 ("selftests/mount_settattr:
+ensure that ext4 filesystem can be created") which increased the image
+size from 2 MB to 2 GB but didn't adjust the tmpfs size.
+
+Bump the tmpfs size to 256 MB which is sufficient for the ext4 metadata.
+
+Fixes: d37d4720c3e7 ("selftests/mount_settattr: ensure that ext4 filesystem can be created")
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mount_setattr/mount_setattr_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index a688871a98eba..388bca88ec94f 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -1020,7 +1020,7 @@ FIXTURE_SETUP(mount_setattr_idmapped)
+                       "size=100000,mode=700"), 0);
+       ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
+-                      "size=2m,mode=700"), 0);
++                      "size=256m,mode=700"), 0);
+       ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
+-- 
+2.53.0
+
index e79046a33a34820de2c0f597d736d396fc5d5eeb..cc6f6f69b284a28a434a7007942f4aa8b5826a3b 100644 (file)
@@ -280,3 +280,29 @@ mm-damon-sysfs-fix-param_ctx-leak-on-damon_sysfs_new_test_ctx-failure.patch
 mm-huge_memory-fix-folio-isn-t-locked-in-softleaf_to_folio.patch
 ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
 mm-mseal-update-vma-end-correctly-on-merge.patch
+dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
+dmaengine-idxd-fix-possible-invalid-memory-access-af.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
+dmaengine-idxd-fix-leaking-event-log-memory.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
+dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
+netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
+netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch
+dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
+selftests-mount_setattr-increase-tmpfs-size-for-idma.patch
+netfs-fix-read-abandonment-during-retry.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
+netfs-fix-the-handling-of-stream-front-by-removing-i.patch
+irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch
+futex-require-sys_futex_requeue-to-have-identical-fl.patch
+futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch
diff --git a/queue-6.18/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch b/queue-6.18/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
new file mode 100644 (file)
index 0000000..9dde9db
--- /dev/null
@@ -0,0 +1,47 @@
+From d5a072d1470037f878f8e0b8863ad5c82666e85f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 20:02:46 +0800
+Subject: xen/privcmd: unregister xenstore notifier on module exit
+
+From: GuoHan Zhao <zhaoguohan@kylinos.cn>
+
+[ Upstream commit cd7e1fef5a1ca1c4fcd232211962ac2395601636 ]
+
+Commit 453b8fb68f36 ("xen/privcmd: restrict usage in
+unprivileged domU") added a xenstore notifier to defer setting the
+restriction target until Xenstore is ready.
+
+XEN_PRIVCMD can be built as a module, but privcmd_exit() leaves that
+notifier behind. Balance the notifier lifecycle by unregistering it on
+module exit.
+
+This is harmless even if xenstore was already ready at registration
+time and the notifier was never queued on the chain.
+
+Fixes: 453b8fb68f3641fe ("xen/privcmd: restrict usage in unprivileged domU")
+Signed-off-by: GuoHan Zhao <zhaoguohan@kylinos.cn>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260325120246.252899-1-zhaoguohan@kylinos.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/privcmd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index b8a546fe7c1e2..cbc62f0df11b7 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -1764,6 +1764,9 @@ static int __init privcmd_init(void)
+ static void __exit privcmd_exit(void)
+ {
++      if (!xen_initial_domain())
++              unregister_xenstore_notifier(&xenstore_notifier);
++
+       privcmd_ioeventfd_exit();
+       privcmd_irqfd_exit();
+       misc_deregister(&privcmd_dev);
+-- 
+2.53.0
+
diff --git a/queue-6.19/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch b/queue-6.19/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
new file mode 100644 (file)
index 0000000..84cea9c
--- /dev/null
@@ -0,0 +1,70 @@
+From 392c9ba69d149c9f5192d0b0de7630db88897dd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 21:17:04 +0900
+Subject: btrfs: fix leak of kobject name for sub-group space_info
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit a4376d9a5d4c9610e69def3fc0b32c86a7ab7a41 ]
+
+When create_space_info_sub_group() allocates elements of
+space_info->sub_group[], kobject_init_and_add() is called for each
+element via btrfs_sysfs_add_space_info_type(). However, when
+check_removing_space_info() frees these elements, it does not call
+btrfs_sysfs_remove_space_info() on them. As a result, kobject_put() is
+not called and the associated kobj->name objects are leaked.
+
+This memory leak is reproduced by running the blktests test case
+zbd/009 on kernels built with CONFIG_DEBUG_KMEMLEAK. The kmemleak
+feature reports the following error:
+
+unreferenced object 0xffff888112877d40 (size 16):
+  comm "mount", pid 1244, jiffies 4294996972
+  hex dump (first 16 bytes):
+    64 61 74 61 2d 72 65 6c 6f 63 00 c4 c6 a7 cb 7f  data-reloc......
+  backtrace (crc 53ffde4d):
+    __kmalloc_node_track_caller_noprof+0x619/0x870
+    kstrdup+0x42/0xc0
+    kobject_set_name_vargs+0x44/0x110
+    kobject_init_and_add+0xcf/0x150
+    btrfs_sysfs_add_space_info_type+0xfc/0x210 [btrfs]
+    create_space_info_sub_group.constprop.0+0xfb/0x1b0 [btrfs]
+    create_space_info+0x211/0x320 [btrfs]
+    btrfs_init_space_info+0x15a/0x1b0 [btrfs]
+    open_ctree+0x33c7/0x4a50 [btrfs]
+    btrfs_get_tree.cold+0x9f/0x1ee [btrfs]
+    vfs_get_tree+0x87/0x2f0
+    vfs_cmd_create+0xbd/0x280
+    __do_sys_fsconfig+0x3df/0x990
+    do_syscall_64+0x136/0x1540
+    entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+To avoid the leak, call btrfs_sysfs_remove_space_info() instead of
+kfree() for the elements.
+
+Fixes: f92ee31e031c ("btrfs: introduce btrfs_space_info sub-group")
+Link: https://lore.kernel.org/linux-block/b9488881-f18d-4f47-91a5-3c9bf63955a5@wdc.com/
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 25a0d207f10c9..4b73ccefcbcba 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -4466,7 +4466,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info)
+               for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+                       if (space_info->sub_group[i]) {
+                               check_removing_space_info(space_info->sub_group[i]);
+-                              kfree(space_info->sub_group[i]);
++                              btrfs_sysfs_remove_space_info(space_info->sub_group[i]);
+                               space_info->sub_group[i] = NULL;
+                       }
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.19/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-6.19/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..bbbf888
--- /dev/null
@@ -0,0 +1,48 @@
+From 68a86abd1b6d9729d2342898e60413bd10baa86f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index fbf23d20cce01..052b830a0b66e 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7874,8 +7874,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-6.19/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-6.19/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..60a71fb
--- /dev/null
@@ -0,0 +1,46 @@
+From d04910ecc0997a10832852244186cfa7059f002b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 6d2dcd023cc6f..8df7eb7f01e90 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2503,8 +2503,8 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.19/bug-avoid-format-attribute-warning-for-clang-as-well.patch b/queue-6.19/bug-avoid-format-attribute-warning-for-clang-as-well.patch
new file mode 100644 (file)
index 0000000..2d463c6
--- /dev/null
@@ -0,0 +1,78 @@
+From a77d6976c45cdb2921a4a0aa7b6d9e1f15a2681a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Mar 2026 21:55:16 +0100
+Subject: bug: avoid format attribute warning for clang as well
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 2598ab9d63f41160c7081998857fef409182933d ]
+
+Like gcc, clang-22 now also warns about a function that it incorrectly
+identifies as a printf-style format:
+
+lib/bug.c:190:22: error: diagnostic behavior may be improved by adding the 'format(printf, 1, 0)' attribute to the declaration of '__warn_printf' [-Werror,-Wmissing-format-attribute]
+  179 | static void __warn_printf(const char *fmt, struct pt_regs *regs)
+      | __attribute__((format(printf, 1, 0)))
+  180 | {
+  181 |         if (!fmt)
+  182 |                 return;
+  183 |
+  184 | #ifdef HAVE_ARCH_BUG_FORMAT_ARGS
+  185 |         if (regs) {
+  186 |                 struct arch_va_list _args;
+  187 |                 va_list *args = __warn_args(&_args, regs);
+  188 |
+  189 |                 if (args) {
+  190 |                         vprintk(fmt, *args);
+      |                                           ^
+
+Revert the change that added a gcc-specific workaround, and instead add
+the generic annotation that avoid the warning.
+
+Link: https://lkml.kernel.org/r/20260323205534.1284284-1-arnd@kernel.org
+Fixes: d36067d6ea00 ("bug: Hush suggest-attribute=format for __warn_printf()")
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Suggested-by: Brendan Jackman <jackmanb@google.com>
+Link: https://lore.kernel.org/all/20251208141618.2805983-1-andriy.shevchenko@linux.intel.com/T/#u
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Brendan Jackman <jackmanb@google.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/bug.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/lib/bug.c b/lib/bug.c
+index 623c467a8b76c..aab9e6a40c5f9 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -173,10 +173,8 @@ struct bug_entry *find_bug(unsigned long bugaddr)
+       return module_find_bug(bugaddr);
+ }
+-__diag_push();
+-__diag_ignore(GCC, all, "-Wsuggest-attribute=format",
+-            "Not a valid __printf() conversion candidate.");
+-static void __warn_printf(const char *fmt, struct pt_regs *regs)
++static __printf(1, 0)
++void __warn_printf(const char *fmt, struct pt_regs *regs)
+ {
+       if (!fmt)
+               return;
+@@ -195,7 +193,6 @@ static void __warn_printf(const char *fmt, struct pt_regs *regs)
+       printk("%s", fmt);
+ }
+-__diag_pop();
+ static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs)
+ {
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch b/queue-6.19/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
new file mode 100644 (file)
index 0000000..3f3a29f
--- /dev/null
@@ -0,0 +1,70 @@
+From ca0300dc2c8e4e2f2b9808844045430eee7e258f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 14:45:09 +0800
+Subject: dmaengine: dw-edma: Fix multiple times setting of the CYCLE_STATE and
+ CYCLE_BIT bits for HDMA.
+
+From: LUO Haowen <luo-hw@foxmail.com>
+
+[ Upstream commit 3f63297ff61a994b99d710dcb6dbde41c4003233 ]
+
+Others have submitted this issue (https://lore.kernel.org/dmaengine/
+20240722030405.3385-1-zhengdongxiong@gxmicro.cn/),
+but it has not been fixed yet. Therefore, more supplementary information
+is provided here.
+
+As mentioned in the "PCS-CCS-CB-TCB" Producer-Consumer Synchronization of
+"DesignWare Cores PCI Express Controller Databook, version 6.00a":
+
+1. The Consumer CYCLE_STATE (CCS) bit in the register only needs to be
+initialized once; the value will update automatically to be
+~CYCLE_BIT (CB) in the next chunk.
+2. The Consumer CYCLE_BIT bit in the register is loaded from the LL
+element and tested against CCS. When CB = CCS, the data transfer is
+executed. Otherwise not.
+
+The current logic sets customer (HDMA) CS and CB bits to 1 in each chunk
+while setting the producer (software) CB of odd chunks to 0 and even
+chunks to 1 in the linked list. This is leading to a mismatch between
+the producer CB and consumer CS bits.
+
+This issue can be reproduced by setting the transmission data size to
+exceed one chunk. By the way, in the EDMA using the same "PCS-CCS-CB-TCB"
+mechanism, the CS bit is only initialized once and this issue was not
+found. Refer to
+drivers/dma/dw-edma/dw-edma-v0-core.c:dw_edma_v0_core_start.
+
+So fix this issue by initializing the CYCLE_STATE and CYCLE_BIT bits
+only once.
+
+Fixes: e74c39573d35 ("dmaengine: dw-edma: Add support for native HDMA")
+Signed-off-by: LUO Haowen <luo-hw@foxmail.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/tencent_CB11AA9F3920C1911AF7477A9BD8EFE0AD05@qq.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw-edma/dw-hdma-v0-core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index e3f8db4fe909a..ce8f7254bab21 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -252,10 +252,10 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
++              /* Set consumer cycle */
++              SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
++                      HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       }
+-      /* Set consumer cycle */
+-      SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+-                HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       dw_hdma_v0_sync_ll_data(chunk);
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch b/queue-6.19/dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
new file mode 100644 (file)
index 0000000..fbd3aa5
--- /dev/null
@@ -0,0 +1,57 @@
+From 37714f05c617a45f7e37f8779f8971afbab769c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:28 -0800
+Subject: dmaengine: idxd: Fix crash when the event log is disabled
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 52d2edea0d63c935e82631e4b9e4a94eccf97b5b ]
+
+If reporting errors to the event log is not supported by the hardware,
+and an error that causes Function Level Reset (FLR) is received, the
+driver will try to restore the event log even if it was not allocated.
+
+Also, only try to free the event log if it was properly allocated.
+
+Fixes: 6078a315aec1 ("dmaengine: idxd: Add idxd_device_config_save() and idxd_device_config_restore() helpers")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-2-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +++
+ drivers/dma/idxd/init.c   | 3 ++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index c2cdf41b6e576..f9e49c5545f65 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -830,6 +830,9 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl = idxd->evl;
++      if (!evl)
++              return;
++
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       if (!gencfg.evl_en)
+               return;
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 2acc34b3daff8..449424242631d 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -962,7 +962,8 @@ static void idxd_device_config_restore(struct idxd_device *idxd,
+       idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+-      idxd->evl->size = saved_evl->size;
++      if (idxd->evl)
++              idxd->evl->size = saved_evl->size;
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *saved_group, *group;
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-6.19/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..1de8eeb
--- /dev/null
@@ -0,0 +1,60 @@
+From 870aefddf79a4588aa33edb69df66648d419bb7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 7e4715f927732..4105688cf3f06 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -158,11 +158,7 @@ static const struct device_type idxd_cdev_file_type = {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -582,11 +578,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-leaking-event-log-memory.patch b/queue-6.19/dmaengine-idxd-fix-leaking-event-log-memory.patch
new file mode 100644 (file)
index 0000000..aecda9a
--- /dev/null
@@ -0,0 +1,46 @@
+From c5174bc3390d752b9351b4106673f446a4ee2cc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:36 -0800
+Subject: dmaengine: idxd: Fix leaking event log memory
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit ee66bc29578391c9b48523dc9119af67bd5c7c0f ]
+
+During the device remove process, the device is reset, causing the
+configuration registers to go back to their default state, which is
+zero. As the driver is checking if the event log support was enabled
+before deallocating, it will fail if a reset happened before.
+
+Do not check if the support was enabled, the check for 'idxd->evl'
+being valid (only allocated if the HW capability is available) is
+enough.
+
+Fixes: 244da66cda35 ("dmaengine: idxd: setup event log configuration")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-10-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index a7ecc17442354..4013f970cb3b2 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -833,10 +833,6 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+       if (!evl)
+               return;
+-      gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+-      if (!gencfg.evl_en)
+-              return;
+-
+       mutex_lock(&evl->lock);
+       gencfg.evl_en = 0;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch b/queue-6.19/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
new file mode 100644 (file)
index 0000000..6ebd760
--- /dev/null
@@ -0,0 +1,56 @@
+From f24e3665ab00755b7a5bae42cb1de811f7b1d362 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:34 -0800
+Subject: dmaengine: idxd: Fix memory leak when a wq is reset
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d9cfb5193a047a92a4d3c0e91ea4cc87c8f7c478 ]
+
+idxd_wq_disable_cleanup() which is called from the reset path for a
+workqueue, sets the wq type to NONE, which for other parts of the
+driver mean that the wq is empty (all its resources were released).
+
+Only set the wq type to NONE after its resources are released.
+
+Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-8-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index f9e49c5545f65..a7ecc17442354 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -175,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_queue_free(&wq->sbq);
++      wq->type = IDXD_WQT_NONE;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
+@@ -382,7 +383,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       lockdep_assert_held(&wq->wq_lock);
+       wq->state = IDXD_WQ_DISABLED;
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+-      wq->type = IDXD_WQT_NONE;
+       wq->threshold = 0;
+       wq->priority = 0;
+       wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+@@ -1531,7 +1531,6 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
+       idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
+       percpu_ref_exit(&wq->wq_active);
+-      wq->type = IDXD_WQT_NONE;
+       wq->client_count = 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-6.19/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..030dae4
--- /dev/null
@@ -0,0 +1,37 @@
+From 939e0263809c5499d43dfe8f95a2d790e711f643 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with an DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 9f0701021af0e..cdd7a59140d90 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1812,6 +1812,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       bitmap_free(idxd->wq_enable_map);
+       kfree(idxd->wqs);
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch b/queue-6.19/dmaengine-idxd-fix-possible-invalid-memory-access-af.patch
new file mode 100644 (file)
index 0000000..c349ff3
--- /dev/null
@@ -0,0 +1,41 @@
+From 5cea64d34f00870a768555c760aa4218c6b9ea8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:29 -0800
+Subject: dmaengine: idxd: Fix possible invalid memory access after FLR
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d6077df7b75d26e4edf98983836c05d00ebabd8d ]
+
+In the case that the first Function Level Reset (FLR) concludes
+correctly, but in the second FLR the scratch area for the saved
+configuration cannot be allocated, it's possible for a invalid memory
+access to happen.
+
+Always set the deallocated scratch area to NULL after FLR completes.
+
+Fixes: 98d187a98903 ("dmaengine: idxd: Enable Function Level Reset (FLR) for halt")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-3-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 449424242631d..f2b37c63a964c 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -1137,6 +1137,7 @@ static void idxd_reset_done(struct pci_dev *pdev)
+       }
+ out:
+       kfree(idxd->idxd_saved);
++      idxd->idxd_saved = NULL;
+ }
+ static const struct pci_error_handlers idxd_error_handler = {
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch b/queue-6.19/dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
new file mode 100644 (file)
index 0000000..81371e6
--- /dev/null
@@ -0,0 +1,43 @@
+From 907400468a3ef848ad0df6aed4c0a73391d88ac4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 11:24:28 +0800
+Subject: dmaengine: idxd: fix possible wrong descriptor completion in
+ llist_abort_desc()
+
+From: Tuo Li <islituo@gmail.com>
+
+[ Upstream commit e1c9866173c5f8521f2d0768547a01508cb9ff27 ]
+
+At the end of this function, d is the traversal cursor of flist, but the
+code completes found instead. This can lead to issues such as NULL pointer
+dereferences, double completion, or descriptor leaks.
+
+Fix this by completing d instead of found in the final
+list_for_each_entry_safe() loop.
+
+Fixes: aa8d18becc0c ("dmaengine: idxd: add callback support for iaa crypto")
+Signed-off-by: Tuo Li <islituo@gmail.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://patch.msgid.link/20260106032428.162445-1-islituo@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/submit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index 6db1c5fcedc58..03217041b8b3e 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -138,7 +138,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+        */
+       list_for_each_entry_safe(d, t, &flist, list) {
+               list_del_init(&d->list);
+-              idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
++              idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true,
+                                     NULL, NULL);
+       }
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch b/queue-6.19/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
new file mode 100644 (file)
index 0000000..ea33987
--- /dev/null
@@ -0,0 +1,41 @@
+From 3a5b8584cbc2617b77b5ac796751ac283ac8eb4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 08:13:08 +0200
+Subject: dmaengine: xilinx: xdma: Fix regmap init error handling
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit e0adbf74e2a0455a6bc9628726ba87bcd0b42bf8 ]
+
+devm_regmap_init_mmio returns an ERR_PTR() upon error, not NULL.
+Fix the error check and also fix the error message. Use the error code
+from ERR_PTR() instead of the wrong value in ret.
+
+Fixes: 17ce252266c7 ("dmaengine: xilinx: xdma: Add xilinx xdma driver")
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20251014061309.283468-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index 5ecf8223c112e..58e01e22b9765 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -1236,8 +1236,8 @@ static int xdma_probe(struct platform_device *pdev)
+       xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+                                          &xdma_regmap_config);
+-      if (!xdev->rmap) {
+-              xdma_err(xdev, "config regmap failed: %d", ret);
++      if (IS_ERR(xdev->rmap)) {
++              xdma_err(xdev, "config regmap failed: %pe", xdev->rmap);
+               goto failed;
+       }
+       INIT_LIST_HEAD(&xdev->dma_dev.channels);
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..fe5aa33
--- /dev/null
@@ -0,0 +1,38 @@
+From 1c558696e18fd8a4ff07b59b4316c73568d77407 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 89a8254d9cdc6..e6d10079ec670 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -3003,7 +3003,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = of_irq_get(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..6f12e59
--- /dev/null
@@ -0,0 +1,75 @@
+From 81799f7d6d9dcd5a06fcf2f75ef0a179f7d69f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index e6d10079ec670..ccfcc2b801f82 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1546,8 +1546,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-6.19/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..1465ff7
--- /dev/null
@@ -0,0 +1,62 @@
+From 24230474994f543714dfe5c9894cc54d8a576b12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ccfcc2b801f82..7b24d0a18ea53 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -997,16 +997,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -1014,8 +1014,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.19/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-6.19/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..e398e74
--- /dev/null
@@ -0,0 +1,98 @@
+From 597f83f0a361ffcda9d9e0f76d3eca3998319bca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead  to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 7b24d0a18ea53..7dec5e6babe14 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1217,14 +1217,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1594,6 +1586,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-6.19/ext4-fix-mballoc-test.c-is-not-compiled-when-ext4_ku.patch b/queue-6.19/ext4-fix-mballoc-test.c-is-not-compiled-when-ext4_ku.patch
new file mode 100644 (file)
index 0000000..da2b4dd
--- /dev/null
@@ -0,0 +1,509 @@
+From 62f9ec7f9d3bc8e16bc2d7fe615d0c95307004b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Mar 2026 15:52:57 +0800
+Subject: ext4: fix mballoc-test.c is not compiled when EXT4_KUNIT_TESTS=M
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 519b76ac0b31d86b45784735d4ef964e8efdc56b ]
+
+Now, only EXT4_KUNIT_TESTS=Y testcase will be compiled in 'mballoc.c'.
+To solve this issue, the ext4 test code needs to be decoupled. The ext4
+test module is compiled into a separate module.
+
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Closes: https://patchwork.kernel.org/project/cifs-client/patch/20260118091313.1988168-2-chenxiaosong.chenxiaosong@linux.dev/
+Fixes: 7c9fa399a369 ("ext4: add first unit test for ext4_mb_new_blocks_simple in mballoc")
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260314075258.1317579-3-yebin@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/Makefile       |   4 +-
+ fs/ext4/mballoc-test.c |  81 ++++++++++++++++----------------
+ fs/ext4/mballoc.c      | 102 +++++++++++++++++++++++++++++++++++++++--
+ fs/ext4/mballoc.h      |  30 ++++++++++++
+ 4 files changed, 172 insertions(+), 45 deletions(-)
+
+diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
+index 72206a2926765..d836c3fe311b5 100644
+--- a/fs/ext4/Makefile
++++ b/fs/ext4/Makefile
+@@ -14,7 +14,7 @@ ext4-y       := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
+ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)      += acl.o
+ ext4-$(CONFIG_EXT4_FS_SECURITY)               += xattr_security.o
+-ext4-inode-test-objs                  += inode-test.o
+-obj-$(CONFIG_EXT4_KUNIT_TESTS)                += ext4-inode-test.o
++ext4-test-objs                                += inode-test.o mballoc-test.o
++obj-$(CONFIG_EXT4_KUNIT_TESTS)                += ext4-test.o
+ ext4-$(CONFIG_FS_VERITY)              += verity.o
+ ext4-$(CONFIG_FS_ENCRYPTION)          += crypto.o
+diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
+index 4abb40d4561ce..749ed2fc22415 100644
+--- a/fs/ext4/mballoc-test.c
++++ b/fs/ext4/mballoc-test.c
+@@ -8,6 +8,7 @@
+ #include <linux/random.h>
+ #include "ext4.h"
++#include "mballoc.h"
+ struct mbt_grp_ctx {
+       struct buffer_head bitmap_bh;
+@@ -337,7 +338,7 @@ ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
+       if (state)
+               mb_set_bits(bitmap_bh->b_data, blkoff, len);
+       else
+-              mb_clear_bits(bitmap_bh->b_data, blkoff, len);
++              mb_clear_bits_test(bitmap_bh->b_data, blkoff, len);
+       return 0;
+ }
+@@ -414,14 +415,14 @@ static void test_new_blocks_simple(struct kunit *test)
+       /* get block at goal */
+       ar.goal = ext4_group_first_block_no(sb, goal_group);
+-      found = ext4_mb_new_blocks_simple(&ar, &err);
++      found = ext4_mb_new_blocks_simple_test(&ar, &err);
+       KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
+               "failed to alloc block at goal, expected %llu found %llu",
+               ar.goal, found);
+       /* get block after goal in goal group */
+       ar.goal = ext4_group_first_block_no(sb, goal_group);
+-      found = ext4_mb_new_blocks_simple(&ar, &err);
++      found = ext4_mb_new_blocks_simple_test(&ar, &err);
+       KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
+               "failed to alloc block after goal in goal group, expected %llu found %llu",
+               ar.goal + 1, found);
+@@ -429,7 +430,7 @@ static void test_new_blocks_simple(struct kunit *test)
+       /* get block after goal group */
+       mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+       ar.goal = ext4_group_first_block_no(sb, goal_group);
+-      found = ext4_mb_new_blocks_simple(&ar, &err);
++      found = ext4_mb_new_blocks_simple_test(&ar, &err);
+       KUNIT_ASSERT_EQ_MSG(test,
+               ext4_group_first_block_no(sb, goal_group + 1), found,
+               "failed to alloc block after goal group, expected %llu found %llu",
+@@ -439,7 +440,7 @@ static void test_new_blocks_simple(struct kunit *test)
+       for (i = goal_group; i < ext4_get_groups_count(sb); i++)
+               mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+       ar.goal = ext4_group_first_block_no(sb, goal_group);
+-      found = ext4_mb_new_blocks_simple(&ar, &err);
++      found = ext4_mb_new_blocks_simple_test(&ar, &err);
+       KUNIT_ASSERT_EQ_MSG(test,
+               ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
+               "failed to alloc block before goal group, expected %llu found %llu",
+@@ -449,7 +450,7 @@ static void test_new_blocks_simple(struct kunit *test)
+       for (i = 0; i < ext4_get_groups_count(sb); i++)
+               mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+       ar.goal = ext4_group_first_block_no(sb, goal_group);
+-      found = ext4_mb_new_blocks_simple(&ar, &err);
++      found = ext4_mb_new_blocks_simple_test(&ar, &err);
+       KUNIT_ASSERT_NE_MSG(test, err, 0,
+               "unexpectedly get block when no block is available");
+ }
+@@ -493,16 +494,16 @@ validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
+                       continue;
+               bitmap = mbt_ctx_bitmap(sb, i);
+-              bit = mb_find_next_zero_bit(bitmap, max, 0);
++              bit = mb_find_next_zero_bit_test(bitmap, max, 0);
+               KUNIT_ASSERT_EQ_MSG(test, bit, max,
+                                   "free block on unexpected group %d", i);
+       }
+       bitmap = mbt_ctx_bitmap(sb, goal_group);
+-      bit = mb_find_next_zero_bit(bitmap, max, 0);
++      bit = mb_find_next_zero_bit_test(bitmap, max, 0);
+       KUNIT_ASSERT_EQ(test, bit, start);
+-      bit = mb_find_next_bit(bitmap, max, bit + 1);
++      bit = mb_find_next_bit_test(bitmap, max, bit + 1);
+       KUNIT_ASSERT_EQ(test, bit, start + len);
+ }
+@@ -525,7 +526,7 @@ test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
+       block = ext4_group_first_block_no(sb, goal_group) +
+               EXT4_C2B(sbi, start);
+-      ext4_free_blocks_simple(inode, block, len);
++      ext4_free_blocks_simple_test(inode, block, len);
+       validate_free_blocks_simple(test, sb, goal_group, start, len);
+       mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+ }
+@@ -567,15 +568,15 @@ test_mark_diskspace_used_range(struct kunit *test,
+       bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
+       memset(bitmap, 0, sb->s_blocksize);
+-      ret = ext4_mb_mark_diskspace_used(ac, NULL);
++      ret = ext4_mb_mark_diskspace_used_test(ac, NULL);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       max = EXT4_CLUSTERS_PER_GROUP(sb);
+-      i = mb_find_next_bit(bitmap, max, 0);
++      i = mb_find_next_bit_test(bitmap, max, 0);
+       KUNIT_ASSERT_EQ(test, i, start);
+-      i = mb_find_next_zero_bit(bitmap, max, i + 1);
++      i = mb_find_next_zero_bit_test(bitmap, max, i + 1);
+       KUNIT_ASSERT_EQ(test, i, start + len);
+-      i = mb_find_next_bit(bitmap, max, i + 1);
++      i = mb_find_next_bit_test(bitmap, max, i + 1);
+       KUNIT_ASSERT_EQ(test, max, i);
+ }
+@@ -618,54 +619,54 @@ static void mbt_generate_buddy(struct super_block *sb, void *buddy,
+       max = EXT4_CLUSTERS_PER_GROUP(sb);
+       bb_h = buddy + sbi->s_mb_offsets[1];
+-      off = mb_find_next_zero_bit(bb, max, 0);
++      off = mb_find_next_zero_bit_test(bb, max, 0);
+       grp->bb_first_free = off;
+       while (off < max) {
+               grp->bb_counters[0]++;
+               grp->bb_free++;
+-              if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
++              if (!(off & 1) && !mb_test_bit_test(off + 1, bb)) {
+                       grp->bb_free++;
+                       grp->bb_counters[0]--;
+-                      mb_clear_bit(off >> 1, bb_h);
++                      mb_clear_bit_test(off >> 1, bb_h);
+                       grp->bb_counters[1]++;
+                       grp->bb_largest_free_order = 1;
+                       off++;
+               }
+-              off = mb_find_next_zero_bit(bb, max, off + 1);
++              off = mb_find_next_zero_bit_test(bb, max, off + 1);
+       }
+       for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
+               bb = buddy + sbi->s_mb_offsets[order];
+               bb_h = buddy + sbi->s_mb_offsets[order + 1];
+               max = max >> 1;
+-              off = mb_find_next_zero_bit(bb, max, 0);
++              off = mb_find_next_zero_bit_test(bb, max, 0);
+               while (off < max) {
+-                      if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
++                      if (!(off & 1) && !mb_test_bit_test(off + 1, bb)) {
+                               mb_set_bits(bb, off, 2);
+                               grp->bb_counters[order] -= 2;
+-                              mb_clear_bit(off >> 1, bb_h);
++                              mb_clear_bit_test(off >> 1, bb_h);
+                               grp->bb_counters[order + 1]++;
+                               grp->bb_largest_free_order = order + 1;
+                               off++;
+                       }
+-                      off = mb_find_next_zero_bit(bb, max, off + 1);
++                      off = mb_find_next_zero_bit_test(bb, max, off + 1);
+               }
+       }
+       max = EXT4_CLUSTERS_PER_GROUP(sb);
+-      off = mb_find_next_zero_bit(bitmap, max, 0);
++      off = mb_find_next_zero_bit_test(bitmap, max, 0);
+       while (off < max) {
+               grp->bb_fragments++;
+-              off = mb_find_next_bit(bitmap, max, off + 1);
++              off = mb_find_next_bit_test(bitmap, max, off + 1);
+               if (off + 1 >= max)
+                       break;
+-              off = mb_find_next_zero_bit(bitmap, max, off + 1);
++              off = mb_find_next_zero_bit_test(bitmap, max, off + 1);
+       }
+ }
+@@ -707,7 +708,7 @@ do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
+       /* needed by validation in ext4_mb_generate_buddy */
+       ext4_grp->bb_free = mbt_grp->bb_free;
+       memset(ext4_buddy, 0xff, sb->s_blocksize);
+-      ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
++      ext4_mb_generate_buddy_test(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
+                              ext4_grp);
+       KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
+@@ -761,7 +762,7 @@ test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
+       ex.fe_group = TEST_GOAL_GROUP;
+       ext4_lock_group(sb, TEST_GOAL_GROUP);
+-      mb_mark_used(e4b, &ex);
++      mb_mark_used_test(e4b, &ex);
+       ext4_unlock_group(sb, TEST_GOAL_GROUP);
+       mb_set_bits(bitmap, start, len);
+@@ -770,7 +771,7 @@ test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
+       memset(buddy, 0xff, sb->s_blocksize);
+       for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+               grp->bb_counters[i] = 0;
+-      ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
++      ext4_mb_generate_buddy_test(sb, buddy, bitmap, 0, grp);
+       KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+                       0);
+@@ -799,7 +800,7 @@ static void test_mb_mark_used(struct kunit *test)
+                               bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
+-      ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
++      ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
+@@ -810,7 +811,7 @@ static void test_mb_mark_used(struct kunit *test)
+               test_mb_mark_used_range(test, &e4b, ranges[i].start,
+                                       ranges[i].len, bitmap, buddy, grp);
+-      ext4_mb_unload_buddy(&e4b);
++      ext4_mb_unload_buddy_test(&e4b);
+ }
+ static void
+@@ -826,16 +827,16 @@ test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
+               return;
+       ext4_lock_group(sb, e4b->bd_group);
+-      mb_free_blocks(NULL, e4b, start, len);
++      mb_free_blocks_test(NULL, e4b, start, len);
+       ext4_unlock_group(sb, e4b->bd_group);
+-      mb_clear_bits(bitmap, start, len);
++      mb_clear_bits_test(bitmap, start, len);
+       /* bypass bb_free validatoin in ext4_mb_generate_buddy */
+       grp->bb_free += len;
+       memset(buddy, 0xff, sb->s_blocksize);
+       for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+               grp->bb_counters[i] = 0;
+-      ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
++      ext4_mb_generate_buddy_test(sb, buddy, bitmap, 0, grp);
+       KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+                       0);
+@@ -866,7 +867,7 @@ static void test_mb_free_blocks(struct kunit *test)
+                               bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
+-      ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
++      ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       ex.fe_start = 0;
+@@ -874,7 +875,7 @@ static void test_mb_free_blocks(struct kunit *test)
+       ex.fe_group = TEST_GOAL_GROUP;
+       ext4_lock_group(sb, TEST_GOAL_GROUP);
+-      mb_mark_used(&e4b, &ex);
++      mb_mark_used_test(&e4b, &ex);
+       ext4_unlock_group(sb, TEST_GOAL_GROUP);
+       grp->bb_free = 0;
+@@ -887,7 +888,7 @@ static void test_mb_free_blocks(struct kunit *test)
+               test_mb_free_blocks_range(test, &e4b, ranges[i].start,
+                                         ranges[i].len, bitmap, buddy, grp);
+-      ext4_mb_unload_buddy(&e4b);
++      ext4_mb_unload_buddy_test(&e4b);
+ }
+ #define COUNT_FOR_ESTIMATE 100000
+@@ -905,7 +906,7 @@ static void test_mb_mark_used_cost(struct kunit *test)
+       if (sb->s_blocksize > PAGE_SIZE)
+               kunit_skip(test, "blocksize exceeds pagesize");
+-      ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
++      ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       ex.fe_group = TEST_GOAL_GROUP;
+@@ -919,7 +920,7 @@ static void test_mb_mark_used_cost(struct kunit *test)
+                       ex.fe_start = ranges[i].start;
+                       ex.fe_len = ranges[i].len;
+                       ext4_lock_group(sb, TEST_GOAL_GROUP);
+-                      mb_mark_used(&e4b, &ex);
++                      mb_mark_used_test(&e4b, &ex);
+                       ext4_unlock_group(sb, TEST_GOAL_GROUP);
+               }
+               end = jiffies;
+@@ -930,14 +931,14 @@ static void test_mb_mark_used_cost(struct kunit *test)
+                               continue;
+                       ext4_lock_group(sb, TEST_GOAL_GROUP);
+-                      mb_free_blocks(NULL, &e4b, ranges[i].start,
++                      mb_free_blocks_test(NULL, &e4b, ranges[i].start,
+                                      ranges[i].len);
+                       ext4_unlock_group(sb, TEST_GOAL_GROUP);
+               }
+       }
+       kunit_info(test, "costed jiffies %lu\n", all);
+-      ext4_mb_unload_buddy(&e4b);
++      ext4_mb_unload_buddy_test(&e4b);
+ }
+ static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9e01195a73488..88dcf218f456a 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4086,7 +4086,7 @@ void ext4_exit_mballoc(void)
+ #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
+ #define EXT4_MB_SYNC_UPDATE 0x0002
+-static int
++int
+ ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
+                    ext4_group_t group, ext4_grpblk_t blkoff,
+                    ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
+@@ -7191,6 +7191,102 @@ ext4_mballoc_query_range(
+       return error;
+ }
+-#ifdef CONFIG_EXT4_KUNIT_TESTS
+-#include "mballoc-test.c"
++#if IS_ENABLED(CONFIG_EXT4_KUNIT_TESTS)
++void mb_clear_bits_test(void *bm, int cur, int len)
++{
++       mb_clear_bits(bm, cur, len);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_clear_bits_test);
++
++ext4_fsblk_t
++ext4_mb_new_blocks_simple_test(struct ext4_allocation_request *ar,
++                             int *errp)
++{
++      return ext4_mb_new_blocks_simple(ar, errp);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_new_blocks_simple_test);
++
++int mb_find_next_zero_bit_test(void *addr, int max, int start)
++{
++      return mb_find_next_zero_bit(addr, max, start);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_find_next_zero_bit_test);
++
++int mb_find_next_bit_test(void *addr, int max, int start)
++{
++      return mb_find_next_bit(addr, max, start);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_find_next_bit_test);
++
++void mb_clear_bit_test(int bit, void *addr)
++{
++      mb_clear_bit(bit, addr);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_clear_bit_test);
++
++int mb_test_bit_test(int bit, void *addr)
++{
++      return mb_test_bit(bit, addr);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_test_bit_test);
++
++int ext4_mb_mark_diskspace_used_test(struct ext4_allocation_context *ac,
++                                   handle_t *handle)
++{
++      return ext4_mb_mark_diskspace_used(ac, handle);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_mark_diskspace_used_test);
++
++int mb_mark_used_test(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
++{
++      return mb_mark_used(e4b, ex);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_mark_used_test);
++
++void ext4_mb_generate_buddy_test(struct super_block *sb, void *buddy,
++                               void *bitmap, ext4_group_t group,
++                               struct ext4_group_info *grp)
++{
++      ext4_mb_generate_buddy(sb, buddy, bitmap, group, grp);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_generate_buddy_test);
++
++int ext4_mb_load_buddy_test(struct super_block *sb, ext4_group_t group,
++                          struct ext4_buddy *e4b)
++{
++      return ext4_mb_load_buddy(sb, group, e4b);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_load_buddy_test);
++
++void ext4_mb_unload_buddy_test(struct ext4_buddy *e4b)
++{
++      ext4_mb_unload_buddy(e4b);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_unload_buddy_test);
++
++void mb_free_blocks_test(struct inode *inode, struct ext4_buddy *e4b,
++                       int first, int count)
++{
++      mb_free_blocks(inode, e4b, first, count);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_free_blocks_test);
++
++void ext4_free_blocks_simple_test(struct inode *inode, ext4_fsblk_t block,
++                                unsigned long count)
++{
++      return ext4_free_blocks_simple(inode, block, count);
++}
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_free_blocks_simple_test);
++
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_wait_block_bitmap);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_init);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_get_group_desc);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_count_free_clusters);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_get_group_info);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_free_group_clusters_set);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_release);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_read_block_bitmap_nowait);
++EXPORT_SYMBOL_FOR_EXT4_TEST(mb_set_bits);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_fc_init_inode);
++EXPORT_SYMBOL_FOR_EXT4_TEST(ext4_mb_mark_context);
+ #endif
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 15a049f05d04a..39333ce72cbd5 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -270,4 +270,34 @@ ext4_mballoc_query_range(
+       ext4_mballoc_query_range_fn     formatter,
+       void                            *priv);
++extern int ext4_mb_mark_context(handle_t *handle,
++              struct super_block *sb, bool state,
++              ext4_group_t group, ext4_grpblk_t blkoff,
++              ext4_grpblk_t len, int flags,
++              ext4_grpblk_t *ret_changed);
++#if IS_ENABLED(CONFIG_EXT4_KUNIT_TESTS)
++extern void mb_clear_bits_test(void *bm, int cur, int len);
++extern ext4_fsblk_t
++ext4_mb_new_blocks_simple_test(struct ext4_allocation_request *ar,
++                             int *errp);
++extern int mb_find_next_zero_bit_test(void *addr, int max, int start);
++extern int mb_find_next_bit_test(void *addr, int max, int start);
++extern void mb_clear_bit_test(int bit, void *addr);
++extern int mb_test_bit_test(int bit, void *addr);
++extern int
++ext4_mb_mark_diskspace_used_test(struct ext4_allocation_context *ac,
++                               handle_t *handle);
++extern int mb_mark_used_test(struct ext4_buddy *e4b,
++                           struct ext4_free_extent *ex);
++extern void ext4_mb_generate_buddy_test(struct super_block *sb,
++              void *buddy, void *bitmap, ext4_group_t group,
++              struct ext4_group_info *grp);
++extern int ext4_mb_load_buddy_test(struct super_block *sb,
++              ext4_group_t group, struct ext4_buddy *e4b);
++extern void ext4_mb_unload_buddy_test(struct ext4_buddy *e4b);
++extern void mb_free_blocks_test(struct inode *inode,
++              struct ext4_buddy *e4b, int first, int count);
++extern void ext4_free_blocks_simple_test(struct inode *inode,
++              ext4_fsblk_t block, unsigned long count);
++#endif
+ #endif
+-- 
+2.53.0
+
diff --git a/queue-6.19/ext4-introduce-export_symbol_for_ext4_test-helper.patch b/queue-6.19/ext4-introduce-export_symbol_for_ext4_test-helper.patch
new file mode 100644 (file)
index 0000000..d732e10
--- /dev/null
@@ -0,0 +1,40 @@
+From cd921eb047b4723f412697132fd6082165f83a45 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Mar 2026 15:52:56 +0800
+Subject: ext4: introduce EXPORT_SYMBOL_FOR_EXT4_TEST() helper
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 49504a512587147dd6da3b4b08832ccc157b97dc ]
+
+Introduce EXPORT_SYMBOL_FOR_EXT4_TEST() helper for kuint test.
+
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260314075258.1317579-2-yebin@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 519b76ac0b31 ("ext4: fix mballoc-test.c is not compiled when EXT4_KUNIT_TESTS=M")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index d4a98ff58076f..f1c476303f3a9 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3953,6 +3953,11 @@ static inline bool ext4_inode_can_atomic_write(struct inode *inode)
+ extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+                                 loff_t pos, unsigned len,
+                                 get_block_t *get_block);
++
++#if IS_ENABLED(CONFIG_EXT4_KUNIT_TESTS)
++#define EXPORT_SYMBOL_FOR_EXT4_TEST(sym) \
++      EXPORT_SYMBOL_FOR_MODULES(sym, "ext4-test")
++#endif
+ #endif        /* __KERNEL__ */
+ #define EFSBADCRC     EBADMSG         /* Bad CRC detected */
+-- 
+2.53.0
+
diff --git a/queue-6.19/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch b/queue-6.19/futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch
new file mode 100644 (file)
index 0000000..800e1d6
--- /dev/null
@@ -0,0 +1,100 @@
+From 70ca29181b1656f333a5123f862efc96c00990fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Mar 2026 20:47:56 +0800
+Subject: futex: Fix UaF between futex_key_to_node_opt() and
+ vma_replace_policy()
+
+From: Hao-Yu Yang <naup96721@gmail.com>
+
+[ Upstream commit 190a8c48ff623c3d67cb295b4536a660db2012aa ]
+
+During futex_key_to_node_opt() execution, vma->vm_policy is read under
+speculative mmap lock and RCU. Concurrently, mbind() may call
+vma_replace_policy() which frees the old mempolicy immediately via
+kmem_cache_free().
+
+This creates a race where __futex_key_to_node() dereferences a freed
+mempolicy pointer, causing a use-after-free read of mpol->mode.
+
+[  151.412631] BUG: KASAN: slab-use-after-free in __futex_key_to_node (kernel/futex/core.c:349)
+[  151.414046] Read of size 2 at addr ffff888001c49634 by task e/87
+
+[  151.415969] Call Trace:
+
+[  151.416732]  __asan_load2 (mm/kasan/generic.c:271)
+[  151.416777]  __futex_key_to_node (kernel/futex/core.c:349)
+[  151.416822]  get_futex_key (kernel/futex/core.c:374 kernel/futex/core.c:386 kernel/futex/core.c:593)
+
+Fix by adding rcu to __mpol_put().
+
+Fixes: c042c505210d ("futex: Implement FUTEX2_MPOL")
+Reported-by: Hao-Yu Yang <naup96721@gmail.com>
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Hao-Yu Yang <naup96721@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Link: https://patch.msgid.link/20260324174418.GB1850007@noisy.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mempolicy.h |  1 +
+ kernel/futex/core.c       |  2 +-
+ mm/mempolicy.c            | 10 ++++++++--
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 0fe96f3ab3ef0..65c732d440d2f 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -55,6 +55,7 @@ struct mempolicy {
+               nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+               nodemask_t user_nodemask;       /* nodemask passed by user */
+       } w;
++      struct rcu_head rcu;
+ };
+ /*
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index cf7e610eac429..31e83a09789e0 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -342,7 +342,7 @@ static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
+       if (!vma)
+               return FUTEX_NO_NODE;
+-      mpol = vma_policy(vma);
++      mpol = READ_ONCE(vma->vm_policy);
+       if (!mpol)
+               return FUTEX_NO_NODE;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 68a98ba578821..74ebf38a7db1a 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -488,7 +488,13 @@ void __mpol_put(struct mempolicy *pol)
+ {
+       if (!atomic_dec_and_test(&pol->refcnt))
+               return;
+-      kmem_cache_free(policy_cache, pol);
++      /*
++       * Required to allow mmap_lock_speculative*() access, see for example
++       * futex_key_to_node_opt(). All accesses are serialized by mmap_lock,
++       * however the speculative lock section unbound by the normal lock
++       * boundaries, requiring RCU freeing.
++       */
++      kfree_rcu(pol, rcu);
+ }
+ EXPORT_SYMBOL_FOR_MODULES(__mpol_put, "kvm");
+@@ -1021,7 +1027,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
+       }
+       old = vma->vm_policy;
+-      vma->vm_policy = new; /* protected by mmap_lock */
++      WRITE_ONCE(vma->vm_policy, new); /* protected by mmap_lock */
+       mpol_put(old);
+       return 0;
+-- 
+2.53.0
+
diff --git a/queue-6.19/futex-require-sys_futex_requeue-to-have-identical-fl.patch b/queue-6.19/futex-require-sys_futex_requeue-to-have-identical-fl.patch
new file mode 100644 (file)
index 0000000..c92ce5c
--- /dev/null
@@ -0,0 +1,46 @@
+From 910220ee0e08119edddc56197cd67f5ffaf79b4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Mar 2026 13:35:53 +0100
+Subject: futex: Require sys_futex_requeue() to have identical flags
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 19f94b39058681dec64a10ebeb6f23fe7fc3f77a ]
+
+Nicholas reported that his LLM found it was possible to create a UaF
+when sys_futex_requeue() is used with different flags. The initial
+motivation for allowing different flags was the variable sized futex,
+but since that hasn't been merged (yet), simply mandate the flags are
+identical, as is the case for the old style sys_futex() requeue
+operations.
+
+Fixes: 0f4b5f972216 ("futex: Add sys_futex_requeue()")
+Reported-by: Nicholas Carlini <npc@anthropic.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/futex/syscalls.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
+index 880c9bf2f3150..99723189c8cf7 100644
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -459,6 +459,14 @@ SYSCALL_DEFINE4(futex_requeue,
+       if (ret)
+               return ret;
++      /*
++       * For now mandate both flags are identical, like the sys_futex()
++       * interface has. If/when we merge the variable sized futex support,
++       * that patch can modify this test to allow a difference in size.
++       */
++      if (futexes[0].w.flags != futexes[1].w.flags)
++              return -EINVAL;
++
+       cmpval = futexes[0].w.val;
+       return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags,
+-- 
+2.53.0
+
diff --git a/queue-6.19/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch b/queue-6.19/irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch
new file mode 100644 (file)
index 0000000..569b6f9
--- /dev/null
@@ -0,0 +1,39 @@
+From 2f462bb2c7e8c765e3855f17ba00fc5af0c5379b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Mar 2026 12:49:14 +0000
+Subject: irqchip/renesas-rzv2h: Fix error path in rzv2h_icu_probe_common()
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 897cf98926429c8671a9009442883c2f62deae96 ]
+
+Replace pm_runtime_put() with pm_runtime_put_sync() when
+irq_domain_create_hierarchy() fails to ensure the device suspends
+synchronously before devres cleanup disables runtime PM via
+pm_runtime_disable().
+
+Fixes: 5ec8cabc3b86 ("irqchip/renesas-rzv2h: Use devm_pm_runtime_enable()")
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260323124917.41602-1-biju.das.jz@bp.renesas.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzv2h.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
+index 9b487120f0113..85eb194dfe3b2 100644
+--- a/drivers/irqchip/irq-renesas-rzv2h.c
++++ b/drivers/irqchip/irq-renesas-rzv2h.c
+@@ -567,7 +567,7 @@ static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_no
+       return 0;
+ pm_put:
+-      pm_runtime_put(&pdev->dev);
++      pm_runtime_put_sync(&pdev->dev);
+       return ret;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.19/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch b/queue-6.19/netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
new file mode 100644 (file)
index 0000000..5b9bb06
--- /dev/null
@@ -0,0 +1,96 @@
+From 3976763e20a992733fda0ac87a8bc36e17122e07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Mar 2026 14:30:41 +0530
+Subject: netfs: Fix kernel BUG in netfs_limit_iter() for ITER_KVEC iterators
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit 67e467a11f62ff64ad219dc6aa5459e132c79d14 ]
+
+When a process crashes and the kernel writes a core dump to a 9P
+filesystem, __kernel_write() creates an ITER_KVEC iterator. This
+iterator reaches netfs_limit_iter() via netfs_unbuffered_write(), which
+only handles ITER_FOLIOQ, ITER_BVEC and ITER_XARRAY iterator types,
+hitting the BUG() for any other type.
+
+Fix this by adding netfs_limit_kvec() following the same pattern as
+netfs_limit_bvec(), since both kvec and bvec are simple segment arrays
+with pointer and length fields. Dispatch it from netfs_limit_iter() when
+the iterator type is ITER_KVEC.
+
+Fixes: cae932d3aee5 ("netfs: Add func to calculate pagecount/size-limited span of an iterator")
+Reported-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=9c058f0d63475adc97fd
+Tested-by: syzbot+9c058f0d63475adc97fd@syzkaller.appspotmail.com
+Signed-off-by: Deepanshu Kartikey <Kartikey406@gmail.com>
+Link: https://patch.msgid.link/20260307090041.359870-1-kartikey406@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/iterator.c | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
+index 72a435e5fc6da..154a14bb2d7f7 100644
+--- a/fs/netfs/iterator.c
++++ b/fs/netfs/iterator.c
+@@ -142,6 +142,47 @@ static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+       return min(span, max_size);
+ }
++/*
++ * Select the span of a kvec iterator we're going to use.  Limit it by both
++ * maximum size and maximum number of segments.  Returns the size of the span
++ * in bytes.
++ */
++static size_t netfs_limit_kvec(const struct iov_iter *iter, size_t start_offset,
++                             size_t max_size, size_t max_segs)
++{
++      const struct kvec *kvecs = iter->kvec;
++      unsigned int nkv = iter->nr_segs, ix = 0, nsegs = 0;
++      size_t len, span = 0, n = iter->count;
++      size_t skip = iter->iov_offset + start_offset;
++
++      if (WARN_ON(!iov_iter_is_kvec(iter)) ||
++          WARN_ON(start_offset > n) ||
++          n == 0)
++              return 0;
++
++      while (n && ix < nkv && skip) {
++              len = kvecs[ix].iov_len;
++              if (skip < len)
++                      break;
++              skip -= len;
++              n -= len;
++              ix++;
++      }
++
++      while (n && ix < nkv) {
++              len = min3(n, kvecs[ix].iov_len - skip, max_size);
++              span += len;
++              nsegs++;
++              ix++;
++              if (span >= max_size || nsegs >= max_segs)
++                      break;
++              skip = 0;
++              n -= len;
++      }
++
++      return min(span, max_size);
++}
++
+ /*
+  * Select the span of an xarray iterator we're going to use.  Limit it by both
+  * maximum size and maximum number of segments.  It is assumed that segments
+@@ -245,6 +286,8 @@ size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+               return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+       if (iov_iter_is_xarray(iter))
+               return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
++      if (iov_iter_is_kvec(iter))
++              return netfs_limit_kvec(iter, start_offset, max_size, max_segs);
+       BUG();
+ }
+ EXPORT_SYMBOL(netfs_limit_iter);
+-- 
+2.53.0
+
diff --git a/queue-6.19/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch b/queue-6.19/netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch
new file mode 100644 (file)
index 0000000..0abd935
--- /dev/null
@@ -0,0 +1,65 @@
+From 5f136a9f6351cea7b9c6b36cf647a389f1b2ca8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Mar 2026 10:09:47 +0530
+Subject: netfs: Fix NULL pointer dereference in netfs_unbuffered_write() on
+ retry
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit e9075e420a1eb3b52c60f3b95893a55e77419ce8 ]
+
+When a write subrequest is marked NETFS_SREQ_NEED_RETRY, the retry path
+in netfs_unbuffered_write() unconditionally calls stream->prepare_write()
+without checking if it is NULL.
+
+Filesystems such as 9P do not set the prepare_write operation, so
+stream->prepare_write remains NULL. When get_user_pages() fails with
+-EFAULT and the subrequest is flagged for retry, this results in a NULL
+pointer dereference at fs/netfs/direct_write.c:189.
+
+Fix this by mirroring the pattern already used in write_retry.c: if
+stream->prepare_write is NULL, skip renegotiation and directly reissue
+the subrequest via netfs_reissue_write(), which handles iterator reset,
+IN_PROGRESS flag, stats update and reissue internally.
+
+Fixes: a0b4c7a49137 ("netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence")
+Reported-by: syzbot+7227db0fbac9f348dba0@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=7227db0fbac9f348dba0
+Signed-off-by: Deepanshu Kartikey <Kartikey406@gmail.com>
+Link: https://patch.msgid.link/20260307043947.347092-1-kartikey406@gmail.com
+Tested-by: syzbot+7227db0fbac9f348dba0@syzkaller.appspotmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/direct_write.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index dd1451bf7543d..4d9760e36c119 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -186,10 +186,18 @@ static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+               stream->sreq_max_segs   = INT_MAX;
+               netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+-              stream->prepare_write(subreq);
+-              __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+-              netfs_stat(&netfs_n_wh_retry_write_subreq);
++              if (stream->prepare_write) {
++                      stream->prepare_write(subreq);
++                      __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
++                      netfs_stat(&netfs_n_wh_retry_write_subreq);
++              } else {
++                      struct iov_iter source;
++
++                      netfs_reset_iter(subreq);
++                      source = subreq->io_iter;
++                      netfs_reissue_write(stream, subreq, &source);
++              }
+       }
+       netfs_unbuffered_write_done(wreq);
+-- 
+2.53.0
+
diff --git a/queue-6.19/netfs-fix-read-abandonment-during-retry.patch b/queue-6.19/netfs-fix-read-abandonment-during-retry.patch
new file mode 100644 (file)
index 0000000..1ab45ff
--- /dev/null
@@ -0,0 +1,63 @@
+From df3beea4e5e846755bad3a0af9aa2a879f9ad69b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 15:38:58 +0000
+Subject: netfs: Fix read abandonment during retry
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 7e57523490cd2efb52b1ea97f2e0a74c0fb634cd ]
+
+Under certain circumstances, all the remaining subrequests from a read
+request will get abandoned during retry.  The abandonment process expects
+the 'subreq' variable to be set to the place to start abandonment from, but
+it doesn't always have a useful value (it will be uninitialised on the
+first pass through the loop and it may point to a deleted subrequest on
+later passes).
+
+Fix the first jump to "abandon:" to set subreq to the start of the first
+subrequest expected to need retry (which, in this abandonment case, turned
+out unexpectedly to no longer have NEED_RETRY set).
+
+Also clear the subreq pointer after discarding superfluous retryable
+subrequests to cause an oops if we do try to access it.
+
+Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/3775287.1773848338@warthog.procyon.org.uk
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: Paulo Alcantara <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/read_retry.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
+index 7793ba5e3e8fc..cca9ac43c0773 100644
+--- a/fs/netfs/read_retry.c
++++ b/fs/netfs/read_retry.c
+@@ -93,8 +93,10 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+                      from->start, from->transferred, from->len);
+               if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+-                  !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
++                  !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) {
++                      subreq = from;
+                       goto abandon;
++              }
+               list_for_each_continue(next, &stream->subrequests) {
+                       subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+@@ -178,6 +180,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+                               if (subreq == to)
+                                       break;
+                       }
++                      subreq = NULL;
+                       continue;
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.19/netfs-fix-the-handling-of-stream-front-by-removing-i.patch b/queue-6.19/netfs-fix-the-handling-of-stream-front-by-removing-i.patch
new file mode 100644 (file)
index 0000000..4c85357
--- /dev/null
@@ -0,0 +1,196 @@
+From 295a964a8a7782674ead6202cefce71ea1d2e78e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 08:20:17 +0000
+Subject: netfs: Fix the handling of stream->front by removing it
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0e764b9d46071668969410ec5429be0e2f38c6d3 ]
+
+The netfs_io_stream::front member is meant to point to the subrequest
+currently being collected on a stream, but it isn't actually used this way
+by direct write (which mostly ignores it).  However, there's a tracepoint
+which looks at it.  Further, stream->front is actually redundant with
+stream->subrequests.next.
+
+Fix the potential problem in the direct code by just removing the member
+and using stream->subrequests.next instead, thereby also simplifying the
+code.
+
+Fixes: a0b4c7a49137 ("netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence")
+Reported-by: Paulo Alcantara <pc@manguebit.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/4158599.1774426817@warthog.procyon.org.uk
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/buffered_read.c     | 3 +--
+ fs/netfs/direct_read.c       | 3 +--
+ fs/netfs/direct_write.c      | 1 -
+ fs/netfs/read_collect.c      | 4 ++--
+ fs/netfs/read_single.c       | 1 -
+ fs/netfs/write_collect.c     | 4 ++--
+ fs/netfs/write_issue.c       | 3 +--
+ include/linux/netfs.h        | 1 -
+ include/trace/events/netfs.h | 8 ++++----
+ 9 files changed, 11 insertions(+), 17 deletions(-)
+
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 37ab6f28b5ad0..88361e8c70961 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -171,9 +171,8 @@ static void netfs_queue_read(struct netfs_io_request *rreq,
+       spin_lock(&rreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-              stream->front = subreq;
+               if (!stream->active) {
+-                      stream->collected_to = stream->front->start;
++                      stream->collected_to = subreq->start;
+                       /* Store list pointers before active flag */
+                       smp_store_release(&stream->active, true);
+               }
+diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
+index a498ee8d66745..f72e6da88cca7 100644
+--- a/fs/netfs/direct_read.c
++++ b/fs/netfs/direct_read.c
+@@ -71,9 +71,8 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
+               spin_lock(&rreq->lock);
+               list_add_tail(&subreq->rreq_link, &stream->subrequests);
+               if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-                      stream->front = subreq;
+                       if (!stream->active) {
+-                              stream->collected_to = stream->front->start;
++                              stream->collected_to = subreq->start;
+                               /* Store list pointers before active flag */
+                               smp_store_release(&stream->active, true);
+                       }
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index 4d9760e36c119..f9ab69de3e298 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -111,7 +111,6 @@ static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+                       netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
+                       subreq = stream->construct;
+                       stream->construct = NULL;
+-                      stream->front = NULL;
+               }
+               /* Check if (re-)preparation failed. */
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index 137f0e28a44c5..e5f6665b3341e 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -205,7 +205,8 @@ static void netfs_collect_read_results(struct netfs_io_request *rreq)
+        * in progress.  The issuer thread may be adding stuff to the tail
+        * whilst we're doing this.
+        */
+-      front = READ_ONCE(stream->front);
++      front = list_first_entry_or_null(&stream->subrequests,
++                                       struct netfs_io_subrequest, rreq_link);
+       while (front) {
+               size_t transferred;
+@@ -301,7 +302,6 @@ static void netfs_collect_read_results(struct netfs_io_request *rreq)
+               list_del_init(&front->rreq_link);
+               front = list_first_entry_or_null(&stream->subrequests,
+                                                struct netfs_io_subrequest, rreq_link);
+-              stream->front = front;
+               spin_unlock(&rreq->lock);
+               netfs_put_subrequest(remove,
+                                    notes & ABANDON_SREQ ?
+diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
+index 8e6264f62a8f3..d0e23bc42445f 100644
+--- a/fs/netfs/read_single.c
++++ b/fs/netfs/read_single.c
+@@ -107,7 +107,6 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
+       spin_lock(&rreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+-      stream->front = subreq;
+       /* Store list pointers before active flag */
+       smp_store_release(&stream->active, true);
+       spin_unlock(&rreq->lock);
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 83eb3dc1adf8a..b194447f4b111 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -228,7 +228,8 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+               if (!smp_load_acquire(&stream->active))
+                       continue;
+-              front = stream->front;
++              front = list_first_entry_or_null(&stream->subrequests,
++                                               struct netfs_io_subrequest, rreq_link);
+               while (front) {
+                       trace_netfs_collect_sreq(wreq, front);
+                       //_debug("sreq [%x] %llx %zx/%zx",
+@@ -279,7 +280,6 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+                       list_del_init(&front->rreq_link);
+                       front = list_first_entry_or_null(&stream->subrequests,
+                                                        struct netfs_io_subrequest, rreq_link);
+-                      stream->front = front;
+                       spin_unlock(&wreq->lock);
+                       netfs_put_subrequest(remove,
+                                            notes & SAW_FAILURE ?
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index 437268f656409..2db688f941251 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -206,9 +206,8 @@ void netfs_prepare_write(struct netfs_io_request *wreq,
+       spin_lock(&wreq->lock);
+       list_add_tail(&subreq->rreq_link, &stream->subrequests);
+       if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+-              stream->front = subreq;
+               if (!stream->active) {
+-                      stream->collected_to = stream->front->start;
++                      stream->collected_to = subreq->start;
+                       /* Write list pointers before active flag */
+                       smp_store_release(&stream->active, true);
+               }
+diff --git a/include/linux/netfs.h b/include/linux/netfs.h
+index 72ee7d210a744..ba17ac5bf356a 100644
+--- a/include/linux/netfs.h
++++ b/include/linux/netfs.h
+@@ -140,7 +140,6 @@ struct netfs_io_stream {
+       void (*issue_write)(struct netfs_io_subrequest *subreq);
+       /* Collection tracking */
+       struct list_head        subrequests;    /* Contributory I/O operations */
+-      struct netfs_io_subrequest *front;      /* Op being collected */
+       unsigned long long      collected_to;   /* Position we've collected results to */
+       size_t                  transferred;    /* The amount transferred from this stream */
+       unsigned short          error;          /* Aggregate error for the stream */
+diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
+index 2d366be46a1c3..cbe28211106c5 100644
+--- a/include/trace/events/netfs.h
++++ b/include/trace/events/netfs.h
+@@ -740,19 +740,19 @@ TRACE_EVENT(netfs_collect_stream,
+                   __field(unsigned int,       wreq)
+                   __field(unsigned char,      stream)
+                   __field(unsigned long long, collected_to)
+-                  __field(unsigned long long, front)
++                  __field(unsigned long long, issued_to)
+                            ),
+           TP_fast_assign(
+                   __entry->wreq       = wreq->debug_id;
+                   __entry->stream     = stream->stream_nr;
+                   __entry->collected_to = stream->collected_to;
+-                  __entry->front      = stream->front ? stream->front->start : UINT_MAX;
++                  __entry->issued_to  = atomic64_read(&wreq->issued_to);
+                          ),
+-          TP_printk("R=%08x[%x:] cto=%llx frn=%llx",
++          TP_printk("R=%08x[%x:] cto=%llx ito=%llx",
+                     __entry->wreq, __entry->stream,
+-                    __entry->collected_to, __entry->front)
++                    __entry->collected_to, __entry->issued_to)
+           );
+ TRACE_EVENT(netfs_folioq,
+-- 
+2.53.0
+
diff --git a/queue-6.19/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-6.19/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..2d63f43
--- /dev/null
@@ -0,0 +1,51 @@
+From 90d60db76ce4732fceb7de8558bd45fc4a2d0cc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index ba31b0a1f7f79..77f18de6fdf62 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1425,6 +1425,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1439,6 +1440,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+               }
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.19/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch b/queue-6.19/selftests-mount_setattr-increase-tmpfs-size-for-idma.patch
new file mode 100644 (file)
index 0000000..8f7d71b
--- /dev/null
@@ -0,0 +1,45 @@
+From 19b4e1602d3b0e26273cd74f5f20155a6d7891de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 16:59:45 +0100
+Subject: selftests/mount_setattr: increase tmpfs size for idmapped mount tests
+
+From: Christian Brauner <brauner@kernel.org>
+
+[ Upstream commit c465f5591aa84a6f85d66d152e28b92844a45d4f ]
+
+The mount_setattr_idmapped fixture mounts a 2 MB tmpfs at /mnt and then
+creates a 2 GB sparse ext4 image at /mnt/C/ext4.img. While ftruncate()
+succeeds (sparse file), mkfs.ext4 needs to write actual metadata blocks
+(inode tables, journal, bitmaps) which easily exceeds the 2 MB tmpfs
+limit, causing ENOSPC and failing the fixture setup for all
+mount_setattr_idmapped tests.
+
+This was introduced by commit d37d4720c3e7 ("selftests/mount_settattr:
+ensure that ext4 filesystem can be created") which increased the image
+size from 2 MB to 2 GB but didn't adjust the tmpfs size.
+
+Bump the tmpfs size to 256 MB which is sufficient for the ext4 metadata.
+
+Fixes: d37d4720c3e7 ("selftests/mount_settattr: ensure that ext4 filesystem can be created")
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mount_setattr/mount_setattr_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index 7aec3ae82a446..c6dafb3cc1163 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -1020,7 +1020,7 @@ FIXTURE_SETUP(mount_setattr_idmapped)
+                       "size=100000,mode=700"), 0);
+       ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
+-                      "size=2m,mode=700"), 0);
++                      "size=256m,mode=700"), 0);
+       ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
+-- 
+2.53.0
+
index d80816b3e752d1c6797e1b2fc355b47f1b68be5f..39a48d8cb89521ffae9d365c29c01f663f373714 100644 (file)
@@ -310,3 +310,32 @@ powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
 mm-damon-core-avoid-use-of-half-online-committed-context.patch
 rust-pin-init-internal-init-document-load-bearing-fact-of-field-accessors.patch
 ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
+dmaengine-idxd-fix-crash-when-the-event-log-is-disab.patch
+dmaengine-idxd-fix-possible-invalid-memory-access-af.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
+dmaengine-idxd-fix-leaking-event-log-memory.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
+dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
+netfs-fix-kernel-bug-in-netfs_limit_iter-for-iter_kv.patch
+netfs-fix-null-pointer-dereference-in-netfs_unbuffer.patch
+dmaengine-idxd-fix-possible-wrong-descriptor-complet.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
+selftests-mount_setattr-increase-tmpfs-size-for-idma.patch
+netfs-fix-read-abandonment-during-retry.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
+netfs-fix-the-handling-of-stream-front-by-removing-i.patch
+irqchip-renesas-rzv2h-fix-error-path-in-rzv2h_icu_pr.patch
+futex-require-sys_futex_requeue-to-have-identical-fl.patch
+futex-fix-uaf-between-futex_key_to_node_opt-and-vma_.patch
+ext4-introduce-export_symbol_for_ext4_test-helper.patch
+ext4-fix-mballoc-test.c-is-not-compiled-when-ext4_ku.patch
+bug-avoid-format-attribute-warning-for-clang-as-well.patch
diff --git a/queue-6.19/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch b/queue-6.19/xen-privcmd-unregister-xenstore-notifier-on-module-e.patch
new file mode 100644 (file)
index 0000000..25e92f4
--- /dev/null
@@ -0,0 +1,47 @@
+From 58d944dbd4e6462868d61b4f79e89ab2cab46087 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 20:02:46 +0800
+Subject: xen/privcmd: unregister xenstore notifier on module exit
+
+From: GuoHan Zhao <zhaoguohan@kylinos.cn>
+
+[ Upstream commit cd7e1fef5a1ca1c4fcd232211962ac2395601636 ]
+
+Commit 453b8fb68f36 ("xen/privcmd: restrict usage in
+unprivileged domU") added a xenstore notifier to defer setting the
+restriction target until Xenstore is ready.
+
+XEN_PRIVCMD can be built as a module, but privcmd_exit() leaves that
+notifier behind. Balance the notifier lifecycle by unregistering it on
+module exit.
+
+This is harmless even if xenstore was already ready at registration
+time and the notifier was never queued on the chain.
+
+Fixes: 453b8fb68f3641fe ("xen/privcmd: restrict usage in unprivileged domU")
+Signed-off-by: GuoHan Zhao <zhaoguohan@kylinos.cn>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260325120246.252899-1-zhaoguohan@kylinos.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/privcmd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index b8a546fe7c1e2..cbc62f0df11b7 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -1764,6 +1764,9 @@ static int __init privcmd_init(void)
+ static void __exit privcmd_exit(void)
+ {
++      if (!xen_initial_domain())
++              unregister_xenstore_notifier(&xenstore_notifier);
++
+       privcmd_ioeventfd_exit();
+       privcmd_irqfd_exit();
+       misc_deregister(&privcmd_dev);
+-- 
+2.53.0
+
diff --git a/queue-6.6/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch b/queue-6.6/btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
new file mode 100644 (file)
index 0000000..55ce708
--- /dev/null
@@ -0,0 +1,70 @@
+From 4ee760b5066baf80cc539edab8e73c52d5ed3d40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 21:17:04 +0900
+Subject: btrfs: fix leak of kobject name for sub-group space_info
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit a4376d9a5d4c9610e69def3fc0b32c86a7ab7a41 ]
+
+When create_space_info_sub_group() allocates elements of
+space_info->sub_group[], kobject_init_and_add() is called for each
+element via btrfs_sysfs_add_space_info_type(). However, when
+check_removing_space_info() frees these elements, it does not call
+btrfs_sysfs_remove_space_info() on them. As a result, kobject_put() is
+not called and the associated kobj->name objects are leaked.
+
+This memory leak is reproduced by running the blktests test case
+zbd/009 on kernels built with CONFIG_DEBUG_KMEMLEAK. The kmemleak
+feature reports the following error:
+
+unreferenced object 0xffff888112877d40 (size 16):
+  comm "mount", pid 1244, jiffies 4294996972
+  hex dump (first 16 bytes):
+    64 61 74 61 2d 72 65 6c 6f 63 00 c4 c6 a7 cb 7f  data-reloc......
+  backtrace (crc 53ffde4d):
+    __kmalloc_node_track_caller_noprof+0x619/0x870
+    kstrdup+0x42/0xc0
+    kobject_set_name_vargs+0x44/0x110
+    kobject_init_and_add+0xcf/0x150
+    btrfs_sysfs_add_space_info_type+0xfc/0x210 [btrfs]
+    create_space_info_sub_group.constprop.0+0xfb/0x1b0 [btrfs]
+    create_space_info+0x211/0x320 [btrfs]
+    btrfs_init_space_info+0x15a/0x1b0 [btrfs]
+    open_ctree+0x33c7/0x4a50 [btrfs]
+    btrfs_get_tree.cold+0x9f/0x1ee [btrfs]
+    vfs_get_tree+0x87/0x2f0
+    vfs_cmd_create+0xbd/0x280
+    __do_sys_fsconfig+0x3df/0x990
+    do_syscall_64+0x136/0x1540
+    entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+To avoid the leak, call btrfs_sysfs_remove_space_info() instead of
+kfree() for the elements.
+
+Fixes: f92ee31e031c ("btrfs: introduce btrfs_space_info sub-group")
+Link: https://lore.kernel.org/linux-block/b9488881-f18d-4f47-91a5-3c9bf63955a5@wdc.com/
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a08e03a74909a..3bc6c99ed2e38 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -4386,7 +4386,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info)
+               for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+                       if (space_info->sub_group[i]) {
+                               check_removing_space_info(space_info->sub_group[i]);
+-                              kfree(space_info->sub_group[i]);
++                              btrfs_sysfs_remove_space_info(space_info->sub_group[i]);
+                               space_info->sub_group[i] = NULL;
+                       }
+               }
+-- 
+2.53.0
+
diff --git a/queue-6.6/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch b/queue-6.6/btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
new file mode 100644 (file)
index 0000000..c5ed641
--- /dev/null
@@ -0,0 +1,48 @@
+From c332753cb68ed749af2a57f8cfce35fafc5aa42f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2026 16:17:59 +0000
+Subject: btrfs: fix lost error when running device stats on multiple devices
+ fs
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1c37d896b12dfd0d4c96e310b0033c6676933917 ]
+
+Whenever we get an error updating the device stats item for a device in
+btrfs_run_dev_stats() we allow the loop to go to the next device, and if
+updating the stats item for the next device succeeds, we end up losing
+the error we had from the previous device.
+
+Fix this by breaking out of the loop once we get an error and make sure
+it's returned to the caller. Since we are in the transaction commit path
+(and in the critical section actually), returning the error will result
+in a transaction abort.
+
+Fixes: 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 23756f1464013..f5e4b8f3dcb7f 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7641,8 +7641,9 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
+               smp_rmb();
+               ret = update_dev_stat_item(trans, device);
+-              if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++              if (ret)
++                      break;
++              atomic_sub(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+-- 
+2.53.0
+
diff --git a/queue-6.6/btrfs-fix-super-block-offset-in-error-message-in-btr.patch b/queue-6.6/btrfs-fix-super-block-offset-in-error-message-in-btr.patch
new file mode 100644 (file)
index 0000000..ec9d2d1
--- /dev/null
@@ -0,0 +1,46 @@
+From 588efb5312c2c9444e27b0bd55092cd6ba771cee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 17:35:42 +0000
+Subject: btrfs: fix super block offset in error message in
+ btrfs_validate_super()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+[ Upstream commit b52fe51f724385b3ed81e37e510a4a33107e8161 ]
+
+Fix the superblock offset mismatch error message in
+btrfs_validate_super(): we changed it so that it considers all the
+superblocks, but the message still assumes we're only looking at the
+first one.
+
+The change from %u to %llu is because we're changing from a constant to
+a u64.
+
+Fixes: 069ec957c35e ("btrfs: Refactor btrfs_check_super_valid")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index b4ec844b7d741..c7746baa86f8c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2444,8 +2444,8 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+       if (mirror_num >= 0 &&
+           btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
+-              btrfs_err(fs_info, "super offset mismatch %llu != %u",
+-                        btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
++              btrfs_err(fs_info, "super offset mismatch %llu != %llu",
++                        btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
+               ret = -EINVAL;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch b/queue-6.6/dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
new file mode 100644 (file)
index 0000000..0828a1d
--- /dev/null
@@ -0,0 +1,70 @@
+From 43e823d0039f03a819524249ae460b6035dbe599 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 14:45:09 +0800
+Subject: dmaengine: dw-edma: Fix multiple times setting of the CYCLE_STATE and
+ CYCLE_BIT bits for HDMA.
+
+From: LUO Haowen <luo-hw@foxmail.com>
+
+[ Upstream commit 3f63297ff61a994b99d710dcb6dbde41c4003233 ]
+
+Others have submitted this issue (https://lore.kernel.org/dmaengine/
+20240722030405.3385-1-zhengdongxiong@gxmicro.cn/),
+but it has not been fixed yet. Therefore, more supplementary information
+is provided here.
+
+As mentioned in the "PCS-CCS-CB-TCB" Producer-Consumer Synchronization of
+"DesignWare Cores PCI Express Controller Databook, version 6.00a":
+
+1. The Consumer CYCLE_STATE (CCS) bit in the register only needs to be
+initialized once; the value will update automatically to be
+~CYCLE_BIT (CB) in the next chunk.
+2. The Consumer CYCLE_BIT bit in the register is loaded from the LL
+element and tested against CCS. When CB = CCS, the data transfer is
+executed. Otherwise not.
+
+The current logic sets customer (HDMA) CS and CB bits to 1 in each chunk
+while setting the producer (software) CB of odd chunks to 0 and even
+chunks to 1 in the linked list. This is leading to a mismatch between
+the producer CB and consumer CS bits.
+
+This issue can be reproduced by setting the transmission data size to
+exceed one chunk. By the way, in the EDMA using the same "PCS-CCS-CB-TCB"
+mechanism, the CS bit is only initialized once and this issue was not
+found. Refer to
+drivers/dma/dw-edma/dw-edma-v0-core.c:dw_edma_v0_core_start.
+
+So fix this issue by initializing the CYCLE_STATE and CYCLE_BIT bits
+only once.
+
+Fixes: e74c39573d35 ("dmaengine: dw-edma: Add support for native HDMA")
+Signed-off-by: LUO Haowen <luo-hw@foxmail.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/tencent_CB11AA9F3920C1911AF7477A9BD8EFE0AD05@qq.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw-edma/dw-hdma-v0-core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index e3f8db4fe909a..ce8f7254bab21 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -252,10 +252,10 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
++              /* Set consumer cycle */
++              SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
++                      HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       }
+-      /* Set consumer cycle */
+-      SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+-                HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+       dw_hdma_v0_sync_ll_data(chunk);
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch b/queue-6.6/dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch
new file mode 100644 (file)
index 0000000..4a2167b
--- /dev/null
@@ -0,0 +1,60 @@
+From 30a580128ed7e8a9004670c77b6d7edfac206fdc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:35 -0800
+Subject: dmaengine: idxd: Fix freeing the allocated ida too late
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit c311f5e9248471a950f0a524c2fd736414d98900 ]
+
+It can happen that when the cdev .release() is called, the driver
+already called ida_destroy(). Move ida_free() to the _del() path.
+
+We see with DEBUG_KOBJECT_RELEASE enabled and forcing an early PCI
+unbind.
+
+Fixes: 04922b7445a1 ("dmaengine: idxd: fix cdev setup and free device lifetime issues")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-9-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 20d380524f4ee..5ded4a0887bc8 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -161,11 +161,7 @@ static struct device_type idxd_cdev_file_type = {
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+       struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
+-      struct idxd_cdev_context *cdev_ctx;
+-      struct idxd_wq *wq = idxd_cdev->wq;
+-      cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -585,11 +581,15 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
++      struct idxd_cdev_context *cdev_ctx;
+       struct idxd_cdev *idxd_cdev;
+       idxd_cdev = wq->idxd_cdev;
+       wq->idxd_cdev = NULL;
+       cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
++
++      cdev_ctx = &ictx[wq->idxd->data->type];
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       put_device(cdev_dev(idxd_cdev));
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch b/queue-6.6/dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
new file mode 100644 (file)
index 0000000..36d37ba
--- /dev/null
@@ -0,0 +1,56 @@
+From bfcb9966e44f4353ee9ec0ba5e54d1d4fafaa04d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:34 -0800
+Subject: dmaengine: idxd: Fix memory leak when a wq is reset
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit d9cfb5193a047a92a4d3c0e91ea4cc87c8f7c478 ]
+
+idxd_wq_disable_cleanup() which is called from the reset path for a
+workqueue, sets the wq type to NONE, which for other parts of the
+driver mean that the wq is empty (all its resources were released).
+
+Only set the wq type to NONE after its resources are released.
+
+Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-8-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/device.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 542d340552dd7..44bbeb3acd14e 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -173,6 +173,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_queue_free(&wq->sbq);
++      wq->type = IDXD_WQT_NONE;
+ }
+ int idxd_wq_enable(struct idxd_wq *wq)
+@@ -365,7 +366,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       lockdep_assert_held(&wq->wq_lock);
+       wq->state = IDXD_WQ_DISABLED;
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+-      wq->type = IDXD_WQT_NONE;
+       wq->threshold = 0;
+       wq->priority = 0;
+       wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+@@ -1507,7 +1507,6 @@ void drv_disable_wq(struct idxd_wq *wq)
+       idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
+       percpu_ref_exit(&wq->wq_active);
+-      wq->type = IDXD_WQT_NONE;
+       wq->client_count = 0;
+ }
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch b/queue-6.6/dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
new file mode 100644 (file)
index 0000000..38d904e
--- /dev/null
@@ -0,0 +1,37 @@
+From 26eb759fee060a2300f371f117500190a9ef1c38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:34:33 -0800
+Subject: dmaengine: idxd: Fix not releasing workqueue on .release()
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 3d33de353b1ff9023d5ec73b9becf80ea87af695 ]
+
+The workqueue associated with an DSA/IAA device is not released when
+the object is freed.
+
+Fixes: 47c16ac27d4c ("dmaengine: idxd: fix idxd conf_dev 'struct device' lifetime")
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-7-7ed70658a9d1@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/sysfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 3a5ce477a81ad..7971db45709f8 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1810,6 +1810,7 @@ static void idxd_conf_device_release(struct device *dev)
+ {
+       struct idxd_device *idxd = confdev_to_idxd(dev);
++      destroy_workqueue(idxd->wq);
+       kfree(idxd->groups);
+       bitmap_free(idxd->wq_enable_map);
+       kfree(idxd->wqs);
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch b/queue-6.6/dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
new file mode 100644 (file)
index 0000000..267260f
--- /dev/null
@@ -0,0 +1,57 @@
+From a1ff3c886b486c9a0bdf8dc38b983cac1964b51f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 20:33:50 +0100
+Subject: dmaengine: idxd: Remove usage of the deprecated ida_simple_xx() API
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 1075ee66a8c19bfa375b19c236fd6a22a867f138 ]
+
+ida_alloc() and ida_free() should be preferred to the deprecated
+ida_simple_get() and ida_simple_remove().
+
+This is less verbose.
+
+Note that the upper limit of ida_simple_get() is exclusive, but the one of
+ida_alloc_range() is inclusive. Sothis change allows one more device.
+
+MINORMASK is ((1U << MINORBITS) - 1), so allowing MINORMASK as a maximum value
+makes sense. It is also consistent with other "ida_.*MINORMASK" and
+"ida_*MINOR()" usages.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
+Acked-by: Lijun Pan <lijun.pan@intel.com>
+Link: https://lore.kernel.org/r/ac991f5f42112fa782a881d391d447529cbc4a23.1702967302.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: c311f5e92484 ("dmaengine: idxd: Fix freeing the allocated ida too late")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index aa39fcd389a94..20d380524f4ee 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
+       struct idxd_wq *wq = idxd_cdev->wq;
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
++      ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       kfree(idxd_cdev);
+ }
+@@ -550,7 +550,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
+       cdev = &idxd_cdev->cdev;
+       dev = cdev_dev(idxd_cdev);
+       cdev_ctx = &ictx[wq->idxd->data->type];
+-      minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
++      minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
+       if (minor < 0) {
+               kfree(idxd_cdev);
+               return minor;
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch b/queue-6.6/dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
new file mode 100644 (file)
index 0000000..5453ac5
--- /dev/null
@@ -0,0 +1,41 @@
+From 3ac2dedaee11a37b2e7708a4469ee2b6e3758fc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 08:13:08 +0200
+Subject: dmaengine: xilinx: xdma: Fix regmap init error handling
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit e0adbf74e2a0455a6bc9628726ba87bcd0b42bf8 ]
+
+devm_regmap_init_mmio returns an ERR_PTR() upon error, not NULL.
+Fix the error check and also fix the error message. Use the error code
+from ERR_PTR() instead of the wrong value in ret.
+
+Fixes: 17ce252266c7 ("dmaengine: xilinx: xdma: Add xilinx xdma driver")
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20251014061309.283468-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index dbab4c4499143..d806bb1162aef 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -901,8 +901,8 @@ static int xdma_probe(struct platform_device *pdev)
+       xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+                                          &xdma_regmap_config);
+-      if (!xdev->rmap) {
+-              xdma_err(xdev, "config regmap failed: %d", ret);
++      if (IS_ERR(xdev->rmap)) {
++              xdma_err(xdev, "config regmap failed: %pe", xdev->rmap);
+               goto failed;
+       }
+       INIT_LIST_HEAD(&xdev->dma_dev.channels);
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch b/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
new file mode 100644 (file)
index 0000000..039f230
--- /dev/null
@@ -0,0 +1,38 @@
+From eb5d034e448bfe21e6aed09b5c38ba4047b473a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:16:54 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix dma_device directions
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit e9cc95397bb7da13fe8a5b53a2f23cfaf9018ade ]
+
+Unlike chan->direction , struct dma_device .directions field is a
+bitfield. Turn chan->direction into a bitfield to make it compatible
+with struct dma_device .directions .
+
+Fixes: 7e01511443c3 ("dmaengine: xilinx_dma: Set dma_device directions")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221728.160139-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 176cac3f37a73..f2006e6996220 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2901,7 +2901,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               return -EINVAL;
+       }
+-      xdev->common.directions |= chan->direction;
++      xdev->common.directions |= BIT(chan->direction);
+       /* Request the interrupt */
+       chan->irq = of_irq_get(node, chan->tdest);
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch b/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
new file mode 100644 (file)
index 0000000..70ebea3
--- /dev/null
@@ -0,0 +1,75 @@
+From 5f8ac427bb9940518d3f44bf3495d033c1c83c3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:18:57 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix residue calculation for cyclic DMA
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit f61d145999d61948a23cd436ebbfa4c3b9ab8987 ]
+
+The cyclic DMA calculation is currently entirely broken and reports
+residue only for the first segment. The problem is twofold.
+
+First, when the first descriptor finishes, it is moved from active_list
+to done_list, but it is never returned back into the active_list. The
+xilinx_dma_tx_status() expects the descriptor to be in the active_list
+to report any meaningful residue information, which never happens after
+the first descriptor finishes. Fix this up in xilinx_dma_start_transfer()
+and if the descriptor is cyclic, lift it from done_list and place it back
+into active_list list.
+
+Second, the segment .status fields of the descriptor remain dirty. Once
+the DMA did one pass on the descriptor, the .status fields are populated
+with data by the DMA, but the .status fields are not cleared before reuse
+during the next cyclic DMA round. The xilinx_dma_get_residue() recognizes
+that as if the descriptor was complete and had 0 residue, which is bogus.
+Reinitialize the status field before placing the descriptor back into the
+active_list.
+
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316221943.160375-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index f2006e6996220..17c424e650d2f 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1541,8 +1541,29 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+       if (chan->err)
+               return;
+-      if (list_empty(&chan->pending_list))
++      if (list_empty(&chan->pending_list)) {
++              if (chan->cyclic) {
++                      struct xilinx_dma_tx_descriptor *desc;
++                      struct list_head *entry;
++
++                      desc = list_last_entry(&chan->done_list,
++                                             struct xilinx_dma_tx_descriptor, node);
++                      list_for_each(entry, &desc->segments) {
++                              struct xilinx_axidma_tx_segment *axidma_seg;
++                              struct xilinx_axidma_desc_hw *axidma_hw;
++                              axidma_seg = list_entry(entry,
++                                                      struct xilinx_axidma_tx_segment,
++                                                      node);
++                              axidma_hw = &axidma_seg->hw;
++                              axidma_hw->status = 0;
++                      }
++
++                      list_splice_tail_init(&chan->done_list, &chan->active_list);
++                      chan->desc_pendingcount = 0;
++                      chan->idle = false;
++              }
+               return;
++      }
+       if (!chan->idle)
+               return;
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch b/queue-6.6/dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
new file mode 100644 (file)
index 0000000..edc0973
--- /dev/null
@@ -0,0 +1,62 @@
+From beb56d4dc05db300ab715f78795da0dc98450a05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 23:25:24 +0100
+Subject: dmaengine: xilinx: xilinx_dma: Fix unmasked residue subtraction
+
+From: Marek Vasut <marex@nabladev.com>
+
+[ Upstream commit c7d812e33f3e8ca0fa9eeabf71d1c7bc3acedc09 ]
+
+The segment .control and .status fields both contain top bits which are
+not part of the buffer size, the buffer size is located only in the bottom
+max_buffer_len bits. To avoid interference from those top bits, mask out
+the size using max_buffer_len first, and only then subtract the values.
+
+Fixes: a575d0b4e663 ("dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue")
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260316222530.163815-1-marex@nabladev.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 17c424e650d2f..89583d6c4de46 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -994,16 +994,16 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+-                      residue += (cdma_hw->control - cdma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (cdma_hw->control & chan->xdev->max_buffer_len) -
++                                 (cdma_hw->status & chan->xdev->max_buffer_len);
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+-                      residue += (axidma_hw->control - axidma_hw->status) &
+-                                 chan->xdev->max_buffer_len;
++                      residue += (axidma_hw->control & chan->xdev->max_buffer_len) -
++                                 (axidma_hw->status & chan->xdev->max_buffer_len);
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+@@ -1011,8 +1011,8 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+-                              (aximcdma_hw->control - aximcdma_hw->status) &
+-                              chan->xdev->max_buffer_len;
++                              (aximcdma_hw->control & chan->xdev->max_buffer_len) -
++                              (aximcdma_hw->status & chan->xdev->max_buffer_len);
+               }
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.6/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch b/queue-6.6/dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
new file mode 100644 (file)
index 0000000..c115f3d
--- /dev/null
@@ -0,0 +1,98 @@
+From 9afba5f4adbc2ac3defcff5482efd676ba852aa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 07:34:46 +0200
+Subject: dmaengine: xilinx_dma: Fix reset related timeout with two-channel
+ AXIDMA
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit a17ce4bc6f4f9acf77ba416c36791a15602e53aa ]
+
+A single AXIDMA controller can have one or two channels. When it has two
+channels, the reset for both are tied together: resetting one channel
+resets the other as well. This creates a problem where resetting one
+channel will reset the registers for both channels, including clearing
+interrupt enable bits for the other channel, which can then lead to
+timeouts as the driver is waiting for an interrupt which never comes.
+
+The driver currently has a probe-time work around for this: when a
+channel is created, the driver also resets and enables the
+interrupts. With two channels the reset for the second channel will
+clear the interrupt enables for the first one. The work around in the
+driver is just to manually enable the interrupts again in
+xilinx_dma_alloc_chan_resources().
+
+This workaround only addresses the probe-time issue. When channels are
+reset at runtime (e.g., in xilinx_dma_terminate_all() or during error
+recovery), there's no corresponding mechanism to restore the other
+channel's interrupt enables. This leads to one channel having its
+interrupts disabled while the driver expects them to work, causing
+timeouts and DMA failures.
+
+A proper fix is a complicated matter, as we should not reset the other
+channel when it's operating normally. So, perhaps, there should be some
+kind of synchronization for a common reset, which is not trivial to
+implement. To add to the complexity, the driver also supports other DMA
+types, like VDMA, CDMA and MCDMA, which don't have a shared reset.
+
+However, when the two-channel AXIDMA is used in the (assumably) normal
+use case, providing DMA for a single memory-to-memory device, the common
+reset is a bit smaller issue: when something bad happens on one channel,
+or when one channel is terminated, the assumption is that we also want
+to terminate the other channel. And thus resetting both at the same time
+is "ok".
+
+With that line of thinking we can implement a bit better work around
+than just the current probe time work around: let's enable the
+AXIDMA interrupts at xilinx_dma_start_transfer() instead.
+This ensures interrupts are enabled whenever a transfer starts,
+regardless of any prior resets that may have cleared them.
+
+This approach is also more logical: enable interrupts only when needed
+for a transfer, rather than at resource allocation time, and, I think,
+all the other DMA types should also use this model, but I'm reluctant to
+do such changes as I cannot test them.
+
+The reset function still enables interrupts even though it's not needed
+for AXIDMA anymore, but it's common code for all DMA types (VDMA, CDMA,
+MCDMA), so leave it unchanged to avoid affecting other variants.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
+Link: https://patch.msgid.link/20260311-xilinx-dma-fix-v2-1-a725abb66e3c@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 89583d6c4de46..3d728dd1ecde1 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1214,14 +1214,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       dma_cookie_init(dchan);
+-      if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+-              /* For AXI DMA resetting once channel will reset the
+-               * other channel as well so enable the interrupts here.
+-               */
+-              dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+-                            XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+-      }
+-
+       if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+               dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+                            XILINX_CDMA_CR_SGMODE);
+@@ -1589,6 +1581,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+                            head_desc->async_tx.phys);
+       reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+       reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
++      reg |= XILINX_DMA_DMAXR_ALL_IRQ_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+       xilinx_dma_start(chan);
+-- 
+2.53.0
+
diff --git a/queue-6.6/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch b/queue-6.6/phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
new file mode 100644 (file)
index 0000000..67c26d3
--- /dev/null
@@ -0,0 +1,51 @@
+From 47f7bca383b1a22692c604318dbcc84cbbac8f4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 18:39:19 +0800
+Subject: phy: ti: j721e-wiz: Fix device node reference leak in
+ wiz_get_lane_phy_types()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 584b457f4166293bdfa50f930228e9fb91a38392 ]
+
+The serdes device_node is obtained using of_get_child_by_name(),
+which increments the reference count. However, it is never put,
+leading to a reference leak.
+
+Add the missing of_node_put() calls to ensure the reference count is
+properly balanced.
+
+Fixes: 7ae14cf581f2 ("phy: ti: j721e-wiz: Implement DisplayPort mode to the wiz driver")
+Suggested-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20260212-wiz-v2-1-6e8bd4cc7a4a@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index fc3cd98c60ff4..a28c168b35d92 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -1424,6 +1424,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+                       dev_err(dev,
+                               "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+                               __func__, subnode->name, ret);
++                      of_node_put(serdes);
+                       return ret;
+               }
+               of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+@@ -1438,6 +1439,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+               }
+       }
++      of_node_put(serdes);
+       return 0;
+ }
+-- 
+2.53.0
+
index 40f4ecb91297c8d269c8101f28144b2b845415f1..9b0dd06919edb86cef0c69fac22d21603219d7b6 100644 (file)
@@ -153,3 +153,17 @@ gfs2-fix-unlikely-race-in-gdlm_put_lock.patch
 libbpf-fix-wdiscarded-qualifiers-under-c23.patch
 xattr-switch-to-class-fd.patch
 nvme-fix-admin-queue-leak-on-controller-reset.patch
+dmaengine-idxd-fix-not-releasing-workqueue-on-.relea.patch
+dmaengine-idxd-fix-memory-leak-when-a-wq-is-reset.patch
+phy-ti-j721e-wiz-fix-device-node-reference-leak-in-w.patch
+dmaengine-dw-edma-fix-multiple-times-setting-of-the-.patch
+dmaengine-xilinx-xdma-fix-regmap-init-error-handling.patch
+dmaengine-xilinx-xilinx_dma-fix-dma_device-direction.patch
+dmaengine-xilinx-xilinx_dma-fix-residue-calculation-.patch
+dmaengine-xilinx-xilinx_dma-fix-unmasked-residue-sub.patch
+dmaengine-xilinx_dma-fix-reset-related-timeout-with-.patch
+btrfs-fix-super-block-offset-in-error-message-in-btr.patch
+btrfs-fix-leak-of-kobject-name-for-sub-group-space_i.patch
+btrfs-fix-lost-error-when-running-device-stats-on-mu.patch
+dmaengine-idxd-remove-usage-of-the-deprecated-ida_si.patch
+dmaengine-idxd-fix-freeing-the-allocated-ida-too-lat.patch