6.1-stable patches
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 25 Mar 2025 11:52:10 +0000 (07:52 -0400)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 25 Mar 2025 11:52:10 +0000 (07:52 -0400)
added patches:
arm64-dts-rockchip-fix-u2phy1_host-status-for-nanopi-r4s.patch
block-bfq-fix-re-introduced-uaf-in-bic_set_bfqq.patch
drm-amdgpu-fix-use-after-free-bug.patch
drm-mediatek-fix-coverity-issue-with-unintentional-integer-overflow.patch
fs-ntfs3-change-new-sparse-cluster-processing.patch
media-mediatek-vcodec-fix-vp8-stateless-decoder-smatch-warning.patch
mm-migrate-fix-shmem-xarray-update-during-migration.patch
netfilter-nft_counter-use-u64_stats_t-for-statistic.patch
wifi-iwlwifi-mvm-ensure-offloading-tid-queue-exists.patch

13 files changed:
queue-6.1/arm64-dts-rockchip-fix-u2phy1_host-status-for-nanopi-r4s.patch [new file with mode: 0644]
queue-6.1/block-bfq-fix-re-introduced-uaf-in-bic_set_bfqq.patch [new file with mode: 0644]
queue-6.1/drm-amdgpu-fix-use-after-free-bug.patch [new file with mode: 0644]
queue-6.1/drm-mediatek-fix-coverity-issue-with-unintentional-integer-overflow.patch [new file with mode: 0644]
queue-6.1/fs-ntfs3-change-new-sparse-cluster-processing.patch [new file with mode: 0644]
queue-6.1/media-mediatek-vcodec-fix-vp8-stateless-decoder-smatch-warning.patch [new file with mode: 0644]
queue-6.1/mm-migrate-fix-shmem-xarray-update-during-migration.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_counter-use-u64_stats_t-for-statistic.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/wifi-iwlwifi-mvm-ensure-offloading-tid-queue-exists.patch [new file with mode: 0644]
queue-6.1/xfs-pass-the-xfs_defer_pending-object-to-iop_recover.patch [deleted file]
queue-6.1/xfs-transfer-recovered-intent-item-ownership-in-iop_recover.patch [deleted file]
queue-6.1/xfs-use-xfs_defer_pending-objects-to-recover-intent-items.patch [deleted file]

diff --git a/queue-6.1/arm64-dts-rockchip-fix-u2phy1_host-status-for-nanopi-r4s.patch b/queue-6.1/arm64-dts-rockchip-fix-u2phy1_host-status-for-nanopi-r4s.patch
new file mode 100644 (file)
index 0000000..1248f86
--- /dev/null
@@ -0,0 +1,38 @@
+From 38f4aa34a5f737ea8588dac320d884cc2e762c03 Mon Sep 17 00:00:00 2001
+From: Justin Klaassen <justin@tidylabs.net>
+Date: Tue, 25 Feb 2025 17:03:58 +0000
+Subject: arm64: dts: rockchip: fix u2phy1_host status for NanoPi R4S
+
+From: Justin Klaassen <justin@tidylabs.net>
+
+commit 38f4aa34a5f737ea8588dac320d884cc2e762c03 upstream.
+
+The u2phy1_host should always have the same status as usb_host1_ehci
+and usb_host1_ohci, otherwise the EHCI and OHCI drivers may be
+initialized for a disabled usb port.
+
+Per the NanoPi R4S schematic, the phy-supply for u2phy1_host is set to
+the vdd_5v regulator.
+
+Fixes: db792e9adbf8 ("rockchip: rk3399: Add support for FriendlyARM NanoPi R4S")
+Cc: stable@vger.kernel.org
+Signed-off-by: Justin Klaassen <justin@tidylabs.net>
+Reviewed-by: Dragan Simic <dsimic@manjaro.org>
+Link: https://lore.kernel.org/r/20250225170420.3898-1-justin@tidylabs.net
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
+@@ -117,7 +117,7 @@
+ };
+ &u2phy1_host {
+-      status = "disabled";
++      phy-supply = <&vdd_5v>;
+ };
+ &uart0 {
diff --git a/queue-6.1/block-bfq-fix-re-introduced-uaf-in-bic_set_bfqq.patch b/queue-6.1/block-bfq-fix-re-introduced-uaf-in-bic_set_bfqq.patch
new file mode 100644 (file)
index 0000000..3d96545
--- /dev/null
@@ -0,0 +1,46 @@
+From stable+bounces-126003-greg=kroah.com@vger.kernel.org Tue Mar 25 06:25:03 2025
+From: "Acs, Jakub" <acsjakub@amazon.de>
+Date: Tue, 25 Mar 2025 10:24:41 +0000
+Subject: block, bfq: fix re-introduced UAF in bic_set_bfqq()
+To: "gregkh@linuxfoundation.org" <gregkh@linuxfoundation.org>
+Cc: "Acs, Jakub" <acsjakub@amazon.de>, Hagar Hemdan <hagarhem@amazon.com>, "stable@vger.kernel.org" <stable@vger.kernel.org>
+Message-ID: <20250325102409.50587-1-acsjakub@amazon.de>
+
+From: "Acs, Jakub" <acsjakub@amazon.de>
+
+Commit eca0025faa96ac ("block, bfq: split sync bfq_queues on a
+per-actuator basis"), which is a backport of 9778369a2d6c5e ("block,
+bfq: split sync bfq_queues on a per-actuator basis"), re-introduces the
+UAF bug originally fixed by b600de2d7d3a16 ("block, bfq: fix uaf for
+bfqq in bic_set_bfqq()") and backported to 6.1 in cb1876fc33af26
+("block, bfq: fix uaf for bfqq in bic_set_bfqq()").
+
+bfq_release_process_ref() may free the bfqq that the sync_bfqq variable
+points to, which in the call context of __bfq_bic_change_cgroup() is the
+same bfqq the bic->bfqq member points to. bic_set_bfqq() then accesses
+the bic->bfqq member, which leads to the UAF condition.
+
+Fix this by bringing the incriminated function calls back into the
+correct order.
+
+Fixes: eca0025faa96ac ("block, bfq: split sync bfq_queues on a per-actuator basis")
+Signed-off-by: Jakub Acs <acsjakub@amazon.de>
+Cc: Hagar Hemdan <hagarhem@amazon.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-cgroup.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -739,8 +739,8 @@ static void bfq_sync_bfqq_move(struct bf
+                * old cgroup.
+                */
+               bfq_put_cooperator(sync_bfqq);
+-              bfq_release_process_ref(bfqd, sync_bfqq);
+               bic_set_bfqq(bic, NULL, true, act_idx);
++              bfq_release_process_ref(bfqd, sync_bfqq);
+       }
+ }
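
A stand-alone illustration of the use-after-free pattern fixed above, with
invented types; the real bic_set_bfqq() is more involved, but like this
sketch it reads fields of the old bic->bfqq before installing the new
pointer:

#include <stdlib.h>

struct bfqq { int refcnt; int flags; };
struct bic  { struct bfqq *bfqq; };

static void release_ref(struct bfqq *q)
{
        if (--q->refcnt == 0)
                free(q);                 /* q is gone after this */
}

static void bic_set_bfqq(struct bic *bic, struct bfqq *newq)
{
        struct bfqq *old = bic->bfqq;

        if (old)
                old->flags = 0;          /* UAF if old was already freed */
        bic->bfqq = newq;
}

/* Buggy order (what the backport re-introduced): the last reference may
 * be dropped while bic->bfqq still points at the queue. */
static void move_buggy(struct bic *bic, struct bfqq *q)
{
        release_ref(q);                  /* may free q == bic->bfqq */
        bic_set_bfqq(bic, NULL);         /* then reads freed memory */
}

/* Fixed order, as in the patch: detach the pointer first. */
static void move_fixed(struct bic *bic, struct bfqq *q)
{
        bic_set_bfqq(bic, NULL);
        release_ref(q);
}

int main(void)
{
        struct bfqq *q = calloc(1, sizeof(*q));
        struct bic bic = { .bfqq = q };

        q->refcnt = 1;
        move_fixed(&bic, q);             /* move_buggy() would be a UAF */
        return 0;
}
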
diff --git a/queue-6.1/drm-amdgpu-fix-use-after-free-bug.patch b/queue-6.1/drm-amdgpu-fix-use-after-free-bug.patch
new file mode 100644 (file)
index 0000000..7000136
--- /dev/null
@@ -0,0 +1,174 @@
+From 22207fd5c80177b860279653d017474b2812af5e Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Wed, 6 Mar 2024 14:57:48 -0500
+Subject: drm/amdgpu: fix use-after-free bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+
+commit 22207fd5c80177b860279653d017474b2812af5e upstream.
+
+The bug can be triggered by sending a single amdgpu_gem_userptr_ioctl
+to the AMDGPU DRM driver on any ASIC with an invalid address and size.
+The bug was reported by Joonkyo Jung <joonkyoj@yonsei.ac.kr>.
+For example, the following code:
+
+static void Syzkaller1(int fd)
+{
+       struct drm_amdgpu_gem_userptr arg;
+       int ret;
+
+       arg.addr = 0xffffffffffff0000;
+       arg.size = 0x80000000; /*2 Gb*/
+       arg.flags = 0x7;
+       ret = drmIoctl(fd, 0xc1186451/*amdgpu_gem_userptr_ioctl*/, &arg);
+}
+
+Because the address and size are not valid, there is a failure in
+amdgpu_hmm_register->mmu_interval_notifier_insert->__mmu_interval_notifier_insert->
+check_shl_overflow, but even after the amdgpu_hmm_register failure we
+still call amdgpu_hmm_unregister from amdgpu_gem_object_free, which
+causes access to a bad address. The following stack trace appears when
+the issue is reproduced with KASAN enabled:
+
+[  +0.000014] Hardware name: ASUS System Product Name/ROG STRIX B550-F GAMING (WI-FI), BIOS 1401 12/03/2020
+[  +0.000009] RIP: 0010:mmu_interval_notifier_remove+0x327/0x340
+[  +0.000017] Code: ff ff 49 89 44 24 08 48 b8 00 01 00 00 00 00 ad de 4c 89 f7 49 89 47 40 48 83 c0 22 49 89 47 48 e8 ce d1 2d 01 e9 32 ff ff ff <0f> 0b e9 16 ff ff ff 4c 89 ef e8 fa 14 b3 ff e9 36 ff ff ff e8 80
+[  +0.000014] RSP: 0018:ffffc90002657988 EFLAGS: 00010246
+[  +0.000013] RAX: 0000000000000000 RBX: 1ffff920004caf35 RCX: ffffffff8160565b
+[  +0.000011] RDX: dffffc0000000000 RSI: 0000000000000004 RDI: ffff8881a9f78260
+[  +0.000010] RBP: ffffc90002657a70 R08: 0000000000000001 R09: fffff520004caf25
+[  +0.000010] R10: 0000000000000003 R11: ffffffff8161d1d6 R12: ffff88810e988c00
+[  +0.000010] R13: ffff888126fb5a00 R14: ffff88810e988c0c R15: ffff8881a9f78260
+[  +0.000011] FS:  00007ff9ec848540(0000) GS:ffff8883cc880000(0000) knlGS:0000000000000000
+[  +0.000012] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  +0.000010] CR2: 000055b3f7e14328 CR3: 00000001b5770000 CR4: 0000000000350ef0
+[  +0.000010] Call Trace:
+[  +0.000006]  <TASK>
+[  +0.000007]  ? show_regs+0x6a/0x80
+[  +0.000018]  ? __warn+0xa5/0x1b0
+[  +0.000019]  ? mmu_interval_notifier_remove+0x327/0x340
+[  +0.000018]  ? report_bug+0x24a/0x290
+[  +0.000022]  ? handle_bug+0x46/0x90
+[  +0.000015]  ? exc_invalid_op+0x19/0x50
+[  +0.000016]  ? asm_exc_invalid_op+0x1b/0x20
+[  +0.000017]  ? kasan_save_stack+0x26/0x50
+[  +0.000017]  ? mmu_interval_notifier_remove+0x23b/0x340
+[  +0.000019]  ? mmu_interval_notifier_remove+0x327/0x340
+[  +0.000019]  ? mmu_interval_notifier_remove+0x23b/0x340
+[  +0.000020]  ? __pfx_mmu_interval_notifier_remove+0x10/0x10
+[  +0.000017]  ? kasan_save_alloc_info+0x1e/0x30
+[  +0.000018]  ? srso_return_thunk+0x5/0x5f
+[  +0.000014]  ? __kasan_kmalloc+0xb1/0xc0
+[  +0.000018]  ? srso_return_thunk+0x5/0x5f
+[  +0.000013]  ? __kasan_check_read+0x11/0x20
+[  +0.000020]  amdgpu_hmm_unregister+0x34/0x50 [amdgpu]
+[  +0.004695]  amdgpu_gem_object_free+0x66/0xa0 [amdgpu]
+[  +0.004534]  ? __pfx_amdgpu_gem_object_free+0x10/0x10 [amdgpu]
+[  +0.004291]  ? do_syscall_64+0x5f/0xe0
+[  +0.000023]  ? srso_return_thunk+0x5/0x5f
+[  +0.000017]  drm_gem_object_free+0x3b/0x50 [drm]
+[  +0.000489]  amdgpu_gem_userptr_ioctl+0x306/0x500 [amdgpu]
+[  +0.004295]  ? __pfx_amdgpu_gem_userptr_ioctl+0x10/0x10 [amdgpu]
+[  +0.004270]  ? srso_return_thunk+0x5/0x5f
+[  +0.000014]  ? __this_cpu_preempt_check+0x13/0x20
+[  +0.000015]  ? srso_return_thunk+0x5/0x5f
+[  +0.000013]  ? sysvec_apic_timer_interrupt+0x57/0xc0
+[  +0.000020]  ? srso_return_thunk+0x5/0x5f
+[  +0.000014]  ? asm_sysvec_apic_timer_interrupt+0x1b/0x20
+[  +0.000022]  ? drm_ioctl_kernel+0x17b/0x1f0 [drm]
+[  +0.000496]  ? __pfx_amdgpu_gem_userptr_ioctl+0x10/0x10 [amdgpu]
+[  +0.004272]  ? drm_ioctl_kernel+0x190/0x1f0 [drm]
+[  +0.000492]  drm_ioctl_kernel+0x140/0x1f0 [drm]
+[  +0.000497]  ? __pfx_amdgpu_gem_userptr_ioctl+0x10/0x10 [amdgpu]
+[  +0.004297]  ? __pfx_drm_ioctl_kernel+0x10/0x10 [drm]
+[  +0.000489]  ? srso_return_thunk+0x5/0x5f
+[  +0.000011]  ? __kasan_check_write+0x14/0x20
+[  +0.000016]  drm_ioctl+0x3da/0x730 [drm]
+[  +0.000475]  ? __pfx_amdgpu_gem_userptr_ioctl+0x10/0x10 [amdgpu]
+[  +0.004293]  ? __pfx_drm_ioctl+0x10/0x10 [drm]
+[  +0.000506]  ? __pfx_rpm_resume+0x10/0x10
+[  +0.000016]  ? srso_return_thunk+0x5/0x5f
+[  +0.000011]  ? __kasan_check_write+0x14/0x20
+[  +0.000010]  ? srso_return_thunk+0x5/0x5f
+[  +0.000011]  ? _raw_spin_lock_irqsave+0x99/0x100
+[  +0.000015]  ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+[  +0.000014]  ? srso_return_thunk+0x5/0x5f
+[  +0.000013]  ? srso_return_thunk+0x5/0x5f
+[  +0.000011]  ? srso_return_thunk+0x5/0x5f
+[  +0.000011]  ? preempt_count_sub+0x18/0xc0
+[  +0.000013]  ? srso_return_thunk+0x5/0x5f
+[  +0.000010]  ? _raw_spin_unlock_irqrestore+0x27/0x50
+[  +0.000019]  amdgpu_drm_ioctl+0x7e/0xe0 [amdgpu]
+[  +0.004272]  __x64_sys_ioctl+0xcd/0x110
+[  +0.000020]  do_syscall_64+0x5f/0xe0
+[  +0.000021]  entry_SYSCALL_64_after_hwframe+0x6e/0x76
+[  +0.000015] RIP: 0033:0x7ff9ed31a94f
+[  +0.000012] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00
+[  +0.000013] RSP: 002b:00007fff25f66790 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+[  +0.000016] RAX: ffffffffffffffda RBX: 000055b3f7e133e0 RCX: 00007ff9ed31a94f
+[  +0.000012] RDX: 000055b3f7e133e0 RSI: 00000000c1186451 RDI: 0000000000000003
+[  +0.000010] RBP: 00000000c1186451 R08: 0000000000000000 R09: 0000000000000000
+[  +0.000009] R10: 0000000000000008 R11: 0000000000000246 R12: 00007fff25f66ca8
+[  +0.000009] R13: 0000000000000003 R14: 000055b3f7021ba8 R15: 00007ff9ed7af040
+[  +0.000024]  </TASK>
+[  +0.000007] ---[ end trace 0000000000000000 ]---
+
+v2: Consolidate any error handling into amdgpu_hmm_register
+    which applied to kfd_bo also. (Christian)
+v3: Improve syntax and comment (Christian)
+
+Cc: Christian Koenig <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Felix Kuehling <felix.kuehling@amd.com>
+Cc: Joonkyo Jung <joonkyoj@yonsei.ac.kr>
+Cc: Dokyung Song <dokyungs@yonsei.ac.kr>
+Cc: <jisoo.jang@yonsei.ac.kr>
+Cc: <yw9865@yonsei.ac.kr>
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c is renamed from
+  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c since
+  d9483ecd327b ("drm/amdgpu: rename the files for HMM handling").
+  The path is changed accordingly to apply the patch on 6.1.y. ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -132,13 +132,25 @@ static const struct mmu_interval_notifie
+  */
+ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ {
++      int r;
++
+       if (bo->kfd_bo)
+-              return mmu_interval_notifier_insert(&bo->notifier, current->mm,
++              r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
+                                                   addr, amdgpu_bo_size(bo),
+                                                   &amdgpu_mn_hsa_ops);
+-      return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+-                                          amdgpu_bo_size(bo),
+-                                          &amdgpu_mn_gfx_ops);
++      else
++              r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
++                                                      amdgpu_bo_size(bo),
++                                                      &amdgpu_mn_gfx_ops);
++      if (r)
++              /*
++               * Make sure amdgpu_hmm_unregister() doesn't call
++               * mmu_interval_notifier_remove() when the notifier isn't properly
++               * initialized.
++               */
++              bo->notifier.mm = NULL;
++
++      return r;
+ }
+ /**
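
The key idea in the fix above is a sentinel: on failure, bo->notifier.mm is
set to NULL so that the unconditional cleanup run from the object-free path
can bail out instead of removing a notifier that was never inserted. A
minimal stand-alone sketch of that contract, with invented types:

#include <stdio.h>

struct notifier { void *mm; };
struct object   { struct notifier notifier; };

/* Registration either fully succeeds or leaves the sentinel behind,
 * mirroring "bo->notifier.mm = NULL" in the patch. */
static int obj_register(struct object *obj, void *mm, int simulate_fail)
{
        if (simulate_fail) {
                obj->notifier.mm = NULL;   /* sentinel: never registered */
                return -1;
        }
        obj->notifier.mm = mm;
        return 0;
}

/* Runs unconditionally on object free, so it must tolerate a failed
 * registration; the real code would call mmu_interval_notifier_remove(). */
static void obj_unregister(struct object *obj)
{
        if (!obj->notifier.mm)
                return;                    /* nothing was registered */
        obj->notifier.mm = NULL;
}

int main(void)
{
        struct object obj;
        int dummy_mm;

        if (obj_register(&obj, &dummy_mm, 1))
                puts("register failed");
        obj_unregister(&obj);              /* safe: sentinel short-circuits */
        return 0;
}
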
diff --git a/queue-6.1/drm-mediatek-fix-coverity-issue-with-unintentional-integer-overflow.patch b/queue-6.1/drm-mediatek-fix-coverity-issue-with-unintentional-integer-overflow.patch
new file mode 100644 (file)
index 0000000..8ddab63
--- /dev/null
@@ -0,0 +1,79 @@
+From b0b0d811eac6b4c52cb9ad632fa6384cf48869e7 Mon Sep 17 00:00:00 2001
+From: "Jason-JH.Lin" <jason-jh.lin@mediatek.com>
+Date: Thu, 7 Sep 2023 17:14:25 +0800
+Subject: drm/mediatek: Fix coverity issue with unintentional integer overflow
+
+From: Jason-JH.Lin <jason-jh.lin@mediatek.com>
+
+commit b0b0d811eac6b4c52cb9ad632fa6384cf48869e7 upstream.
+
+1. Instead of multiplying two variables of different types, assign the
+value of one variable first and then multiply by the other variable.
+
+2. Add an int variable for the multiplier calculation instead of
+calculating the mixed-type product directly on a dma_addr_t variable.
+
+Fixes: 1a64a7aff8da ("drm/mediatek: Fix cursor plane no update")
+Signed-off-by: Jason-JH.Lin <jason-jh.lin@mediatek.com>
+Reviewed-by: Alexandre Mergnat <amergnat@baylibre.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20230907091425.9526-1-jason-jh.lin@mediatek.com/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+[ Certain code segments with the coverity issue do not exist in
+  function mtk_plane_update_new_state() in v6.1; those segments are
+  not backported. ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_drm_gem.c   |    9 ++++++++-
+ drivers/gpu/drm/mediatek/mtk_drm_plane.c |   13 +++++++++++--
+ 2 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -119,7 +119,14 @@ int mtk_drm_gem_dumb_create(struct drm_f
+       int ret;
+       args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+-      args->size = args->pitch * args->height;
++
++      /*
++       * Multiply 2 variables of different types,
++       * for example: args->size = args->spacing * args->height;
++       * may cause coverity issue with unintentional overflow.
++       */
++      args->size = args->pitch;
++      args->size *= args->height;
+       mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+       if (IS_ERR(mtk_gem))
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -120,6 +120,7 @@ static void mtk_plane_update_new_state(s
+       struct mtk_drm_gem_obj *mtk_gem;
+       unsigned int pitch, format;
+       dma_addr_t addr;
++      int offset;
+       gem = fb->obj[0];
+       mtk_gem = to_mtk_gem_obj(gem);
+@@ -127,8 +128,16 @@ static void mtk_plane_update_new_state(s
+       pitch = fb->pitches[0];
+       format = fb->format->format;
+-      addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+-      addr += (new_state->src.y1 >> 16) * pitch;
++      /*
++       * Using dma_addr_t variable to calculate with multiplier of different types,
++       * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
++       * may cause coverity issue with unintentional overflow.
++       */
++      offset = (new_state->src.x1 >> 16) * fb->format->cpp[0];
++      addr += offset;
++      offset = (new_state->src.y1 >> 16) * pitch;
++      addr += offset;
++
+       mtk_plane_state->pending.enable = true;
+       mtk_plane_state->pending.pitch = pitch;
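
The pattern being fixed is generic C arithmetic behaviour: a product of two
32-bit operands is computed in 32 bits and only widened afterwards by the
assignment, so the wrap-around has already happened. A minimal stand-alone
sketch, with values chosen only to force the wrap (this is not the driver
code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pitch  = 70000;
        uint32_t height = 70000;

        /* Evaluated as a 32-bit multiply, wraps, THEN widens to 64 bits. */
        uint64_t wrong = pitch * height;

        /* Widen one operand first, as the patch does via args->size, so
         * the multiplication itself happens in 64 bits. */
        uint64_t right = pitch;
        right *= height;

        printf("wrong = %llu\n", (unsigned long long)wrong); /* 605032704 */
        printf("right = %llu\n", (unsigned long long)right); /* 4900000000 */
        return 0;
}

The same reasoning explains the mtk_drm_plane.c hunk: doing the narrow
multiplication into a separate int and only then adding it to the
dma_addr_t keeps the intentionally bounded arithmetic out of the wide type.
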
diff --git a/queue-6.1/fs-ntfs3-change-new-sparse-cluster-processing.patch b/queue-6.1/fs-ntfs3-change-new-sparse-cluster-processing.patch
new file mode 100644 (file)
index 0000000..8109b08
--- /dev/null
@@ -0,0 +1,670 @@
+From c380b52f6c5702cc4bdda5e6d456d6c19a201a0b Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Date: Fri, 7 Oct 2022 14:02:36 +0300
+Subject: fs/ntfs3: Change new sparse cluster processing
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+commit c380b52f6c5702cc4bdda5e6d456d6c19a201a0b upstream.
+
+Remove ntfs_sparse_cluster.
+Zero clusters in attr_allocate_clusters.
+Fixes xfstest generic/263
+
+Bug: https://syzkaller.appspot.com/bug?extid=f3e5d0948a1837ed1bb0
+Reported-by: syzbot+f3e5d0948a1837ed1bb0@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Miguel Garcia Roman <miguelgarciaroman8@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/attrib.c  |  178 ++++++++++++++++++++++++++++++++++++-----------------
+ fs/ntfs3/file.c    |  151 +++++++++-----------------------------------
+ fs/ntfs3/frecord.c |    2 
+ fs/ntfs3/index.c   |    4 -
+ fs/ntfs3/inode.c   |   12 +--
+ fs/ntfs3/ntfs_fs.h |    7 --
+ 6 files changed, 167 insertions(+), 187 deletions(-)
+
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -176,7 +176,7 @@ out:
+ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
+                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
+                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
+-                         CLST *new_lcn)
++                         CLST *new_lcn, CLST *new_len)
+ {
+       int err;
+       CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
+@@ -196,20 +196,36 @@ int attr_allocate_clusters(struct ntfs_s
+               if (err)
+                       goto out;
+-              if (new_lcn && vcn == vcn0)
+-                      *new_lcn = lcn;
++              if (vcn == vcn0) {
++                      /* Return the first fragment. */
++                      if (new_lcn)
++                              *new_lcn = lcn;
++                      if (new_len)
++                              *new_len = flen;
++              }
+               /* Add new fragment into run storage. */
+-              if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
++              if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
+                       /* Undo last 'ntfs_look_for_free_space' */
+                       mark_as_free_ex(sbi, lcn, len, false);
+                       err = -ENOMEM;
+                       goto out;
+               }
++              if (opt & ALLOCATE_ZERO) {
++                      u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
++
++                      err = blkdev_issue_zeroout(sbi->sb->s_bdev,
++                                                 (sector_t)lcn << shift,
++                                                 (sector_t)flen << shift,
++                                                 GFP_NOFS, 0);
++                      if (err)
++                              goto out;
++              }
++
+               vcn += flen;
+-              if (flen >= len || opt == ALLOCATE_MFT ||
++              if (flen >= len || (opt & ALLOCATE_MFT) ||
+                   (fr && run->count - cnt >= fr)) {
+                       *alen = vcn - vcn0;
+                       return 0;
+@@ -287,7 +303,8 @@ int attr_make_nonresident(struct ntfs_in
+               const char *data = resident_data(attr);
+               err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
+-                                           ALLOCATE_DEF, &alen, 0, NULL);
++                                           ALLOCATE_DEF, &alen, 0, NULL,
++                                           NULL);
+               if (err)
+                       goto out1;
+@@ -582,13 +599,13 @@ add_alloc_in_same_attr_seg:
+                       /* ~3 bytes per fragment. */
+                       err = attr_allocate_clusters(
+                               sbi, run, vcn, lcn, to_allocate, &pre_alloc,
+-                              is_mft ? ALLOCATE_MFT : 0, &alen,
++                              is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
+                               is_mft ? 0
+                                      : (sbi->record_size -
+                                         le32_to_cpu(rec->used) + 8) /
+                                                        3 +
+                                                1,
+-                              NULL);
++                              NULL, NULL);
+                       if (err)
+                               goto out;
+               }
+@@ -886,8 +903,19 @@ bad_inode:
+       return err;
+ }
++/*
++ * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
++ *
++ * @new == NULL means just to get current mapping for 'vcn'
++ * @new != NULL means allocate real cluster if 'vcn' maps to hole
++ * @zero - zeroout new allocated clusters
++ *
++ *  NOTE:
++ *  - @new != NULL is called only for sparsed or compressed attributes.
++ *  - new allocated clusters are zeroed via blkdev_issue_zeroout.
++ */
+ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+-                      CLST *len, bool *new)
++                      CLST *len, bool *new, bool zero)
+ {
+       int err = 0;
+       struct runs_tree *run = &ni->file.run;
+@@ -896,29 +924,27 @@ int attr_data_get_block(struct ntfs_inod
+       struct ATTRIB *attr = NULL, *attr_b;
+       struct ATTR_LIST_ENTRY *le, *le_b;
+       struct mft_inode *mi, *mi_b;
+-      CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
++      CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
++      unsigned int fr;
+       u64 total_size;
+-      u32 clst_per_frame;
+-      bool ok;
+       if (new)
+               *new = false;
++      /* Try to find in cache. */
+       down_read(&ni->file.run_lock);
+-      ok = run_lookup_entry(run, vcn, lcn, len, NULL);
++      if (!run_lookup_entry(run, vcn, lcn, len, NULL))
++              *len = 0;
+       up_read(&ni->file.run_lock);
+-      if (ok && (*lcn != SPARSE_LCN || !new)) {
+-              /* Normal way. */
+-              return 0;
++      if (*len) {
++              if (*lcn != SPARSE_LCN || !new)
++                      return 0; /* Fast normal way without allocation. */
++              else if (clen > *len)
++                      clen = *len;
+       }
+-      if (!clen)
+-              clen = 1;
+-
+-      if (ok && clen > *len)
+-              clen = *len;
+-
++      /* No cluster in cache or we need to allocate cluster in hole. */
+       sbi = ni->mi.sbi;
+       cluster_bits = sbi->cluster_bits;
+@@ -944,12 +970,6 @@ int attr_data_get_block(struct ntfs_inod
+               goto out;
+       }
+-      clst_per_frame = 1u << attr_b->nres.c_unit;
+-      to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
+-
+-      if (vcn + to_alloc > asize)
+-              to_alloc = asize - vcn;
+-
+       svcn = le64_to_cpu(attr_b->nres.svcn);
+       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+@@ -968,36 +988,68 @@ int attr_data_get_block(struct ntfs_inod
+               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+       }
++      /* Load in cache actual information. */
+       err = attr_load_runs(attr, ni, run, NULL);
+       if (err)
+               goto out;
+-      if (!ok) {
+-              ok = run_lookup_entry(run, vcn, lcn, len, NULL);
+-              if (ok && (*lcn != SPARSE_LCN || !new)) {
+-                      /* Normal way. */
+-                      err = 0;
++      if (!*len) {
++              if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
++                      if (*lcn != SPARSE_LCN || !new)
++                              goto ok; /* Slow normal way without allocation. */
++
++                      if (clen > *len)
++                              clen = *len;
++              } else if (!new) {
++                      /* Here we may return -ENOENT.
++                       * In any case caller gets zero length. */
+                       goto ok;
+               }
+-
+-              if (!ok && !new) {
+-                      *len = 0;
+-                      err = 0;
+-                      goto ok;
+-              }
+-
+-              if (ok && clen > *len) {
+-                      clen = *len;
+-                      to_alloc = (clen + clst_per_frame - 1) &
+-                                 ~(clst_per_frame - 1);
+-              }
+       }
+       if (!is_attr_ext(attr_b)) {
++              /* The code below only for sparsed or compressed attributes. */
+               err = -EINVAL;
+               goto out;
+       }
++      vcn0 = vcn;
++      to_alloc = clen;
++      fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
++      /* Allocate frame aligned clusters.
++       * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
++       * ntfs3 uses 1 cluster per frame for new created sparsed files. */
++      if (attr_b->nres.c_unit) {
++              CLST clst_per_frame = 1u << attr_b->nres.c_unit;
++              CLST cmask = ~(clst_per_frame - 1);
++
++              /* Get frame aligned vcn and to_alloc. */
++              vcn = vcn0 & cmask;
++              to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
++              if (fr < clst_per_frame)
++                      fr = clst_per_frame;
++              zero = true;
++
++              /* Check if 'vcn' and 'vcn0' in different attribute segments. */
++              if (vcn < svcn || evcn1 <= vcn) {
++                      /* Load attribute for truncated vcn. */
++                      attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
++                                          &vcn, &mi);
++                      if (!attr) {
++                              err = -EINVAL;
++                              goto out;
++                      }
++                      svcn = le64_to_cpu(attr->nres.svcn);
++                      evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
++                      err = attr_load_runs(attr, ni, run, NULL);
++                      if (err)
++                              goto out;
++              }
++      }
++
++      if (vcn + to_alloc > asize)
++              to_alloc = asize - vcn;
++
+       /* Get the last LCN to allocate from. */
+       hint = 0;
+@@ -1011,18 +1063,33 @@ int attr_data_get_block(struct ntfs_inod
+               hint = -1;
+       }
+-      err = attr_allocate_clusters(
+-              sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
+-              (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
+-              lcn);
++      /* Allocate and zeroout new clusters. */
++      err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
++                                   zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
++                                   fr, lcn, len);
+       if (err)
+               goto out;
+       *new = true;
+-      end = vcn + *len;
+-
++      end = vcn + alen;
+       total_size = le64_to_cpu(attr_b->nres.total_size) +
+-                   ((u64)*len << cluster_bits);
++                   ((u64)alen << cluster_bits);
++
++      if (vcn != vcn0) {
++              if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
++                      err = -EINVAL;
++                      goto out;
++              }
++              if (*lcn == SPARSE_LCN) {
++                      /* Internal error. Should not happened. */
++                      WARN_ON(1);
++                      err = -EINVAL;
++                      goto out;
++              }
++              /* Check case when vcn0 + len overlaps new allocated clusters. */
++              if (vcn0 + *len > end)
++                      *len = end - vcn0;
++      }
+ repack:
+       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
+@@ -1547,7 +1614,7 @@ int attr_allocate_frame(struct ntfs_inod
+       struct ATTRIB *attr = NULL, *attr_b;
+       struct ATTR_LIST_ENTRY *le, *le_b;
+       struct mft_inode *mi, *mi_b;
+-      CLST svcn, evcn1, next_svcn, lcn, len;
++      CLST svcn, evcn1, next_svcn, len;
+       CLST vcn, end, clst_data;
+       u64 total_size, valid_size, data_size;
+@@ -1623,8 +1690,9 @@ int attr_allocate_frame(struct ntfs_inod
+               }
+               err = attr_allocate_clusters(sbi, run, vcn + clst_data,
+-                                           hint + 1, len - clst_data, NULL, 0,
+-                                           &alen, 0, &lcn);
++                                           hint + 1, len - clst_data, NULL,
++                                           ALLOCATE_DEF, &alen, 0, NULL,
++                                           NULL);
+               if (err)
+                       goto out;
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -122,8 +122,8 @@ static int ntfs_extend_initialized_size(
+                       bits = sbi->cluster_bits;
+                       vcn = pos >> bits;
+-                      err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
+-                                                NULL);
++                      err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
++                                                false);
+                       if (err)
+                               goto out;
+@@ -196,18 +196,18 @@ static int ntfs_zero_range(struct inode
+       struct address_space *mapping = inode->i_mapping;
+       u32 blocksize = 1 << inode->i_blkbits;
+       pgoff_t idx = vbo >> PAGE_SHIFT;
+-      u32 z_start = vbo & (PAGE_SIZE - 1);
++      u32 from = vbo & (PAGE_SIZE - 1);
+       pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       loff_t page_off;
+       struct buffer_head *head, *bh;
+-      u32 bh_next, bh_off, z_end;
++      u32 bh_next, bh_off, to;
+       sector_t iblock;
+       struct page *page;
+-      for (; idx < idx_end; idx += 1, z_start = 0) {
++      for (; idx < idx_end; idx += 1, from = 0) {
+               page_off = (loff_t)idx << PAGE_SHIFT;
+-              z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
+-                                                      : PAGE_SIZE;
++              to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
++                                                   : PAGE_SIZE;
+               iblock = page_off >> inode->i_blkbits;
+               page = find_or_create_page(mapping, idx,
+@@ -224,7 +224,7 @@ static int ntfs_zero_range(struct inode
+               do {
+                       bh_next = bh_off + blocksize;
+-                      if (bh_next <= z_start || bh_off >= z_end)
++                      if (bh_next <= from || bh_off >= to)
+                               continue;
+                       if (!buffer_mapped(bh)) {
+@@ -258,7 +258,7 @@ static int ntfs_zero_range(struct inode
+               } while (bh_off = bh_next, iblock += 1,
+                        head != (bh = bh->b_this_page));
+-              zero_user_segment(page, z_start, z_end);
++              zero_user_segment(page, from, to);
+               unlock_page(page);
+               put_page(page);
+@@ -270,81 +270,6 @@ out:
+ }
+ /*
+- * ntfs_sparse_cluster - Helper function to zero a new allocated clusters.
+- *
+- * NOTE: 512 <= cluster size <= 2M
+- */
+-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
+-                       CLST len)
+-{
+-      struct address_space *mapping = inode->i_mapping;
+-      struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+-      u64 vbo = (u64)vcn << sbi->cluster_bits;
+-      u64 bytes = (u64)len << sbi->cluster_bits;
+-      u32 blocksize = 1 << inode->i_blkbits;
+-      pgoff_t idx0 = page0 ? page0->index : -1;
+-      loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
+-      loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
+-      pgoff_t idx = vbo_clst >> PAGE_SHIFT;
+-      u32 from = vbo_clst & (PAGE_SIZE - 1);
+-      pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+-      loff_t page_off;
+-      u32 to;
+-      bool partial;
+-      struct page *page;
+-
+-      for (; idx < idx_end; idx += 1, from = 0) {
+-              page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
+-
+-              if (!page)
+-                      continue;
+-
+-              page_off = (loff_t)idx << PAGE_SHIFT;
+-              to = (page_off + PAGE_SIZE) > end ? (end - page_off)
+-                                                : PAGE_SIZE;
+-              partial = false;
+-
+-              if ((from || PAGE_SIZE != to) &&
+-                  likely(!page_has_buffers(page))) {
+-                      create_empty_buffers(page, blocksize, 0);
+-              }
+-
+-              if (page_has_buffers(page)) {
+-                      struct buffer_head *head, *bh;
+-                      u32 bh_off = 0;
+-
+-                      bh = head = page_buffers(page);
+-                      do {
+-                              u32 bh_next = bh_off + blocksize;
+-
+-                              if (from <= bh_off && bh_next <= to) {
+-                                      set_buffer_uptodate(bh);
+-                                      mark_buffer_dirty(bh);
+-                              } else if (!buffer_uptodate(bh)) {
+-                                      partial = true;
+-                              }
+-                              bh_off = bh_next;
+-                      } while (head != (bh = bh->b_this_page));
+-              }
+-
+-              zero_user_segment(page, from, to);
+-
+-              if (!partial) {
+-                      if (!PageUptodate(page))
+-                              SetPageUptodate(page);
+-                      set_page_dirty(page);
+-              }
+-
+-              if (idx != idx0) {
+-                      unlock_page(page);
+-                      put_page(page);
+-              }
+-              cond_resched();
+-      }
+-      mark_inode_dirty(inode);
+-}
+-
+-/*
+  * ntfs_file_mmap - file_operations::mmap
+  */
+ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -385,13 +310,9 @@ static int ntfs_file_mmap(struct file *f
+                       for (; vcn < end; vcn += len) {
+                               err = attr_data_get_block(ni, vcn, 1, &lcn,
+-                                                        &len, &new);
++                                                        &len, &new, true);
+                               if (err)
+                                       goto out;
+-
+-                              if (!new)
+-                                      continue;
+-                              ntfs_sparse_cluster(inode, NULL, vcn, 1);
+                       }
+               }
+@@ -532,7 +453,8 @@ static long ntfs_fallocate(struct file *
+       struct ntfs_sb_info *sbi = sb->s_fs_info;
+       struct ntfs_inode *ni = ntfs_i(inode);
+       loff_t end = vbo + len;
+-      loff_t vbo_down = round_down(vbo, PAGE_SIZE);
++      loff_t vbo_down = round_down(vbo, max_t(unsigned long,
++                                              sbi->cluster_size, PAGE_SIZE));
+       bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
+       loff_t i_size, new_size;
+       bool map_locked;
+@@ -585,11 +507,8 @@ static long ntfs_fallocate(struct file *
+               u32 frame_size;
+               loff_t mask, vbo_a, end_a, tmp;
+-              err = filemap_write_and_wait_range(mapping, vbo, end - 1);
+-              if (err)
+-                      goto out;
+-
+-              err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
++              err = filemap_write_and_wait_range(mapping, vbo_down,
++                                                 LLONG_MAX);
+               if (err)
+                       goto out;
+@@ -692,39 +611,35 @@ static long ntfs_fallocate(struct file *
+                       goto out;
+               if (is_supported_holes) {
+-                      CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
+                       CLST vcn = vbo >> sbi->cluster_bits;
+                       CLST cend = bytes_to_cluster(sbi, end);
++                      CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
+                       CLST lcn, clen;
+                       bool new;
++                      if (cend_v > cend)
++                              cend_v = cend;
++
+                       /*
+-                       * Allocate but do not zero new clusters. (see below comments)
+-                       * This breaks security: One can read unused on-disk areas.
++                       * Allocate and zero new clusters.
+                        * Zeroing these clusters may be too long.
+-                       * Maybe we should check here for root rights?
++                       */
++                      for (; vcn < cend_v; vcn += clen) {
++                              err = attr_data_get_block(ni, vcn, cend_v - vcn,
++                                                        &lcn, &clen, &new,
++                                                        true);
++                              if (err)
++                                      goto out;
++                      }
++                      /*
++                       * Allocate but not zero new clusters.
+                        */
+                       for (; vcn < cend; vcn += clen) {
+                               err = attr_data_get_block(ni, vcn, cend - vcn,
+-                                                        &lcn, &clen, &new);
++                                                        &lcn, &clen, &new,
++                                                        false);
+                               if (err)
+                                       goto out;
+-                              if (!new || vcn >= vcn_v)
+-                                      continue;
+-
+-                              /*
+-                               * Unwritten area.
+-                               * NTFS is not able to store several unwritten areas.
+-                               * Activate 'ntfs_sparse_cluster' to zero new allocated clusters.
+-                               *
+-                               * Dangerous in case:
+-                               * 1G of sparsed clusters + 1 cluster of data =>
+-                               * valid_size == 1G + 1 cluster
+-                               * fallocate(1G) will zero 1G and this can be very long
+-                               * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
+-                               */
+-                              ntfs_sparse_cluster(inode, NULL, vcn,
+-                                                  min(vcn_v - vcn, clen));
+                       }
+               }
+@@ -945,8 +860,8 @@ static ssize_t ntfs_compress_write(struc
+               frame_vbo = valid & ~(frame_size - 1);
+               off = valid & (frame_size - 1);
+-              err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
+-                                        &clen, NULL);
++              err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
++                                        &clen, NULL, false);
+               if (err)
+                       goto out;
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -2297,7 +2297,7 @@ int ni_decompress_file(struct ntfs_inode
+               for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
+                       err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
+-                                                &clen, &new);
++                                                &clen, &new, false);
+                       if (err)
+                               goto out;
+               }
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1442,8 +1442,8 @@ static int indx_create_allocate(struct n
+       run_init(&run);
+-      err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
+-                                   NULL);
++      err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, ALLOCATE_DEF,
++                                   &alen, 0, NULL, NULL);
+       if (err)
+               goto out;
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -592,7 +592,8 @@ static noinline int ntfs_get_block_vbo(s
+       off = vbo & sbi->cluster_mask;
+       new = false;
+-      err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
++      err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL,
++                                create && sbi->cluster_size > PAGE_SIZE);
+       if (err)
+               goto out;
+@@ -610,11 +611,8 @@ static noinline int ntfs_get_block_vbo(s
+               WARN_ON(1);
+       }
+-      if (new) {
++      if (new)
+               set_buffer_new(bh);
+-              if ((len << cluster_bits) > block_size)
+-                      ntfs_sparse_cluster(inode, page, vcn, len);
+-      }
+       lbo = ((u64)lcn << cluster_bits) + off;
+@@ -1533,8 +1531,8 @@ struct inode *ntfs_create_inode(struct u
+                               cpu_to_le64(ntfs_up_cluster(sbi, nsize));
+                       err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
+-                                                   clst, NULL, 0, &alen, 0,
+-                                                   NULL);
++                                                   clst, NULL, ALLOCATE_DEF,
++                                                   &alen, 0, NULL, NULL);
+                       if (err)
+                               goto out5;
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -128,6 +128,7 @@ struct ntfs_buffers {
+ enum ALLOCATE_OPT {
+       ALLOCATE_DEF = 0, // Allocate all clusters.
+       ALLOCATE_MFT = 1, // Allocate for MFT.
++      ALLOCATE_ZERO = 2, // Zeroout new allocated clusters
+ };
+ enum bitmap_mutex_classes {
+@@ -418,7 +419,7 @@ enum REPARSE_SIGN {
+ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
+                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
+                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
+-                         CLST *new_lcn);
++                         CLST *new_lcn, CLST *new_len);
+ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
+                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
+                         u64 new_size, struct runs_tree *run,
+@@ -428,7 +429,7 @@ int attr_set_size(struct ntfs_inode *ni,
+                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
+                 struct ATTRIB **ret);
+ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+-                      CLST *len, bool *new);
++                      CLST *len, bool *new, bool zero);
+ int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
+ int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
+ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
+@@ -493,8 +494,6 @@ extern const struct file_operations ntfs
+ /* Globals from file.c */
+ int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+                struct kstat *stat, u32 request_mask, u32 flags);
+-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
+-                       CLST len);
+ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+                 struct iattr *attr);
+ int ntfs_file_open(struct inode *inode, struct file *file);
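
One detail worth pulling out of the large diff above: the new ALLOCATE_ZERO
path hands freshly allocated clusters to blkdev_issue_zeroout(), converting
cluster numbers to 512-byte sectors with a shift of
(cluster_bits - SECTOR_SHIFT). A small userspace sketch of just that unit
conversion, with assumed example values:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte sectors, as in the kernel */

int main(void)
{
        unsigned cluster_bits = 12;            /* assume 4 KiB clusters */
        uint64_t lcn = 100, flen = 16;         /* first cluster, run length */
        unsigned shift = cluster_bits - SECTOR_SHIFT;

        /* The same arithmetic attr_allocate_clusters() feeds to
         * blkdev_issue_zeroout() in the patch. */
        uint64_t sector     = lcn  << shift;   /* 800 */
        uint64_t nr_sectors = flen << shift;   /* 128 */

        printf("zero %llu sectors starting at sector %llu\n",
               (unsigned long long)nr_sectors, (unsigned long long)sector);
        return 0;
}
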
diff --git a/queue-6.1/media-mediatek-vcodec-fix-vp8-stateless-decoder-smatch-warning.patch b/queue-6.1/media-mediatek-vcodec-fix-vp8-stateless-decoder-smatch-warning.patch
new file mode 100644 (file)
index 0000000..61683fa
--- /dev/null
@@ -0,0 +1,48 @@
+From b113bc7c0e83b32f4dd2d291a2b6c4803e0a2c44 Mon Sep 17 00:00:00 2001
+From: Yunfei Dong <yunfei.dong@mediatek.com>
+Date: Thu, 13 Jun 2024 17:33:56 +0800
+Subject: media: mediatek: vcodec: Fix VP8 stateless decoder smatch warning
+
+From: Yunfei Dong <yunfei.dong@mediatek.com>
+
+commit b113bc7c0e83b32f4dd2d291a2b6c4803e0a2c44 upstream.
+
+Fix a smatch static checker warning in vdec_vp8_req_if.c
+that leads to a kernel crash when fb is NULL.
+
+Fixes: 7a7ae26fd458 ("media: mediatek: vcodec: support stateless VP8 decoding")
+Signed-off-by: Yunfei Dong <yunfei.dong@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
+@@ -336,14 +336,18 @@ static int vdec_vp8_slice_decode(void *h
+       src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+       fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+-      dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++      if (!fb) {
++              mtk_vcodec_err(inst, "fb buffer is NULL");
++              return -ENOMEM;
++      }
+-      y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
++      dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++      y_fb_dma = fb->base_y.dma_addr;
+       if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+               c_fb_dma = y_fb_dma +
+                       inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+       else
+-              c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++              c_fb_dma = fb->base_c.dma_addr;
+       inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
+       inst->vsi->dec.bs_sz = bs->size;
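
The NULL check must come before the container_of() call because
container_of() on a NULL inner pointer does not yield NULL: it yields
(0 - offsetof(...)), a bogus non-NULL address, so checking the result
instead of fb could never catch the failure. A stand-alone sketch with
invented types and a simplified container_of() (the kernel macro adds
type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct frame_buffer { int id; };
struct dec_buf {
        long                vb;              /* some leading member */
        struct frame_buffer frame_buffer;
};

int main(void)
{
        struct frame_buffer *fb = NULL;      /* get_cap_buffer() failed */

        /* Wrong: a bogus non-NULL pointer; later dereferences crash. */
        struct dec_buf *buf = container_of(fb, struct dec_buf, frame_buffer);
        printf("bogus dst_buf_info: %p\n", (void *)buf);

        /* Right, as in the patch: test fb itself first. */
        if (!fb) {
                puts("fb buffer is NULL");
                return 1;
        }
        return 0;
}
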
diff --git a/queue-6.1/mm-migrate-fix-shmem-xarray-update-during-migration.patch b/queue-6.1/mm-migrate-fix-shmem-xarray-update-during-migration.patch
new file mode 100644 (file)
index 0000000..d5442e6
--- /dev/null
@@ -0,0 +1,83 @@
+From 60cf233b585cdf1f3c5e52d1225606b86acd08b0 Mon Sep 17 00:00:00 2001
+From: Zi Yan <ziy@nvidia.com>
+Date: Wed, 5 Mar 2025 15:04:03 -0500
+Subject: mm/migrate: fix shmem xarray update during migration
+
+From: Zi Yan <ziy@nvidia.com>
+
+commit 60cf233b585cdf1f3c5e52d1225606b86acd08b0 upstream.
+
+A shmem folio can be either in page cache or in swap cache, but not at the
+same time.  Namely, once it is in swap cache, folio->mapping should be
+NULL, and the folio is no longer in a shmem mapping.
+
+In __folio_migrate_mapping(), to determine the number of xarray entries to
+update, folio_test_swapbacked() is used, but that conflates shmem in page
+cache case and shmem in swap cache case.  It leads to xarray multi-index
+entry corruption, since it turns a sibling entry to a normal entry during
+xas_store() (see [1] for a userspace reproduction).  Fix it by only using
+folio_test_swapcache() to determine whether xarray is storing swap cache
+entries or not to choose the right number of xarray entries to update.
+
+[1] https://lore.kernel.org/linux-mm/Z8idPCkaJW1IChjT@casper.infradead.org/
+
+Note:
+In __split_huge_page(), folio_test_anon() && folio_test_swapcache() is
+used to get swap_cache address space, but that ignores the shmem folio in
+swap cache case.  It could lead to NULL pointer dereferencing when a
+in-swap-cache shmem folio is split at __xa_store(), since
+!folio_test_anon() is true and folio->mapping is NULL.  But fortunately,
+its caller split_huge_page_to_list_to_order() bails out early with EBUSY
+when folio->mapping is NULL.  So no need to take care of it here.
+
+Link: https://lkml.kernel.org/r/20250305200403.2822855-1-ziy@nvidia.com
+Fixes: fc346d0a70a1 ("mm: migrate high-order folios in swap cache correctly")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Reported-by: Liu Shixin <liushixin2@huawei.com>
+Closes: https://lore.kernel.org/all/28546fb4-5210-bf75-16d6-43e1f8646080@huawei.com/
+Suggested-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Charan Teja Kalla <quic_charante@quicinc.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Lance Yang <ioworker0@gmail.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |   16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -420,19 +420,17 @@ int folio_migrate_mapping(struct address
+       newfolio->index = folio->index;
+       newfolio->mapping = folio->mapping;
+       folio_ref_add(newfolio, nr); /* add cache reference */
+-      if (folio_test_swapbacked(folio)) {
++      if (folio_test_swapbacked(folio))
+               __folio_set_swapbacked(newfolio);
+-              if (folio_test_swapcache(folio)) {
+-                      int i;
++    if (folio_test_swapcache(folio)) {
++        int i;
+-                      folio_set_swapcache(newfolio);
+-                      for (i = 0; i < nr; i++)
+-                              set_page_private(folio_page(newfolio, i),
+-                                      page_private(folio_page(folio, i)));
+-              }
++        folio_set_swapcache(newfolio);
++        for (i = 0; i < nr; i++)
++            set_page_private(folio_page(newfolio, i),
++                page_private(folio_page(folio, i)));
+               entries = nr;
+       } else {
+-              VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+               entries = 1;
+       }
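
The essence of the fix is which folio predicate selects the number of
xarray entries to update: a shmem folio still in the page cache occupies a
single multi-index entry, while a folio in swap cache occupies one slot per
page. A simplified decision sketch with stand-in types (these are not the
real kernel helpers):

#include <stdbool.h>
#include <stdio.h>

struct folio { bool swapbacked; bool swapcache; long nr_pages; };

/* Before the fix: keyed on swapbacked, which is also true for a shmem
 * folio still in the page cache -- storing nr_pages entries there turns
 * sibling entries of the multi-index entry into normal entries. */
static long entries_buggy(const struct folio *f)
{
        return f->swapbacked ? f->nr_pages : 1;
}

/* After the fix: only an actual swap-cache folio has one slot per page. */
static long entries_fixed(const struct folio *f)
{
        return f->swapcache ? f->nr_pages : 1;
}

int main(void)
{
        /* A large shmem folio in page cache, not in swap cache. */
        struct folio shmem = { .swapbacked = true, .swapcache = false,
                               .nr_pages = 512 };

        printf("buggy: %ld entries, fixed: %ld entry\n",
               entries_buggy(&shmem), entries_fixed(&shmem));
        return 0;
}
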
diff --git a/queue-6.1/netfilter-nft_counter-use-u64_stats_t-for-statistic.patch b/queue-6.1/netfilter-nft_counter-use-u64_stats_t-for-statistic.patch
new file mode 100644 (file)
index 0000000..4e05630
--- /dev/null
@@ -0,0 +1,245 @@
+From 4a1d3acd6ea86075e77fcc1188c3fc372833ba73 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 20 Aug 2024 09:54:32 +0200
+Subject: netfilter: nft_counter: Use u64_stats_t for statistic.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 4a1d3acd6ea86075e77fcc1188c3fc372833ba73 upstream.
+
+The nft_counter uses two s64 counters for statistics. Those two are
+protected by a seqcount to ensure that the 64bit variable is always
+properly seen during updates even on 32bit architectures where the store
+is performed by two writes. A side effect is that the two counters
+(bytes and packets) are written and read together in the same window.
+
+This can be replaced with u64_stats_t. write_seqcount_begin()/ end() is
+replaced with u64_stats_update_begin()/ end() and behaves the same way
+as with seqcount_t on 32bit architectures. Additionally there is a
+preempt_disable on PREEMPT_RT to ensure that a reader does not preempt a
+writer.
+On 64bit architectures the macros are removed and the reads happen
+without any retries. This also means that the reader can observe one
+counter (bytes) from before the update and the other counter (packets)
+from after it, but that is okay since there is no requirement to have
+both counters from the same update window.
+
+Convert the statistic to u64_stats_t. There is one optimisation:
+nft_counter_do_init() and nft_counter_clone() allocate a new per-CPU
+counter and assign a value to it. During this assignment preemption is
+disabled which is not needed because the counter is not yet exposed to
+the system so there can not be another writer or reader. Therefore
+disabling preemption is omitted and raw_cpu_ptr() is used to obtain a
+pointer to a counter for the assignment.
+
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nft_counter.c |   90 ++++++++++++++++++++++----------------------
+ 1 file changed, 46 insertions(+), 44 deletions(-)
+
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -8,7 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+-#include <linux/seqlock.h>
++#include <linux/u64_stats_sync.h>
+ #include <linux/netlink.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter/nf_tables.h>
+@@ -17,6 +17,11 @@
+ #include <net/netfilter/nf_tables_offload.h>
+ struct nft_counter {
++      u64_stats_t     bytes;
++      u64_stats_t     packets;
++};
++
++struct nft_counter_tot {
+       s64             bytes;
+       s64             packets;
+ };
+@@ -25,25 +30,24 @@ struct nft_counter_percpu_priv {
+       struct nft_counter __percpu *counter;
+ };
+-static DEFINE_PER_CPU(seqcount_t, nft_counter_seq);
++static DEFINE_PER_CPU(struct u64_stats_sync, nft_counter_sync);
+ static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
+                                      struct nft_regs *regs,
+                                      const struct nft_pktinfo *pkt)
+ {
++      struct u64_stats_sync *nft_sync;
+       struct nft_counter *this_cpu;
+-      seqcount_t *myseq;
+       local_bh_disable();
+       this_cpu = this_cpu_ptr(priv->counter);
+-      myseq = this_cpu_ptr(&nft_counter_seq);
+-
+-      write_seqcount_begin(myseq);
++      nft_sync = this_cpu_ptr(&nft_counter_sync);
+-      this_cpu->bytes += pkt->skb->len;
+-      this_cpu->packets++;
++      u64_stats_update_begin(nft_sync);
++      u64_stats_add(&this_cpu->bytes, pkt->skb->len);
++      u64_stats_inc(&this_cpu->packets);
++      u64_stats_update_end(nft_sync);
+-      write_seqcount_end(myseq);
+       local_bh_enable();
+ }
+@@ -66,17 +70,16 @@ static int nft_counter_do_init(const str
+       if (cpu_stats == NULL)
+               return -ENOMEM;
+-      preempt_disable();
+-      this_cpu = this_cpu_ptr(cpu_stats);
++      this_cpu = raw_cpu_ptr(cpu_stats);
+       if (tb[NFTA_COUNTER_PACKETS]) {
+-              this_cpu->packets =
+-                      be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
++              u64_stats_set(&this_cpu->packets,
++                            be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])));
+       }
+       if (tb[NFTA_COUNTER_BYTES]) {
+-              this_cpu->bytes =
+-                      be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
++              u64_stats_set(&this_cpu->bytes,
++                            be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])));
+       }
+-      preempt_enable();
++
+       priv->counter = cpu_stats;
+       return 0;
+ }
+@@ -104,40 +107,41 @@ static void nft_counter_obj_destroy(cons
+ }
+ static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
+-                            struct nft_counter *total)
++                            struct nft_counter_tot *total)
+ {
++      struct u64_stats_sync *nft_sync;
+       struct nft_counter *this_cpu;
+-      seqcount_t *myseq;
+       local_bh_disable();
+       this_cpu = this_cpu_ptr(priv->counter);
+-      myseq = this_cpu_ptr(&nft_counter_seq);
++      nft_sync = this_cpu_ptr(&nft_counter_sync);
++
++      u64_stats_update_begin(nft_sync);
++      u64_stats_add(&this_cpu->packets, -total->packets);
++      u64_stats_add(&this_cpu->bytes, -total->bytes);
++      u64_stats_update_end(nft_sync);
+-      write_seqcount_begin(myseq);
+-      this_cpu->packets -= total->packets;
+-      this_cpu->bytes -= total->bytes;
+-      write_seqcount_end(myseq);
+       local_bh_enable();
+ }
+ static void nft_counter_fetch(struct nft_counter_percpu_priv *priv,
+-                            struct nft_counter *total)
++                            struct nft_counter_tot *total)
+ {
+       struct nft_counter *this_cpu;
+-      const seqcount_t *myseq;
+       u64 bytes, packets;
+       unsigned int seq;
+       int cpu;
+       memset(total, 0, sizeof(*total));
+       for_each_possible_cpu(cpu) {
+-              myseq = per_cpu_ptr(&nft_counter_seq, cpu);
++              struct u64_stats_sync *nft_sync = per_cpu_ptr(&nft_counter_sync, cpu);
++
+               this_cpu = per_cpu_ptr(priv->counter, cpu);
+               do {
+-                      seq     = read_seqcount_begin(myseq);
+-                      bytes   = this_cpu->bytes;
+-                      packets = this_cpu->packets;
+-              } while (read_seqcount_retry(myseq, seq));
++                      seq     = u64_stats_fetch_begin(nft_sync);
++                      bytes   = u64_stats_read(&this_cpu->bytes);
++                      packets = u64_stats_read(&this_cpu->packets);
++              } while (u64_stats_fetch_retry(nft_sync, seq));
+               total->bytes    += bytes;
+               total->packets  += packets;
+@@ -148,7 +152,7 @@ static int nft_counter_do_dump(struct sk
+                              struct nft_counter_percpu_priv *priv,
+                              bool reset)
+ {
+-      struct nft_counter total;
++      struct nft_counter_tot total;
+       nft_counter_fetch(priv, &total);
+@@ -236,7 +240,7 @@ static int nft_counter_clone(struct nft_
+       struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
+       struct nft_counter __percpu *cpu_stats;
+       struct nft_counter *this_cpu;
+-      struct nft_counter total;
++      struct nft_counter_tot total;
+       nft_counter_fetch(priv, &total);
+@@ -244,11 +248,9 @@ static int nft_counter_clone(struct nft_
+       if (cpu_stats == NULL)
+               return -ENOMEM;
+-      preempt_disable();
+-      this_cpu = this_cpu_ptr(cpu_stats);
+-      this_cpu->packets = total.packets;
+-      this_cpu->bytes = total.bytes;
+-      preempt_enable();
++      this_cpu = raw_cpu_ptr(cpu_stats);
++      u64_stats_set(&this_cpu->packets, total.packets);
++      u64_stats_set(&this_cpu->bytes, total.bytes);
+       priv_clone->counter = cpu_stats;
+       return 0;
+@@ -266,17 +268,17 @@ static void nft_counter_offload_stats(st
+                                     const struct flow_stats *stats)
+ {
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
++      struct u64_stats_sync *nft_sync;
+       struct nft_counter *this_cpu;
+-      seqcount_t *myseq;
+       local_bh_disable();
+       this_cpu = this_cpu_ptr(priv->counter);
+-      myseq = this_cpu_ptr(&nft_counter_seq);
++      nft_sync = this_cpu_ptr(&nft_counter_sync);
+-      write_seqcount_begin(myseq);
+-      this_cpu->packets += stats->pkts;
+-      this_cpu->bytes += stats->bytes;
+-      write_seqcount_end(myseq);
++      u64_stats_update_begin(nft_sync);
++      u64_stats_add(&this_cpu->packets, stats->pkts);
++      u64_stats_add(&this_cpu->bytes, stats->bytes);
++      u64_stats_update_end(nft_sync);
+       local_bh_enable();
+ }
+@@ -285,7 +287,7 @@ void nft_counter_init_seqcount(void)
+       int cpu;
+       for_each_possible_cpu(cpu)
+-              seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
++              u64_stats_init(per_cpu_ptr(&nft_counter_sync, cpu));
+ }
+ struct nft_expr_type nft_counter_type;
index d10928d3c1dbe64cc4aace1eb78ea79395e2ca76..5598e1b414ea87ea338d9319a2c4e33f6dd783f1 100644 (file)
@@ -88,9 +88,6 @@ xfs-move-the-xfs_rtbitmap.c-declarations-to-xfs_rtbitmap.h.patch
 xfs-convert-rt-bitmap-extent-lengths-to-xfs_rtbxlen_t.patch
 xfs-consider-minlen-sized-extents-in-xfs_rtallocate_extent_block.patch
 xfs-don-t-leak-recovered-attri-intent-items.patch
-xfs-use-xfs_defer_pending-objects-to-recover-intent-items.patch
-xfs-pass-the-xfs_defer_pending-object-to-iop_recover.patch
-xfs-transfer-recovered-intent-item-ownership-in-iop_recover.patch
 xfs-make-rextslog-computation-consistent-with-mkfs.patch
 xfs-fix-32-bit-truncation-in-xfs_compute_rextslog.patch
 xfs-don-t-allow-overly-small-or-large-realtime-volumes.patch
@@ -190,3 +187,12 @@ drm-amdgpu-fix-jpeg-video-caps-max-size-for-navi1x-and-raven.patch
 ksmbd-fix-incorrect-validation-for-num_aces-field-of-smb_acl.patch
 drm-amd-display-use-hw-lock-mgr-for-psr1-when-only-one-edp.patch
 mptcp-fix-data-stream-corruption-in-the-address-announcement.patch
+netfilter-nft_counter-use-u64_stats_t-for-statistic.patch
+drm-mediatek-fix-coverity-issue-with-unintentional-integer-overflow.patch
+media-mediatek-vcodec-fix-vp8-stateless-decoder-smatch-warning.patch
+arm64-dts-rockchip-fix-u2phy1_host-status-for-nanopi-r4s.patch
+drm-amdgpu-fix-use-after-free-bug.patch
+fs-ntfs3-change-new-sparse-cluster-processing.patch
+wifi-iwlwifi-mvm-ensure-offloading-tid-queue-exists.patch
+mm-migrate-fix-shmem-xarray-update-during-migration.patch
+block-bfq-fix-re-introduced-uaf-in-bic_set_bfqq.patch
diff --git a/queue-6.1/wifi-iwlwifi-mvm-ensure-offloading-tid-queue-exists.patch b/queue-6.1/wifi-iwlwifi-mvm-ensure-offloading-tid-queue-exists.patch
new file mode 100644 (file)
index 0000000..98f0a4f
--- /dev/null
@@ -0,0 +1,110 @@
+From 78f65fbf421a61894c14a1b91fe2fb4437b3fe5f Mon Sep 17 00:00:00 2001
+From: Benjamin Berg <benjamin.berg@intel.com>
+Date: Sun, 18 Feb 2024 19:51:47 +0200
+Subject: wifi: iwlwifi: mvm: ensure offloading TID queue exists
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+commit 78f65fbf421a61894c14a1b91fe2fb4437b3fe5f upstream.
+
+The resume code path assumes that the TX queue for the offloading TID
+has been configured. At resume time it then tries to sync the write
+pointer as it may have been updated by the firmware.
+
+In the unusual event that no packets have been sent on TID 0, the queue
+will not have been allocated and this causes a crash. Fix this by
+ensuring the queue exists at suspend time.
+
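+The shape of the fix, condensed from the hunks below: before sending
+the wowlan configuration, make sure the queue backing the offloading
+TID was actually allocated (it will not have been if no frames were
+ever sent on that TID):
+
+	ret = iwl_mvm_sta_ensure_queue(
+		mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+	if (ret)
+		goto out_noreset;
+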
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://msgid.link/20240218194912.6632e6dc7b35.Ie6e6a7488c9c7d4529f13d48f752b5439d8ac3c4@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Jianqi Ren <jianqi.ren.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/d3.c  |    9 +++++++-
+ drivers/net/wireless/intel/iwlwifi/mvm/sta.c |   28 +++++++++++++++++++++++++++
+ drivers/net/wireless/intel/iwlwifi/mvm/sta.h |    3 +-
+ 3 files changed, 38 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -1286,7 +1286,9 @@ static int __iwl_mvm_suspend(struct ieee
+               mvm->net_detect = true;
+       } else {
+-              struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
++              struct iwl_wowlan_config_cmd wowlan_config_cmd = {
++                      .offloading_tid = 0,
++              };
+               wowlan_config_cmd.sta_id = mvmvif->ap_sta_id;
+@@ -1298,6 +1300,11 @@ static int __iwl_mvm_suspend(struct ieee
+                       goto out_noreset;
+               }
++              ret = iwl_mvm_sta_ensure_queue(
++                      mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
++              if (ret)
++                      goto out_noreset;
++
+               ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+                                               vif, mvmvif, ap_sta);
+               if (ret)
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -1419,6 +1419,34 @@ out_err:
+       return ret;
+ }
++int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
++                           struct ieee80211_txq *txq)
++{
++      struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
++      int ret = -EINVAL;
++
++      lockdep_assert_held(&mvm->mutex);
++
++      if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
++          !txq->sta) {
++              return 0;
++      }
++
++      if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
++              set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++              ret = 0;
++      }
++
++      local_bh_disable();
++      spin_lock(&mvm->add_stream_lock);
++      if (!list_empty(&mvmtxq->list))
++              list_del_init(&mvmtxq->list);
++      spin_unlock(&mvm->add_stream_lock);
++      local_bh_enable();
++
++      return ret;
++}
++
+ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+ {
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+  * Copyright (C) 2015-2016 Intel Deutschland GmbH
+  */
+@@ -544,6 +544,7 @@ void iwl_mvm_modify_all_sta_disable_tx(s
+                                      struct iwl_mvm_vif *mvmvif,
+                                      bool disable);
+ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
++int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
+ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                        struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
diff --git a/queue-6.1/xfs-pass-the-xfs_defer_pending-object-to-iop_recover.patch b/queue-6.1/xfs-pass-the-xfs_defer_pending-object-to-iop_recover.patch
deleted file mode 100644 (file)
index 4f98ef4..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-From stable+bounces-124372-greg=kroah.com@vger.kernel.org Thu Mar 13 21:26:31 2025
-From: Leah Rumancik <leah.rumancik@gmail.com>
-Date: Thu, 13 Mar 2025 13:25:35 -0700
-Subject: xfs: pass the xfs_defer_pending object to iop_recover
-To: stable@vger.kernel.org
-Cc: xfs-stable@lists.linux.dev, "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Leah Rumancik <leah.rumancik@gmail.com>
-Message-ID: <20250313202550.2257219-16-leah.rumancik@gmail.com>
-
-From: "Darrick J. Wong" <djwong@kernel.org>
-
-[ Upstream commit a050acdfa8003a44eae4558fddafc7afb1aef458 ]
-
-Now that log intent item recovery recreates the xfs_defer_pending state,
-we should pass that into the ->iop_recover routines so that the intent
-item can finish the recreation work.
-
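-Condensed from the hunks below, every ->iop_recover implementation now
-receives the pending work item and derives the log item from it:
-
-	STATIC int
-	xfs_efi_item_recover(
-		struct xfs_defer_pending	*dfp,
-		struct list_head		*capture_list)
-	{
-		struct xfs_log_item	*lip = dfp->dfp_intent;
-
-		/* ... body unchanged, operates on lip as before ... */
-	}
-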
-Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
-Acked-by: "Darrick J. Wong" <djwong@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- fs/xfs/xfs_attr_item.c     |    3 ++-
- fs/xfs/xfs_bmap_item.c     |    3 ++-
- fs/xfs/xfs_extfree_item.c  |    3 ++-
- fs/xfs/xfs_log_recover.c   |    2 +-
- fs/xfs/xfs_refcount_item.c |    3 ++-
- fs/xfs/xfs_rmap_item.c     |    3 ++-
- fs/xfs/xfs_trans.h         |    4 +++-
- 7 files changed, 14 insertions(+), 7 deletions(-)
-
---- a/fs/xfs/xfs_attr_item.c
-+++ b/fs/xfs/xfs_attr_item.c
-@@ -545,9 +545,10 @@ xfs_attri_validate(
-  */
- STATIC int
- xfs_attri_item_recover(
--      struct xfs_log_item             *lip,
-+      struct xfs_defer_pending        *dfp,
-       struct list_head                *capture_list)
- {
-+      struct xfs_log_item             *lip = dfp->dfp_intent;
-       struct xfs_attri_log_item       *attrip = ATTRI_ITEM(lip);
-       struct xfs_attr_intent          *attr;
-       struct xfs_mount                *mp = lip->li_log->l_mp;
---- a/fs/xfs/xfs_bmap_item.c
-+++ b/fs/xfs/xfs_bmap_item.c
-@@ -453,11 +453,12 @@ xfs_bui_validate(
-  */
- STATIC int
- xfs_bui_item_recover(
--      struct xfs_log_item             *lip,
-+      struct xfs_defer_pending        *dfp,
-       struct list_head                *capture_list)
- {
-       struct xfs_bmap_intent          fake = { };
-       struct xfs_trans_res            resv;
-+      struct xfs_log_item             *lip = dfp->dfp_intent;
-       struct xfs_bui_log_item         *buip = BUI_ITEM(lip);
-       struct xfs_trans                *tp;
-       struct xfs_inode                *ip = NULL;
---- a/fs/xfs/xfs_extfree_item.c
-+++ b/fs/xfs/xfs_extfree_item.c
-@@ -595,10 +595,11 @@ xfs_efi_validate_ext(
-  */
- STATIC int
- xfs_efi_item_recover(
--      struct xfs_log_item             *lip,
-+      struct xfs_defer_pending        *dfp,
-       struct list_head                *capture_list)
- {
-       struct xfs_trans_res            resv;
-+      struct xfs_log_item             *lip = dfp->dfp_intent;
-       struct xfs_efi_log_item         *efip = EFI_ITEM(lip);
-       struct xfs_mount                *mp = lip->li_log->l_mp;
-       struct xfs_efd_log_item         *efdp;
---- a/fs/xfs/xfs_log_recover.c
-+++ b/fs/xfs/xfs_log_recover.c
-@@ -2586,7 +2586,7 @@ xlog_recover_process_intents(
-                * The recovery function can free the log item, so we must not
-                * access lip after it returns.
-                */
--              error = ops->iop_recover(lip, &capture_list);
-+              error = ops->iop_recover(dfp, &capture_list);
-               if (error) {
-                       trace_xlog_intent_recovery_failed(log->l_mp, error,
-                                       ops->iop_recover);
---- a/fs/xfs/xfs_refcount_item.c
-+++ b/fs/xfs/xfs_refcount_item.c
-@@ -450,10 +450,11 @@ xfs_cui_validate_phys(
-  */
- STATIC int
- xfs_cui_item_recover(
--      struct xfs_log_item             *lip,
-+      struct xfs_defer_pending        *dfp,
-       struct list_head                *capture_list)
- {
-       struct xfs_trans_res            resv;
-+      struct xfs_log_item             *lip = dfp->dfp_intent;
-       struct xfs_cui_log_item         *cuip = CUI_ITEM(lip);
-       struct xfs_cud_log_item         *cudp;
-       struct xfs_trans                *tp;
---- a/fs/xfs/xfs_rmap_item.c
-+++ b/fs/xfs/xfs_rmap_item.c
-@@ -489,10 +489,11 @@ xfs_rui_validate_map(
-  */
- STATIC int
- xfs_rui_item_recover(
--      struct xfs_log_item             *lip,
-+      struct xfs_defer_pending        *dfp,
-       struct list_head                *capture_list)
- {
-       struct xfs_trans_res            resv;
-+      struct xfs_log_item             *lip = dfp->dfp_intent;
-       struct xfs_rui_log_item         *ruip = RUI_ITEM(lip);
-       struct xfs_map_extent           *rmap;
-       struct xfs_rud_log_item         *rudp;
---- a/fs/xfs/xfs_trans.h
-+++ b/fs/xfs/xfs_trans.h
-@@ -66,6 +66,8 @@ struct xfs_log_item {
-       { (1u << XFS_LI_DIRTY),         "DIRTY" }, \
-       { (1u << XFS_LI_WHITEOUT),      "WHITEOUT" }
-+struct xfs_defer_pending;
-+
- struct xfs_item_ops {
-       unsigned flags;
-       void (*iop_size)(struct xfs_log_item *, int *, int *);
-@@ -78,7 +80,7 @@ struct xfs_item_ops {
-       xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
-       uint (*iop_push)(struct xfs_log_item *, struct list_head *);
-       void (*iop_release)(struct xfs_log_item *);
--      int (*iop_recover)(struct xfs_log_item *lip,
-+      int (*iop_recover)(struct xfs_defer_pending *dfp,
-                          struct list_head *capture_list);
-       bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
-       struct xfs_log_item *(*iop_relog)(struct xfs_log_item *intent,
diff --git a/queue-6.1/xfs-transfer-recovered-intent-item-ownership-in-iop_recover.patch b/queue-6.1/xfs-transfer-recovered-intent-item-ownership-in-iop_recover.patch
deleted file mode 100644 (file)
index 4d05b3c..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-From stable+bounces-124373-greg=kroah.com@vger.kernel.org Thu Mar 13 21:26:34 2025
-From: Leah Rumancik <leah.rumancik@gmail.com>
-Date: Thu, 13 Mar 2025 13:25:36 -0700
-Subject: xfs: transfer recovered intent item ownership in ->iop_recover
-To: stable@vger.kernel.org
-Cc: xfs-stable@lists.linux.dev, "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Leah Rumancik <leah.rumancik@gmail.com>
-Message-ID: <20250313202550.2257219-17-leah.rumancik@gmail.com>
-
-From: "Darrick J. Wong" <djwong@kernel.org>
-
-[ Upstream commit deb4cd8ba87f17b12c72b3827820d9c703e9fd95 ]
-
-Now that we pass the xfs_defer_pending object into the intent item
-recovery functions, we know exactly when ownership of the sole refcount
-passes from the recovery context to the intent done item.  At that
-point, we need to null out dfp_intent so that the recovery mechanism
-won't release it.  This should fix the UAF problem reported by Long Li.
-
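-Condensed from the hunks below: in each recovery method, once the done
-item has taken over the intent's reference, detach the intent from the
-pending work item so that recovery teardown will not release it again:
-
-	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
-	xlog_recover_transfer_intent(tp, dfp);	/* nulls dfp->dfp_intent */
-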
-Note that we still want to recreate the full deferred work state.  That
-will be addressed in the next patches.
-
-Fixes: 2e76f188fd90 ("xfs: cancel intents immediately if process_intents fails")
-Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
-Acked-by: "Darrick J. Wong" <djwong@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- fs/xfs/libxfs/xfs_log_recover.h |    2 ++
- fs/xfs/xfs_attr_item.c          |    1 +
- fs/xfs/xfs_bmap_item.c          |    2 ++
- fs/xfs/xfs_extfree_item.c       |    2 ++
- fs/xfs/xfs_log_recover.c        |   19 ++++++++++++-------
- fs/xfs/xfs_refcount_item.c      |    1 +
- fs/xfs/xfs_rmap_item.c          |    2 ++
- 7 files changed, 22 insertions(+), 7 deletions(-)
-
---- a/fs/xfs/libxfs/xfs_log_recover.h
-+++ b/fs/xfs/libxfs/xfs_log_recover.h
-@@ -155,5 +155,7 @@ xlog_recover_resv(const struct xfs_trans
- void xlog_recover_intent_item(struct xlog *log, struct xfs_log_item *lip,
-               xfs_lsn_t lsn, unsigned int dfp_type);
-+void xlog_recover_transfer_intent(struct xfs_trans *tp,
-+              struct xfs_defer_pending *dfp);
- #endif        /* __XFS_LOG_RECOVER_H__ */
---- a/fs/xfs/xfs_attr_item.c
-+++ b/fs/xfs/xfs_attr_item.c
-@@ -632,6 +632,7 @@ xfs_attri_item_recover(
-       args->trans = tp;
-       done_item = xfs_trans_get_attrd(tp, attrip);
-+      xlog_recover_transfer_intent(tp, dfp);
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, 0);
---- a/fs/xfs/xfs_bmap_item.c
-+++ b/fs/xfs/xfs_bmap_item.c
-@@ -491,6 +491,8 @@ xfs_bui_item_recover(
-               goto err_rele;
-       budp = xfs_trans_get_bud(tp, buip);
-+      xlog_recover_transfer_intent(tp, dfp);
-+
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, 0);
---- a/fs/xfs/xfs_extfree_item.c
-+++ b/fs/xfs/xfs_extfree_item.c
-@@ -626,7 +626,9 @@ xfs_efi_item_recover(
-       error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
-       if (error)
-               return error;
-+
-       efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
-+      xlog_recover_transfer_intent(tp, dfp);
-       for (i = 0; i < efip->efi_format.efi_nextents; i++) {
-               struct xfs_extent_free_item     fake = {
---- a/fs/xfs/xfs_log_recover.c
-+++ b/fs/xfs/xfs_log_recover.c
-@@ -2593,13 +2593,6 @@ xlog_recover_process_intents(
-                       break;
-               }
--              /*
--               * XXX: @lip could have been freed, so detach the log item from
--               * the pending item before freeing the pending item.  This does
--               * not fix the existing UAF bug that occurs if ->iop_recover
--               * fails after creating the intent done item.
--               */
--              dfp->dfp_intent = NULL;
-               xfs_defer_cancel_recovery(log->l_mp, dfp);
-       }
-       if (error)
-@@ -2634,6 +2627,18 @@ xlog_recover_cancel_intents(
- }
- /*
-+ * Transfer ownership of the recovered log intent item to the recovery
-+ * transaction.
-+ */
-+void
-+xlog_recover_transfer_intent(
-+      struct xfs_trans                *tp,
-+      struct xfs_defer_pending        *dfp)
-+{
-+      dfp->dfp_intent = NULL;
-+}
-+
-+/*
-  * This routine performs a transaction to null out a bad inode pointer
-  * in an agi unlinked inode hash bucket.
-  */
---- a/fs/xfs/xfs_refcount_item.c
-+++ b/fs/xfs/xfs_refcount_item.c
-@@ -499,6 +499,7 @@ xfs_cui_item_recover(
-               return error;
-       cudp = xfs_trans_get_cud(tp, cuip);
-+      xlog_recover_transfer_intent(tp, dfp);
-       for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
-               struct xfs_refcount_intent      fake = { };
---- a/fs/xfs/xfs_rmap_item.c
-+++ b/fs/xfs/xfs_rmap_item.c
-@@ -526,7 +526,9 @@ xfs_rui_item_recover(
-                       XFS_TRANS_RESERVE, &tp);
-       if (error)
-               return error;
-+
-       rudp = xfs_trans_get_rud(tp, ruip);
-+      xlog_recover_transfer_intent(tp, dfp);
-       for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
-               rmap = &ruip->rui_format.rui_extents[i];
diff --git a/queue-6.1/xfs-use-xfs_defer_pending-objects-to-recover-intent-items.patch b/queue-6.1/xfs-use-xfs_defer_pending-objects-to-recover-intent-items.patch
deleted file mode 100644 (file)
index dd75c77..0000000
+++ /dev/null
@@ -1,527 +0,0 @@
-From stable+bounces-124371-greg=kroah.com@vger.kernel.org Thu Mar 13 21:26:31 2025
-From: Leah Rumancik <leah.rumancik@gmail.com>
-Date: Thu, 13 Mar 2025 13:25:34 -0700
-Subject: xfs: use xfs_defer_pending objects to recover intent items
-To: stable@vger.kernel.org
-Cc: xfs-stable@lists.linux.dev, "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Catherine Hoang <catherine.hoang@oracle.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Leah Rumancik <leah.rumancik@gmail.com>
-Message-ID: <20250313202550.2257219-15-leah.rumancik@gmail.com>
-
-From: "Darrick J. Wong" <djwong@kernel.org>
-
-[ Upstream commit 03f7767c9f6120ac933378fdec3bfd78bf07bc11 ]
-
-[ 6.1: resolved conflict in xfs_defer.c ]
-
-One thing I never quite got around to doing is porting the log intent
-item recovery code to reconstruct the deferred pending work state.  As a
-result, each intent item open codes xfs_defer_finish_one in its recovery
-method, because that's what the EFI code did before xfs_defer.c even
-existed.
-
-This is a gross thing to have left unfixed -- if an EFI cannot proceed
-due to busy extents, we end up creating separate new EFIs for each
-unfinished work item, which is a change in behavior from what runtime
-would have done.
-
-Worse yet, Long Li pointed out that there's a UAF in the recovery code.
-The ->commit_pass2 function adds the intent item to the AIL and drops
-the refcount.  The one remaining refcount is now owned by the recovery
-mechanism (aka the log intent items in the AIL) with the intent of
-giving the refcount to the intent done item in the ->iop_recover
-function.
-
-However, if something fails later in recovery, xlog_recover_finish will
-walk the recovered intent items in the AIL and release them.  If the CIL
-hasn't been pushed before that point (which is possible since we don't
-force the log until later) then the intent done release will try to free
-its associated intent, which has already been freed.
-
-This patch starts to address this mess by having the ->commit_pass2
-functions recreate the xfs_defer_pending state.  The next few patches
-will fix the recovery functions.
-
-Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: Catherine Hoang <catherine.hoang@oracle.com>
-Acked-by: Darrick J. Wong <djwong@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
-Acked-by: "Darrick J. Wong" <djwong@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- fs/xfs/libxfs/xfs_defer.c       |  103 ++++++++++++++++++++++++++---------
- fs/xfs/libxfs/xfs_defer.h       |    5 +
- fs/xfs/libxfs/xfs_log_recover.h |    3 +
- fs/xfs/xfs_attr_item.c          |   10 ---
- fs/xfs/xfs_bmap_item.c          |    9 +--
- fs/xfs/xfs_extfree_item.c       |    9 +--
- fs/xfs/xfs_log.c                |    1 
- fs/xfs/xfs_log_priv.h           |    1 
- fs/xfs/xfs_log_recover.c        |  115 ++++++++++++++++++++--------------------
- fs/xfs/xfs_refcount_item.c      |    9 +--
- fs/xfs/xfs_rmap_item.c          |    9 +--
- 11 files changed, 158 insertions(+), 116 deletions(-)
-
---- a/fs/xfs/libxfs/xfs_defer.c
-+++ b/fs/xfs/libxfs/xfs_defer.c
-@@ -245,23 +245,52 @@ xfs_defer_create_intents(
-       return ret;
- }
--STATIC void
-+static inline void
- xfs_defer_pending_abort(
-       struct xfs_mount                *mp,
-+      struct xfs_defer_pending        *dfp)
-+{
-+      const struct xfs_defer_op_type  *ops = defer_op_types[dfp->dfp_type];
-+
-+      trace_xfs_defer_pending_abort(mp, dfp);
-+
-+      if (dfp->dfp_intent && !dfp->dfp_done) {
-+              ops->abort_intent(dfp->dfp_intent);
-+              dfp->dfp_intent = NULL;
-+      }
-+}
-+
-+static inline void
-+xfs_defer_pending_cancel_work(
-+      struct xfs_mount                *mp,
-+      struct xfs_defer_pending        *dfp)
-+{
-+      const struct xfs_defer_op_type  *ops = defer_op_types[dfp->dfp_type];
-+      struct list_head                *pwi;
-+      struct list_head                *n;
-+
-+      trace_xfs_defer_cancel_list(mp, dfp);
-+
-+      list_del(&dfp->dfp_list);
-+      list_for_each_safe(pwi, n, &dfp->dfp_work) {
-+              list_del(pwi);
-+              dfp->dfp_count--;
-+              ops->cancel_item(pwi);
-+      }
-+      ASSERT(dfp->dfp_count == 0);
-+      kmem_cache_free(xfs_defer_pending_cache, dfp);
-+}
-+
-+STATIC void
-+xfs_defer_pending_abort_list(
-+      struct xfs_mount                *mp,
-       struct list_head                *dop_list)
- {
-       struct xfs_defer_pending        *dfp;
--      const struct xfs_defer_op_type  *ops;
-       /* Abort intent items that don't have a done item. */
--      list_for_each_entry(dfp, dop_list, dfp_list) {
--              ops = defer_op_types[dfp->dfp_type];
--              trace_xfs_defer_pending_abort(mp, dfp);
--              if (dfp->dfp_intent && !dfp->dfp_done) {
--                      ops->abort_intent(dfp->dfp_intent);
--                      dfp->dfp_intent = NULL;
--              }
--      }
-+      list_for_each_entry(dfp, dop_list, dfp_list)
-+              xfs_defer_pending_abort(mp, dfp);
- }
- /* Abort all the intents that were committed. */
-@@ -271,7 +300,7 @@ xfs_defer_trans_abort(
-       struct list_head                *dop_pending)
- {
-       trace_xfs_defer_trans_abort(tp, _RET_IP_);
--      xfs_defer_pending_abort(tp->t_mountp, dop_pending);
-+      xfs_defer_pending_abort_list(tp->t_mountp, dop_pending);
- }
- /*
-@@ -389,26 +418,13 @@ xfs_defer_cancel_list(
- {
-       struct xfs_defer_pending        *dfp;
-       struct xfs_defer_pending        *pli;
--      struct list_head                *pwi;
--      struct list_head                *n;
--      const struct xfs_defer_op_type  *ops;
-       /*
-        * Free the pending items.  Caller should already have arranged
-        * for the intent items to be released.
-        */
--      list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
--              ops = defer_op_types[dfp->dfp_type];
--              trace_xfs_defer_cancel_list(mp, dfp);
--              list_del(&dfp->dfp_list);
--              list_for_each_safe(pwi, n, &dfp->dfp_work) {
--                      list_del(pwi);
--                      dfp->dfp_count--;
--                      ops->cancel_item(pwi);
--              }
--              ASSERT(dfp->dfp_count == 0);
--              kmem_cache_free(xfs_defer_pending_cache, dfp);
--      }
-+      list_for_each_entry_safe(dfp, pli, dop_list, dfp_list)
-+              xfs_defer_pending_cancel_work(mp, dfp);
- }
- /*
-@@ -664,6 +680,39 @@ xfs_defer_add(
- }
- /*
-+ * Create a pending deferred work item to replay the recovered intent item
-+ * and add it to the list.
-+ */
-+void
-+xfs_defer_start_recovery(
-+      struct xfs_log_item             *lip,
-+      enum xfs_defer_ops_type         dfp_type,
-+      struct list_head                *r_dfops)
-+{
-+      struct xfs_defer_pending        *dfp;
-+
-+      dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
-+                      GFP_NOFS | __GFP_NOFAIL);
-+      dfp->dfp_type = dfp_type;
-+      dfp->dfp_intent = lip;
-+      INIT_LIST_HEAD(&dfp->dfp_work);
-+      list_add_tail(&dfp->dfp_list, r_dfops);
-+}
-+
-+/*
-+ * Cancel a deferred work item created to recover a log intent item.  @dfp
-+ * will be freed after this function returns.
-+ */
-+void
-+xfs_defer_cancel_recovery(
-+      struct xfs_mount                *mp,
-+      struct xfs_defer_pending        *dfp)
-+{
-+      xfs_defer_pending_abort(mp, dfp);
-+      xfs_defer_pending_cancel_work(mp, dfp);
-+}
-+
-+/*
-  * Move deferred ops from one transaction to another and reset the source to
-  * initial state. This is primarily used to carry state forward across
-  * transaction rolls with pending dfops.
-@@ -767,7 +816,7 @@ xfs_defer_ops_capture_abort(
- {
-       unsigned short                  i;
--      xfs_defer_pending_abort(mp, &dfc->dfc_dfops);
-+      xfs_defer_pending_abort_list(mp, &dfc->dfc_dfops);
-       xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
-       for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
---- a/fs/xfs/libxfs/xfs_defer.h
-+++ b/fs/xfs/libxfs/xfs_defer.h
-@@ -125,6 +125,11 @@ void xfs_defer_ops_capture_abort(struct
-               struct xfs_defer_capture *d);
- void xfs_defer_resources_rele(struct xfs_defer_resources *dres);
-+void xfs_defer_start_recovery(struct xfs_log_item *lip,
-+              enum xfs_defer_ops_type dfp_type, struct list_head *r_dfops);
-+void xfs_defer_cancel_recovery(struct xfs_mount *mp,
-+              struct xfs_defer_pending *dfp);
-+
- int __init xfs_defer_init_item_caches(void);
- void xfs_defer_destroy_item_caches(void);
---- a/fs/xfs/libxfs/xfs_log_recover.h
-+++ b/fs/xfs/libxfs/xfs_log_recover.h
-@@ -153,4 +153,7 @@ xlog_recover_resv(const struct xfs_trans
-       return ret;
- }
-+void xlog_recover_intent_item(struct xlog *log, struct xfs_log_item *lip,
-+              xfs_lsn_t lsn, unsigned int dfp_type);
-+
- #endif        /* __XFS_LOG_RECOVER_H__ */
---- a/fs/xfs/xfs_attr_item.c
-+++ b/fs/xfs/xfs_attr_item.c
-@@ -772,14 +772,8 @@ xlog_recover_attri_commit_pass2(
-       attrip = xfs_attri_init(mp, nv);
-       memcpy(&attrip->attri_format, attri_formatp, len);
--      /*
--       * The ATTRI has two references. One for the ATTRD and one for ATTRI to
--       * ensure it makes it into the AIL. Insert the ATTRI into the AIL
--       * directly and drop the ATTRI reference. Note that
--       * xfs_trans_ail_update() drops the AIL lock.
--       */
--      xfs_trans_ail_insert(log->l_ailp, &attrip->attri_item, lsn);
--      xfs_attri_release(attrip);
-+      xlog_recover_intent_item(log, &attrip->attri_item, lsn,
-+                      XFS_DEFER_OPS_TYPE_ATTR);
-       xfs_attri_log_nameval_put(nv);
-       return 0;
- }
---- a/fs/xfs/xfs_bmap_item.c
-+++ b/fs/xfs/xfs_bmap_item.c
-@@ -646,12 +646,9 @@ xlog_recover_bui_commit_pass2(
-       buip = xfs_bui_init(mp);
-       xfs_bui_copy_format(&buip->bui_format, bui_formatp);
-       atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
--      /*
--       * Insert the intent into the AIL directly and drop one reference so
--       * that finishing or canceling the work will drop the other.
--       */
--      xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn);
--      xfs_bui_release(buip);
-+
-+      xlog_recover_intent_item(log, &buip->bui_item, lsn,
-+                      XFS_DEFER_OPS_TYPE_BMAP);
-       return 0;
- }
---- a/fs/xfs/xfs_extfree_item.c
-+++ b/fs/xfs/xfs_extfree_item.c
-@@ -736,12 +736,9 @@ xlog_recover_efi_commit_pass2(
-               return error;
-       }
-       atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
--      /*
--       * Insert the intent into the AIL directly and drop one reference so
--       * that finishing or canceling the work will drop the other.
--       */
--      xfs_trans_ail_insert(log->l_ailp, &efip->efi_item, lsn);
--      xfs_efi_release(efip);
-+
-+      xlog_recover_intent_item(log, &efip->efi_item, lsn,
-+                      XFS_DEFER_OPS_TYPE_FREE);
-       return 0;
- }
---- a/fs/xfs/xfs_log.c
-+++ b/fs/xfs/xfs_log.c
-@@ -1540,6 +1540,7 @@ xlog_alloc_log(
-       log->l_covered_state = XLOG_STATE_COVER_IDLE;
-       set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
-       INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
-+      INIT_LIST_HEAD(&log->r_dfops);
-       log->l_prev_block  = -1;
-       /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
---- a/fs/xfs/xfs_log_priv.h
-+++ b/fs/xfs/xfs_log_priv.h
-@@ -403,6 +403,7 @@ struct xlog {
-       long                    l_opstate;      /* operational state */
-       uint                    l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
-       struct list_head        *l_buf_cancel_table;
-+      struct list_head        r_dfops;        /* recovered log intent items */
-       int                     l_iclog_hsize;  /* size of iclog header */
-       int                     l_iclog_heads;  /* # of iclog header sectors */
-       uint                    l_sectBBsize;   /* sector size in BBs (2^n) */
---- a/fs/xfs/xfs_log_recover.c
-+++ b/fs/xfs/xfs_log_recover.c
-@@ -1723,30 +1723,24 @@ xlog_clear_stale_blocks(
-  */
- void
- xlog_recover_release_intent(
--      struct xlog             *log,
--      unsigned short          intent_type,
--      uint64_t                intent_id)
-+      struct xlog                     *log,
-+      unsigned short                  intent_type,
-+      uint64_t                        intent_id)
- {
--      struct xfs_ail_cursor   cur;
--      struct xfs_log_item     *lip;
--      struct xfs_ail          *ailp = log->l_ailp;
--
--      spin_lock(&ailp->ail_lock);
--      for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
--           lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
-+      struct xfs_defer_pending        *dfp, *n;
-+
-+      list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
-+              struct xfs_log_item     *lip = dfp->dfp_intent;
-+
-               if (lip->li_type != intent_type)
-                       continue;
-               if (!lip->li_ops->iop_match(lip, intent_id))
-                       continue;
--              spin_unlock(&ailp->ail_lock);
--              lip->li_ops->iop_release(lip);
--              spin_lock(&ailp->ail_lock);
--              break;
--      }
-+              ASSERT(xlog_item_is_intent(lip));
--      xfs_trans_ail_cursor_done(&cur);
--      spin_unlock(&ailp->ail_lock);
-+              xfs_defer_cancel_recovery(log->l_mp, dfp);
-+      }
- }
- int
-@@ -1939,6 +1933,29 @@ xlog_buf_readahead(
-               xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
- }
-+/*
-+ * Create a deferred work structure for resuming and tracking the progress of a
-+ * log intent item that was found during recovery.
-+ */
-+void
-+xlog_recover_intent_item(
-+      struct xlog                     *log,
-+      struct xfs_log_item             *lip,
-+      xfs_lsn_t                       lsn,
-+      unsigned int                    dfp_type)
-+{
-+      ASSERT(xlog_item_is_intent(lip));
-+
-+      xfs_defer_start_recovery(lip, dfp_type, &log->r_dfops);
-+
-+      /*
-+       * Insert the intent into the AIL directly and drop one reference so
-+       * that finishing or canceling the work will drop the other.
-+       */
-+      xfs_trans_ail_insert(log->l_ailp, lip, lsn);
-+      lip->li_ops->iop_unpin(lip, 0);
-+}
-+
- STATIC int
- xlog_recover_items_pass2(
-       struct xlog                     *log,
-@@ -2536,29 +2553,22 @@ xlog_abort_defer_ops(
-  */
- STATIC int
- xlog_recover_process_intents(
--      struct xlog             *log)
-+      struct xlog                     *log)
- {
-       LIST_HEAD(capture_list);
--      struct xfs_ail_cursor   cur;
--      struct xfs_log_item     *lip;
--      struct xfs_ail          *ailp;
--      int                     error = 0;
-+      struct xfs_defer_pending        *dfp, *n;
-+      int                             error = 0;
- #if defined(DEBUG) || defined(XFS_WARN)
--      xfs_lsn_t               last_lsn;
--#endif
-+      xfs_lsn_t                       last_lsn;
--      ailp = log->l_ailp;
--      spin_lock(&ailp->ail_lock);
--#if defined(DEBUG) || defined(XFS_WARN)
-       last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
- #endif
--      for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
--           lip != NULL;
--           lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
--              const struct xfs_item_ops       *ops;
--              if (!xlog_item_is_intent(lip))
--                      break;
-+      list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
-+              struct xfs_log_item     *lip = dfp->dfp_intent;
-+              const struct xfs_item_ops *ops = lip->li_ops;
-+
-+              ASSERT(xlog_item_is_intent(lip));
-               /*
-                * We should never see a redo item with a LSN higher than
-@@ -2576,19 +2586,22 @@ xlog_recover_process_intents(
-                * The recovery function can free the log item, so we must not
-                * access lip after it returns.
-                */
--              spin_unlock(&ailp->ail_lock);
--              ops = lip->li_ops;
-               error = ops->iop_recover(lip, &capture_list);
--              spin_lock(&ailp->ail_lock);
-               if (error) {
-                       trace_xlog_intent_recovery_failed(log->l_mp, error,
-                                       ops->iop_recover);
-                       break;
-               }
--      }
--      xfs_trans_ail_cursor_done(&cur);
--      spin_unlock(&ailp->ail_lock);
-+              /*
-+               * XXX: @lip could have been freed, so detach the log item from
-+               * the pending item before freeing the pending item.  This does
-+               * not fix the existing UAF bug that occurs if ->iop_recover
-+               * fails after creating the intent done item.
-+               */
-+              dfp->dfp_intent = NULL;
-+              xfs_defer_cancel_recovery(log->l_mp, dfp);
-+      }
-       if (error)
-               goto err;
-@@ -2609,27 +2622,15 @@ err:
-  */
- STATIC void
- xlog_recover_cancel_intents(
--      struct xlog             *log)
-+      struct xlog                     *log)
- {
--      struct xfs_log_item     *lip;
--      struct xfs_ail_cursor   cur;
--      struct xfs_ail          *ailp;
--
--      ailp = log->l_ailp;
--      spin_lock(&ailp->ail_lock);
--      lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
--      while (lip != NULL) {
--              if (!xlog_item_is_intent(lip))
--                      break;
-+      struct xfs_defer_pending        *dfp, *n;
--              spin_unlock(&ailp->ail_lock);
--              lip->li_ops->iop_release(lip);
--              spin_lock(&ailp->ail_lock);
--              lip = xfs_trans_ail_cursor_next(ailp, &cur);
--      }
-+      list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
-+              ASSERT(xlog_item_is_intent(dfp->dfp_intent));
--      xfs_trans_ail_cursor_done(&cur);
--      spin_unlock(&ailp->ail_lock);
-+              xfs_defer_cancel_recovery(log->l_mp, dfp);
-+      }
- }
- /*
---- a/fs/xfs/xfs_refcount_item.c
-+++ b/fs/xfs/xfs_refcount_item.c
-@@ -668,12 +668,9 @@ xlog_recover_cui_commit_pass2(
-       cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
-       xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
-       atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
--      /*
--       * Insert the intent into the AIL directly and drop one reference so
--       * that finishing or canceling the work will drop the other.
--       */
--      xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
--      xfs_cui_release(cuip);
-+
-+      xlog_recover_intent_item(log, &cuip->cui_item, lsn,
-+                      XFS_DEFER_OPS_TYPE_REFCOUNT);
-       return 0;
- }
---- a/fs/xfs/xfs_rmap_item.c
-+++ b/fs/xfs/xfs_rmap_item.c
-@@ -682,12 +682,9 @@ xlog_recover_rui_commit_pass2(
-       ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
-       xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
-       atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
--      /*
--       * Insert the intent into the AIL directly and drop one reference so
--       * that finishing or canceling the work will drop the other.
--       */
--      xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
--      xfs_rui_release(ruip);
-+
-+      xlog_recover_intent_item(log, &ruip->rui_item, lsn,
-+                      XFS_DEFER_OPS_TYPE_RMAP);
-       return 0;
- }