--- /dev/null
+From a5a51bf4e9b7354ce7cd697e610d72c1b33fd949 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 1 Oct 2025 11:08:13 +0100
+Subject: btrfs: do not assert we found block group item when creating free space tree
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit a5a51bf4e9b7354ce7cd697e610d72c1b33fd949 upstream.
+
+Currently, when building a free space tree at populate_free_space_tree(),
+if we are not using the block group tree feature, we always expect to find
+block group items (either extent items or a block group item with key type
+BTRFS_BLOCK_GROUP_ITEM_KEY) when we search the extent tree with
+btrfs_search_slot_for_read(), so we assert that we found an item. However,
+this expectation is wrong since we can have a new block group, created in
+the current transaction, which is still empty and whose block group item
+has not yet been added to the extent tree, in which case there are no
+items in the extent tree associated with the block group.
+
+The insertion of a new block group's block group item in the extent tree
+happens at btrfs_create_pending_block_groups() when it calls the helper
+insert_block_group_item(). This is typically done when a transaction
+handle is released or committed, or when running delayed refs (either as
+part of a transaction commit or when serving tickets for space reservation
+if we are low on free space).
+
+So remove the assertion at populate_free_space_tree() even when the block
+group tree feature is not enabled and update the comment to mention this
+case.
+
+Syzbot reported this with the following stack trace:
+
+ BTRFS info (device loop3 state M): rebuilding free space tree
+ assertion failed: ret == 0 :: 0, in fs/btrfs/free-space-tree.c:1115
+ ------------[ cut here ]------------
+ kernel BUG at fs/btrfs/free-space-tree.c:1115!
+ Oops: invalid opcode: 0000 [#1] SMP KASAN PTI
+ CPU: 1 UID: 0 PID: 6352 Comm: syz.3.25 Not tainted syzkaller #0 PREEMPT(full)
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/18/2025
+ RIP: 0010:populate_free_space_tree+0x700/0x710 fs/btrfs/free-space-tree.c:1115
+ Code: ff ff e8 d3 (...)
+ RSP: 0018:ffffc9000430f780 EFLAGS: 00010246
+ RAX: 0000000000000043 RBX: ffff88805b709630 RCX: fea61d0e2e79d000
+ RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000
+ RBP: ffffc9000430f8b0 R08: ffffc9000430f4a7 R09: 1ffff92000861e94
+ R10: dffffc0000000000 R11: fffff52000861e95 R12: 0000000000000001
+ R13: 1ffff92000861f00 R14: dffffc0000000000 R15: 0000000000000000
+ FS: 00007f424d9fe6c0(0000) GS:ffff888125afc000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007fd78ad212c0 CR3: 0000000076d68000 CR4: 00000000003526f0
+ Call Trace:
+ <TASK>
+ btrfs_rebuild_free_space_tree+0x1ba/0x6d0 fs/btrfs/free-space-tree.c:1364
+ btrfs_start_pre_rw_mount+0x128f/0x1bf0 fs/btrfs/disk-io.c:3062
+ btrfs_remount_rw fs/btrfs/super.c:1334 [inline]
+ btrfs_reconfigure+0xaed/0x2160 fs/btrfs/super.c:1559
+ reconfigure_super+0x227/0x890 fs/super.c:1076
+ do_remount fs/namespace.c:3279 [inline]
+ path_mount+0xd1a/0xfe0 fs/namespace.c:4027
+ do_mount fs/namespace.c:4048 [inline]
+ __do_sys_mount fs/namespace.c:4236 [inline]
+ __se_sys_mount+0x313/0x410 fs/namespace.c:4213
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0xfa0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ RIP: 0033:0x7f424e39066a
+ Code: d8 64 89 02 (...)
+ RSP: 002b:00007f424d9fde68 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+ RAX: ffffffffffffffda RBX: 00007f424d9fdef0 RCX: 00007f424e39066a
+ RDX: 0000200000000180 RSI: 0000200000000380 RDI: 0000000000000000
+ RBP: 0000200000000180 R08: 00007f424d9fdef0 R09: 0000000000000020
+ R10: 0000000000000020 R11: 0000000000000246 R12: 0000200000000380
+ R13: 00007f424d9fdeb0 R14: 0000000000000000 R15: 00002000000002c0
+ </TASK>
+ Modules linked in:
+ ---[ end trace 0000000000000000 ]---
+
+Reported-by: syzbot+884dc4621377ba579a6f@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/linux-btrfs/68dc3dab.a00a0220.102ee.004e.GAE@google.com/
+Fixes: a5ed91828518 ("Btrfs: implement the free space B-tree")
+CC: <stable@vger.kernel.org> # 6.1.x: 1961d20f6fa8: btrfs: fix assertion when building free space tree
+CC: <stable@vger.kernel.org> # 6.1.x
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/free-space-tree.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1108,14 +1108,15 @@ static int populate_free_space_tree(stru
+ * If ret is 1 (no key found), it means this is an empty block group,
+ * without any extents allocated from it and there's no block group
+ * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
+- * because we are using the block group tree feature, so block group
+- * items are stored in the block group tree. It also means there are no
+- * extents allocated for block groups with a start offset beyond this
+- * block group's end offset (this is the last, highest, block group).
++ * because we are using the block group tree feature (so block group
++ * items are stored in the block group tree) or this is a new block
++ * group created in the current transaction and its block group item
++ * was not yet inserted in the extent tree (that happens in
++ * btrfs_create_pending_block_groups() -> insert_block_group_item()).
++ * It also means there are no extents allocated for block groups with a
++ * start offset beyond this block group's end offset (this is the last,
++ * highest, block group).
+ */
+- if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
+- ASSERT(ret == 0);
+-
+ start = block_group->start;
+ end = block_group->start + block_group->length;
+ while (ret == 0) {
--- /dev/null
+From 7e5a5983edda664e8e4bb20af17b80f5135c655c Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 24 Sep 2025 16:10:38 +0100
+Subject: btrfs: fix clearing of BTRFS_FS_RELOC_RUNNING if relocation already running
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 7e5a5983edda664e8e4bb20af17b80f5135c655c upstream.
+
+When starting relocation, at reloc_chunk_start(), if we happen to find
+the flag BTRFS_FS_RELOC_RUNNING already set we return an error
+(-EINPROGRESS) to the callers. However, the callers then call
+reloc_chunk_end(), which clears the flag BTRFS_FS_RELOC_RUNNING. That is
+wrong since relocation was started by another task and is still running.
+
+Finding the BTRFS_FS_RELOC_RUNNING flag already set is an unexpected
+scenario, but even so, our current behaviour is not correct.
+
+Fix this by never calling reloc_chunk_end() if reloc_chunk_start() has
+returned an error, which is what logically makes sense, since the general
+widespread pattern is to have end functions called only if the counterpart
+start functions succeeded. This requires changing reloc_chunk_start() to
+clear BTRFS_FS_RELOC_RUNNING if there's a pending cancel request.
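+
+As a rough sketch of the resulting calling convention (simplified, not the
+exact relocation code):
+
+	ret = reloc_chunk_start(fs_info);
+	if (ret < 0)
+		return ret;	/* must NOT call reloc_chunk_end() */
+
+	/* ... do the relocation work ... */
+
+	reloc_chunk_end(fs_info);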
+
+Fixes: 907d2710d727 ("btrfs: add cancellable chunk relocation support")
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/relocation.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3877,6 +3877,7 @@ out:
+ /*
+ * Mark start of chunk relocation that is cancellable. Check if the cancellation
+ * has been requested meanwhile and don't start in that case.
++ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
+ *
+ * Return:
+ * 0 success
+@@ -3893,10 +3894,8 @@ static int reloc_chunk_start(struct btrf
+
+ if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
+ btrfs_info(fs_info, "chunk relocation canceled on start");
+- /*
+- * On cancel, clear all requests but let the caller mark
+- * the end after cleanup operations.
+- */
++ /* On cancel, clear all requests. */
++ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
+ atomic_set(&fs_info->reloc_cancel_req, 0);
+ return -ECANCELED;
+ }
+@@ -3905,9 +3904,11 @@ static int reloc_chunk_start(struct btrf
+
+ /*
+ * Mark end of chunk relocation that is cancellable and wake any waiters.
++ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
+ */
+ static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
+ {
++ ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
+ /* Requested after start, clear bit first so any waiters can continue */
+ if (atomic_read(&fs_info->reloc_cancel_req) > 0)
+ btrfs_info(fs_info, "chunk relocation canceled during operation");
+@@ -4119,9 +4120,9 @@ out:
+ if (err && rw)
+ btrfs_dec_block_group_ro(rc->block_group);
+ iput(rc->data_inode);
++ reloc_chunk_end(fs_info);
+ out_put_bg:
+ btrfs_put_block_group(bg);
+- reloc_chunk_end(fs_info);
+ free_reloc_control(rc);
+ return err;
+ }
+@@ -4311,8 +4312,8 @@ out_clean:
+ err = ret;
+ out_unset:
+ unset_reloc_control(rc);
+-out_end:
+ reloc_chunk_end(fs_info);
++out_end:
+ free_reloc_control(rc);
+ out:
+ free_reloc_roots(&reloc_roots);
--- /dev/null
+From 8ab2fa69691b2913a67f3c54fbb991247b3755be Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Tue, 30 Sep 2025 21:05:17 -0700
+Subject: btrfs: fix incorrect readahead expansion length
+
+From: Boris Burkov <boris@bur.io>
+
+commit 8ab2fa69691b2913a67f3c54fbb991247b3755be upstream.
+
+The intent of btrfs_readahead_expand() was to expand to the length of
+the current compressed extent being read. However, "ram_bytes" is *not*
+that, in the case where a single physical compressed extent is used for
+multiple file extents.
+
+Consider this case with a large compressed extent C and then later two
+non-compressed extents N1 and N2 written over C, leaving C1 and C2
+pointing to offset/len pairs of C:
+
+[ C ]
+[ N1 ][ C1 ][ N2 ][ C2 ]
+
+In such a case, ram_bytes for both C1 and C2 is the full uncompressed
+length of C. So starting readahead in C1 will expand the readahead past
+the end of C1, past N2, and into C2. This will then expand readahead
+again, to C2_start + ram_bytes, way past EOF. First of all, this is
+totally undesirable: we don't want to read the whole file in arbitrary
+chunks of the large underlying extent if it happens to exist. Secondly,
+it results in zeroing the range past the end of C2 up to ram_bytes. This
+is particularly unpleasant with fs-verity as it can zero and set
+uptodate pages in the verity virtual space past EOF. This incorrect
+readahead behavior can lead to verity verification errors, if we iterate
+in a way that happens to do the wrong readahead.
+
+Fix this by using em->len for readahead expansion, not em->ram_bytes,
+resulting in the expected behavior of stopping readahead at the extent
+boundary.
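+
+As a hypothetical illustration (numbers made up, not from the report): if C
+is 1 MiB of uncompressed data and C1 maps only 128 KiB of it starting at
+file offset 512 KiB, then for C1 em->len is 128 KiB while em->ram_bytes is
+1 MiB. Expanding readahead to em->start + em->ram_bytes overshoots the end
+of C1 by 896 KiB, while em->start + em->len stops exactly at the extent
+boundary.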
+
+Reported-by: Max Chernoff <git@maxchernoff.ca>
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2399898
+Fixes: 9e9ff875e417 ("btrfs: use readahead_expand() on compressed extents")
+CC: stable@vger.kernel.org # 6.17
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -985,7 +985,7 @@ static void btrfs_readahead_expand(struc
+ {
+ const u64 ra_pos = readahead_pos(ractl);
+ const u64 ra_end = ra_pos + readahead_length(ractl);
+- const u64 em_end = em->start + em->ram_bytes;
++ const u64 em_end = em->start + em->len;
+
+ /* No expansion for holes and inline extents. */
+ if (em->block_start > EXTENT_MAP_LAST_BYTE)
--- /dev/null
+From a12f0bc764da3781da2019c60826f47a6d7ed64f Mon Sep 17 00:00:00 2001
+From: Celeste Liu <uwu@coelacanthus.name>
+Date: Tue, 30 Sep 2025 14:53:39 +0800
+Subject: can: gs_usb: gs_make_candev(): populate net_device->dev_port
+
+From: Celeste Liu <uwu@coelacanthus.name>
+
+commit a12f0bc764da3781da2019c60826f47a6d7ed64f upstream.
+
+The gs_usb driver supports USB devices with more than 1 CAN channel.
+In old kernels before 3.15, it used net_device->dev_id to distinguish
+the different channels in userspace, which was done in commit
+acff76fa45b4 ("can: gs_usb: gs_make_candev(): set netdev->dev_id").
+But since 3.15, the correct way is to populate net_device->dev_port.
+And according to the documentation, if a network device supports
+multiple interfaces, lack of net_device->dev_port SHALL be treated as
+a bug.
+
+Fixes: acff76fa45b4 ("can: gs_usb: gs_make_candev(): set netdev->dev_id")
+Cc: stable@vger.kernel.org
+Signed-off-by: Celeste Liu <uwu@coelacanthus.name>
+Link: https://patch.msgid.link/20250930-gs-usb-populate-net_device-dev_port-v1-1-68a065de6937@coelacanthus.name
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/gs_usb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -1246,6 +1246,7 @@ static struct gs_can *gs_make_candev(uns
+
+ netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+ netdev->dev_id = channel;
++ netdev->dev_port = channel;
+
+ /* dev setup */
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
--- /dev/null
+From 2a27f6a8fb5722223d526843040f747e9b0e8060 Mon Sep 17 00:00:00 2001
+From: Celeste Liu <uwu@coelacanthus.name>
+Date: Tue, 30 Sep 2025 19:34:28 +0800
+Subject: can: gs_usb: increase max interface to U8_MAX
+
+From: Celeste Liu <uwu@coelacanthus.name>
+
+commit 2a27f6a8fb5722223d526843040f747e9b0e8060 upstream.
+
+This issue was found by Runcheng Lu while developing the HSCanT USB to
+CAN FD converter[1]. The original developers may have had only a
+3-interface device to test with, so they wrote 3 here and left it for a
+future change.
+
+During the HSCanT development, we actually used 4 interfaces, so the
+limit of 3 is no longer enough. But just increasing it by one is not
+future-proof. Since the channel index type in gs_host_frame is u8,
+make canch[] a flexible array indexed by a u8 count, so it is naturally
+constrained by U8_MAX and we avoid statically allocating 256 pointers
+for every gs_usb device.
+
+[1]: https://github.com/cherry-embedded/HSCanT-hardware
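+
+The general kernel pattern used here is a trailing flexible array sized at
+allocation time (a simplified sketch with a stand-in struct name, not the
+exact driver code):
+
+	struct gs_usb_like {
+		u8 channel_cnt;
+		struct gs_can *canch[] __counted_by(channel_cnt);
+	};
+
+	parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
+	if (!parent)
+		return -ENOMEM;
+	parent->channel_cnt = icount;	/* set the counter before indexing canch[] */
+
+struct_size() accounts for the struct header plus icount trailing pointers,
+and __counted_by() lets fortified builds bound canch[] accesses by
+channel_cnt.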
+
+Fixes: d08e973a77d1 ("can: gs_usb: Added support for the GS_USB CAN devices")
+Reported-by: Runcheng Lu <runcheng.lu@hpmicro.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Vincent Mailhol <mailhol@kernel.org>
+Signed-off-by: Celeste Liu <uwu@coelacanthus.name>
+Link: https://patch.msgid.link/20250930-gs-usb-max-if-v5-1-863330bf6666@coelacanthus.name
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/gs_usb.c | 22 ++++++++++------------
+ 1 file changed, 10 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -286,11 +286,6 @@ struct gs_host_frame {
+ #define GS_MAX_RX_URBS 30
+ #define GS_NAPI_WEIGHT 32
+
+-/* Maximum number of interfaces the driver supports per device.
+- * Current hardware only supports 3 interfaces. The future may vary.
+- */
+-#define GS_MAX_INTF 3
+-
+ struct gs_tx_context {
+ struct gs_can *dev;
+ unsigned int echo_id;
+@@ -321,7 +316,6 @@ struct gs_can {
+
+ /* usb interface struct */
+ struct gs_usb {
+- struct gs_can *canch[GS_MAX_INTF];
+ struct usb_anchor rx_submitted;
+ struct usb_device *udev;
+
+@@ -333,9 +327,11 @@ struct gs_usb {
+
+ unsigned int hf_size_rx;
+ u8 active_channels;
++ u8 channel_cnt;
+
+ unsigned int pipe_in;
+ unsigned int pipe_out;
++ struct gs_can *canch[] __counted_by(channel_cnt);
+ };
+
+ /* 'allocate' a tx context.
+@@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback
+ }
+
+ /* device reports out of range channel id */
+- if (hf->channel >= GS_MAX_INTF)
++ if (hf->channel >= parent->channel_cnt)
+ goto device_detach;
+
+ dev = parent->canch[hf->channel];
+@@ -696,7 +692,7 @@ resubmit_urb:
+ /* USB failure take down all interfaces */
+ if (rc == -ENODEV) {
+ device_detach:
+- for (rc = 0; rc < GS_MAX_INTF; rc++) {
++ for (rc = 0; rc < parent->channel_cnt; rc++) {
+ if (parent->canch[rc])
+ netif_device_detach(parent->canch[rc]->netdev);
+ }
+@@ -1458,17 +1454,19 @@ static int gs_usb_probe(struct usb_inter
+ icount = dconf.icount + 1;
+ dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
+
+- if (icount > GS_MAX_INTF) {
++ if (icount > type_max(parent->channel_cnt)) {
+ dev_err(&intf->dev,
+ "Driver cannot handle more that %u CAN interfaces\n",
+- GS_MAX_INTF);
++ type_max(parent->channel_cnt));
+ return -EINVAL;
+ }
+
+- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
++ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
+ if (!parent)
+ return -ENOMEM;
+
++ parent->channel_cnt = icount;
++
+ init_usb_anchor(&parent->rx_submitted);
+
+ usb_set_intfdata(intf, parent);
+@@ -1529,7 +1527,7 @@ static void gs_usb_disconnect(struct usb
+ return;
+ }
+
+- for (i = 0; i < GS_MAX_INTF; i++)
++ for (i = 0; i < parent->channel_cnt; i++)
+ if (parent->canch[i])
+ gs_destroy_candev(parent->canch[i]);
+
--- /dev/null
+From 6447b0e355562a1ff748c4a2ffb89aae7e84d2c9 Mon Sep 17 00:00:00 2001
+From: Eugene Korenevsky <ekorenevsky@aliyun.com>
+Date: Mon, 13 Oct 2025 21:39:30 +0300
+Subject: cifs: parse_dfs_referrals: prevent oob on malformed input
+
+From: Eugene Korenevsky <ekorenevsky@aliyun.com>
+
+commit 6447b0e355562a1ff748c4a2ffb89aae7e84d2c9 upstream.
+
+A malicious SMB server can send an invalid reply to
+FSCTL_DFS_GET_REFERRALS:
+
+- a reply smaller than sizeof(struct get_dfs_referral_rsp)
+- a reply that contains fewer referral entries than the header's
+  NumberOfReferrals claims
+
+Processing such replies will cause out-of-bounds accesses.
+
+Return -EINVAL on such replies to prevent the out-of-bounds accesses.
+
+Signed-off-by: Eugene Korenevsky <ekorenevsky@aliyun.com>
+Cc: stable@vger.kernel.org
+Suggested-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/misc.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -922,6 +922,14 @@ parse_dfs_referrals(struct get_dfs_refer
+ char *data_end;
+ struct dfs_referral_level_3 *ref;
+
++ if (rsp_size < sizeof(*rsp)) {
++ cifs_dbg(VFS | ONCE,
++ "%s: header is malformed (size is %u, must be %zu)\n",
++ __func__, rsp_size, sizeof(*rsp));
++ rc = -EINVAL;
++ goto parse_DFS_referrals_exit;
++ }
++
+ *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
+
+ if (*num_of_nodes < 1) {
+@@ -930,6 +938,15 @@ parse_dfs_referrals(struct get_dfs_refer
+ rc = -EINVAL;
+ goto parse_DFS_referrals_exit;
+ }
++
++ if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
++ cifs_dbg(VFS | ONCE,
++ "%s: malformed buffer (size is %u, must be at least %zu)\n",
++ __func__, rsp_size,
++ sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
++ rc = -EINVAL;
++ goto parse_DFS_referrals_exit;
++ }
+
+ ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
+ if (ref->VersionNumber != cpu_to_le16(3)) {
--- /dev/null
+From 6df8e84aa6b5b1812cc2cacd6b3f5ccbb18cda2b Mon Sep 17 00:00:00 2001
+From: Gui-Dong Han <hanguidong02@gmail.com>
+Date: Wed, 8 Oct 2025 03:43:27 +0000
+Subject: drm/amdgpu: use atomic functions with memory barriers for vm fault info
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+commit 6df8e84aa6b5b1812cc2cacd6b3f5ccbb18cda2b upstream.
+
+The atomic variable vm_fault_info_updated is used to synchronize access to
+adev->gmc.vm_fault_info between the interrupt handler and
+get_vm_fault_info().
+
+The default atomic functions like atomic_set() and atomic_read() do not
+provide memory barriers. This allows for CPU instruction reordering,
+meaning the memory accesses to vm_fault_info and the vm_fault_info_updated
+flag are not guaranteed to occur in the intended order. This creates a
+race condition that can lead to inconsistent or stale data being used.
+
+The previous implementation, which used an explicit mb(), was incomplete
+and inefficient. It failed to account for all potential CPU reorderings,
+such as the access of vm_fault_info being reordered before the atomic_read
+of the flag. This approach is also more verbose and less performant than
+using the proper atomic functions with acquire/release semantics.
+
+Fix this by switching to atomic_set_release() and atomic_read_acquire().
+These functions provide the necessary acquire and release semantics,
+which act as memory barriers to ensure the correct order of operations.
+It is also more efficient and idiomatic than using explicit full memory
+barriers.
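+
+As a generic sketch of the publish/consume pattern (variable names here are
+illustrative, not the actual driver fields): the release store orders the
+payload write before the flag update, and the acquire load orders the flag
+check before the payload read.
+
+	/* producer (interrupt handler) */
+	*info = fault_data;			/* write payload */
+	atomic_set_release(&info_updated, 1);	/* then publish the flag */
+
+	/* consumer (get_vm_fault_info) */
+	if (atomic_read_acquire(&info_updated) == 1) {	/* observe the flag */
+		fault = *info;				/* then read the payload */
+		atomic_set_release(&info_updated, 0);	/* re-arm */
+	}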
+
+Fixes: b97dfa27ef3a ("drm/amdgpu: save vm fault information for amdkfd")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7 +++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7 +++----
+ 3 files changed, 8 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2285,10 +2285,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_fr
+ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ struct kfd_vm_fault_info *mem)
+ {
+- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
++ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+- mb(); /* make sure read happened */
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1061,7 +1061,7 @@ static int gmc_v7_0_sw_init(void *handle
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+
+ return 0;
+ }
+@@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(st
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(st
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+- mb();
+- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
+ return 0;
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1174,7 +1174,7 @@ static int gmc_v8_0_sw_init(void *handle
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+
+ return 0;
+ }
+@@ -1465,7 +1465,7 @@ static int gmc_v8_0_process_interrupt(st
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1481,8 +1481,7 @@ static int gmc_v8_0_process_interrupt(st
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+- mb();
+- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
+ return 0;
--- /dev/null
+From 5801e65206b065b0b2af032f7f1eef222aa2fd83 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Wed, 15 Oct 2025 09:40:15 +0100
+Subject: drm/sched: Fix potential double free in drm_sched_job_add_resv_dependencies
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 5801e65206b065b0b2af032f7f1eef222aa2fd83 upstream.
+
+When adding dependencies with drm_sched_job_add_dependency(), that
+function consumes the fence reference both on success and failure, so in
+the latter case the dma_fence_put() on the error path (xarray failed to
+expand) is a double free.
+
+Interestingly this bug appears to have been present ever since
+commit ebd5f74255b9 ("drm/sched: Add dependency tracking"), since the code
+back then looked like this:
+
+drm_sched_job_add_implicit_dependencies():
+...
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_sched_job_add_dependency(job, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+
+Which means for the failing 'i' the dma_fence_put was already a double
+free. Possibly there were no users at that time, or the test cases were
+insufficient to hit it.
+
+The bug was then only noticed and fixed after
+commit 9c2ba265352a ("drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2")
+landed, with its fixup of
+commit 4eaf02d6076c ("drm/scheduler: fix drm_sched_job_add_implicit_dependencies").
+
+At that point it was a slightly different flavour of a double free, which
+commit 963d0b356935 ("drm/scheduler: fix drm_sched_job_add_implicit_dependencies harder")
+noticed and attempted to fix.
+
+But it only moved the double free from happening inside the
+drm_sched_job_add_dependency(), when releasing the reference not yet
+obtained, to the caller, when releasing the reference already released by
+the former in the failure case.
+
+As such it is not easy to identify the right target for the fixes tag so
+let's keep it simple and just continue the chain.
+
+While fixing we also improve the comment and explain the reason for taking
+the reference and not dropping it.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 963d0b356935 ("drm/scheduler: fix drm_sched_job_add_implicit_dependencies harder")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/dri-devel/aNFbXq8OeYl3QSdm@stanley.mountain/
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Rob Clark <robdclark@chromium.org>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Philipp Stanner <phasta@kernel.org>
+Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: stable@vger.kernel.org # v5.16+
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://lore.kernel.org/r/20251015084015.6273-1-tvrtko.ursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -783,13 +783,14 @@ int drm_sched_job_add_resv_dependencies(
+ dma_resv_assert_held(resv);
+
+ dma_resv_for_each_fence(&cursor, resv, usage, fence) {
+- /* Make sure to grab an additional ref on the added fence */
+- dma_fence_get(fence);
+- ret = drm_sched_job_add_dependency(job, fence);
+- if (ret) {
+- dma_fence_put(fence);
++ /*
++ * As drm_sched_job_add_dependency always consumes the fence
++ * reference (even when it fails), and dma_resv_for_each_fence
++ * is not obtaining one, we need to grab one before calling.
++ */
++ ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
++ if (ret)
+ return ret;
+- }
+ }
+ return 0;
+ }
--- /dev/null
+From 1d3ad183943b38eec2acf72a0ae98e635dc8456b Mon Sep 17 00:00:00 2001
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+Date: Tue, 30 Sep 2025 16:58:10 +0530
+Subject: ext4: detect invalid INLINE_DATA + EXTENTS flag combination
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+commit 1d3ad183943b38eec2acf72a0ae98e635dc8456b upstream.
+
+syzbot reported a BUG_ON in ext4_es_cache_extent() when opening a verity
+file on a corrupted ext4 filesystem mounted without a journal.
+
+The issue is that the filesystem has an inode with both the INLINE_DATA
+and EXTENTS flags set:
+
+ EXT4-fs error (device loop0): ext4_cache_extents:545: inode #15:
+ comm syz.0.17: corrupted extent tree: lblk 0 < prev 66
+
+Investigation revealed that the inode has both flags set:
+ DEBUG: inode 15 - flag=1, i_inline_off=164, has_inline=1, extents_flag=1
+
+This is an invalid combination since an inode should have either:
+- INLINE_DATA: data stored directly in the inode
+- EXTENTS: data stored in extent-mapped blocks
+
+Having both flags causes ext4_has_inline_data() to return true, skipping
+extent tree validation in __ext4_iget(). The unvalidated out-of-order
+extents then trigger a BUG_ON in ext4_es_cache_extent() due to integer
+underflow when calculating hole sizes.
+
+Fix this by detecting this invalid flag combination early in ext4_iget()
+and rejecting the corrupted inode.
+
+Cc: stable@kernel.org
+Reported-and-tested-by: syzbot+038b7bf43423e132b308@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=038b7bf43423e132b308
+Suggested-by: Zhang Yi <yi.zhang@huawei.com>
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Message-ID: <20250930112810.315095-1-kartikey406@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4944,6 +4944,14 @@ struct inode *__ext4_iget(struct super_b
+ }
+ ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ ext4_set_inode_flags(inode, true);
++ /* Detect invalid flag combination - can't have both inline data and extents */
++ if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
++ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
++ ext4_error_inode(inode, function, line, 0,
++ "inode has both inline data and extents flags");
++ ret = -EFSCORRUPTED;
++ goto bad_inode;
++ }
+ inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
+ ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
+ if (ext4_has_feature_64bit(sb))
--- /dev/null
+From 328a782cb138029182e521c08f50eb1587db955d Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Tue, 16 Sep 2025 17:33:37 +0800
+Subject: ext4: wait for ongoing I/O to complete before freeing blocks
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 328a782cb138029182e521c08f50eb1587db955d upstream.
+
+When freeing metadata blocks in nojournal mode, ext4_forget() calls
+bforget() to clear the dirty flag on the buffer_head and remove
+associated mappings. This is acceptable if the metadata has not yet
+begun to be written back. However, if the write-back has already started
+but is not yet completed, ext4_forget() will have no effect.
+Subsequently, ext4_mb_clear_bb() will immediately return the block to
+the mb allocator. This block can then be reallocated immediately,
+potentially causing a data corruption issue.
+
+Fix this by clearing the buffer's dirty flag and waiting for the ongoing
+I/O to complete, ensuring that no further writes to stale data will
+occur.
+
+Fixes: 16e08b14a455 ("ext4: cleanup clean_bdev_aliases() calls")
+Cc: stable@kernel.org
+Reported-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Closes: https://lore.kernel.org/linux-ext4/a9417096-9549-4441-9878-b1955b899b4e@huaweicloud.com/
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Message-ID: <20250916093337.3161016-3-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4_jbd2.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -277,9 +277,16 @@ int __ext4_forget(const char *where, uns
+ bh, is_metadata, inode->i_mode,
+ test_opt(inode->i_sb, DATA_FLAGS));
+
+- /* In the no journal case, we can just do a bforget and return */
++ /*
++ * In the no journal case, we should wait for the ongoing buffer
++ * to complete and do a forget.
++ */
+ if (!ext4_handle_valid(handle)) {
+- bforget(bh);
++ if (bh) {
++ clear_buffer_dirty(bh);
++ wait_on_buffer(bh);
++ __bforget(bh);
++ }
+ return 0;
+ }
+
--- /dev/null
+From 9d5c4f5c7a2c7677e1b3942772122b032c265aae Mon Sep 17 00:00:00 2001
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+Date: Tue, 7 Oct 2025 03:32:30 +0000
+Subject: f2fs: fix wrong block mapping for multi-devices
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+commit 9d5c4f5c7a2c7677e1b3942772122b032c265aae upstream.
+
+Assuming the disk layout below,
+
+disk0: 0 --- 0x00035abfff
+disk1: 0x00035ac000 --- 0x00037abfff
+disk2: 0x00037ac000 --- 0x00037ebfff
+
+and we want to read data at offset=13568 with len=128 across the block
+devices, we can illustrate the block addresses as below.
+
+0 .. 0x00037ac000 ------------------- 0x00037ebfff, 0x00037ec000 -------
+ | ^ ^ ^
+ | fofs 0 13568 13568+128
+ | ------------------------------------------------------
+ | LBA 0x37e8aa9 0x37ebfa9 0x37ec029
+ --- map 0x3caa9 0x3ffa9
+
+In this example, we should give the relative map of the target block device
+ranging from 0x3caa9 to 0x3ffa9 where the length should be calculated by
+0x37ebfff + 1 - 0x37ebfa9.
+
+In the equation below, however, map->m_pblk was supposed to be the original
+(absolute) address, not the address already rebased to the target block device.
+
+ - map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
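+
+A worked example with the numbers above (to show why the order matters): with
+the absolute address, dev->end_blk + 1 - map->m_pblk = 0x37ec000 - 0x37ebfa9 =
+0x57 = 87 blocks, so map->m_len is clamped to min(128, 87) = 87 before
+map->m_pblk is rebased to the device-relative 0x3ffa9. With the old order the
+subtraction used the already-rebased 0x3ffa9, yielding a huge value, so the
+128-block length was never clamped and the mapping spilled past the device.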
+
+Cc: stable@vger.kernel.org
+Fixes: 71f2c8206202 ("f2fs: multidevice: support direct IO")
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1506,8 +1506,8 @@ static bool f2fs_map_blocks_cached(struc
+ struct f2fs_dev_info *dev = &sbi->devs[bidx];
+
+ map->m_bdev = dev->bdev;
+- map->m_pblk -= dev->start_blk;
+ map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
++ map->m_pblk -= dev->start_blk;
+ } else {
+ map->m_bdev = inode->i_sb->s_bdev;
+ }
--- /dev/null
+From 3c652c3a71de1d30d72dc82c3bead8deb48eb749 Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Tue, 16 Sep 2025 17:33:36 +0800
+Subject: jbd2: ensure that all ongoing I/O complete before freeing blocks
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 3c652c3a71de1d30d72dc82c3bead8deb48eb749 upstream.
+
+When releasing file system metadata blocks in jbd2_journal_forget(), if
+the buffer has not yet been checkpointed, it may have already been
+written back, may currently be in the process of being written back, or
+may not have been written back yet. jbd2_journal_forget() calls
+jbd2_journal_try_remove_checkpoint() to check the buffer's status and
+add it to the current transaction if it has not been written back. This
+buffer can only be reallocated after the transaction is committed.
+
+jbd2_journal_try_remove_checkpoint() attempts to lock the buffer and
+check its dirty status while holding the buffer lock. If the buffer has
+already been written back, everything proceeds normally. However, there
+are two issues. First, the function returns immediately if the buffer is
+locked by the write-back process. It does not wait for the write-back to
+complete. Consequently, until the current transaction is committed and
+the block is reallocated, there is no guarantee that the I/O will
+complete. This means that ongoing I/O could write stale metadata to the
+newly allocated block, potentially corrupting data. Second, the function
+unlocks the buffer as soon as it detects that the buffer is still dirty.
+If a concurrent write-back occurs immediately after this unlocking and
+before clear_buffer_dirty() is called in jbd2_journal_forget(), data
+corruption can theoretically still occur.
+
+Although these two issues are unlikely to occur in practice since the
+ongoing metadata writeback I/O does not take this long to complete,
+it's better to explicitly ensure that all ongoing I/O operations are
+completed.
+
+Fixes: 597599268e3b ("jbd2: discard dirty data when forgetting an un-journalled buffer")
+Cc: stable@kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Message-ID: <20250916093337.3161016-2-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jbd2/transaction.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1649,6 +1649,7 @@ int jbd2_journal_forget(handle_t *handle
+ int drop_reserve = 0;
+ int err = 0;
+ int was_modified = 0;
++ int wait_for_writeback = 0;
+
+ if (is_handle_aborted(handle))
+ return -EROFS;
+@@ -1772,18 +1773,22 @@ int jbd2_journal_forget(handle_t *handle
+ }
+
+ /*
+- * The buffer is still not written to disk, we should
+- * attach this buffer to current transaction so that the
+- * buffer can be checkpointed only after the current
+- * transaction commits.
++ * The buffer has not yet been written to disk. We should
++ * either clear the buffer or ensure that the ongoing I/O
++ * is completed, and attach this buffer to current
++ * transaction so that the buffer can be checkpointed only
++ * after the current transaction commits.
+ */
+ clear_buffer_dirty(bh);
++ wait_for_writeback = 1;
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
+ spin_unlock(&journal->j_list_lock);
+ }
+ drop:
+ __brelse(bh);
+ spin_unlock(&jh->b_state_lock);
++ if (wait_for_writeback)
++ wait_on_buffer(bh);
+ jbd2_journal_put_journal_head(jh);
+ if (drop_reserve) {
+ /* no need to reserve log space for this block -bzzz */
--- /dev/null
+From 0aa1b76fe1429629215a7c79820e4b96233ac4a3 Mon Sep 17 00:00:00 2001
+From: Oliver Upton <oliver.upton@linux.dev>
+Date: Tue, 30 Sep 2025 01:52:37 -0700
+Subject: KVM: arm64: Prevent access to vCPU events before init
+
+From: Oliver Upton <oliver.upton@linux.dev>
+
+commit 0aa1b76fe1429629215a7c79820e4b96233ac4a3 upstream.
+
+Another day, another syzkaller bug. KVM erroneously allows userspace to
+pend vCPU events for a vCPU that hasn't been initialized yet, leading to
+KVM interpreting a bunch of uninitialized garbage for routing /
+injecting the exception.
+
+In one case the injection code and the hyp disagree on whether the vCPU
+has a 32bit EL1 and put the vCPU into an illegal mode for AArch64,
+tripping the BUG() in exception_target_el() during the next injection:
+
+ kernel BUG at arch/arm64/kvm/inject_fault.c:40!
+ Internal error: Oops - BUG: 00000000f2000800 [#1] SMP
+ CPU: 3 UID: 0 PID: 318 Comm: repro Not tainted 6.17.0-rc4-00104-g10fd0285305d #6 PREEMPT
+ Hardware name: linux,dummy-virt (DT)
+ pstate: 21402009 (nzCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
+ pc : exception_target_el+0x88/0x8c
+ lr : pend_serror_exception+0x18/0x13c
+ sp : ffff800082f03a10
+ x29: ffff800082f03a10 x28: ffff0000cb132280 x27: 0000000000000000
+ x26: 0000000000000000 x25: ffff0000c2a99c20 x24: 0000000000000000
+ x23: 0000000000008000 x22: 0000000000000002 x21: 0000000000000004
+ x20: 0000000000008000 x19: ffff0000c2a99c20 x18: 0000000000000000
+ x17: 0000000000000000 x16: 0000000000000000 x15: 00000000200000c0
+ x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
+ x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000
+ x8 : ffff800082f03af8 x7 : 0000000000000000 x6 : 0000000000000000
+ x5 : ffff800080f621f0 x4 : 0000000000000000 x3 : 0000000000000000
+ x2 : 000000000040009b x1 : 0000000000000003 x0 : ffff0000c2a99c20
+ Call trace:
+ exception_target_el+0x88/0x8c (P)
+ kvm_inject_serror_esr+0x40/0x3b4
+ __kvm_arm_vcpu_set_events+0xf0/0x100
+ kvm_arch_vcpu_ioctl+0x180/0x9d4
+ kvm_vcpu_ioctl+0x60c/0x9f4
+ __arm64_sys_ioctl+0xac/0x104
+ invoke_syscall+0x48/0x110
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0xf0
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+ Code: f946bc01 b4fffe61 9101e020 17fffff2 (d4210000)
+
+Reject the ioctls outright as no sane VMM would call these before
+KVM_ARM_VCPU_INIT anyway. Even if it did, the exception would've been
+thrown away by the eventual reset of the vCPU's state.
+
+Cc: stable@vger.kernel.org # 6.17
+Fixes: b7b27facc7b5 ("arm/arm64: KVM: Add KVM_GET/SET_VCPU_EVENTS")
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/arm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1492,6 +1492,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ case KVM_GET_VCPU_EVENTS: {
+ struct kvm_vcpu_events events;
+
++ if (!kvm_vcpu_initialized(vcpu))
++ return -ENOEXEC;
++
+ if (kvm_arm_vcpu_get_events(vcpu, &events))
+ return -EINVAL;
+
+@@ -1503,6 +1506,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ case KVM_SET_VCPU_EVENTS: {
+ struct kvm_vcpu_events events;
+
++ if (!kvm_vcpu_initialized(vcpu))
++ return -ENOEXEC;
++
+ if (copy_from_user(&events, argp, sizeof(events)))
+ return -EFAULT;
+
--- /dev/null
+From 75527d61d60d493d1eb064f335071a20ca581f54 Mon Sep 17 00:00:00 2001
+From: Yi Cong <yicong@kylinos.cn>
+Date: Sat, 11 Oct 2025 16:24:15 +0800
+Subject: r8152: add error handling in rtl8152_driver_init
+
+From: Yi Cong <yicong@kylinos.cn>
+
+commit 75527d61d60d493d1eb064f335071a20ca581f54 upstream.
+
+rtl8152_driver_init() is missing error handling.
+When rtl8152_driver registration fails, rtl8152_cfgselector_driver
+should be deregistered.
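+
+This is the usual unwind-on-failure shape for a module init that registers
+two drivers (a generic sketch with placeholder driver names, not the exact
+code):
+
+	ret = usb_register_device_driver(&selector_driver, THIS_MODULE);
+	if (ret)
+		return ret;
+
+	ret = usb_register(&main_driver);
+	if (ret)
+		usb_deregister_device_driver(&selector_driver); /* undo step 1 */
+
+	return ret;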
+
+Fixes: ec51fbd1b8a2 ("r8152: add USB device driver for config selection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yi Cong <yicong@kylinos.cn>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20251011082415.580740-1-yicongsrfy@163.com
+[pabeni@redhat.com: clarified the commit message]
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -10104,7 +10104,12 @@ static int __init rtl8152_driver_init(vo
+ ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
+ if (ret)
+ return ret;
+- return usb_register(&rtl8152_driver);
++
++ ret = usb_register(&rtl8152_driver);
++ if (ret)
++ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
++
++ return ret;
+ }
+
+ static void __exit rtl8152_driver_exit(void)
--- /dev/null
+smb-client-fix-refcount-leak-for-cifs_sb_tlink.patch
+r8152-add-error-handling-in-rtl8152_driver_init.patch
+kvm-arm64-prevent-access-to-vcpu-events-before-init.patch
+f2fs-fix-wrong-block-mapping-for-multi-devices.patch
+jbd2-ensure-that-all-ongoing-i-o-complete-before-freeing-blocks.patch
+ext4-wait-for-ongoing-i-o-to-complete-before-freeing-blocks.patch
+ext4-detect-invalid-inline_data-extents-flag-combination.patch
+btrfs-fix-clearing-of-btrfs_fs_reloc_running-if-relocation-already-running.patch
+btrfs-fix-incorrect-readahead-expansion-length.patch
+btrfs-do-not-assert-we-found-block-group-item-when-creating-free-space-tree.patch
+can-gs_usb-gs_make_candev-populate-net_device-dev_port.patch
+can-gs_usb-increase-max-interface-to-u8_max.patch
+cifs-parse_dfs_referrals-prevent-oob-on-malformed-input.patch
+drm-sched-fix-potential-double-free-in-drm_sched_job_add_resv_dependencies.patch
+drm-amdgpu-use-atomic-functions-with-memory-barriers-for-vm-fault-info.patch
--- /dev/null
+From c2b77f42205ef485a647f62082c442c1cd69d3fc Mon Sep 17 00:00:00 2001
+From: Shuhao Fu <sfual@cse.ust.hk>
+Date: Thu, 16 Oct 2025 02:52:55 +0000
+Subject: smb: client: Fix refcount leak for cifs_sb_tlink
+
+From: Shuhao Fu <sfual@cse.ust.hk>
+
+commit c2b77f42205ef485a647f62082c442c1cd69d3fc upstream.
+
+Fix three refcount inconsistency issues related to `cifs_sb_tlink`.
+
+Comments for `cifs_sb_tlink` state that `cifs_put_tlink()` needs to be
+called after successful calls to `cifs_sb_tlink()`. Three callers fail to
+update the refcount accordingly, leading to possible resource leaks.
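+
+The expected get/put pairing (a simplified sketch; do_work() is a
+placeholder for the per-call operation):
+
+	tlink = cifs_sb_tlink(cifs_sb);
+	if (IS_ERR(tlink))
+		return PTR_ERR(tlink);
+
+	rc = do_work(tlink_tcon(tlink));	/* any error path must still... */
+
+	cifs_put_tlink(tlink);			/* ...reach the matching put */
+	return rc;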
+
+Fixes: 8ceb98437946 ("CIFS: Move rename to ops struct")
+Fixes: 2f1afe25997f ("cifs: Use smb 2 - 3 and cifsacl mount options getacl functions")
+Fixes: 366ed846df60 ("cifs: Use smb 2 - 3 and cifsacl mount options setacl function")
+Cc: stable@vger.kernel.org
+Signed-off-by: Shuhao Fu <sfual@cse.ust.hk>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/inode.c | 6 ++++--
+ fs/smb/client/smb2ops.c | 8 ++++----
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2319,8 +2319,10 @@ cifs_do_rename(const unsigned int xid, s
+ tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
+
+- if (!server->ops->rename)
+- return -ENOSYS;
++ if (!server->ops->rename) {
++ rc = -ENOSYS;
++ goto do_rename_exit;
++ }
+
+ /* try path-based rename first */
+ rc = server->ops->rename(xid, tcon, from_dentry,
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3072,8 +3072,7 @@ get_smb2_acl_by_path(struct cifs_sb_info
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path) {
+ rc = -ENOMEM;
+- free_xid(xid);
+- return ERR_PTR(rc);
++ goto put_tlink;
+ }
+
+ oparms = (struct cifs_open_parms) {
+@@ -3105,6 +3104,7 @@ get_smb2_acl_by_path(struct cifs_sb_info
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+
++put_tlink:
+ cifs_put_tlink(tlink);
+ free_xid(xid);
+
+@@ -3145,8 +3145,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path) {
+ rc = -ENOMEM;
+- free_xid(xid);
+- return rc;
++ goto put_tlink;
+ }
+
+ oparms = (struct cifs_open_parms) {
+@@ -3167,6 +3166,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+
++put_tlink:
+ cifs_put_tlink(tlink);
+ free_xid(xid);
+ return rc;