--- /dev/null
+From 8d33a030c566e1f105cd5bf27f37940b6367f3be Mon Sep 17 00:00:00 2001
+From: Zheng Qixing <zhengqixing@huawei.com>
+Date: Tue, 26 Aug 2025 15:42:04 +0800
+Subject: dm: fix NULL pointer dereference in __dm_suspend()
+
+From: Zheng Qixing <zhengqixing@huawei.com>
+
+commit 8d33a030c566e1f105cd5bf27f37940b6367f3be upstream.
+
+There is a race condition between dm device suspend and table load that
+can lead to a null pointer dereference. The issue occurs when suspend is
+invoked before table load completes:
+
+BUG: kernel NULL pointer dereference, address: 0000000000000054
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 6 PID: 6798 Comm: dmsetup Not tainted 6.6.0-g7e52f5f0ca9b #62
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014
+RIP: 0010:blk_mq_wait_quiesce_done+0x0/0x50
+Call Trace:
+ <TASK>
+ blk_mq_quiesce_queue+0x2c/0x50
+ dm_stop_queue+0xd/0x20
+ __dm_suspend+0x130/0x330
+ dm_suspend+0x11a/0x180
+ dev_suspend+0x27e/0x560
+ ctl_ioctl+0x4cf/0x850
+ dm_ctl_ioctl+0xd/0x20
+ vfs_ioctl+0x1d/0x50
+ __se_sys_ioctl+0x9b/0xc0
+ __x64_sys_ioctl+0x19/0x30
+ x64_sys_call+0x2c4a/0x4620
+ do_syscall_64+0x9e/0x1b0
+
+The issue can be triggered as below:
+
+T1                               T2
+dm_suspend                       table_load
+__dm_suspend                     dm_setup_md_queue
+                                 dm_mq_init_request_queue
+                                 blk_mq_init_allocated_queue
+                                 => q->mq_ops = set->ops; (1)
+dm_stop_queue / dm_wait_for_completion
+=> q->tag_set NULL pointer! (2)
+                                 => q->tag_set = set; (3)
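+
+Roughly what happens in T1 at step (2), based on the call trace above
+(the blk-mq code below is a paraphrased sketch, not a verbatim quote):
+
+	/* blk_mq_quiesce_queue(), reached via dm_stop_queue() in T1 */
+	blk_mq_quiesce_queue_nowait(q);
+	if (queue_is_mq(q))
+		/* q->tag_set is still NULL, T2 has not reached (3) yet */
+		blk_mq_wait_quiesce_done(q->tag_set);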
+
+Fix this by checking if a valid table (map) exists before performing
+request-based suspend and waiting for target I/O. When map is NULL,
+skip these table-dependent suspend steps.
+
+Even when map is NULL, no I/O can reach any target because there is
+no table loaded; I/O submitted in this state will fail early in the
+DM layer. Skipping the table-dependent suspend logic in this case
+is safe and avoids NULL pointer dereferences.
+
+Fixes: c4576aed8d85 ("dm: fix request-based dm's use of dm_wait_for_completion")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2908,7 +2908,7 @@ static int __dm_suspend(struct mapped_de
+ {
+ bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+ bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
+- int r;
++ int r = 0;
+
+ lockdep_assert_held(&md->suspend_lock);
+
+@@ -2960,7 +2960,7 @@ static int __dm_suspend(struct mapped_de
+ * Stop md->queue before flushing md->wq in case request-based
+ * dm defers requests to md->wq from md->queue.
+ */
+- if (dm_request_based(md)) {
++ if (map && dm_request_based(md)) {
+ dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
+@@ -2972,7 +2972,8 @@ static int __dm_suspend(struct mapped_de
+ * We call dm_wait_for_completion to wait for all existing requests
+ * to finish.
+ */
+- r = dm_wait_for_completion(md, task_state);
++ if (map)
++ r = dm_wait_for_completion(md, task_state);
+ if (!r)
+ set_bit(dmf_suspended_flag, &md->flags);
+
--- /dev/null
+From 7f597c2cdb9d3263a6fce07c4fc0a9eaa8e8fc43 Mon Sep 17 00:00:00 2001
+From: Zheng Qixing <zhengqixing@huawei.com>
+Date: Tue, 26 Aug 2025 15:42:03 +0800
+Subject: dm: fix queue start/stop imbalance under suspend/load/resume races
+
+From: Zheng Qixing <zhengqixing@huawei.com>
+
+commit 7f597c2cdb9d3263a6fce07c4fc0a9eaa8e8fc43 upstream.
+
+When suspend and table load run concurrently, and suspend runs before
+q->mq_ops is set in blk_mq_init_allocated_queue(), __dm_suspend() skips
+dm_stop_queue(). As a result, the queue's quiesce depth is not
+incremented.
+
+Later, once the table load has finished and __dm_resume() runs,
+blk_mq_unquiesce_queue() triggers a q->quiesce_depth == 0 warning:
+Call Trace:
+ <TASK>
+ dm_start_queue+0x16/0x20 [dm_mod]
+ __dm_resume+0xac/0xb0 [dm_mod]
+ dm_resume+0x12d/0x150 [dm_mod]
+ do_resume+0x2c2/0x420 [dm_mod]
+ dev_suspend+0x30/0x130 [dm_mod]
+ ctl_ioctl+0x402/0x570 [dm_mod]
+ dm_ctl_ioctl+0x23/0x30 [dm_mod]
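+
+The warning comes from blk-mq's quiesce depth bookkeeping, roughly (a
+paraphrased sketch of blk_mq_unquiesce_queue(), not a verbatim quote):
+
+	if (WARN_ON_ONCE(q->quiesce_depth <= 0))
+		;			/* unbalanced unquiesce: warn and bail out */
+	else if (!--q->quiesce_depth)
+		run_queue = true;	/* balanced path: resume dispatching */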
+
+Fix this by explicitly tracking whether the request queue was
+stopped in __dm_suspend() via a new DMF_QUEUE_STOPPED flag.
+Only call dm_start_queue() in __dm_resume() if the queue was
+actually stopped.
+
+Fixes: e70feb8b3e68 ("blk-mq: support concurrent queue quiesce/unquiesce")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-core.h | 1 +
+ drivers/md/dm.c | 8 +++++---
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -162,6 +162,7 @@ struct mapped_device {
+ #define DMF_SUSPENDED_INTERNALLY 7
+ #define DMF_POST_SUSPENDING 8
+ #define DMF_EMULATE_ZONE_APPEND 9
++#define DMF_QUEUE_STOPPED 10
+
+ static inline sector_t dm_get_size(struct mapped_device *md)
+ {
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2960,8 +2960,10 @@ static int __dm_suspend(struct mapped_de
+ * Stop md->queue before flushing md->wq in case request-based
+ * dm defers requests to md->wq from md->queue.
+ */
+- if (dm_request_based(md))
++ if (dm_request_based(md)) {
+ dm_stop_queue(md->queue);
++ set_bit(DMF_QUEUE_STOPPED, &md->flags);
++ }
+
+ flush_workqueue(md->wq);
+
+@@ -2983,7 +2985,7 @@ static int __dm_suspend(struct mapped_de
+ if (r < 0) {
+ dm_queue_flush(md);
+
+- if (dm_request_based(md))
++ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
+ dm_start_queue(md->queue);
+
+ unlock_fs(md);
+@@ -3067,7 +3069,7 @@ static int __dm_resume(struct mapped_dev
+ * so that mapping of targets can work correctly.
+ * Request-based dm is queueing the deferred I/Os in its request_queue.
+ */
+- if (dm_request_based(md))
++ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
+ dm_start_queue(md->queue);
+
+ unlock_fs(md);
--- /dev/null
+From acf943e9768ec9d9be80982ca0ebc4bfd6b7631e Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 25 Sep 2025 14:30:39 +0200
+Subject: ext4: fix checks for orphan inodes
+
+From: Jan Kara <jack@suse.cz>
+
+commit acf943e9768ec9d9be80982ca0ebc4bfd6b7631e upstream.
+
+When the orphan file feature is enabled, an inode can be tracked as
+orphan either in the standard orphan list or in the orphan file. The
+first can be tested by checking the ei->i_orphan list head, the second
+is recorded by the EXT4_STATE_ORPHAN_FILE inode state flag. There are
+several places where we want to check whether an inode is tracked as
+orphan, and only some of them properly check for both possibilities.
+Luckily the consequences are mostly minor; the worst that can happen is
+that we track an inode as orphan although we don't need to, and e2fsck
+then complains (resulting in occasional ext4/307 xfstest failures). Fix
+the problem by introducing a helper for checking whether an inode is
+tracked as orphan and use it in the appropriate places.
+
+Fixes: 4a79a98c7b19 ("ext4: Improve scalability of ext4 orphan file handling")
+Cc: stable@kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Message-ID: <20250925123038.20264-2-jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h | 10 ++++++++++
+ fs/ext4/file.c | 2 +-
+ fs/ext4/inode.c | 2 +-
+ fs/ext4/orphan.c | 6 +-----
+ fs/ext4/super.c | 4 ++--
+ 5 files changed, 15 insertions(+), 9 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1982,6 +1982,16 @@ static inline bool ext4_verity_in_progre
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+
+ /*
++ * Check whether the inode is tracked as orphan (either in orphan file or
++ * orphan list).
++ */
++static inline bool ext4_inode_orphan_tracked(struct inode *inode)
++{
++ return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
++ !list_empty(&EXT4_I(inode)->i_orphan);
++}
++
++/*
+ * Codes for operating systems
+ */
+ #define EXT4_OS_LINUX 0
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -354,7 +354,7 @@ static void ext4_inode_extension_cleanup
+ * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
+ * now.
+ */
+- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++ if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
+ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+ if (IS_ERR(handle)) {
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4748,7 +4748,7 @@ static int ext4_fill_raw_inode(struct in
+ * old inodes get re-used with the upper 16 bits of the
+ * uid/gid intact.
+ */
+- if (ei->i_dtime && list_empty(&ei->i_orphan)) {
++ if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ } else {
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -109,11 +109,7 @@ int ext4_orphan_add(handle_t *handle, st
+
+ WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ !inode_is_locked(inode));
+- /*
+- * Inode orphaned in orphan file or in orphan list?
+- */
+- if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+- !list_empty(&EXT4_I(inode)->i_orphan))
++ if (ext4_inode_orphan_tracked(inode))
+ return 0;
+
+ /*
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1438,9 +1438,9 @@ static void ext4_free_in_core_inode(stru
+
+ static void ext4_destroy_inode(struct inode *inode)
+ {
+- if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
++ if (ext4_inode_orphan_tracked(inode)) {
+ ext4_msg(inode->i_sb, KERN_ERR,
+- "Inode %lu (%p): orphan list check failed!",
++ "Inode %lu (%p): inode tracked as orphan!",
+ inode->i_ino, EXT4_I(inode));
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ EXT4_I(inode), sizeof(struct ext4_inode_info),
--- /dev/null
+From 3c3fac6bc0a9c00dbe65d8dc0d3a282afe4d3188 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Mon, 25 Aug 2025 11:38:30 +0800
+Subject: ext4: fix potential null deref in ext4_mb_init()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 3c3fac6bc0a9c00dbe65d8dc0d3a282afe4d3188 upstream.
+
+In ext4_mb_init(), ext4_mb_avg_fragment_size_destroy() may be called
+when sbi->s_mb_avg_fragment_size remains uninitialized (e.g., if groupinfo
+slab cache allocation fails). Since ext4_mb_avg_fragment_size_destroy()
+lacks null pointer checking, this leads to a null pointer dereference.
+
+==================================================================
+EXT4-fs: no memory for groupinfo slab cache
+BUG: kernel NULL pointer dereference, address: 0000000000000000
+PGD 0 P4D 0
+Oops: Oops: 0002 [#1] SMP PTI
+CPU:2 UID: 0 PID: 87 Comm:mount Not tainted 6.17.0-rc2 #1134 PREEMPT(none)
+RIP: 0010:_raw_spin_lock_irqsave+0x1b/0x40
+Call Trace:
+ <TASK>
+ xa_destroy+0x61/0x130
+ ext4_mb_init+0x483/0x540
+ __ext4_fill_super+0x116d/0x17b0
+ ext4_fill_super+0xd3/0x280
+ get_tree_bdev_flags+0x132/0x1d0
+ vfs_get_tree+0x29/0xd0
+ do_new_mount+0x197/0x300
+ __x64_sys_mount+0x116/0x150
+ do_syscall_64+0x50/0x1c0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+==================================================================
+
+Therefore, add the necessary null check to
+ext4_mb_avg_fragment_size_destroy() to prevent this issue. The same fix
+is also applied to ext4_mb_largest_free_orders_destroy().
+
+Reported-by: syzbot+1713b1aa266195b916c2@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=1713b1aa266195b916c2
+Cc: stable@kernel.org
+Fixes: f7eaacbb4e54 ("ext4: convert free groups order lists to xarrays")
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3655,16 +3655,26 @@ static void ext4_discard_work(struct wor
+
+ static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
+ {
++ if (!sbi->s_mb_avg_fragment_size)
++ return;
++
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
++
+ kfree(sbi->s_mb_avg_fragment_size);
++ sbi->s_mb_avg_fragment_size = NULL;
+ }
+
+ static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
+ {
++ if (!sbi->s_mb_largest_free_orders)
++ return;
++
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_largest_free_orders[i]);
++
+ kfree(sbi->s_mb_largest_free_orders);
++ sbi->s_mb_largest_free_orders = NULL;
+ }
+
+ int ext4_mb_init(struct super_block *sb)
--- /dev/null
+From d8b6dc9256762293048bf122fc11c4e612d0ef5d Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 1 Oct 2025 09:25:35 +0900
+Subject: ksmbd: add max ip connections parameter
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit d8b6dc9256762293048bf122fc11c4e612d0ef5d upstream.
+
+This parameter sets the maximum number of connections allowed per IP
+address. The default is 8.
+
+Cc: stable@vger.kernel.org
+Fixes: c0d41112f1a5 ("ksmbd: extend the connection limiting mechanism to support IPv6")
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/ksmbd_netlink.h | 5 +++--
+ fs/smb/server/server.h | 1 +
+ fs/smb/server/transport_ipc.c | 3 +++
+ fs/smb/server/transport_tcp.c | 27 ++++++++++++++++-----------
+ 4 files changed, 23 insertions(+), 13 deletions(-)
+
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -112,10 +112,11 @@ struct ksmbd_startup_request {
+ __u32 smbd_max_io_size; /* smbd read write size */
+ __u32 max_connections; /* Number of maximum simultaneous connections */
+ __s8 bind_interfaces_only;
+- __s8 reserved[503]; /* Reserved room */
++ __u32 max_ip_connections; /* Number of maximum connection per ip address */
++ __s8 reserved[499]; /* Reserved room */
+ __u32 ifc_list_sz; /* interfaces list size */
+ __s8 ____payload[];
+-};
++} __packed;
+
+ #define KSMBD_STARTUP_CONFIG_INTERFACES(s) ((s)->____payload)
+
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -43,6 +43,7 @@ struct ksmbd_server_config {
+ unsigned int auth_mechs;
+ unsigned int max_connections;
+ unsigned int max_inflight_req;
++ unsigned int max_ip_connections;
+
+ char *conf[SERVER_CONF_WORK_GROUP + 1];
+ struct task_struct *dh_task;
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -335,6 +335,9 @@ static int ipc_server_config_on_startup(
+ if (req->max_connections)
+ server_conf.max_connections = req->max_connections;
+
++ if (req->max_ip_connections)
++ server_conf.max_ip_connections = req->max_ip_connections;
++
+ ret = ksmbd_set_netbios_name(req->netbios_name);
+ ret |= ksmbd_set_server_string(req->server_string);
+ ret |= ksmbd_set_work_group(req->work_group);
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -238,6 +238,7 @@ static int ksmbd_kthread_fn(void *p)
+ struct interface *iface = (struct interface *)p;
+ struct ksmbd_conn *conn;
+ int ret;
++ unsigned int max_ip_conns;
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&iface->sock_release_lock);
+@@ -255,34 +256,38 @@ static int ksmbd_kthread_fn(void *p)
+ continue;
+ }
+
++ if (!server_conf.max_ip_connections)
++ goto skip_max_ip_conns_limit;
++
+ /*
+ * Limits repeated connections from clients with the same IP.
+ */
++ max_ip_conns = 0;
+ down_read(&conn_list_lock);
+- list_for_each_entry(conn, &conn_list, conns_list)
++ list_for_each_entry(conn, &conn_list, conns_list) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (client_sk->sk->sk_family == AF_INET6) {
+ if (memcmp(&client_sk->sk->sk_v6_daddr,
+- &conn->inet6_addr, 16) == 0) {
+- ret = -EAGAIN;
+- break;
+- }
++ &conn->inet6_addr, 16) == 0)
++ max_ip_conns++;
+ } else if (inet_sk(client_sk->sk)->inet_daddr ==
+- conn->inet_addr) {
+- ret = -EAGAIN;
+- break;
+- }
++ conn->inet_addr)
++ max_ip_conns++;
+ #else
+ if (inet_sk(client_sk->sk)->inet_daddr ==
+- conn->inet_addr) {
++ conn->inet_addr)
++ max_ip_conns++;
++#endif
++ if (server_conf.max_ip_connections <= max_ip_conns) {
+ ret = -EAGAIN;
+ break;
+ }
+-#endif
++ }
+ up_read(&conn_list_lock);
+ if (ret == -EAGAIN)
+ continue;
+
++skip_max_ip_conns_limit:
+ if (server_conf.max_connections &&
+ atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+ pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
--- /dev/null
+From 88daf2f448aad05a2e6df738d66fe8b0cf85cee0 Mon Sep 17 00:00:00 2001
+From: Matvey Kovalev <matvey.kovalev@ispras.ru>
+Date: Thu, 25 Sep 2025 15:12:34 +0300
+Subject: ksmbd: fix error code overwriting in smb2_get_info_filesystem()
+
+From: Matvey Kovalev <matvey.kovalev@ispras.ru>
+
+commit 88daf2f448aad05a2e6df738d66fe8b0cf85cee0 upstream.
+
+If the client doesn't negotiate SMB3.1.1 POSIX Extensions, the proper
+error code isn't returned because it gets overwritten further down the
+function.
+
+Return the error immediately instead.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: e2f34481b24db ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matvey Kovalev <matvey.kovalev@ispras.ru>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5628,7 +5628,8 @@ static int smb2_get_info_filesystem(stru
+
+ if (!work->tcon->posix_extensions) {
+ pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+- rc = -EOPNOTSUPP;
++ path_put(&path);
++ return -EOPNOTSUPP;
+ } else {
+ info = (struct filesystem_posix_info *)(rsp->Buffer);
+ info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
--- /dev/null
+From 305853cce379407090a73b38c5de5ba748893aee Mon Sep 17 00:00:00 2001
+From: Yunseong Kim <ysk@kzalloc.com>
+Date: Mon, 15 Sep 2025 22:44:09 +0000
+Subject: ksmbd: Fix race condition in RPC handle list access
+
+From: Yunseong Kim <ysk@kzalloc.com>
+
+commit 305853cce379407090a73b38c5de5ba748893aee upstream.
+
+The 'sess->rpc_handle_list' XArray manages RPC handles within a ksmbd
+session. Access to this list is intended to be protected by
+'sess->rpc_lock' (an rw_semaphore). However, the locking implementation was
+flawed, leading to potential race conditions.
+
+In ksmbd_session_rpc_open(), the code incorrectly acquired only a read lock
+before calling xa_store() and xa_erase(). Since these operations modify
+the XArray structure, a write lock is required to ensure exclusive access
+and prevent data corruption from concurrent modifications.
+
+Furthermore, ksmbd_session_rpc_method() accessed the list using xa_load()
+without holding any lock at all. This could lead to reading inconsistent
+data or a potential use-after-free if an entry is concurrently removed and
+the pointer is dereferenced.
+
+Fix these issues by:
+1. Using down_write() and up_write() in ksmbd_session_rpc_open()
+ to ensure exclusive access during XArray modification, and ensuring
+ the lock is correctly released on error paths.
+2. Adding down_read() and up_read() in ksmbd_session_rpc_method()
+ to safely protect the lookup.
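+
+A minimal sketch of the resulting locking pattern (paraphrasing the diff
+below, error handling omitted): writers that insert or remove a handle
+hold the semaphore exclusively, readers that look up and dereference a
+handle hold it shared.
+
+	/* modification path (ksmbd_session_rpc_open) */
+	down_write(&sess->rpc_lock);
+	old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
+	...
+	up_write(&sess->rpc_lock);
+
+	/* lookup path (ksmbd_session_rpc_method) */
+	down_read(&sess->rpc_lock);
+	entry = xa_load(&sess->rpc_handle_list, id);
+	method = entry ? entry->method : 0; /* entry cannot be freed concurrently */
+	up_read(&sess->rpc_lock);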
+
+Fixes: a1f46c99d9ea ("ksmbd: fix use-after-free in ksmbd_session_rpc_open")
+Fixes: b685757c7b08 ("ksmbd: Implements sess->rpc_handle_list as xarray")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_
+ if (!entry)
+ return -ENOMEM;
+
+- down_read(&sess->rpc_lock);
+ entry->method = method;
+ entry->id = id = ksmbd_ipc_id_alloc();
+ if (id < 0)
+ goto free_entry;
++
++ down_write(&sess->rpc_lock);
+ old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
+- if (xa_is_err(old))
++ if (xa_is_err(old)) {
++ up_write(&sess->rpc_lock);
+ goto free_id;
++ }
+
+ resp = ksmbd_rpc_open(sess, id);
+- if (!resp)
+- goto erase_xa;
++ if (!resp) {
++ xa_erase(&sess->rpc_handle_list, entry->id);
++ up_write(&sess->rpc_lock);
++ goto free_id;
++ }
+
+- up_read(&sess->rpc_lock);
++ up_write(&sess->rpc_lock);
+ kvfree(resp);
+ return id;
+-erase_xa:
+- xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ kfree(entry);
+- up_read(&sess->rpc_lock);
+ return -EINVAL;
+ }
+
+@@ -144,9 +147,14 @@ void ksmbd_session_rpc_close(struct ksmb
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ struct ksmbd_session_rpc *entry;
++ int method;
+
++ down_read(&sess->rpc_lock);
+ entry = xa_load(&sess->rpc_handle_list, id);
+- return entry ? entry->method : 0;
++ method = entry ? entry->method : 0;
++ up_read(&sess->rpc_lock);
++
++ return method;
+ }
+
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
--- /dev/null
+From c8168b4faf1d62cbb320a3e518ad31cdd567cb05 Mon Sep 17 00:00:00 2001
+From: Youling Tang <tangyouling@kylinos.cn>
+Date: Thu, 2 Oct 2025 22:39:08 +0800
+Subject: LoongArch: Automatically disable kaslr if boot from kexec_file
+
+From: Youling Tang <tangyouling@kylinos.cn>
+
+commit c8168b4faf1d62cbb320a3e518ad31cdd567cb05 upstream.
+
+Automatically disable kaslr when the kernel loads from kexec_file.
+
+kexec_file loads the secondary kernel image to a non-linked address,
+inherently providing KASLR-like randomization.
+
+However, on LoongArch where System RAM may be non-contiguous, enabling
+KASLR for the second kernel may relocate it to an invalid memory region
+and cause a boot failure. Thus, we disable KASLR when "kexec_file" is
+detected in the command line.
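+
+A small userspace sketch of the command-line match this patch adds (the
+helper name is made up for illustration; the kernel performs the same
+strstr() check inside kaslr_disabled()):
+
+	#include <stdio.h>
+	#include <string.h>
+	#include <stdbool.h>
+
+	static bool kexec_file_in_cmdline(const char *cmdline)
+	{
+		const char *str = strstr(cmdline, "kexec_file");
+
+		if (!str)
+			return false;
+		/* match only at the start or right after a space */
+		return str == cmdline || *(str - 1) == ' ';
+	}
+
+	int main(void)
+	{
+		printf("%d\n", kexec_file_in_cmdline("kexec_file root=/dev/vda")); /* 1 */
+		printf("%d\n", kexec_file_in_cmdline("root=/dev/vda kexec_file")); /* 1 */
+		printf("%d\n", kexec_file_in_cmdline("root=/dev/vda"));            /* 0 */
+		return 0;
+	}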
+
+To ensure compatibility with older kernels loaded via kexec_file, this
+patch should be backported to stable branches.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Youling Tang <tangyouling@kylinos.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/relocate.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/loongarch/kernel/relocate.c
++++ b/arch/loongarch/kernel/relocate.c
+@@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled
+ return true;
+ #endif
+
++ str = strstr(boot_command_line, "kexec_file");
++ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
++ return true;
++
+ return false;
+ }
+
--- /dev/null
+From a04731cbee6e981afa4263289a0c0059c8b2e7b9 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: Don't align trampoline size
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit a04731cbee6e981afa4263289a0c0059c8b2e7b9 upstream.
+
+Currently, arch_alloc_bpf_trampoline() uses bpf_prog_pack_alloc(), which
+packs multiple trampolines into a huge page, so there is no need to
+round the trampoline size up to a page.
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1785,8 +1785,7 @@ int arch_bpf_trampoline_size(const struc
+
+ ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
+
+- /* Page align */
+- return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
++ return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
+ }
+
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
--- /dev/null
+From 7b6c2d172d023d344527d3cb4516d0d6b29f4919 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: Fix uninitialized symbol 'retval_off'
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 7b6c2d172d023d344527d3cb4516d0d6b29f4919 upstream.
+
+In __arch_prepare_bpf_trampoline(), retval_off is meaningful only when
+save_ret is not 0, so the current logic is correct. But it may cause a
+build warning:
+
+arch/loongarch/net/bpf_jit.c:1547 __arch_prepare_bpf_trampoline() error: uninitialized symbol 'retval_off'.
+
+So initialize retval_off unconditionally to fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: f9b6b41f0cf3 ("LoongArch: BPF: Add basic bpf trampoline support")
+Closes: https://lore.kernel.org/r/202508191020.PBBh07cK-lkp@intel.com/
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1538,11 +1538,10 @@ static int __arch_prepare_bpf_trampoline
+ stack_size = 16;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+- if (save_ret) {
+- /* Save BPF R0 and A0 */
+- stack_size += 16;
+- retval_off = stack_size;
+- }
++ if (save_ret)
++ stack_size += 16; /* Save BPF R0 and A0 */
++
++ retval_off = stack_size;
+
+ /* Room of trampoline frame to store args */
+ nargs = m->nr_args;
--- /dev/null
+From de2c0b7788648850b68b75f7cc8698b2749dd31e Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:53 +0800
+Subject: LoongArch: BPF: Make error handling robust in arch_prepare_bpf_trampoline()
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit de2c0b7788648850b68b75f7cc8698b2749dd31e upstream.
+
+Bail out instead of trying to perform a bpf_arch_text_copy() if
+__arch_prepare_bpf_trampoline() failed.
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1756,7 +1756,10 @@ int arch_prepare_bpf_trampoline(struct b
+
+ jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
+ ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
+- if (ret > 0 && validate_code(&ctx) < 0) {
++ if (ret < 0)
++ goto out;
++
++ if (validate_code(&ctx) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
--- /dev/null
+From ea645cfd3d5f74a2bd40a60003f113b3c467975d Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: Make trampoline size stable
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit ea645cfd3d5f74a2bd40a60003f113b3c467975d upstream.
+
+When attaching fentry/fexit BPF programs, __arch_prepare_bpf_trampoline()
+is called twice with different `struct bpf_tramp_image *im`:
+
+ bpf_trampoline_update()
+ -> arch_bpf_trampoline_size()
+ -> __arch_prepare_bpf_trampoline()
+ -> arch_prepare_bpf_trampoline()
+ -> __arch_prepare_bpf_trampoline()
+
+Using move_imm() emits an instruction sequence whose length depends on
+the immediate value, so the two calls can produce different sizes; use
+move_addr() instead to prevent such subtle bugs.
+
+(I observed this while debugging other issues with printk.)
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1632,7 +1632,7 @@ static int __arch_prepare_bpf_trampoline
+ orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
++ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
+ if (ret)
+ return ret;
+@@ -1682,7 +1682,7 @@ static int __arch_prepare_bpf_trampoline
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = ctx->ro_image + ctx->idx;
+- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
++ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
+ if (ret)
+ goto out;
--- /dev/null
+From e82406c7cbdd368c5459b8a45e118811d2ba0794 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: No support of struct argument in trampoline programs
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit e82406c7cbdd368c5459b8a45e118811d2ba0794 upstream.
+
+The current implementation does not support struct arguments. This
+causes an oops when running the bpf selftests:
+
+ $ ./test_progs -a tracing_struct
+ Oops[#1]:
+ CPU -1 Unable to handle kernel paging request at virtual address 0000000000000018, era == 9000000085bef268, ra == 90000000844f3938
+ rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
+ rcu: 1-...0: (19 ticks this GP) idle=1094/1/0x4000000000000000 softirq=1380/1382 fqs=801
+ rcu: (detected by 0, t=5252 jiffies, g=1197, q=52 ncpus=4)
+ Sending NMI from CPU 0 to CPUs 1:
+ rcu: rcu_preempt kthread starved for 2495 jiffies! g1197 f0x0 RCU_GP_DOING_FQS(6) ->state=0x0 ->cpu=2
+ rcu: Unless rcu_preempt kthread gets sufficient CPU time, OOM is now expected behavior.
+ rcu: RCU grace-period kthread stack dump:
+ task:rcu_preempt state:I stack:0 pid:15 tgid:15 ppid:2 task_flags:0x208040 flags:0x00000800
+ Stack : 9000000100423e80 0000000000000402 0000000000000010 90000001003b0680
+ 9000000085d88000 0000000000000000 0000000000000040 9000000087159350
+ 9000000085c2b9b0 0000000000000001 900000008704a000 0000000000000005
+ 00000000ffff355b 00000000ffff355b 0000000000000000 0000000000000004
+ 9000000085d90510 0000000000000000 0000000000000002 7b5d998f8281e86e
+ 00000000ffff355c 7b5d998f8281e86e 000000000000003f 9000000087159350
+ 900000008715bf98 0000000000000005 9000000087036000 900000008704a000
+ 9000000100407c98 90000001003aff80 900000008715c4c0 9000000085c2b9b0
+ 00000000ffff355b 9000000085c33d3c 00000000000000b4 0000000000000000
+ 9000000007002150 00000000ffff355b 9000000084615480 0000000007000002
+ ...
+ Call Trace:
+ [<9000000085c2a868>] __schedule+0x410/0x1520
+ [<9000000085c2b9ac>] schedule+0x34/0x190
+ [<9000000085c33d38>] schedule_timeout+0x98/0x140
+ [<90000000845e9120>] rcu_gp_fqs_loop+0x5f8/0x868
+ [<90000000845ed538>] rcu_gp_kthread+0x260/0x2e0
+ [<900000008454e8a4>] kthread+0x144/0x238
+ [<9000000085c26b60>] ret_from_kernel_thread+0x28/0xc8
+ [<90000000844f20e4>] ret_from_kernel_thread_asm+0xc/0x88
+
+ rcu: Stack dump where RCU GP kthread last ran:
+ Sending NMI from CPU 0 to CPUs 2:
+ NMI backtrace for cpu 2 skipped: idling at idle_exit+0x0/0x4
+
+Reject it for now.
+
+Cc: stable@vger.kernel.org
+Fixes: f9b6b41f0cf3 ("LoongArch: BPF: Add basic bpf trampoline support")
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1526,6 +1526,12 @@ static int __arch_prepare_bpf_trampoline
+ if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
+ return -ENOTSUPP;
+
++ /* FIXME: No support of struct argument */
++ for (i = 0; i < m->nr_args; i++) {
++ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
++ return -ENOTSUPP;
++ }
++
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
--- /dev/null
+From 3d770bd11b943066db11dba7be0b6f0d81cb5d50 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: No text_poke() for kernel text
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 3d770bd11b943066db11dba7be0b6f0d81cb5d50 upstream.
+
+The current implementation of bpf_arch_text_poke() requires 5 nops at
+the patch site, which is not applicable to kernel/module functions
+because LoongArch reserves ONLY 2 nops at the function entry. With
+CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y, this can be done by ftrace
+instead.
+
+See the following commits for details:
+ * commit b91e014f078e ("bpf: Make BPF trampoline use register_ftrace_direct() API")
+ * commit 9cdc3b6a299c ("LoongArch: ftrace: Add direct call support")
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1294,8 +1294,10 @@ int bpf_arch_text_poke(void *ip, enum bp
+ u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+ u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+
+- if (!is_kernel_text((unsigned long)ip) &&
+- !is_bpf_text_address((unsigned long)ip))
++ /* Only poking bpf text is supported. Since kernel function entry
++ * is set up by ftrace, we rely on ftrace to poke kernel functions.
++ */
++ if (!is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
--- /dev/null
+From b0f50dc09bf008b2e581d5e6ad570d325725881c Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: Remove duplicated bpf_flush_icache()
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit b0f50dc09bf008b2e581d5e6ad570d325725881c upstream.
+
+bpf_flush_icache() is already called by bpf_arch_text_copy(), so remove
+the duplicate call. The same has been done on arm64 and riscv.
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1770,7 +1770,6 @@ int arch_prepare_bpf_trampoline(struct b
+ goto out;
+ }
+
+- bpf_flush_icache(ro_image, ro_image_end);
+ out:
+ kvfree(image);
+ return ret < 0 ? ret : size;
--- /dev/null
+From 909d3e3f51b1bc00f33a484ce0d41b42fed01965 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:52 +0800
+Subject: LoongArch: BPF: Remove duplicated flags check
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 909d3e3f51b1bc00f33a484ce0d41b42fed01965 upstream.
+
+The check for (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY) is
+duplicated in __arch_prepare_bpf_trampoline(). Remove it.
+
+While at it, make sure stack_size and nargs are initialized once.
+
+Cc: stable@vger.kernel.org
+Tested-by: Vincent Li <vincent.mc.li@gmail.com>
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1486,7 +1486,7 @@ static int __arch_prepare_bpf_trampoline
+ void *func_addr, u32 flags)
+ {
+ int i, ret, save_ret;
+- int stack_size = 0, nargs = 0;
++ int stack_size, nargs;
+ int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
+ bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
+ void *orig_call = func_addr;
+@@ -1495,9 +1495,6 @@ static int __arch_prepare_bpf_trampoline
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ u32 **branches = NULL;
+
+- if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+- return -ENOTSUPP;
+-
+ /*
+ * FP + 8 [ RA to parent func ] return address to parent
+ * function
+@@ -1537,10 +1534,8 @@ static int __arch_prepare_bpf_trampoline
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
+- stack_size = 0;
+-
+ /* Room of trampoline frame to store return address and frame pointer */
+- stack_size += 16;
++ stack_size = 16;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret) {
--- /dev/null
+From 8b51b11b3d81c1ed48a52f87da9256d737b723a0 Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Thu, 2 Oct 2025 22:39:53 +0800
+Subject: LoongArch: BPF: Sign-extend struct ops return values properly
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 8b51b11b3d81c1ed48a52f87da9256d737b723a0 upstream.
+
+The ns_bpf_qdisc selftest triggers a kernel panic:
+
+ Oops[#1]:
+ CPU 0 Unable to handle kernel paging request at virtual address 0000000000741d58, era == 90000000851b5ac0, ra == 90000000851b5aa4
+ CPU: 0 UID: 0 PID: 449 Comm: test_progs Tainted: G OE 6.16.0+ #3 PREEMPT(full)
+ Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
+ Hardware name: QEMU QEMU Virtual Machine, BIOS unknown 2/2/2022
+ pc 90000000851b5ac0 ra 90000000851b5aa4 tp 90000001076b8000 sp 90000001076bb600
+ a0 0000000000741ce8 a1 0000000000000001 a2 90000001076bb5c0 a3 0000000000000008
+ a4 90000001004c4620 a5 9000000100741ce8 a6 0000000000000000 a7 0100000000000000
+ t0 0000000000000010 t1 0000000000000000 t2 9000000104d24d30 t3 0000000000000001
+ t4 4f2317da8a7e08c4 t5 fffffefffc002f00 t6 90000001004c4620 t7 ffffffffc61c5b3d
+ t8 0000000000000000 u0 0000000000000001 s9 0000000000000050 s0 90000001075bc800
+ s1 0000000000000040 s2 900000010597c400 s3 0000000000000008 s4 90000001075bc880
+ s5 90000001075bc8f0 s6 0000000000000000 s7 0000000000741ce8 s8 0000000000000000
+ ra: 90000000851b5aa4 __qdisc_run+0xac/0x8d8
+ ERA: 90000000851b5ac0 __qdisc_run+0xc8/0x8d8
+ CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)
+ PRMD: 00000004 (PPLV0 +PIE -PWE)
+ EUEN: 00000007 (+FPE +SXE +ASXE -BTE)
+ ECFG: 00071c1d (LIE=0,2-4,10-12 VS=7)
+ ESTAT: 00010000 [PIL] (IS= ECode=1 EsubCode=0)
+ BADV: 0000000000741d58
+ PRID: 0014c010 (Loongson-64bit, Loongson-3A5000)
+ Modules linked in: bpf_testmod(OE) [last unloaded: bpf_testmod(OE)]
+ Process test_progs (pid: 449, threadinfo=000000009af02b3a, task=00000000e9ba4956)
+ Stack : 0000000000000000 90000001075bc8ac 90000000869524a8 9000000100741ce8
+ 90000001075bc800 9000000100415300 90000001075bc8ac 0000000000000000
+ 900000010597c400 900000008694a000 0000000000000000 9000000105b59000
+ 90000001075bc800 9000000100741ce8 0000000000000050 900000008513000c
+ 9000000086936000 0000000100094d4c fffffff400676208 0000000000000000
+ 9000000105b59000 900000008694a000 9000000086bf0dc0 9000000105b59000
+ 9000000086bf0d68 9000000085147010 90000001075be788 0000000000000000
+ 9000000086bf0f98 0000000000000001 0000000000000010 9000000006015840
+ 0000000000000000 9000000086be6c40 0000000000000000 0000000000000000
+ 0000000000000000 4f2317da8a7e08c4 0000000000000101 4f2317da8a7e08c4
+ ...
+ Call Trace:
+ [<90000000851b5ac0>] __qdisc_run+0xc8/0x8d8
+ [<9000000085130008>] __dev_queue_xmit+0x578/0x10f0
+ [<90000000853701c0>] ip6_finish_output2+0x2f0/0x950
+ [<9000000085374bc8>] ip6_finish_output+0x2b8/0x448
+ [<9000000085370b24>] ip6_xmit+0x304/0x858
+ [<90000000853c4438>] inet6_csk_xmit+0x100/0x170
+ [<90000000852b32f0>] __tcp_transmit_skb+0x490/0xdd0
+ [<90000000852b47fc>] tcp_connect+0xbcc/0x1168
+ [<90000000853b9088>] tcp_v6_connect+0x580/0x8a0
+ [<90000000852e7738>] __inet_stream_connect+0x170/0x480
+ [<90000000852e7a98>] inet_stream_connect+0x50/0x88
+ [<90000000850f2814>] __sys_connect+0xe4/0x110
+ [<90000000850f2858>] sys_connect+0x18/0x28
+ [<9000000085520c94>] do_syscall+0x94/0x1a0
+ [<9000000083df1fb8>] handle_syscall+0xb8/0x158
+
+ Code: 4001ad80 2400873f 2400832d <240073cc> 001137ff 001133ff 6407b41f 001503cc 0280041d
+
+ ---[ end trace 0000000000000000 ]---
+
+The bpf_fifo_dequeue prog returns an skb, which is a pointer. The
+pointer is treated as a 32-bit value and sign-extended to 64-bit in the
+epilogue. This behavior is right for most BPF program types but wrong
+for struct ops, which must follow the LoongArch ABI.
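+
+A small userspace sketch (not JIT code) of what that truncating sign
+extension does to a pointer like the one in a5 of the register dump
+above:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint64_t skb = 0x9000000100741ce8ULL;	/* a5 in the oops above */
+		/* what a 32-bit sign-extending epilogue hands back */
+		uint64_t ret = (uint64_t)(int64_t)(int32_t)skb;
+
+		printf("original: %#llx\n", (unsigned long long)skb);
+		printf("returned: %#llx\n", (unsigned long long)ret);	/* 0x741ce8 */
+		return 0;
+	}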
+
+So sign-extend struct ops return values according to the LoongArch ABI
+[1] and the return value spec in the function model.
+
+[1]: https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+
+Cc: stable@vger.kernel.org
+Fixes: 6abf17d690d8 ("LoongArch: BPF: Add struct ops support for trampoline")
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c | 37 +++++++++++++++++++++++++++++++++++-
+ 1 file changed, 36 insertions(+), 1 deletion(-)
+
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index fa1a3234e9a6..cbe53d0b7fb0 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1448,6 +1448,37 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
+ bpf_prog_pack_free(image, size);
+ }
+
++/*
++ * Sign-extend the register if necessary
++ */
++static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
++{
++ /* ABI requires unsigned char/short to be zero-extended */
++ if (!sign && (size == 1 || size == 2)) {
++ if (rd != rj)
++ move_reg(ctx, rd, rj);
++ return;
++ }
++
++ switch (size) {
++ case 1:
++ emit_insn(ctx, extwb, rd, rj);
++ break;
++ case 2:
++ emit_insn(ctx, extwh, rd, rj);
++ break;
++ case 4:
++ emit_insn(ctx, addiw, rd, rj, 0);
++ break;
++ case 8:
++ if (rd != rj)
++ move_reg(ctx, rd, rj);
++ break;
++ default:
++ pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
++ }
++}
++
+ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
+ void *func_addr, u32 flags)
+@@ -1655,8 +1686,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
+ restore_args(ctx, m->nr_args, args_off);
+
+ if (save_ret) {
+- emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
++ if (is_struct_ops)
++ sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
++ m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
++ else
++ emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ }
+
+ emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
+--
+2.51.0
+
--- /dev/null
+From f04aad36a07cc17b7a5d5b9a2d386ce6fae63e93 Mon Sep 17 00:00:00 2001
+From: Jakub Acs <acsjakub@amazon.de>
+Date: Wed, 1 Oct 2025 09:03:52 +0000
+Subject: mm/ksm: fix flag-dropping behavior in ksm_madvise
+
+From: Jakub Acs <acsjakub@amazon.de>
+
+commit f04aad36a07cc17b7a5d5b9a2d386ce6fae63e93 upstream.
+
+syzkaller discovered the following crash: (kernel BUG)
+
+[ 44.607039] ------------[ cut here ]------------
+[ 44.607422] kernel BUG at mm/userfaultfd.c:2067!
+[ 44.608148] Oops: invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN NOPTI
+[ 44.608814] CPU: 1 UID: 0 PID: 2475 Comm: reproducer Not tainted 6.16.0-rc6 #1 PREEMPT(none)
+[ 44.609635] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+[ 44.610695] RIP: 0010:userfaultfd_release_all+0x3a8/0x460
+
+<snip other registers, drop unreliable trace>
+
+[ 44.617726] Call Trace:
+[ 44.617926] <TASK>
+[ 44.619284] userfaultfd_release+0xef/0x1b0
+[ 44.620976] __fput+0x3f9/0xb60
+[ 44.621240] fput_close_sync+0x110/0x210
+[ 44.622222] __x64_sys_close+0x8f/0x120
+[ 44.622530] do_syscall_64+0x5b/0x2f0
+[ 44.622840] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 44.623244] RIP: 0033:0x7f365bb3f227
+
+The kernel panics because it detects a UFFD inconsistency during
+userfaultfd_release_all(): specifically, a VMA which has a valid pointer
+to vma->vm_userfaultfd_ctx but no UFFD flags in vma->vm_flags.
+
+The inconsistency is caused in ksm_madvise(): when the user calls
+madvise() with MADV_UNMERGEABLE on a VMA that is registered for UFFD in
+MINOR mode, it accidentally clears all flags stored in the upper 32 bits
+of vma->vm_flags.
+
+Assuming an x86_64 kernel build, unsigned long is 64 bits wide and
+unsigned int and int are 32 bits wide. This setup causes the following
+mishap during the &= ~VM_MERGEABLE assignment.
+
+VM_MERGEABLE is a 32-bit constant of type unsigned int, 0x8000'0000.
+After ~ is applied, it becomes 0x7fff'ffff unsigned int, which is then
+promoted to unsigned long before the & operation. This promotion fills
+upper 32 bits with leading 0s, as we're doing unsigned conversion (and
+even for a signed conversion, this wouldn't help as the leading bit is 0).
+& operation thus ends up AND-ing vm_flags with 0x0000'0000'7fff'ffff
+instead of intended 0xffff'ffff'7fff'ffff and hence accidentally clears
+the upper 32-bits of its value.
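+
+A small userspace sketch of that promotion (the flag values match the
+old and new definitions in include/linux/mm.h; the demo itself is
+illustrative only):
+
+	#include <stdio.h>
+
+	#define VM_MERGEABLE_OLD 0x80000000	/* unsigned int */
+	#define VM_MERGEABLE_NEW (1UL << 31)	/* BIT(31), unsigned long */
+
+	int main(void)
+	{
+		unsigned long vm_flags = 0xffffffff80000000UL;
+
+		/* prints 0: the upper 32 bits are cleared */
+		printf("%#lx\n", vm_flags & ~VM_MERGEABLE_OLD);
+		/* prints 0xffffffff00000000: the upper 32 bits survive */
+		printf("%#lx\n", vm_flags & ~VM_MERGEABLE_NEW);
+		return 0;
+	}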
+
+Fix it by changing `VM_MERGEABLE` constant to unsigned long, using the
+BIT() macro.
+
+Note: other VM_* flags are not affected: This only happens to the
+VM_MERGEABLE flag, as the other VM_* flags are all constants of type int
+and after ~ operation, they end up with leading 1 and are thus converted
+to unsigned long with leading 1s.
+
+Note 2:
+After commit 31defc3b01d9 ("userfaultfd: remove (VM_)BUG_ON()s"), this is
+no longer a kernel BUG, but a WARNING at the same place:
+
+[ 45.595973] WARNING: CPU: 1 PID: 2474 at mm/userfaultfd.c:2067
+
+but the root-cause (flag-drop) remains the same.
+
+[akpm@linux-foundation.org: rust bindgen wasn't able to handle BIT(), from Miguel]
+ Link: https://lore.kernel.org/oe-kbuild-all/202510030449.VfSaAjvd-lkp@intel.com/
+Link: https://lkml.kernel.org/r/20251001090353.57523-2-acsjakub@amazon.de
+Fixes: 7677f7fd8be7 ("userfaultfd: add minor fault registration mode")
+Signed-off-by: Jakub Acs <acsjakub@amazon.de>
+Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: SeongJae Park <sj@kernel.org>
+Tested-by: Alice Ryhl <aliceryhl@google.com>
+Tested-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+Cc: Xu Xin <xu.xin16@zte.com.cn>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 2 +-
+ rust/bindings/bindings_helper.h | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -296,7 +296,7 @@ extern unsigned int kobjsize(const void
+ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+ #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
+ #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
+-#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
++#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */
+
+ #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
+ #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -99,3 +99,4 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRE
+
+ const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
+ const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
++const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
--- /dev/null
+From 75604e9a5b60707722028947d6dc6bdacb42282e Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Sat, 16 Aug 2025 18:49:05 +0800
+Subject: pwm: loongson: Fix LOONGSON_PWM_FREQ_DEFAULT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Xi Ruoyao <xry111@xry111.site>
+
+commit 75604e9a5b60707722028947d6dc6bdacb42282e upstream.
+
+Per the 7A1000 and 7A2000 user manuals, the clock frequency of their
+PWM controllers is 50 MHz, not 50 kHz.
+
+Fixes: 2b62c89448dd ("pwm: Add Loongson PWM controller support")
+Signed-off-by: Xi Ruoyao <xry111@xry111.site>
+Reviewed-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Reviewed-by: Huacai Chen <chenhuacai@loongson.cn>
+Link: https://lore.kernel.org/r/20250816104904.4779-2-xry111@xry111.site
+Cc: stable@vger.kernel.org
+Signed-off-by: Uwe Kleine-König <ukleinek@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pwm/pwm-loongson.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pwm/pwm-loongson.c b/drivers/pwm/pwm-loongson.c
+index 1ba16168cbb4..31a57edecfd0 100644
+--- a/drivers/pwm/pwm-loongson.c
++++ b/drivers/pwm/pwm-loongson.c
+@@ -49,7 +49,7 @@
+ #define LOONGSON_PWM_CTRL_REG_DZONE BIT(10) /* Anti-dead Zone Enable Bit */
+
+ /* default input clk frequency for the ACPI case */
+-#define LOONGSON_PWM_FREQ_DEFAULT 50000 /* Hz */
++#define LOONGSON_PWM_FREQ_DEFAULT 50000000 /* Hz */
+
+ struct pwm_loongson_ddata {
+ struct clk *clk;
+--
+2.51.0
+
tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch
tracing-fix-tracing_mark_raw_write-to-use-buf-and-not-ubuf.patch
tracing-stop-fortify-string-from-warning-in-tracing_mark_raw_write.patch
+dm-fix-queue-start-stop-imbalance-under-suspend-load-resume-races.patch
+dm-fix-null-pointer-dereference-in-__dm_suspend.patch
+loongarch-automatically-disable-kaslr-if-boot-from-kexec_file.patch
+pwm-loongson-fix-loongson_pwm_freq_default.patch
+loongarch-bpf-sign-extend-struct-ops-return-values-properly.patch
+loongarch-bpf-no-support-of-struct-argument-in-trampoline-programs.patch
+loongarch-bpf-don-t-align-trampoline-size.patch
+loongarch-bpf-make-trampoline-size-stable.patch
+loongarch-bpf-make-error-handling-robust-in-arch_prepare_bpf_trampoline.patch
+loongarch-bpf-remove-duplicated-bpf_flush_icache.patch
+loongarch-bpf-no-text_poke-for-kernel-text.patch
+loongarch-bpf-remove-duplicated-flags-check.patch
+loongarch-bpf-fix-uninitialized-symbol-retval_off.patch
+mm-ksm-fix-flag-dropping-behavior-in-ksm_madvise.patch
+ksmbd-fix-race-condition-in-rpc-handle-list-access.patch
+ksmbd-fix-error-code-overwriting-in-smb2_get_info_filesystem.patch
+ksmbd-add-max-ip-connections-parameter.patch
+ext4-fix-potential-null-deref-in-ext4_mb_init.patch
+ext4-fix-checks-for-orphan-inodes.patch