--- /dev/null
+From stable+bounces-208113-greg=kroah.com@vger.kernel.org Mon Jan 12 12:43:02 2026
+From: Keerthana K <keerthana.kalyanasundaram@broadcom.com>
+Date: Mon, 12 Jan 2026 11:39:36 +0000
+Subject: blk-throttle: Set BIO_THROTTLED when bio has been throttled
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: tj@kernel.org, axboe@kernel.dk, cgroups@vger.kernel.org, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, ajay.kaher@broadcom.com, alexey.makhalov@broadcom.com, vamsi-krishna.brahmajosyula@broadcom.com, yin.ding@broadcom.com, tapas.kundu@broadcom.com, Laibin Qiu <qiulaibin@huawei.com>, Ming Lei <ming.lei@redhat.com>, Sasha Levin <sashal@kernel.org>, Keerthana K <keerthana.kalyanasundaram@broadcom.com>, Shivani Agarwal <shivani.agarwal@broadcom.com>
+Message-ID: <20260112113936.3291786-1-keerthana.kalyanasundaram@broadcom.com>
+
+From: Laibin Qiu <qiulaibin@huawei.com>
+
+[ Upstream commit 5a011f889b4832aa80c2a872a5aade5c48d2756f ]
+
+1. In the current code, every bio gets the BIO_THROTTLED flag set
+after __blk_throtl_bio().
+
+2. If a bio needs to be throttled, the dispatch timer is started and
+the bio is not submitted directly; it is submitted later from
+blk_throtl_dispatch_work_fn() when the timer expires. But in the
+current code, when a bio is throttled, BIO_THROTTLED is only set on
+it after the timer has been started. If the bio has already been
+completed by then, this causes the use-after-free below.
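+
+A simplified sketch of the racy ordering (illustrative only; the exact
+call sites are in the diff below):
+
+  submitter                             completion path
+  ---------                             ---------------
+  blk_throtl_bio()
+    throtl_add_bio_tg()                 <- bio queued, timer armed
+    spin_unlock_irq(&q->queue_lock)
+                                        timer fires, worker submits bio
+                                        bio_endio() -> bio_put() frees bio
+    bio_set_flag(bio, BIO_THROTTLED)    <- use-after-free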
+
+BUG: KASAN: use-after-free in blk_throtl_bio+0x12f0/0x2c70
+Read of size 2 at addr ffff88801b8902d4 by task fio/26380
+
+ dump_stack+0x9b/0xce
+ print_address_description.constprop.6+0x3e/0x60
+ kasan_report.cold.9+0x22/0x3a
+ blk_throtl_bio+0x12f0/0x2c70
+ submit_bio_checks+0x701/0x1550
+ submit_bio_noacct+0x83/0xc80
+ submit_bio+0xa7/0x330
+ mpage_readahead+0x380/0x500
+ read_pages+0x1c1/0xbf0
+ page_cache_ra_unbounded+0x471/0x6f0
+ do_page_cache_ra+0xda/0x110
+ ondemand_readahead+0x442/0xae0
+ page_cache_async_ra+0x210/0x300
+ generic_file_buffered_read+0x4d9/0x2130
+ generic_file_read_iter+0x315/0x490
+ blkdev_read_iter+0x113/0x1b0
+ aio_read+0x2ad/0x450
+ io_submit_one+0xc8e/0x1d60
+ __se_sys_io_submit+0x125/0x350
+ do_syscall_64+0x2d/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Allocated by task 26380:
+ kasan_save_stack+0x19/0x40
+ __kasan_kmalloc.constprop.2+0xc1/0xd0
+ kmem_cache_alloc+0x146/0x440
+ mempool_alloc+0x125/0x2f0
+ bio_alloc_bioset+0x353/0x590
+ mpage_alloc+0x3b/0x240
+ do_mpage_readpage+0xddf/0x1ef0
+ mpage_readahead+0x264/0x500
+ read_pages+0x1c1/0xbf0
+ page_cache_ra_unbounded+0x471/0x6f0
+ do_page_cache_ra+0xda/0x110
+ ondemand_readahead+0x442/0xae0
+ page_cache_async_ra+0x210/0x300
+ generic_file_buffered_read+0x4d9/0x2130
+ generic_file_read_iter+0x315/0x490
+ blkdev_read_iter+0x113/0x1b0
+ aio_read+0x2ad/0x450
+ io_submit_one+0xc8e/0x1d60
+ __se_sys_io_submit+0x125/0x350
+ do_syscall_64+0x2d/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Freed by task 0:
+ kasan_save_stack+0x19/0x40
+ kasan_set_track+0x1c/0x30
+ kasan_set_free_info+0x1b/0x30
+ __kasan_slab_free+0x111/0x160
+ kmem_cache_free+0x94/0x460
+ mempool_free+0xd6/0x320
+ bio_free+0xe0/0x130
+ bio_put+0xab/0xe0
+ bio_endio+0x3a6/0x5d0
+ blk_update_request+0x590/0x1370
+ scsi_end_request+0x7d/0x400
+ scsi_io_completion+0x1aa/0xe50
+ scsi_softirq_done+0x11b/0x240
+ blk_mq_complete_request+0xd4/0x120
+ scsi_mq_done+0xf0/0x200
+ virtscsi_vq_done+0xbc/0x150
+ vring_interrupt+0x179/0x390
+ __handle_irq_event_percpu+0xf7/0x490
+ handle_irq_event_percpu+0x7b/0x160
+ handle_irq_event+0xcc/0x170
+ handle_edge_irq+0x215/0xb20
+ common_interrupt+0x60/0x120
+ asm_common_interrupt+0x1e/0x40
+
+Fix this by moving the setting of BIO_THROTTLED under the queue_lock.
+Since the dispatch path takes the same lock, the bio cannot be
+dispatched and completed while the flag is being set.
+
+Signed-off-by: Laibin Qiu <qiulaibin@huawei.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20220301123919.2381579-1-qiulaibin@huawei.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[ Keerthana: Remove 'out' and handle return with reference to commit 81c7a63 ]
+Signed-off-by: Keerthana K <keerthana.kalyanasundaram@broadcom.com>
+Signed-off-by: Shivani Agarwal <shivani.agarwal@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-throttle.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -2216,8 +2216,10 @@ bool blk_throtl_bio(struct bio *bio)
+ rcu_read_lock();
+
+ /* see throtl_charge_bio() */
+- if (bio_flagged(bio, BIO_THROTTLED))
+- goto out;
++ if (bio_flagged(bio, BIO_THROTTLED)) {
++ rcu_read_unlock();
++ return false;
++ }
+
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+@@ -2225,8 +2227,10 @@ bool blk_throtl_bio(struct bio *bio)
+ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+ }
+
+- if (!tg->has_rules[rw])
+- goto out;
++ if (!tg->has_rules[rw]) {
++ rcu_read_unlock();
++ return false;
++ }
+
+ spin_lock_irq(&q->queue_lock);
+
+@@ -2310,14 +2314,14 @@ again:
+ }
+
+ out_unlock:
+- spin_unlock_irq(&q->queue_lock);
+-out:
+ bio_set_flag(bio, BIO_THROTTLED);
+
+ #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ if (throttled || !td->track_bio_latency)
+ bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
+ #endif
++ spin_unlock_irq(&q->queue_lock);
++
+ rcu_read_unlock();
+ return throttled;
+ }
--- /dev/null
+From stable+bounces-208176-greg=kroah.com@vger.kernel.org Mon Jan 12 16:58:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jan 2026 10:58:20 -0500
+Subject: nfsd: provide locking for v4_end_grace
+To: stable@vger.kernel.org
+Cc: NeilBrown <neil@brown.name>, Li Lingfeng <lilingfeng3@huawei.com>, Jeff Layton <jlayton@kernel.org>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260112155820.749076-1-sashal@kernel.org>
+
+From: NeilBrown <neil@brown.name>
+
+[ Upstream commit 2857bd59feb63fcf40fe4baf55401baea6b4feb4 ]
+
+Writing to v4_end_grace can race with server shutdown and result in
+memory being accessed after it was freed - reclaim_str_hashtbl in
+particular.
+
+We cannot hold nfsd_mutex across the nfsd4_end_grace() call, as the
+mutex is held while client_tracking_ops->init() is called, and that
+can wait for an upcall to nfsdcltrack, which can itself write to
+v4_end_grace, resulting in a deadlock.
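+
+Sketched out, the deadlock that taking nfsd_mutex in the write handler
+would create looks like this (per the description above, not a literal
+trace):
+
+  nfsd startup: take nfsd_mutex
+    -> client_tracking_ops->init()
+      -> upcall to nfsdcltrack, wait for it to complete
+        -> nfsdcltrack writes to v4_end_grace
+          -> write handler blocks on nfsd_mutex  => deadlock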
+
+nfsd4_end_grace() is also called by the laundromat work queue, and this
+doesn't require locking as server shutdown will stop the work and wait
+for it before freeing anything that nfsd4_end_grace() might access.
+
+However, we must be sure that writing to v4_end_grace doesn't restart
+the work item after shutdown has already waited for it. For this we
+add a new flag protected by nn->client_lock. It is set only while it
+is safe to make client tracking calls, and writing to v4_end_grace only
+schedules the work while the flag is set, with the spinlock held.
+
+So this patch adds a nfsd_net field "client_tracking_active" which is
+set as described. Another field, "grace_end_forced", is set when
+v4_end_grace is written. Once it is set, and provided
+client_tracking_active is also set, the laundromat is scheduled.
+The "grace_end_forced" field bypasses the other checks for whether the
+grace period has finished.
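+
+A condensed sketch of how the two sides serialize on nn->client_lock
+(the actual code is in the diff below):
+
+  write_v4_end_grace():                nfs4_state_shutdown_net():
+    spin_lock(&nn->client_lock)          spin_lock(&nn->client_lock)
+    if (!nn->client_tracking_active)     nn->client_tracking_active = false
+            bail out (-EBUSY)            spin_unlock(&nn->client_lock)
+    nn->grace_end_forced = true          cancel_delayed_work_sync(...)
+    mod_delayed_work(laundromat, 0)
+    spin_unlock(&nn->client_lock)
+
+Since shutdown clears client_tracking_active under the same lock before
+cancelling the laundromat, a racing write either schedules work that
+cancel_delayed_work_sync() will still wait for, or sees the flag
+cleared and bails out; nothing can requeue the work afterwards.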
+
+This resolves a race which can result in use-after-free.
+
+Reported-by: Li Lingfeng <lilingfeng3@huawei.com>
+Closes: https://lore.kernel.org/linux-nfs/20250623030015.2353515-1-neil@brown.name/T/#t
+Fixes: 7f5ef2e900d9 ("nfsd: add a v4_end_grace file to /proc/fs/nfsd")
+Cc: stable@vger.kernel.org
+Signed-off-by: NeilBrown <neil@brown.name>
+Tested-by: Li Lingfeng <lilingfeng3@huawei.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/netns.h | 2 ++
+ fs/nfsd/nfs4state.c | 42 ++++++++++++++++++++++++++++++++++++++++--
+ fs/nfsd/nfsctl.c | 3 +--
+ fs/nfsd/state.h | 2 +-
+ 4 files changed, 44 insertions(+), 5 deletions(-)
+
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -64,6 +64,8 @@ struct nfsd_net {
+
+ struct lock_manager nfsd4_manager;
+ bool grace_ended;
++ bool grace_end_forced;
++ bool client_tracking_active;
+ time64_t boot_time;
+
+ struct dentry *nfsd_client_dir;
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -84,7 +84,7 @@ static u64 current_sessionid = 1;
+ /* forward declarations */
+ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
+ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
+-void nfsd4_end_grace(struct nfsd_net *nn);
++static void nfsd4_end_grace(struct nfsd_net *nn);
+ static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
+ static void nfsd4_file_hash_remove(struct nfs4_file *fi);
+
+@@ -5879,7 +5879,7 @@ nfsd4_renew(struct svc_rqst *rqstp, stru
+ return nfs_ok;
+ }
+
+-void
++static void
+ nfsd4_end_grace(struct nfsd_net *nn)
+ {
+ /* do nothing if grace period already ended */
+@@ -5912,6 +5912,33 @@ nfsd4_end_grace(struct nfsd_net *nn)
+ */
+ }
+
++/**
++ * nfsd4_force_end_grace - forcibly end the NFSv4 grace period
++ * @nn: network namespace for the server instance to be updated
++ *
++ * Forces bypass of normal grace period completion, then schedules
++ * the laundromat to end the grace period immediately. Does not wait
++ * for the grace period to fully terminate before returning.
++ *
++ * Return values:
++ * %true: Grace termination scheduled
++ * %false: No action was taken
++ */
++bool nfsd4_force_end_grace(struct nfsd_net *nn)
++{
++ if (!nn->client_tracking_ops)
++ return false;
++ spin_lock(&nn->client_lock);
++ if (nn->grace_ended || !nn->client_tracking_active) {
++ spin_unlock(&nn->client_lock);
++ return false;
++ }
++ WRITE_ONCE(nn->grace_end_forced, true);
++ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
++ spin_unlock(&nn->client_lock);
++ return true;
++}
++
+ /*
+ * If we've waited a lease period but there are still clients trying to
+ * reclaim, wait a little longer to give them a chance to finish.
+@@ -5921,6 +5948,8 @@ static bool clients_still_reclaiming(str
+ time64_t double_grace_period_end = nn->boot_time +
+ 2 * nn->nfsd4_lease;
+
++ if (READ_ONCE(nn->grace_end_forced))
++ return false;
+ if (nn->track_reclaim_completes &&
+ atomic_read(&nn->nr_reclaim_complete) ==
+ nn->reclaim_str_hashtbl_size)
+@@ -8141,6 +8170,8 @@ static int nfs4_state_create_net(struct
+ nn->unconf_name_tree = RB_ROOT;
+ nn->boot_time = ktime_get_real_seconds();
+ nn->grace_ended = false;
++ nn->grace_end_forced = false;
++ nn->client_tracking_active = false;
+ nn->nfsd4_manager.block_opens = true;
+ INIT_LIST_HEAD(&nn->nfsd4_manager.list);
+ INIT_LIST_HEAD(&nn->client_lru);
+@@ -8217,6 +8248,10 @@ nfs4_state_start_net(struct net *net)
+ return ret;
+ locks_start_grace(net, &nn->nfsd4_manager);
+ nfsd4_client_tracking_init(net);
++ /* safe for laundromat to run now */
++ spin_lock(&nn->client_lock);
++ nn->client_tracking_active = true;
++ spin_unlock(&nn->client_lock);
+ if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
+ goto skip_grace;
+ printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
+@@ -8263,6 +8298,9 @@ nfs4_state_shutdown_net(struct net *net)
+
+ unregister_shrinker(&nn->nfsd_client_shrinker);
+ cancel_work_sync(&nn->nfsd_shrinker_work);
++ spin_lock(&nn->client_lock);
++ nn->client_tracking_active = false;
++ spin_unlock(&nn->client_lock);
+ cancel_delayed_work_sync(&nn->laundromat_work);
+ locks_end_grace(&nn->nfsd4_manager);
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1117,9 +1117,8 @@ static ssize_t write_v4_end_grace(struct
+ case 'Y':
+ case 'y':
+ case '1':
+- if (!nn->nfsd_serv)
++ if (!nfsd4_force_end_grace(nn))
+ return -EBUSY;
+- nfsd4_end_grace(nn);
+ break;
+ default:
+ return -EINVAL;
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -719,7 +719,7 @@ static inline void get_nfs4_file(struct
+ struct nfsd_file *find_any_file(struct nfs4_file *f);
+
+ /* grace period management */
+-void nfsd4_end_grace(struct nfsd_net *nn);
++bool nfsd4_force_end_grace(struct nfsd_net *nn);
+
+ /* nfs4recover operations */
+ extern int nfsd4_client_tracking_init(struct net *net);