--- /dev/null
+From 9c39b7a905d84b7da5f59d80f2e455853fea7217 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Thu, 22 Jun 2023 16:42:49 +0800
+Subject: block: make sure local irq is disabled when calling __blkcg_rstat_flush
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 9c39b7a905d84b7da5f59d80f2e455853fea7217 upstream.
+
+When __blkcg_rstat_flush() is called from cgroup_rstat_flush*() code
+path, interrupt is always disabled.
+
+When we start to flush the blkcg per-cpu stats list in __blkg_release()
+to avoid leaking blkcg_gq's reference, as done in commit 20cb1c2fb756
+("blk-cgroup: Flush stats before releasing blkcg_gq"), local irq
+isn't disabled yet, so a lockdep warning may be triggered because
+the dependent cgroup locks may be acquired from an irq (soft irq) handler.
+
+Fix the issue by disabling local irq always.
+
+Fixes: 20cb1c2fb756 ("blk-cgroup: Flush stats before releasing blkcg_gq")
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Closes: https://lore.kernel.org/linux-block/pz2wzwnmn5tk3pwpskmjhli6g3qly7eoknilb26of376c7kwxy@qydzpvt6zpis/T/#u
+Cc: stable@vger.kernel.org
+Cc: Jay Shin <jaeshin@redhat.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Waiman Long <longman@redhat.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Waiman Long <longman@redhat.com>
+Link: https://lore.kernel.org/r/20230622084249.1208005-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-cgroup.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -886,6 +886,7 @@ static void __blkcg_rstat_flush(struct b
+ struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
+ struct llist_node *lnode;
+ struct blkg_iostat_set *bisc, *next_bisc;
++ unsigned long flags;
+
+ rcu_read_lock();
+
+@@ -899,7 +900,7 @@ static void __blkcg_rstat_flush(struct b
+ * When flushing from cgroup, cgroup_rstat_lock is always held, so
+ * this lock won't cause contention most of time.
+ */
+- raw_spin_lock(&blkg_stat_lock);
++ raw_spin_lock_irqsave(&blkg_stat_lock, flags);
+
+ /*
+ * Iterate only the iostat_cpu's queued in the lockless list.
+@@ -925,7 +926,7 @@ static void __blkcg_rstat_flush(struct b
+ blkcg_iostat_update(parent, &blkg->iostat.cur,
+ &blkg->iostat.last);
+ }
+- raw_spin_unlock(&blkg_stat_lock);
++ raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
+ out:
+ rcu_read_unlock();
+ }