--- /dev/null
+From 6a78699838a0ddeed3620ddf50c1521f1fe1e811 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Thu, 31 Oct 2024 21:37:19 +0800
+Subject: block: always verify unfreeze lock on the owner task
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 6a78699838a0ddeed3620ddf50c1521f1fe1e811 upstream.
+
+commit f1be1788a32e ("block: model freeze & enter queue as lock for
+supporting lockdep") tries to apply lockdep for verifying freeze &
+unfreeze. However, the verification is only done for the outermost freeze
+and unfreeze. This is not correct, because q->mq_freeze_depth may still
+drop to zero on another task instead of the freeze owner task.
+
+Fix this issue by always verifying the last unfreeze lock in the owner
+task context, and by making sure both the outermost freeze & unfreeze are
+verified in the current task.
+
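+As a concrete illustration (a hypothetical interleaving, not taken from
+an in-tree caller), consider a freeze owner racing with a non-owner
+context:
+
+	/* task A (owner) */
+	blk_freeze_queue_start(q);		/* mq_freeze_depth 0 -> 1, lockdep acquired */
+
+	/* task B (non-owner) */
+	blk_freeze_queue_start_non_owner(q);	/* mq_freeze_depth 1 -> 2 */
+
+	/* task A (owner) */
+	blk_mq_unfreeze_queue(q);		/* mq_freeze_depth 2 -> 1 */
+
+	/* task B (non-owner) */
+	blk_mq_unfreeze_queue_non_owner(q);	/* mq_freeze_depth 1 -> 0 */
+
+Before this patch, only the final unfreeze (depth dropping to zero, here
+on task B) counted as the lockdep release, so task A's acquire was never
+paired with a release. Tracking mq_freeze_owner_depth lets the release
+fire when the owner's own depth drops to zero, on the owner task.
+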
+Fixes: f1be1788a32e ("block: model freeze & enter queue as lock for supporting lockdep")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241031133723.303835-4-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-core.c       |    2 +-
+ block/blk-mq.c         |   62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
+ block/blk.h            |    3 ++-
+ include/linux/blkdev.h |    4 ++++
+ 4 files changed, 61 insertions(+), 10 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -287,7 +287,7 @@ bool blk_queue_start_drain(struct reques
+ 	 * entering queue, so we call blk_freeze_queue_start() to
+ 	 * prevent I/O from crossing blk_queue_enter().
+ 	 */
+-	bool freeze = __blk_freeze_queue_start(q);
++	bool freeze = __blk_freeze_queue_start(q, current);
+ 	if (queue_is_mq(q))
+ 		blk_mq_wake_waiters(q);
+ 	/* Make blk_queue_enter() reexamine the DYING flag. */
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -120,20 +120,66 @@ void blk_mq_in_flight_rw(struct request_
+ 	inflight[1] = mi.inflight[1];
+ }
+
+-bool __blk_freeze_queue_start(struct request_queue *q)
++#ifdef CONFIG_LOCKDEP
++static bool blk_freeze_set_owner(struct request_queue *q,
++				 struct task_struct *owner)
+ {
+-	int freeze;
++	if (!owner)
++		return false;
++
++	if (!q->mq_freeze_depth) {
++		q->mq_freeze_owner = owner;
++		q->mq_freeze_owner_depth = 1;
++		return true;
++	}
++
++	if (owner == q->mq_freeze_owner)
++		q->mq_freeze_owner_depth += 1;
++	return false;
++}
++
++/* verify the last unfreeze in owner context */
++static bool blk_unfreeze_check_owner(struct request_queue *q)
++{
++	if (!q->mq_freeze_owner)
++		return false;
++	if (q->mq_freeze_owner != current)
++		return false;
++	if (--q->mq_freeze_owner_depth == 0) {
++		q->mq_freeze_owner = NULL;
++		return true;
++	}
++	return false;
++}
++
++#else
++
++static bool blk_freeze_set_owner(struct request_queue *q,
++				 struct task_struct *owner)
++{
++	return false;
++}
++
++static bool blk_unfreeze_check_owner(struct request_queue *q)
++{
++	return false;
++}
++#endif
++
++bool __blk_freeze_queue_start(struct request_queue *q,
++			      struct task_struct *owner)
++{
++	bool freeze;
+
+ 	mutex_lock(&q->mq_freeze_lock);
++	freeze = blk_freeze_set_owner(q, owner);
+ 	if (++q->mq_freeze_depth == 1) {
+ 		percpu_ref_kill(&q->q_usage_counter);
+ 		mutex_unlock(&q->mq_freeze_lock);
+ 		if (queue_is_mq(q))
+ 			blk_mq_run_hw_queues(q, false);
+-		freeze = true;
+ 	} else {
+ 		mutex_unlock(&q->mq_freeze_lock);
+-		freeze = false;
+ 	}
+
+ 	return freeze;
+@@ -141,7 +187,7 @@ bool __blk_freeze_queue_start(struct req
+
+ void blk_freeze_queue_start(struct request_queue *q)
+ {
+-	if (__blk_freeze_queue_start(q))
++	if (__blk_freeze_queue_start(q, current))
+ 		blk_freeze_acquire_lock(q, false, false);
+ }
+ EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
+@@ -190,7 +236,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
+
+ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
+ {
+-	int unfreeze = false;
++	bool unfreeze;
+
+ 	mutex_lock(&q->mq_freeze_lock);
+ 	if (force_atomic)
+@@ -200,8 +246,8 @@ bool __blk_mq_unfreeze_queue(struct requ
+ 	if (!q->mq_freeze_depth) {
+ 		percpu_ref_resurrect(&q->q_usage_counter);
+ 		wake_up_all(&q->mq_freeze_wq);
+-		unfreeze = true;
+ 	}
++	unfreeze = blk_unfreeze_check_owner(q);
+ 	mutex_unlock(&q->mq_freeze_lock);
+
+ 	return unfreeze;
+@@ -223,7 +269,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue)
+  */
+ void blk_freeze_queue_start_non_owner(struct request_queue *q)
+ {
+-	__blk_freeze_queue_start(q);
++	__blk_freeze_queue_start(q, NULL);
+ }
+ EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);
+
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -38,7 +38,8 @@ void blk_free_flush_queue(struct blk_flu
+ void blk_freeze_queue(struct request_queue *q);
+ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+ bool blk_queue_start_drain(struct request_queue *q);
+-bool __blk_freeze_queue_start(struct request_queue *q);
++bool __blk_freeze_queue_start(struct request_queue *q,
++			      struct task_struct *owner);
+ int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+ void submit_bio_noacct_nocheck(struct bio *bio);
+ void bio_await_chain(struct bio *bio);
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -572,6 +572,10 @@ struct request_queue {
+ 	struct throtl_data *td;
+ #endif
+ 	struct rcu_head		rcu_head;
++#ifdef CONFIG_LOCKDEP
++	struct task_struct	*mq_freeze_owner;
++	int			mq_freeze_owner_depth;
++#endif
+ 	wait_queue_head_t	mq_freeze_wq;
+ 	/*
+ 	 * Protect concurrent access to q_usage_counter by
--- /dev/null
+From 357e1b7f730bd85a383e7afa75a3caba329c5707 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Thu, 31 Oct 2024 21:37:20 +0800
+Subject: block: don't verify IO lock for freeze/unfreeze in elevator_init_mq()
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 357e1b7f730bd85a383e7afa75a3caba329c5707 upstream.
+
+elevator_init_mq() is only called at the entry of add_disk_fwnode(), when
+disk IO isn't allowed yet.
+
+So don't verify the io lock (q->io_lockdep_map) for freeze & unfreeze in
+elevator_init_mq().
+
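+For reference, passing true as the disk_dead argument is what skips the
+io lockdep map. A simplified sketch of the helper introduced by
+f1be1788a32e (see block/blk.h for the real definition):
+
+	static inline void blk_freeze_acquire_lock(struct request_queue *q,
+			bool disk_dead, bool queue_dying)
+	{
+		if (!disk_dead)
+			rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
+		if (!queue_dying)
+			rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
+	}
+
+So only q->q_lockdep_map is verified here; q->io_lockdep_map is left
+alone because no disk IO can be in flight before the disk is added.
+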
+Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reported-by: Lai Yi <yi1.lai@linux.intel.com>
+Fixes: f1be1788a32e ("block: model freeze & enter queue as lock for supporting lockdep")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20241031133723.303835-5-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/elevator.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -598,13 +598,19 @@ void elevator_init_mq(struct request_que
+ 	 * drain any dispatch activities originated from passthrough
+ 	 * requests, then no need to quiesce queue which may add long boot
+ 	 * latency, especially when lots of disks are involved.
++	 *
++	 * Disk isn't added yet, so verify the queue lock only manually.
+ 	 */
+-	blk_mq_freeze_queue(q);
++	blk_freeze_queue_start_non_owner(q);
++	blk_freeze_acquire_lock(q, true, false);
++	blk_mq_freeze_queue_wait(q);
++
+ 	blk_mq_cancel_work_sync(q);
+
+ 	err = blk_mq_init_sched(q, e);
+
+-	blk_mq_unfreeze_queue(q);
++	blk_unfreeze_release_lock(q, true, false);
++	blk_mq_unfreeze_queue_non_owner(q);
+
+ 	if (err) {
+ 		pr_warn("\"%s\" elevator initialization failed, "