block: always verify unfreeze lock on the owner task
author Ming Lei <ming.lei@redhat.com>
Thu, 31 Oct 2024 13:37:19 +0000 (21:37 +0800)
committer Jens Axboe <axboe@kernel.dk>
Thu, 7 Nov 2024 23:27:22 +0000 (16:27 -0700)
Commit f1be1788a32e ("block: model freeze & enter queue as lock for
supporting lockdep") tries to apply lockdep for verifying freeze &
unfreeze. However, the verification is only done for the outermost
freeze and unfreeze. This is not correct, because q->mq_freeze_depth
may still drop to zero in a task other than the freeze owner task.

Fix this issue by always verifying the last unfreeze lock in the owner
task's context, and by making sure that both the outermost freeze and
unfreeze are verified in the current task.
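
To make the ownership rule concrete, here is a minimal user-space
sketch of the scheme. This is not kernel code: "struct task", "struct
queue", the pthread mutex and every name below are illustrative
stand-ins for task_struct, request_queue and q->mq_freeze_lock. The
lockdep map is "acquired" only when the owner takes the first freeze,
and "released" only on the owner's last unfreeze, even if the global
freeze depth drops to zero in some other task:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };

struct queue {
	pthread_mutex_t lock;		/* stand-in for q->mq_freeze_lock */
	int freeze_depth;		/* like q->mq_freeze_depth */
	struct task *freeze_owner;	/* like q->mq_freeze_owner */
	int owner_depth;		/* like q->mq_freeze_owner_depth */
};

/* Return true when this freeze should take the lockdep "lock". */
static bool freeze_start(struct queue *q, struct task *owner)
{
	bool acquire = false;

	pthread_mutex_lock(&q->lock);
	if (owner) {
		if (!q->freeze_depth) {
			q->freeze_owner = owner;
			q->owner_depth = 1;
			acquire = true;
		} else if (owner == q->freeze_owner) {
			q->owner_depth++;
		}
	}
	q->freeze_depth++;
	pthread_mutex_unlock(&q->lock);
	return acquire;
}

/*
 * Return true when this unfreeze should release the lockdep "lock":
 * only in the owner task, on the owner's last unfreeze, no matter
 * where the global freeze_depth happens to reach zero.
 */
static bool unfreeze(struct queue *q, struct task *curr)
{
	bool release = false;

	pthread_mutex_lock(&q->lock);
	q->freeze_depth--;
	if (q->freeze_owner && q->freeze_owner == curr &&
	    --q->owner_depth == 0) {
		q->freeze_owner = NULL;
		release = true;
	}
	pthread_mutex_unlock(&q->lock);
	return release;
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task a = { "A" }, b = { "B" };

	printf("A freeze:   acquire=%d\n", freeze_start(&q, &a)); /* 1 */
	printf("B freeze:   acquire=%d\n", freeze_start(&q, &b)); /* 0 */
	/* The owner unfreezes while B still holds a freeze ... */
	printf("A unfreeze: release=%d\n", unfreeze(&q, &a));     /* 1 */
	/* ... so freeze_depth reaches zero here, in non-owner B. */
	printf("B unfreeze: release=%d\n", unfreeze(&q, &b));     /* 0 */
	return 0;
}

Before this patch, the release was tied to whichever unfreeze brought
the global depth to zero, so in the trace above it would have been
reported, wrongly, in task B's context.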

Fixes: f1be1788a32e ("block: model freeze & enter queue as lock for supporting lockdep")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241031133723.303835-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk.h
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 09d10bb95fda0526588c42b4421b54a5ceac1369..4f791a3114a12c4a965b754bb91209ff50c48db8 100644
@@ -287,7 +287,7 @@ bool blk_queue_start_drain(struct request_queue *q)
         * entering queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
-       bool freeze = __blk_freeze_queue_start(q);
+       bool freeze = __blk_freeze_queue_start(q, current);
        if (queue_is_mq(q))
                blk_mq_wake_waiters(q);
        /* Make blk_queue_enter() reexamine the DYING flag. */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5f4496220432ff27cace34ad0a05104d443d1c13..5e240a4b6be00ea67a803bdcecdf87d43bb5c7e7 100644
@@ -120,20 +120,66 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
        inflight[1] = mi.inflight[1];
 }
 
-bool __blk_freeze_queue_start(struct request_queue *q)
+#ifdef CONFIG_LOCKDEP
+static bool blk_freeze_set_owner(struct request_queue *q,
+                                struct task_struct *owner)
 {
-       int freeze;
+       if (!owner)
+               return false;
+
+       if (!q->mq_freeze_depth) {
+               q->mq_freeze_owner = owner;
+               q->mq_freeze_owner_depth = 1;
+               return true;
+       }
+
+       if (owner == q->mq_freeze_owner)
+               q->mq_freeze_owner_depth += 1;
+       return false;
+}
+
+/* verify the last unfreeze in owner context */
+static bool blk_unfreeze_check_owner(struct request_queue *q)
+{
+       if (!q->mq_freeze_owner)
+               return false;
+       if (q->mq_freeze_owner != current)
+               return false;
+       if (--q->mq_freeze_owner_depth == 0) {
+               q->mq_freeze_owner = NULL;
+               return true;
+       }
+       return false;
+}
+
+#else
+
+static bool blk_freeze_set_owner(struct request_queue *q,
+                                struct task_struct *owner)
+{
+       return false;
+}
+
+static bool blk_unfreeze_check_owner(struct request_queue *q)
+{
+       return false;
+}
+#endif
+
+bool __blk_freeze_queue_start(struct request_queue *q,
+                             struct task_struct *owner)
+{
+       bool freeze;
 
        mutex_lock(&q->mq_freeze_lock);
+       freeze = blk_freeze_set_owner(q, owner);
        if (++q->mq_freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                mutex_unlock(&q->mq_freeze_lock);
                if (queue_is_mq(q))
                        blk_mq_run_hw_queues(q, false);
-               freeze = true;
        } else {
                mutex_unlock(&q->mq_freeze_lock);
-               freeze = false;
        }
 
        return freeze;
@@ -141,7 +187,7 @@ bool __blk_freeze_queue_start(struct request_queue *q)
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
-       if (__blk_freeze_queue_start(q))
+       if (__blk_freeze_queue_start(q, current))
                blk_freeze_acquire_lock(q, false, false);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -170,7 +216,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
-       int unfreeze = false;
+       bool unfreeze;
 
        mutex_lock(&q->mq_freeze_lock);
        if (force_atomic)
@@ -180,8 +226,8 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
        if (!q->mq_freeze_depth) {
                percpu_ref_resurrect(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
-               unfreeze = true;
        }
+       unfreeze = blk_unfreeze_check_owner(q);
        mutex_unlock(&q->mq_freeze_lock);
 
        return unfreeze;
@@ -203,7 +249,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_freeze_queue_start_non_owner(struct request_queue *q)
 {
-       __blk_freeze_queue_start(q);
+       __blk_freeze_queue_start(q, NULL);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);
 
diff --git a/block/blk.h b/block/blk.h
index ac48b79cbf800e7a7d1c4ee4191d8f8e130bf780..57fc035620d60033e0f1fcfbc2fe2ff9dbb9e52b 100644
@@ -37,7 +37,8 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 bool blk_queue_start_drain(struct request_queue *q);
-bool __blk_freeze_queue_start(struct request_queue *q);
+bool __blk_freeze_queue_start(struct request_queue *q,
+                             struct task_struct *owner);
 int __bio_queue_enter(struct request_queue *q, struct bio *bio);
 void submit_bio_noacct_nocheck(struct bio *bio);
 void bio_await_chain(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 93551772c1d61605da5fda79aaa72830e5fb5b2b..1b51a7c92e9beb70d1cb082008e1e6a53fa149f2 100644
@@ -575,6 +575,10 @@ struct request_queue {
        struct throtl_data *td;
 #endif
        struct rcu_head         rcu_head;
+#ifdef CONFIG_LOCKDEP
+       struct task_struct      *mq_freeze_owner;
+       int                     mq_freeze_owner_depth;
+#endif
        wait_queue_head_t       mq_freeze_wq;
        /*
         * Protect concurrent access to q_usage_counter by