git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ublk: fix canceling flag handling in batch I/O recovery
author: Ming Lei <ming.lei@redhat.com>
Fri, 23 Jan 2026 09:15:43 +0000 (17:15 +0800)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 23 Jan 2026 12:11:03 +0000 (05:11 -0700)
Two issues with ubq->canceling flag handling:

1) In ublk_queue_reset_io_flags(), ubq->canceling is set outside
   cancel_lock, violating the locking requirement. Move it inside
   the spinlock-protected section.

2) In ublk_batch_unprep_io(), when rolling back after a batch prep
   failure, if the queue became ready during prep (which cleared
   canceling), the flag is not restored when the queue becomes
   not-ready again. This allows new requests to be queued to
   uninitialized IO slots.

Fix by restoring ubq->canceling = true under cancel_lock when the
queue transitions from ready to not-ready during rollback.

Reported-by: Jens Axboe <axboe@kernel.dk>
Fixes: 3f3850785594 ("ublk: fix batch I/O recovery -ENODEV error")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index 31279a8238b886ecd2ddb0904d55629ddb594d90..31fda782c47c3db4e9c41b321c1c51d30ed8dc9d 100644 (file)
@@ -2806,9 +2806,9 @@ static void ublk_queue_reset_io_flags(struct ublk_queue *ubq)
        spin_lock(&ubq->cancel_lock);
        for (j = 0; j < ubq->q_depth; j++)
                ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+       ubq->canceling = false;
        spin_unlock(&ubq->cancel_lock);
        ubq->fail_io = false;
-       ubq->canceling = false;
 }
 
 /* device can only be started after all IOs are ready */
@@ -3435,10 +3435,15 @@ static int ublk_batch_unprep_io(struct ublk_queue *ubq,
 
        /*
         * If queue was ready before this decrement, it won't be anymore,
-        * so we need to decrement the queue ready count too.
+        * so we need to decrement the queue ready count and restore the
+        * canceling flag to prevent new requests from being queued.
         */
-       if (ublk_queue_ready(ubq))
+       if (ublk_queue_ready(ubq)) {
                data->ub->nr_queue_ready--;
+               spin_lock(&ubq->cancel_lock);
+               ubq->canceling = true;
+               spin_unlock(&ubq->cancel_lock);
+       }
        ubq->nr_io_ready--;
 
        ublk_io_lock(io);