ublk: factor out ublk_start_io() helper
author    Caleb Sander Mateos <csander@purestorage.com>
          Wed, 30 Apr 2025 22:52:31 +0000 (16:52 -0600)
committer Jens Axboe <axboe@kernel.dk>
          Fri, 2 May 2025 15:22:30 +0000 (09:22 -0600)
In preparation for calling it from outside ublk_dispatch_req(), factor
out the code responsible for setting up an incoming ublk I/O request.
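
For illustration (an editor's sketch, not part of the patch): the new
helper returns true once the request is mapped and its reference count
initialized, and false when nothing could be mapped, in which case it
has already requeued the request. A future caller outside
ublk_dispatch_req() would presumably follow the same pattern the
dispatch path uses below; the surrounding context here is hypothetical:

	/* hypothetical caller; only ublk_start_io() is added by this commit */
	if (!ublk_start_io(ubq, req, io))
		return;	/* nothing mapped; request already requeued for retry */

	/* I/O is mapped and ref-initialized; hand it to the ublk server */
	ublk_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);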

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250430225234.2676781-7-csander@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index dcde38b39a821ed4877b81e5caf93f8aa1111e55..b4c64779c4fd4f591087f4588bff1efa2acd0ae7 100644
@@ -1152,13 +1152,41 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
                blk_mq_end_request(rq, BLK_STS_IOERR);
 }
 
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+                         struct ublk_io *io)
+{
+       unsigned mapped_bytes = ublk_map_io(ubq, req, io);
+
+       /* partially mapped, update io descriptor */
+       if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+               /*
+                * Nothing mapped, retry until we succeed.
+                *
+                * We may never succeed in mapping any bytes here because
+                * of OOM. TODO: reserve one buffer with single page pinned
+                * for providing forward progress guarantee.
+                */
+               if (unlikely(!mapped_bytes)) {
+                       blk_mq_requeue_request(req, false);
+                       blk_mq_delay_kick_requeue_list(req->q,
+                                       UBLK_REQUEUE_DELAY_MS);
+                       return false;
+               }
+
+               ublk_get_iod(ubq, req->tag)->nr_sectors =
+                       mapped_bytes >> 9;
+       }
+
+       ublk_init_req_ref(ubq, req);
+       return true;
+}
+
 static void ublk_dispatch_req(struct ublk_queue *ubq,
                              struct request *req,
                              unsigned int issue_flags)
 {
        int tag = req->tag;
        struct ublk_io *io = &ubq->ios[tag];
-       unsigned int mapped_bytes;
 
        pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
                        __func__, ubq->q_id, req->tag, io->flags,
@@ -1205,29 +1233,9 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
                                ublk_get_iod(ubq, req->tag)->addr);
        }
 
-       mapped_bytes = ublk_map_io(ubq, req, io);
-
-       /* partially mapped, update io descriptor */
-       if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
-               /*
-                * Nothing mapped, retry until we succeed.
-                *
-                * We may never succeed in mapping any bytes here because
-                * of OOM. TODO: reserve one buffer with single page pinned
-                * for providing forward progress guarantee.
-                */
-               if (unlikely(!mapped_bytes)) {
-                       blk_mq_requeue_request(req, false);
-                       blk_mq_delay_kick_requeue_list(req->q,
-                                       UBLK_REQUEUE_DELAY_MS);
-                       return;
-               }
-
-               ublk_get_iod(ubq, req->tag)->nr_sectors =
-                       mapped_bytes >> 9;
-       }
+       if (!ublk_start_io(ubq, req, io))
+               return;
 
-       ublk_init_req_ref(ubq, req);
        ublk_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }