ublk: remove io_cmds list in ublk_queue
author Uday Shankar <ushankar@purestorage.com>
Tue, 18 Mar 2025 18:14:17 +0000 (12:14 -0600)
committer Jens Axboe <axboe@kernel.dk>
Wed, 19 Mar 2025 12:32:06 +0000 (06:32 -0600)
The current I/O dispatch mechanism - queueing I/O by adding it to the
io_cmds list (and poking task_work as needed), then dispatching it in
ublk server task context by reversing io_cmds and completing the
io_uring command associated with each one - was introduced by commit
7d4a93176e014 ("ublk_drv: don't forward io commands in reserve order")
to ensure that the ublk server received I/O in the same order that the
block layer submitted it to ublk_drv. This mechanism was only needed for
the "raw" task_work submission mechanism, since the io_uring task work
wrapper maintains FIFO ordering (using quite a similar mechanism in
fact). The "raw" task_work submission mechanism is no longer supported
in ublk_drv as of commit 29dc5d06613f2 ("ublk: kill queuing request by
task_work_add"), so the explicit llist/reversal is no longer needed - it
just duplicates logic already present in the underlying io_uring APIs.
Remove it.
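
For illustration only (not part of the commit): a minimal userspace C sketch of
the ordering trick the removed code relied on. Pushing onto a singly linked
list prepends, so draining it directly would hand requests to the ublk server
in LIFO order; reversing the detached list first, as the llist_reverse_order()
call did, restores the FIFO order in which the block layer queued them. The
node struct and the push()/reverse() helpers below are stand-ins, not kernel
APIs.

	#include <stdio.h>
	#include <stddef.h>

	struct node {
		int tag;		/* stands in for a request tag */
		struct node *next;
	};

	/* prepend, analogous to llist_add(): newest entry ends up first */
	static void push(struct node **head, struct node *n)
	{
		n->next = *head;
		*head = n;
	}

	/* in-place reversal, analogous to llist_reverse_order() */
	static struct node *reverse(struct node *head)
	{
		struct node *out = NULL;

		while (head) {
			struct node *next = head->next;

			head->next = out;
			out = head;
			head = next;
		}
		return out;
	}

	int main(void)
	{
		struct node nodes[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
		struct node *head = NULL;

		/* "submit" tags 0, 1, 2 in order; the list now holds 2, 1, 0 */
		for (int i = 0; i < 3; i++)
			push(&head, &nodes[i]);

		/* reverse before dispatch so tags come out 0, 1, 2 again */
		for (struct node *n = reverse(head); n; n = n->next)
			printf("dispatch tag %d\n", n->tag);

		return 0;
	}

Since io_uring_cmd_complete_in_task() already delivers task work in FIFO
order, this per-queue list and reversal became redundant, which is why the
patch below can drop it.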

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250318-ublk_io_cmds-v1-1-c1bb74798fef@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index 22d52d3fabf0f83c9cde8738316f9a75f129521b..b63113a63128f45515e126603e42966f8bf1f624 100644
@@ -74,8 +74,6 @@
         UBLK_PARAM_TYPE_DMA_ALIGN)
 
 struct ublk_rq_data {
-       struct llist_node node;
-
        struct kref ref;
 };
 
@@ -142,8 +140,6 @@ struct ublk_queue {
        struct task_struct      *ubq_daemon;
        char *io_cmd_buf;
 
-       struct llist_head       io_cmds;
-
        unsigned long io_addr;  /* mapped vm address */
        unsigned int max_io_sz;
        bool force_abort;
@@ -1108,7 +1104,7 @@ static void ublk_complete_rq(struct kref *ref)
 }
 
 /*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
  * exiting. So lock is unnecessary.
  *
@@ -1154,11 +1150,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
                blk_mq_end_request(rq, BLK_STS_IOERR);
 }
 
-static inline void __ublk_rq_task_work(struct request *req,
-                                      unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+                                unsigned int issue_flags)
 {
-       struct ublk_queue *ubq = req->mq_hctx->driver_data;
-       int tag = req->tag;
+       struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+       struct ublk_queue *ubq = pdu->ubq;
+       int tag = pdu->tag;
+       struct request *req = blk_mq_tag_to_rq(
+               ubq->dev->tag_set.tags[ubq->q_id], tag);
        struct ublk_io *io = &ubq->ios[tag];
        unsigned int mapped_bytes;
 
@@ -1233,34 +1232,11 @@ static inline void __ublk_rq_task_work(struct request *req,
        ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }
 
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
-                                       unsigned issue_flags)
-{
-       struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-       struct ublk_rq_data *data, *tmp;
-
-       io_cmds = llist_reverse_order(io_cmds);
-       llist_for_each_entry_safe(data, tmp, io_cmds, node)
-               __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
-{
-       struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-       struct ublk_queue *ubq = pdu->ubq;
-
-       ublk_forward_io_cmds(ubq, issue_flags);
-}
-
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
-       struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+       struct ublk_io *io = &ubq->ios[rq->tag];
 
-       if (llist_add(&data->node, &ubq->io_cmds)) {
-               struct ublk_io *io = &ubq->ios[rq->tag];
-
-               io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
-       }
+       io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
 }
 
 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1453,7 +1429,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
                        struct request *rq;
 
                        /*
-                        * Either we fail the request or ublk_rq_task_work_fn
+                        * Either we fail the request or ublk_rq_task_work_cb
                         * will do it
                         */
                        rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);