block: pass io_comp_batch to rq_end_io_fn callback
author Ming Lei <ming.lei@redhat.com>
Fri, 16 Jan 2026 07:46:37 +0000 (15:46 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 20 Jan 2026 17:12:54 +0000 (10:12 -0700)
Add a third parameter 'const struct io_comp_batch *' to the rq_end_io_fn
callback signature. This allows end_io handlers to access the completion
batch context when requests are completed via blk_mq_end_request_batch().

The io_comp_batch is passed through from blk_mq_end_request_batch(), while
NULL is passed from __blk_mq_end_request() and blk_mq_put_rq_ref(), which
have no batch context.
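
For reference, the batch originates on the poll/completion side roughly as
follows. This is a hypothetical sketch, not code from this patch;
example_drain_completions() and the error/callback arguments given to
blk_mq_add_to_batch() are illustrative only:

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* hypothetical poll-side helper, for illustration only */
	static void example_drain_completions(struct request *done_rq)
	{
		DEFINE_IO_COMP_BATCH(iob);

		/*
		 * Try to batch the completed request; fall back to an
		 * individual completion if it cannot be batched.
		 */
		if (!blk_mq_add_to_batch(done_rq, &iob, 0,
					 blk_mq_end_request_batch))
			blk_mq_complete_request(done_rq);

		/* each batched rq->end_io() now receives &iob */
		blk_mq_end_request_batch(&iob);
	}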

This infrastructure change enables drivers to detect whether they're
being called from a batched completion path (like iopoll) and access
additional context stored in the io_comp_batch.
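
For illustration, an end_io handler can now branch on whether it was invoked
from a batched completion. A minimal hypothetical sketch (example_end_io is
not part of this patch):

	static enum rq_end_io_ret example_end_io(struct request *rq,
						 blk_status_t error,
						 const struct io_comp_batch *iob)
	{
		if (iob) {
			/* batched path: blk_mq_end_request_batch() */
		} else {
			/*
			 * single completion: __blk_mq_end_request() or
			 * blk_mq_put_rq_ref() passed NULL
			 */
		}

		/* RQ_END_IO_NONE: handler retains ownership of rq */
		return RQ_END_IO_NONE;
	}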

Update all rq_end_io_fn implementations:
- block/blk-mq.c: blk_end_sync_rq
- block/blk-flush.c: flush_end_io, mq_flush_data_end_io
- drivers/nvme/host/ioctl.c: nvme_uring_cmd_end_io
- drivers/nvme/host/core.c: nvme_keep_alive_end_io
- drivers/nvme/host/pci.c: abort_endio, nvme_del_queue_end, nvme_del_cq_end
- drivers/nvme/target/passthru.c: nvmet_passthru_req_done
- drivers/scsi/scsi_error.c: eh_lock_door_done
- drivers/scsi/sg.c: sg_rq_end_io
- drivers/scsi/st.c: st_scsi_execute_end
- drivers/target/target_core_pscsi.c: pscsi_req_done
- drivers/md/dm-rq.c: end_clone_request

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
12 files changed:
block/blk-flush.c
block/blk-mq.c
drivers/md/dm-rq.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/nvme/target/passthru.c
drivers/scsi/scsi_error.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/target/target_core_pscsi.c
include/linux/blk-mq.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 43d6152897a420561e436f8d0dfaa0e4cbde3611..403a46c8641172d0fed25d122d4c9e558fb0ff29 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -199,7 +199,8 @@ static void blk_flush_complete_seq(struct request *rq,
 }
 
 static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
-                                      blk_status_t error)
+                                      blk_status_t error,
+                                      const struct io_comp_batch *iob)
 {
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
@@ -335,7 +336,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 }
 
 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
-                                              blk_status_t error)
+                                              blk_status_t error,
+                                              const struct io_comp_batch *iob)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a29d8ac9d3e357aa6756b85298d93f173b22daf1..cf1daedbb39fddee6d638ae9ce622aeb3b730f24 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1156,7 +1156,7 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
-               if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+               if (rq->end_io(rq, error, NULL) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else {
                blk_mq_free_request(rq);
@@ -1211,7 +1211,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
                 * If end_io handler returns NONE, then it still has
                 * ownership of the request.
                 */
-               if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
+               if (rq->end_io && rq->end_io(rq, 0, iob) == RQ_END_IO_NONE)
                        continue;
 
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
@@ -1458,7 +1458,8 @@ struct blk_rq_wait {
        blk_status_t ret;
 };
 
-static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret,
+                                         const struct io_comp_batch *iob)
 {
        struct blk_rq_wait *wait = rq->end_io_data;
 
@@ -1688,7 +1689,7 @@ static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expi
 void blk_mq_put_rq_ref(struct request *rq)
 {
        if (is_flush_rq(rq)) {
-               if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+               if (rq->end_io(rq, 0, NULL) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else if (req_ref_put_and_test(rq)) {
                __blk_mq_free_request(rq);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a6ca92049c10e37d659af17b54b9b5c1095a97b1..e9a7563b4b2fecbc27c885c0fd84d5bfba7cd1cd 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -295,7 +295,8 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 }
 
 static enum rq_end_io_ret end_clone_request(struct request *clone,
-                                           blk_status_t error)
+                                           blk_status_t error,
+                                           const struct io_comp_batch *iob)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7bf228df6001f1f4d0b3c570de285a5eb17bb08e..19b67cf5d550685aabb4f7699b678f4c2051b513 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1333,7 +1333,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 }
 
 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
-                                                blk_status_t status)
+                                                blk_status_t status,
+                                                const struct io_comp_batch *iob)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
        unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a9c097dacad6f7507e2ad849781ea9c17148aeca..e45ac0ca174e05c6155f10b54fde407f16b6ce2a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -410,7 +410,8 @@ static void nvme_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 }
 
 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
-                                               blk_status_t err)
+                                               blk_status_t err,
+                                               const struct io_comp_batch *iob)
 {
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 065555576d2f9bf882868f4d45ee1111a7d134c4..d87c56c62861ed21f8e794f57167e96ac98e5dc5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1615,7 +1615,8 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error,
+                                     const struct io_comp_batch *iob)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
@@ -2858,7 +2859,8 @@ out_unlock:
 }
 
 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
-                                            blk_status_t error)
+                                            blk_status_t error,
+                                            const struct io_comp_batch *iob)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -2868,14 +2870,15 @@ static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
 }
 
 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
-                                         blk_status_t error)
+                                         blk_status_t error,
+                                         const struct io_comp_batch *iob)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
        if (error)
                set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
 
-       return nvme_del_queue_end(req, error);
+       return nvme_del_queue_end(req, error, iob);
 }
 
 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 96648ec2fadb55aca6760530849f202e035ba39a..0823c87637d3740b2ba1126a1920c2514acd207f 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -247,7 +247,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 }
 
 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
-                                                 blk_status_t blk_status)
+                                                 blk_status_t blk_status,
+                                                 const struct io_comp_batch *iob)
 {
        struct nvmet_req *req = rq->end_io_data;
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f869108fd9693cfdcfb503a0504eec73c86733f4..1e93390c5a8272c616674d34f35a986e0afeef80 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2085,7 +2085,8 @@ maybe_retry:
 }
 
 static enum rq_end_io_ret eh_lock_door_done(struct request *req,
-                                           blk_status_t status)
+                                           blk_status_t status,
+                                           const struct io_comp_batch *iob)
 {
        blk_mq_free_request(req);
        return RQ_END_IO_NONE;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 57fba34832ad118f73c28b47a0f85a191ec08017..1a521f9d821a0aeef5d240be1395c214e230d41e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status,
+                                      const struct io_comp_batch *iob);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1309,7 +1310,8 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * level when a command is completed (or has failed).
  */
 static enum rq_end_io_ret
-sg_rq_end_io(struct request *rq, blk_status_t status)
+sg_rq_end_io(struct request *rq, blk_status_t status,
+            const struct io_comp_batch *iob)
 {
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct sg_request *srp = rq->end_io_data;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 168f25e4aaa387202100ed353246b61e8aa5cf4d..8aeaa3b68c25ed522792da49636579680fba7d2a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -525,7 +525,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
 }
 
 static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
-                                             blk_status_t status)
+                                             blk_status_t status,
+                                             const struct io_comp_batch *iob)
 {
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
        struct st_request *SRpnt = req->end_io_data;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index db4e09042469c5dcf1c53525f158ba6e1b8e3cf5..823b2665f95b056eb428d0687319f62f2c04fe6f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -39,7 +39,8 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t,
+                                        const struct io_comp_batch *);
 
 /*     pscsi_attach_hba():
  *
@@ -1001,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 }
 
 static enum rq_end_io_ret pscsi_req_done(struct request *req,
-                                        blk_status_t status)
+                                        blk_status_t status,
+                                        const struct io_comp_batch *iob)
 {
        struct se_cmd *cmd = req->end_io_data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cae9e857aea4285ae6f6a01dedd460a691d1e081..18a2388ba581d060f514d3b130ad01e6b5d2ea20 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,6 +13,7 @@
 
 struct blk_mq_tags;
 struct blk_flush_queue;
+struct io_comp_batch;
 
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_DEFAULT_RQ      128
@@ -22,7 +23,8 @@ enum rq_end_io_ret {
        RQ_END_IO_FREE,
 };
 
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+                                         const struct io_comp_batch *);
 
 /*
  * request flags */