}
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
- blk_status_t error)
+ blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
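
Taken together, the hunks in this series thread a const struct io_comp_batch *
through every request end_io handler in the tree. The shape of every change is
fixed by the updated callback type (shown in full in the blk-mq.h hunk at the
end):

	typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
						  const struct io_comp_batch *);

Completion paths that run under a batch forward their io_comp_batch; paths
that complete a single request pass NULL.
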
}
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
- blk_status_t error)
+ blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if (rq->end_io) {
rq_qos_done(rq->q, rq);
- if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+ if (rq->end_io(rq, error, NULL) == RQ_END_IO_FREE)
blk_mq_free_request(rq);
} else {
blk_mq_free_request(rq);
* If end_io handler returns NONE, then it still has
* ownership of the request.
*/
- if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
+ if (rq->end_io && rq->end_io(rq, 0, iob) == RQ_END_IO_NONE)
continue;
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
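
The two call-site hunks above establish the convention: the batched
completion loop forwards its own io_comp_batch, while singleton completion
paths pass NULL. A minimal sketch of that convention from the caller's side
(the helper name is hypothetical):

static inline void my_complete_one(struct request *rq, blk_status_t error,
				   const struct io_comp_batch *iob)
{
	/* Honor the handler's ownership decision, as the hunks above do. */
	if (rq->end_io && rq->end_io(rq, error, iob) == RQ_END_IO_FREE)
		blk_mq_free_request(rq);
}
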
blk_status_t ret;
};
-static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret,
+ const struct io_comp_batch *iob)
{
struct blk_rq_wait *wait = rq->end_io_data;
void blk_mq_put_rq_ref(struct request *rq)
{
if (is_flush_rq(rq)) {
- if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+ if (rq->end_io(rq, 0, NULL) == RQ_END_IO_FREE)
blk_mq_free_request(rq);
} else if (req_ref_put_and_test(rq)) {
__blk_mq_free_request(rq);
}
static enum rq_end_io_ret end_clone_request(struct request *clone,
- blk_status_t error)
+ blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct dm_rq_target_io *tio = clone->end_io_data;
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
- blk_status_t status)
+ blk_status_t status,
+ const struct io_comp_batch *iob)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
- blk_status_t err)
+ blk_status_t err,
+ const struct io_comp_batch *iob)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
}
static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
- blk_status_t error)
+ blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = req->end_io_data;
}
static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
- blk_status_t error)
+ blk_status_t error,
+ const struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- return nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error, iob);
}
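
nvme_del_cq_end() above also shows the chaining rule: a handler that
delegates to another end_io handler passes the batch pointer through
unchanged. A hypothetical sketch of the same shape, assuming an inner
handler my_inner_end_io() exists:

static enum rq_end_io_ret my_outer_end_io(struct request *req,
					  blk_status_t error,
					  const struct io_comp_batch *iob)
{
	/* Wrapper-specific bookkeeping, then delegate with iob intact. */
	if (error)
		pr_warn("cmd failed: %d\n", blk_status_to_errno(error));
	return my_inner_end_io(req, error, iob);
}
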
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
}
static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
- blk_status_t blk_status)
+ blk_status_t blk_status,
+ const struct io_comp_batch *iob)
{
struct nvmet_req *req = rq->end_io_data;
}
static enum rq_end_io_ret eh_lock_door_done(struct request *req,
- blk_status_t status)
+ blk_status_t status,
+ const struct io_comp_batch *iob)
{
blk_mq_free_request(req);
return RQ_END_IO_NONE;
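
eh_lock_door_done() frees the request itself and therefore returns
RQ_END_IO_NONE, keeping the block layer's hands off it. A hypothetical
variant taking the other branch of the contract would return RQ_END_IO_FREE
and leave the blk_mq_free_request() call to the completion path (as the
callers earlier in the series do), so it must not free the request itself:

static enum rq_end_io_ret my_lock_door_done(struct request *req,
					    blk_status_t status,
					    const struct io_comp_batch *iob)
{
	/* No private cleanup needed; let the caller free the request. */
	return RQ_END_IO_FREE;
}
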
} Sg_device;
/* tasklet or soft irq callback */
-static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status,
+ const struct io_comp_batch *iob);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
* level when a command is completed (or has failed).
*/
static enum rq_end_io_ret
-sg_rq_end_io(struct request *rq, blk_status_t status)
+sg_rq_end_io(struct request *rq, blk_status_t status,
+ const struct io_comp_batch *iob)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
struct sg_request *srp = rq->end_io_data;
}
static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
- blk_status_t status)
+ blk_status_t status,
+ const struct io_comp_batch *iob)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
}
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t,
+ const struct io_comp_batch *);
/* pscsi_attach_hba():
*
}
static enum rq_end_io_ret pscsi_req_done(struct request *req,
- blk_status_t status)
+ blk_status_t status,
+ const struct io_comp_batch *iob)
{
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct blk_mq_tags;
struct blk_flush_queue;
+struct io_comp_batch;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
RQ_END_IO_FREE,
};
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+ const struct io_comp_batch *);
/*
* request flags */
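
For completeness, a minimal handler conforming to the extended typedef,
modeled on blk_end_sync_rq() above; struct my_rq_wait is a hypothetical
stand-in for struct blk_rq_wait, and the batch argument is simply accepted
and ignored by handlers that do not care about batched completion:

#include <linux/blk-mq.h>
#include <linux/completion.h>

struct my_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static enum rq_end_io_ret my_end_sync_rq(struct request *rq,
					 blk_status_t error,
					 const struct io_comp_batch *iob)
{
	struct my_rq_wait *wait = rq->end_io_data;

	wait->ret = error;
	complete(&wait->done);

	/* The waiter owns the request and frees it after being woken. */
	return RQ_END_IO_NONE;
}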