]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
block: change blk_mq_add_to_batch() third argument type to bool
authorShin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Tue, 11 Mar 2025 10:43:59 +0000 (19:43 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 22 Mar 2025 19:54:27 +0000 (12:54 -0700)
[ Upstream commit 9bce6b5f8987678b9c6c1fe433af6b5fe41feadc ]

Commit 1f47ed294a2b ("block: cleanup and fix batch completion adding
conditions") modified the evaluation criteria for the third argument,
'ioerror', in the blk_mq_add_to_batch() function. Initially, the
function had checked if 'ioerror' equals zero. Following the commit, it
started checking for negative error values, with the presumption that
such values, for instance -EIO, would be passed in.

However, blk_mq_add_to_batch() callers do not pass negative error
values. Instead, they pass status codes defined in various ways:

- NVMe PCI and Apple drivers pass NVMe status code
- virtio_blk driver passes the virtblk request header status byte
- null_blk driver passes blk_status_t

These codes are either zero or positive, therefore the revised check
fails to function as intended. Specifically, with the NVMe PCI driver,
this modification led to the failure of the blktests test case nvme/039.
In this test scenario, errors are artificially injected to the NVMe
driver, resulting in positive NVMe status codes passed to
blk_mq_add_to_batch(), which unexpectedly processes the failed I/O in a
batch. Hence the failure.

To correct the ioerror check within blk_mq_add_to_batch(), make all
callers uniformly pass the argument as a boolean. Modify the callers to
check their specific status codes and pass the boolean value 'is_error'.
Also describe the arguments of blk_mq_add_to_batch as kerneldoc.

Fixes: 1f47ed294a2b ("block: cleanup and fix batch completion adding conditions")
Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Link: https://lore.kernel.org/r/20250311104359.1767728-3-shinichiro.kawasaki@wdc.com
[axboe: fold in documentation update]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/block/null_blk/main.c
drivers/block/virtio_blk.c
drivers/nvme/host/apple.c
drivers/nvme/host/pci.c
include/linux/blk-mq.h

index 2f0431e42c494d98f01d6c1741a1e865f33426b8..c479348ce8ff6939fc675d7e882a39d0c3a54546 100644 (file)
@@ -1541,8 +1541,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
                cmd = blk_mq_rq_to_pdu(req);
                cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
                                                blk_rq_sectors(req));
-               if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
-                                       blk_mq_end_request_batch))
+               if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+                                        blk_mq_end_request_batch))
                        blk_mq_end_request(req, cmd->error);
                nr++;
        }
index 0e50b65e1dbf5a2883cdddc5b8411550388eb99a..44a6937a4b65ccbde6dce10129128d07797313d7 100644 (file)
@@ -1210,11 +1210,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 
        while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
                struct request *req = blk_mq_rq_from_pdu(vbr);
+               u8 status = virtblk_vbr_status(vbr);
 
                found++;
                if (!blk_mq_complete_request_remote(req) &&
-                   !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
-                                               virtblk_complete_batch))
+                   !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+                                        virtblk_complete_batch))
                        virtblk_request_done(req);
        }
 
index df054cd38c3e36771f6faa61fc936cd0396d3e2b..e79a0adf13950b0e9259008d0ef009dab3f4f8f4 100644 (file)
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
        }
 
        if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-           !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+           !blk_mq_add_to_batch(req, iob,
+                                nvme_req(req)->status != NVME_SC_SUCCESS,
                                 apple_nvme_complete_batch))
                apple_nvme_complete_rq(req);
 }
index 1fbb5f7a9f239cc5fb162eb1f3b19c1513d66e7b..1d3205f08af8470421a8e288d3f1cb8c12d04e83 100644 (file)
@@ -1131,8 +1131,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 
        trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
        if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-           !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
-                                       nvme_pci_complete_batch))
+           !blk_mq_add_to_batch(req, iob,
+                                nvme_req(req)->status != NVME_SC_SUCCESS,
+                                nvme_pci_complete_batch))
                nvme_pci_complete_rq(req);
 }
 
index a53cbe256910434e8a2fd32739037ec3ca4f33f6..7b5e5388c3801aa7425faddcf8f20e8ffd81ef34 100644 (file)
@@ -871,12 +871,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
        return rq->rq_flags & RQF_RESV;
 }
 
-/*
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to batch
+ * @iob: The batch to add the request
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
  * Batched completions only work when there is no I/O error and no special
  * ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
  */
 static inline bool blk_mq_add_to_batch(struct request *req,
-                                      struct io_comp_batch *iob, int ioerror,
+                                      struct io_comp_batch *iob, bool is_error,
                                       void (*complete)(struct io_comp_batch *))
 {
        /*
@@ -884,7 +892,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
         * 1) No batch container
         * 2) Has scheduler data attached
         * 3) Not a passthrough request and end_io set
-        * 4) Not a passthrough request and an ioerror
+        * 4) Not a passthrough request and failed with an error
         */
        if (!iob)
                return false;
@@ -893,7 +901,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
        if (!blk_rq_is_passthrough(req)) {
                if (req->end_io)
                        return false;
-               if (ioerror < 0)
+               if (is_error)
                        return false;
        }