git.ipfire.org Git - thirdparty/linux.git/commitdiff
block: move bio queue-transition flag fixups into blk_steal_bios()
Author: Chaitanya Kulkarni <kch@nvidia.com>
Thu, 26 Feb 2026 03:12:42 +0000 (19:12 -0800)
Committer: Jens Axboe <axboe@kernel.dk>
Tue, 10 Mar 2026 13:11:09 +0000 (07:11 -0600)
blk_steal_bios() transfers bios from a request to a bio_list when the
request is requeued to a different queue. The NVMe multipath failover
path (nvme_failover_req) currently open-codes clearing of REQ_POLLED,
bi_cookie, and REQ_NOWAIT on each bio before calling blk_steal_bios().

Move these fixups into blk_steal_bios() itself so that any caller
automatically gets correct flag state when bios cross queue boundaries.
Simplify nvme_failover_req() accordingly.

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://patch.msgid.link/20260226031243.87200-2-kch@nvidia.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
drivers/nvme/host/multipath.c

index a047faf3b0ec983a1bf80fa50dad1fa5e8ee6e30..4aebc6b479ef7855b6c1061e40e17fe9bfa1e252 100644 (file)
@@ -3424,6 +3424,23 @@ EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
  */
 void blk_steal_bios(struct bio_list *list, struct request *rq)
 {
+       struct bio *bio;
+
+       for (bio = rq->bio; bio; bio = bio->bi_next) {
+               if (bio->bi_opf & REQ_POLLED) {
+                       bio->bi_opf &= ~REQ_POLLED;
+                       bio->bi_cookie = BLK_QC_T_NONE;
+               }
+               /*
+                * The alternate request queue that we may end up submitting
+                * the bio to may be frozen temporarily, in this case REQ_NOWAIT
+                * will fail the I/O immediately with EAGAIN to the issuer.
+                * We are not in the issuer context which cannot block. Clear
+                * the flag to avoid spurious EAGAIN I/O failures.
+                */
+               bio->bi_opf &= ~REQ_NOWAIT;
+       }
+
        if (rq->bio) {
                if (list->tail)
                        list->tail->bi_next = rq->bio;
index fc6800a9f7f9460d1eb4e3bd15f3700330f4be48..ba00f0b72b858cbc2760db45c4996d967564bb15 100644 (file)
@@ -154,21 +154,8 @@ void nvme_failover_req(struct request *req)
        }
 
        spin_lock_irqsave(&ns->head->requeue_lock, flags);
-       for (bio = req->bio; bio; bio = bio->bi_next) {
+       for (bio = req->bio; bio; bio = bio->bi_next)
                bio_set_dev(bio, ns->head->disk->part0);
-               if (bio->bi_opf & REQ_POLLED) {
-                       bio->bi_opf &= ~REQ_POLLED;
-                       bio->bi_cookie = BLK_QC_T_NONE;
-               }
-               /*
-                * The alternate request queue that we may end up submitting
-                * the bio to may be frozen temporarily, in this case REQ_NOWAIT
-                * will fail the I/O immediately with EAGAIN to the issuer.
-                * We are not in the issuer context which cannot block. Clear
-                * the flag to avoid spurious EAGAIN I/O failures.
-                */
-               bio->bi_opf &= ~REQ_NOWAIT;
-       }
        blk_steal_bios(&ns->head->requeue_list, req);
        spin_unlock_irqrestore(&ns->head->requeue_lock, flags);