git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
block: Introduce blk_rq_is_seq_zoned_write()
author: Bart Van Assche <bvanassche@acm.org>
Wed, 17 May 2023 17:42:23 +0000 (10:42 -0700)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 19 May 2023 01:47:49 +0000 (19:47 -0600)
Introduce the function blk_rq_is_seq_zoned_write(). This function will
be used in later patches to preserve the order of zoned writes that
require write serialization.

This patch includes an optimization: instead of using
rq->q->disk->part0->bd_queue to check whether or not the queue is
associated with a zoned block device, use rq->q->disk->queue.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230517174230.897144-6-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-zoned.c
include/linux/blk-mq.h

index 835d9e937d4d5e2a895f95aae566575f10e58070..096b6b47561f83a734aeaf08aa6ee8740e43f27c 100644 (file)
@@ -60,10 +60,7 @@ bool blk_req_needs_zone_write_lock(struct request *rq)
        if (!rq->q->disk->seq_zones_wlock)
                return false;
 
-       if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
-               return blk_rq_zone_is_seq(rq);
-
-       return false;
+       return blk_rq_is_seq_zoned_write(rq);
 }
 EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
 
index e4a211957db6610f669334917c71dd57aba37a40..49d14b1acfa5df996cc7c9caa87cf291a228f094 100644 (file)
@@ -1164,6 +1164,18 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
        return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
 }
 
+/**
+ * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
+ * @rq: Request to examine.
+ *
+ * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
+ */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+       return op_needs_zoned_write_locking(req_op(rq)) &&
+               blk_rq_zone_is_seq(rq);
+}
+
 bool blk_req_needs_zone_write_lock(struct request *rq);
 bool blk_req_zone_write_trylock(struct request *rq);
 void __blk_req_zone_write_lock(struct request *rq);
@@ -1194,6 +1206,11 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
        return !blk_req_zone_is_write_locked(rq);
 }
 #else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+       return false;
+}
+
 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
 {
        return false;