block: introduce blk_queue_rot()
Author:     Damien Le Moal <dlemoal@kernel.org>
AuthorDate: Thu, 29 Jan 2026 07:27:15 +0000 (16:27 +0900)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 29 Jan 2026 20:15:50 +0000 (13:15 -0700)
To check if a request queue is for a rotational device, a double
negation is needed with the pattern "!blk_queue_nonrot(q)". Simplify
this by introducing the helper blk_queue_rot(), which tests if a
request queue's limits have the BLK_FEAT_ROTATIONAL feature set.
All call sites of blk_queue_nonrot() are converted to use
blk_queue_rot(), and the blk_queue_nonrot() definition is removed.

No functional changes.
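
For illustration, the conversion follows this pattern (blk_queue_rot()
is defined exactly as in the include/linux/blkdev.h hunk below; the
call site is a hypothetical sketch, not taken from the patch):

	#define blk_queue_rot(q)	((q)->limits.features & BLK_FEAT_ROTATIONAL)

	/* Before: double negation to test for a rotational device */
	if (!blk_queue_nonrot(q))
		/* ... rotational-device path ... */;

	/* After: direct test with the new helper */
	if (blk_queue_rot(q))
		/* ... rotational-device path ... */;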

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Nitesh Shetty <nj.shetty@samsung.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bfq-iosched.c
block/blk-iocost.c
block/blk-iolatency.c
block/blk-wbt.c
include/linux/blkdev.h

index 6e54b1d3d8bc2ad502bc732e9ef216ef49e18f27..3ebdec40e7589efe7697eb6447b7f92a86d3fdcf 100644 (file)
@@ -231,7 +231,7 @@ static struct kmem_cache *bfq_pool;
 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
        (get_sdist(last_pos, rq) >                      \
         BFQQ_SEEK_THR &&                               \
-        (!blk_queue_nonrot(bfqd->queue) ||             \
+        (blk_queue_rot(bfqd->queue) ||                 \
          blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
 #define BFQQ_CLOSE_THR         (sector_t)(8 * 1024)
 #define BFQQ_SEEKY(bfqq)       (hweight32(bfqq->seek_history) > 19)
@@ -4165,7 +4165,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
        /* don't use too short time intervals */
        if (delta_usecs < 1000) {
-               if (blk_queue_nonrot(bfqd->queue))
+               if (!blk_queue_rot(bfqd->queue))
                         /*
                          * give same worst-case guarantees as idling
                          * for seeky
@@ -4487,7 +4487,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
                                             struct bfq_queue *bfqq)
 {
        bool rot_without_queueing =
-               !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
+               blk_queue_rot(bfqd->queue) && !bfqd->hw_tag,
                bfqq_sequential_and_IO_bound,
                idling_boosts_thr;
 
@@ -4521,7 +4521,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
         * flash-based device.
         */
        idling_boosts_thr = rot_without_queueing ||
-               ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
+               ((blk_queue_rot(bfqd->queue) || !bfqd->hw_tag) &&
                 bfqq_sequential_and_IO_bound);
 
        /*
@@ -4722,7 +4722,7 @@ bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
                         * there is only one in-flight large request
                         * at a time.
                         */
-                       if (blk_queue_nonrot(bfqd->queue) &&
+                       if (!blk_queue_rot(bfqd->queue) &&
                            blk_rq_sectors(bfqq->next_rq) >=
                            BFQQ_SECT_THR_NONROT &&
                            bfqd->tot_rq_in_driver >= 1)
@@ -6340,7 +6340,7 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
        bfqd->hw_tag_samples = 0;
 
        bfqd->nonrot_with_queueing =
-               blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
+               !blk_queue_rot(bfqd->queue) && bfqd->hw_tag;
 }
 
 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -7293,7 +7293,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
        INIT_HLIST_HEAD(&bfqd->burst_list);
 
        bfqd->hw_tag = -1;
-       bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
+       bfqd->nonrot_with_queueing = !blk_queue_rot(bfqd->queue);
 
        bfqd->bfq_max_budget = bfq_default_max_budget;
 
@@ -7328,9 +7328,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
         * Begin by assuming, optimistically, that the device peak
         * rate is equal to 2/3 of the highest reference rate.
         */
-       bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
-               ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
-       bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+       bfqd->rate_dur_prod = ref_rate[!blk_queue_rot(bfqd->queue)] *
+               ref_wr_duration[!blk_queue_rot(bfqd->queue)];
+       bfqd->peak_rate = ref_rate[!blk_queue_rot(bfqd->queue)] * 2 / 3;
 
        /* see comments on the definition of next field inside bfq_data */
        bfqd->actuator_load_threshold = 4;
index a0416927d33dcaa31062b246e596af7746cb74c4..ef543d163d469c80ee710da9f108fdba4a251a13 100644 (file)
@@ -812,7 +812,7 @@ static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
        u64 now_ns;
 
        /* rotational? */
-       if (!blk_queue_nonrot(disk->queue))
+       if (blk_queue_rot(disk->queue))
                return AUTOP_HDD;
 
        /* handle SATA SSDs w/ broken NCQ */
index 45bd18f68541b732c06e7cd3e814f2e72b6d1034..f7434278cd29ca7b88d9e061a50e0fcc3d298c49 100644 (file)
@@ -988,10 +988,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
        u64 now = blk_time_get_ns();
        int cpu;
 
-       if (blk_queue_nonrot(blkg->q))
-               iolat->ssd = true;
-       else
-               iolat->ssd = false;
+       iolat->ssd = !blk_queue_rot(blkg->q);
 
        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
index 0974875f77bda299f4f38418bb311b0e3716aa03..8e025834f2fbbc7f0be1318f2d82617f5aaaa383 100644 (file)
@@ -747,10 +747,9 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
-       if (blk_queue_nonrot(q))
-               return 2000000ULL;
-       else
+       if (blk_queue_rot(q))
                return 75000000ULL;
+       return 2000000ULL;
 }
 
 static int wbt_data_dir(const struct request *rq)
index 4536211ff33c17dc37894c2cd34742bcb75e0aea..1e5b5547929f03aeaf177f79cffdd1f14cd8fe24 100644 (file)
@@ -680,7 +680,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q)    (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q)       ((q)->limits.features & BLK_FEAT_ROTATIONAL)
 #define blk_queue_io_stat(q)   ((q)->limits.features & BLK_FEAT_IO_STAT)
 #define blk_queue_passthrough_stat(q)  \
        ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1463,7 +1463,7 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
 
 static inline bool bdev_nonrot(struct block_device *bdev)
 {
-       return blk_queue_nonrot(bdev_get_queue(bdev));
+       return !blk_queue_rot(bdev_get_queue(bdev));
 }
 
 static inline bool bdev_synchronous(struct block_device *bdev)