blk-rq-qos: Remove unlikely() hints from QoS checks
author     Breno Leitao <leitao@debian.org>
           Tue, 6 Jan 2026 14:26:57 +0000 (06:26 -0800)
committer  Jens Axboe <axboe@kernel.dk>
           Wed, 7 Jan 2026 02:08:23 +0000 (19:08 -0700)
The unlikely() annotations on the QUEUE_FLAG_QOS_ENABLED checks are
counterproductive. Writeback throttling (WBT) is commonly enabled by
default, since CONFIG_BLK_WBT_MQ defaults to 'y', so the flag is set on
most request queues and the supposedly unlikely branch is in fact the
common case.
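
For reference, unlikely() is only a compiler hint: it expands to
__builtin_expect(), which biases code layout and static prediction toward
the "condition false" side. The snippet below is a minimal, self-contained
user-space illustration (not kernel code) of the non-instrumented
definitions in include/linux/compiler.h and of what happens when the hint
is wrong:

  #include <stdio.h>

  /* Non-instrumented hints, as in include/linux/compiler.h: purely a
   * compiler-side layout/static-prediction bias via __builtin_expect().
   */
  #define likely(x)       __builtin_expect(!!(x), 1)
  #define unlikely(x)     __builtin_expect(!!(x), 0)

  static int qos_enabled = 1;     /* stand-in for QUEUE_FLAG_QOS_ENABLED */

  int main(void)
  {
          /* With WBT on by default, this "unlikely" branch is taken almost
           * every time, so the compiler has laid the hot QoS path out as
           * if it were cold.
           */
          if (unlikely(qos_enabled))
                  puts("QoS path taken despite the unlikely() hint");
          return 0;
  }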

Branch profiling on Meta servers, which have WBT enabled, confirms 100%
misprediction rates on these checks.
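
Such per-site numbers can come from the kernel's annotated-branch profiler
(CONFIG_PROFILE_ANNOTATED_BRANCHES), which counts correct vs. incorrect
outcomes for every likely()/unlikely() call site; the commit does not say
exactly which tool was used. The sketch below is a simplified, hypothetical
user-space stand-in for that counting idea, not the kernel implementation:

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical per-call-site counters, mimicking only the counting idea
   * behind annotated-branch profiling (the real kernel code differs).
   */
  struct branch_stat {
          unsigned long correct;          /* hint matched the outcome */
          unsigned long incorrect;        /* hint contradicted the outcome */
  };

  #define unlikely_profiled(stat, cond) ({                \
          bool __c = (cond);                              \
          if (__c)                                        \
                  (stat)->incorrect++;                    \
          else                                            \
                  (stat)->correct++;                      \
          __builtin_expect(__c, 0);                       \
  })

  int main(void)
  {
          struct branch_stat qos_check = { 0, 0 };
          bool qos_enabled = true;        /* WBT on => flag set on the queue */

          for (int i = 0; i < 1000; i++)
                  if (unlikely_profiled(&qos_check, qos_enabled))
                          ;               /* "unlikely" path taken every time */

          /* Prints correct=0 incorrect=1000: a 100% mispredicted hint. */
          printf("correct=%lu incorrect=%lu\n",
                 qos_check.correct, qos_check.incorrect);
          return 0;
  }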

Remove the unlikely() annotations to let the CPU's branch predictor
learn the actual behavior, potentially improving I/O path performance.

Signed-off-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index b538f2c0febc2bddf4092b722d18048b3b26ccf2..a747a504fe42948733bbd958ebeb8577fe30f7db 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos)
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                __rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos && !blk_rq_is_passthrough(rq))
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+           q->rq_qos && !blk_rq_is_passthrough(rq))
                __rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos)
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                __rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos)
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                __rq_qos_requeue(q->rq_qos, rq);
 }
 
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos) {
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
                bio_set_flag(bio, BIO_QOS_THROTTLED);
                __rq_qos_throttle(q->rq_qos, bio);
        }
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
                                struct bio *bio)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos)
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                __rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
                                struct bio *bio)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos) {
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
                bio_set_flag(bio, BIO_QOS_MERGED);
                __rq_qos_merge(q->rq_qos, rq, bio);
        }
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-                       q->rq_qos)
+       if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                __rq_qos_queue_depth_changed(q->rq_qos);
 }