git.ipfire.org Git - thirdparty/linux.git/commitdiff
block: remove unused parameter
author: Guixin Liu <kanie@linux.alibaba.com>
Wed, 12 Mar 2025 08:47:22 +0000 (16:47 +0800)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 12 Mar 2025 14:25:28 +0000 (08:25 -0600)
blk_mq_map_queue()'s request_queue parameter is no longer used, so
remove it; do the same for blk_get_flush_queue().

Signed-off-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://lore.kernel.org/r/20250312084722.129680-1-kanie@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq-sched.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
block/kyber-iosched.c

index a72e2a83d075f9e4104010196e0f6f5c826aa699..43d6152897a420561e436f8d0dfaa0e4cbde3611 100644 (file)
@@ -95,9 +95,9 @@ static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, blk_opf_t flags);
 
 static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
+blk_get_flush_queue(struct blk_mq_ctx *ctx)
 {
-       return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
+       return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
 }
 
 static unsigned int blk_flush_cur_seq(struct request *rq)
@@ -205,7 +205,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+       struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);
 
        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -341,7 +341,7 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
+       struct blk_flush_queue *fq = blk_get_flush_queue(ctx);
 
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
@@ -382,7 +382,7 @@ static void blk_rq_init_flush(struct request *rq)
 bool blk_insert_flush(struct request *rq)
 {
        struct request_queue *q = rq->q;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+       struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
        bool supports_fua = q->limits.features & BLK_FEAT_FUA;
        unsigned int policy = 0;
 
index 7442ca27c2bf1a964845546d24bd8980ea6080d6..109611445d40f893e6102bdd6527eb99cfd9b772 100644 (file)
@@ -349,7 +349,7 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
        }
 
        ctx = blk_mq_get_ctx(q);
-       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+       hctx = blk_mq_map_queue(bio->bi_opf, ctx);
        type = hctx->type;
        if (list_empty_careful(&ctx->rq_lists[type]))
                goto out_put;
index b9f417d980b46d54b74dec8adcb5b04e6a78635c..d880c50629d612e0f7a95400adbf2dc733fa613d 100644 (file)
@@ -190,8 +190,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                sbitmap_finish_wait(bt, ws, &wait);
 
                data->ctx = blk_mq_get_ctx(data->q);
-               data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
-                                               data->ctx);
+               data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
index f1030d589a1bbe917aa58c1dbc3a96c631b8ac96..ae8494d888979f458a6b714126106be2c21d9b73 100644 (file)
@@ -508,7 +508,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 
 retry:
        data->ctx = blk_mq_get_ctx(q);
-       data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+       data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
 
        if (q->elevator) {
                /*
index 44979e92b79f934919c7931604e4d758ff0bcc72..3011a78cf16ab575ed7781a0f05bbb956ad4f204 100644 (file)
@@ -100,12 +100,10 @@ static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
 
 /*
  * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
  * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
  * @ctx: software queue cpu ctx
  */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-                                                    blk_opf_t opf,
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
 {
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
index dc31f2dfa414f68f3475489d5d33ad07730393b5..0f0f8452609a1139f8cfd2c080fca254edc6acd6 100644 (file)
@@ -568,7 +568,7 @@ static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);