git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
block: remove unused 'q' parameter in __blk_rq_map_sg()
author: Anuj Gupta <anuj20.g@samsung.com>
Thu, 13 Mar 2025 03:53:18 +0000 (09:23 +0530)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 13 Mar 2025 11:46:19 +0000 (05:46 -0600)
request_queue param is no longer used by blk_rq_map_sg and
__blk_rq_map_sg. Remove it.

Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250313035322.243239-1-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
18 files changed:
block/blk-merge.c
block/bsg-lib.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/sunvdc.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/memstick/core/ms_block.c
drivers/memstick/core/mspro_block.c
drivers/mmc/core/queue.c
drivers/mtd/ubi/block.c
drivers/nvme/host/apple.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h

index 15cd231d560cb49f63ac8698b8a7bad95a322f04..8bfe54f23e5e089a7393a2fd7366edef909b593c 100644 (file)
@@ -551,8 +551,8 @@ static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
  * Map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries.
  */
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-               struct scatterlist *sglist, struct scatterlist **last_sg)
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+                   struct scatterlist **last_sg)
 {
        struct req_iterator iter = {
                .bio    = rq->bio,
index 93523d8f81950960312e9a1a2698aa17682d7b71..9ceb5d0832f5896c706c69d119c14b8312e3f9b7 100644 (file)
@@ -219,7 +219,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
        if (!buf->sg_list)
                return -ENOMEM;
        sg_init_table(buf->sg_list, req->nr_phys_segments);
-       buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+       buf->sg_cnt = blk_rq_map_sg(req, buf->sg_list);
        buf->payload_len = blk_rq_bytes(req);
        return 0;
 }
index 95361099a2dcec1a314b4246ba1e18edffae2835..0d619df03fa933686081a2f04a554f1fb25931ce 100644 (file)
@@ -2056,7 +2056,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
        unsigned int nents;
 
        /* Map the scatter list for DMA access */
-       nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
+       nents = blk_rq_map_sg(rq, command->sg);
        nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
        prefetch(&port->flags);
index 82467ecde7ece4fb1eae6731c4a311320b2508df..15627417f12e089bd3d7c87b8a2a7714fd90f3f0 100644 (file)
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
         * See queue limits.
         */
        if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
-               sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+               sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
 
        if (sg_cnt == 0)
                sg_mark_end(&iu->sgt.sgl[0]);
index 282f81616a78e32ca9e288c12f1cb2b8c9a2d92e..2b33fb5b949b8f1c66aa399cfce4bd69363df114 100644 (file)
@@ -485,7 +485,7 @@ static int __send_request(struct request *req)
        }
 
        sg_init_table(sg, port->ring_cookies);
-       nsg = blk_rq_map_sg(req->q, req, sg);
+       nsg = blk_rq_map_sg(req, sg);
 
        len = 0;
        for (i = 0; i < nsg; i++)
index 6a61ec35f426031ee2b483ab837896067ef0911c..a3df4d49bd46dc77c77e511d0ce761e599757151 100644 (file)
@@ -226,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
        if (unlikely(err))
                return -ENOMEM;
 
-       return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+       return blk_rq_map_sg(req, vbr->sg_table.sgl);
 }
 
 static void virtblk_cleanup_cmd(struct request *req)
index edcd08a9dcef3ff7a6daecae48765fdb30022527..5babe575c28831745dd3d5feeca20f8dce4c270d 100644 (file)
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
        id = blkif_ring_get_request(rinfo, req, &final_ring_req);
        ring_req = &rinfo->shadow[id].req;
 
-       num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+       num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
        num_grant = 0;
        /* Calculate the number of grant used */
        for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
index 5b617c1f67890b80972e55e31bd26e4973737e55..f4398383ae0648f3f43fe02efb8701d2ba3ce998 100644 (file)
@@ -1904,7 +1904,7 @@ static void msb_io_work(struct work_struct *work)
 
                /* process the request */
                dbg_verbose("IO: processing new request");
-               blk_rq_map_sg(msb->queue, req, sg);
+               blk_rq_map_sg(req, sg);
 
                lba = blk_rq_pos(req);
 
index 634d343b6bdba21b2d3431f335e23a32025c867e..c9853d887d2822632b9d76da0cb101dfc3598a74 100644 (file)
@@ -627,9 +627,7 @@ static int mspro_block_issue_req(struct memstick_dev *card)
        while (true) {
                msb->current_page = 0;
                msb->current_seg = 0;
-               msb->seg_count = blk_rq_map_sg(msb->block_req->q,
-                                              msb->block_req,
-                                              msb->req_sg);
+               msb->seg_count = blk_rq_map_sg(msb->block_req, msb->req_sg);
 
                if (!msb->seg_count) {
                        unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
index ab662f502fe7f15ff34e143bfbe7a8f0296605af..3ba62f825b8457ed5c87875bdc2fd4144d594fb2 100644 (file)
@@ -523,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
        struct request *req = mmc_queue_req_to_req(mqrq);
 
-       return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+       return blk_rq_map_sg(req, mqrq->sg);
 }
index 2836905f0152a7ace36fd51abbc3055cbfcdf199..39cc0a6a4d3771741741ab7374f834b80d094c84 100644 (file)
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
         * and ubi_read_sg() will check that limit.
         */
        ubi_sgl_init(&pdu->usgl);
-       blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+       blk_rq_map_sg(req, pdu->usgl.sg);
 
        while (bytes_left) {
                /*
index 1de11b722f049abbc96a6bb62b072ac973b8c4aa..fe2f9b143c9f3438a74efce5d43fefb0af24fe97 100644 (file)
@@ -525,7 +525,7 @@ static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
        if (!iod->sg)
                return BLK_STS_RESOURCE;
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
-       iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+       iod->nents = blk_rq_map_sg(req, iod->sg);
        if (!iod->nents)
                goto out_free_sg;
 
index f4f1866fbd5b8b05730a785c7d256108c9344e62..7de29dae8e74f776c6a28ab4005ab7be5c2bf291 100644 (file)
@@ -2620,7 +2620,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        if (ret)
                return -ENOMEM;
 
-       op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+       op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl);
        WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, rq_dma_dir(rq));
index 9197a5b173fdff7f5abb3a6d4d6d555fc0658504..a65978b6cdd8a73d59271f21b43f693a3e246eec 100644 (file)
@@ -812,7 +812,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
        if (!iod->sgt.sgl)
                return BLK_STS_RESOURCE;
        sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
-       iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+       iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl);
        if (!iod->sgt.orig_nents)
                goto out_free_sg;
 
index 86a2891d9bcc7a990cd214a7fe93fa5c55b292c7..b5a0295b5bf457628812401e5c6d509f776f70d6 100644 (file)
@@ -1476,8 +1476,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
        if (ret)
                return -ENOMEM;
 
-       req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
-                                           req->data_sgl.sg_table.sgl);
+       req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl);
 
        *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
                               req->data_sgl.nents, rq_dma_dir(rq));
index a9d112d34d4f434ff0ba3310435581cb49285cbb..a5c41144667c681e55e09e3c92c523e2d98a6ad7 100644 (file)
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
 
                iod->req.sg = iod->sg_table.sgl;
-               iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+               iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl);
                iod->req.transfer_len = blk_rq_payload_bytes(req);
        }
 
index be0890e4e7062ebb387d3964b509bc4f3be5cb35..02576c98a8339c182b5ded5f028bc897bf8c59c3 100644 (file)
@@ -1149,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
-       count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+       count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
 
        if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
                unsigned int pad_len =
index 9ebb53f031cdb519c6b60cc2bc0c4b8b3f4d5ff4..d990244233558d500cb660b96c5992bc61debccf 100644 (file)
@@ -1155,14 +1155,13 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
        return max_t(unsigned short, rq->nr_phys_segments, 1);
 }
 
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-               struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-               struct scatterlist *sglist)
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+               struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
 {
        struct scatterlist *last_sg = NULL;
 
-       return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+       return __blk_rq_map_sg(rq, sglist, &last_sg);
 }
 void blk_dump_rq_flags(struct request *, char *);