git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
block, nvme: remove unused dma_iova_state function parameter
Author: Nitesh Shetty <nj.shetty@samsung.com>
Mon, 12 Jan 2026 14:38:08 +0000 (20:08 +0530)
Committer: Jens Axboe <axboe@kernel.dk>
Tue, 13 Jan 2026 14:23:39 +0000 (07:23 -0700)
DMA IOVA state is not used inside blk_rq_dma_map_iter_next, get
rid of the argument.

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-dma.c
drivers/nvme/host/pci.c
include/linux/blk-mq-dma.h

index fb018fffffdcc5f411f1f287d718f0b114d179a7..4afeda45df15ba8b4191f6fb14081ca9aa3fb925 100644 (file)
@@ -238,7 +238,6 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
  * blk_rq_dma_map_iter_next - map the next DMA segment for a request
  * @req:       request to map
  * @dma_dev:   device to map to
- * @state:     DMA IOVA state
  * @iter:      block layer DMA iterator
  *
  * Iterate to the next mapping after a previous call to
@@ -253,7 +252,7 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
  * returned in @iter.status.
  */
 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
-               struct dma_iova_state *state, struct blk_dma_iter *iter)
+               struct blk_dma_iter *iter)
 {
        struct phys_vec vec;
 
index 0e4caeab739c7489292987e8023b86c5c153dff9..9fc4a60280a072c42b61485e9ec29ce1a4a4be46 100644 (file)
@@ -823,7 +823,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 
        if (iter->len)
                return true;
-       if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
+       if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
                return false;
        if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
                iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
@@ -1010,8 +1010,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
                }
                nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
                iod->total_len += iter->len;
-       } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state,
-                               iter));
+       } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
 
        nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
        if (unlikely(iter->status))
index cb88fc791fbd1f5f1d472d2b9f4fba2b3df0c330..214c181ff2c9c5ce6d1b5e5e19213d560f405149 100644 (file)
@@ -28,7 +28,7 @@ struct blk_dma_iter {
 bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
                struct dma_iova_state *state, struct blk_dma_iter *iter);
 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
-               struct dma_iova_state *state, struct blk_dma_iter *iter);
+               struct blk_dma_iter *iter);
 
 /**
  * blk_rq_dma_map_coalesce - were all segments coalesced?