git.ipfire.org Git - thirdparty/linux.git/commitdiff
nvme-pci: migrate to dma_map_phys instead of map_page
author: Leon Romanovsky <leonro@nvidia.com>
Fri, 14 Nov 2025 09:07:03 +0000 (11:07 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 14 Nov 2025 12:09:56 +0000 (05:09 -0700)
After introduction of dma_map_phys(), there is no need to convert
from physical address to struct page in order to map page. So let's
use it directly.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-dma.c
drivers/nvme/host/pci.c

index b4f45647296153fe9f682ce81a3640c5a068d4c6..cebfead826eea188ebd2558892927ff81b824322 100644 (file)
@@ -92,8 +92,8 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
                struct blk_dma_iter *iter, struct phys_vec *vec)
 {
-       iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
-                       offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+       iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
+                       rq_dma_dir(req), 0);
        if (dma_mapping_error(dma_dev, iter->addr)) {
                iter->status = BLK_STS_RESOURCE;
                return false;
index 3c1727df1e36f5020f001a3025bde6cca64b0ce8..d0dd836ccdb96e17673d65de128d083279eacc6c 100644 (file)
@@ -698,20 +698,20 @@ static void nvme_free_descriptors(struct request *req)
        }
 }
 
-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
        unsigned int i;
 
        for (i = 0; i < iod->nr_dma_vecs; i++)
-               dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
-                               iod->dma_vecs[i].len, rq_dma_dir(req));
+               dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+                              iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
        mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
 }
 
 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
-               struct nvme_sgl_desc *sg_list)
+               struct nvme_sgl_desc *sg_list, unsigned int attrs)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
        enum dma_data_direction dir = rq_dma_dir(req);
@@ -720,13 +720,14 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
        unsigned int i;
 
        if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
-               dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+               dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+                              attrs);
                return;
        }
 
        for (i = 0; i < len / sizeof(*sg_list); i++)
-               dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
-                       le32_to_cpu(sg_list[i].length), dir);
+               dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+                       le32_to_cpu(sg_list[i].length), dir, attrs);
 }
 
 static void nvme_unmap_metadata(struct request *req)
@@ -747,10 +748,10 @@ static void nvme_unmap_metadata(struct request *req)
        if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
                                        iod->meta_total_len)) {
                if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
-                       nvme_free_sgls(req, sge, &sge[1]);
+                       nvme_free_sgls(req, sge, &sge[1], 0);
                else
-                       dma_unmap_page(dma_dev, iod->meta_dma,
-                                      iod->meta_total_len, dir);
+                       dma_unmap_phys(dma_dev, iod->meta_dma,
+                                      iod->meta_total_len, dir, 0);
        }
 
        if (iod->meta_descriptor)
@@ -775,9 +776,9 @@ static void nvme_unmap_data(struct request *req)
        if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
                if (nvme_pci_cmd_use_sgl(&iod->cmd))
                        nvme_free_sgls(req, iod->descriptors[0],
-                                      &iod->cmd.common.dptr.sgl);
+                                      &iod->cmd.common.dptr.sgl, 0);
                else
-                       nvme_free_prps(req);
+                       nvme_free_prps(req, 0);
        }
 
        if (iod->nr_descriptors)