nvme-pci: handle changing device dma map requirements
author     Keith Busch <kbusch@kernel.org>    Wed, 4 Feb 2026 14:29:11 +0000 (06:29 -0800)
committer  Keith Busch <kbusch@kernel.org>    Thu, 5 Feb 2026 15:29:10 +0000 (07:29 -0800)
The initial result of dma_need_unmap() may be false but change to true
while mapping the data iterator; enabling swiotlb is one such case. The
nvme driver needs to save the mapped dma vectors so they can be
unmapped later, so allocate the save array as needed during iteration
rather than assuming it was always allocated at the beginning. This
fixes a NULL dereference from accessing an uninitialized dma_vecs when
the device's dma unmapping requirements change mid-iteration.

Fixes: b8b7570a7ec8 ("nvme-pci: fix dma unmapping when using PRPs and not using the IOVA mapping")
Link: https://lore.kernel.org/linux-nvme/20260202125738.1194899-1-pradeep.pragallapati@oss.qualcomm.com/
Reported-by: Pradeep P V K <pradeep.pragallapati@oss.qualcomm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
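
For readers unfamiliar with the pattern, the following is a minimal standalone
sketch (not driver code) of the allocate-on-first-use approach the patch
switches to. All names in it (struct dma_vec, struct map_ctx, need_unmap(),
save_mapping()) are hypothetical stand-ins for this example only; it mirrors
the shape of the new nvme_pci_prp_save_mapping() below, which allocates the
save array the first time a segment actually needs explicit unmapping rather
than up front.

/*
 * Minimal sketch of lazy allocation of the unmap bookkeeping: nothing is
 * allocated until the first segment that needs explicit unmapping shows
 * up, which may only happen partway through the iteration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_vec {
	unsigned long long addr;
	unsigned int len;
};

struct map_ctx {
	struct dma_vec *vecs;	/* allocated lazily, stays NULL if unused */
	unsigned int nr_vecs;
};

/* Stand-in for dma_need_unmap(): may flip to true mid-iteration. */
static bool need_unmap(int segment)
{
	return segment >= 2;	/* pretend later segments bounce via swiotlb */
}

static bool save_mapping(struct map_ctx *ctx, int segment,
			 unsigned long long addr, unsigned int len,
			 unsigned int max_vecs)
{
	/* Nothing to record for segments that need no explicit unmap. */
	if (!need_unmap(segment))
		return true;

	/* First segment that must be unmapped later: allocate the array now. */
	if (!ctx->nr_vecs) {
		ctx->vecs = calloc(max_vecs, sizeof(*ctx->vecs));
		if (!ctx->vecs)
			return false;	/* caller reports a resource error */
	}

	ctx->vecs[ctx->nr_vecs].addr = addr;
	ctx->vecs[ctx->nr_vecs].len = len;
	ctx->nr_vecs++;
	return true;
}

int main(void)
{
	struct map_ctx ctx = { .vecs = NULL, .nr_vecs = 0 };

	for (int seg = 0; seg < 4; seg++) {
		/* Fake mapping: segment seg lands at addr = seg * 0x1000. */
		if (!save_mapping(&ctx, seg, seg * 0x1000ULL, 0x1000, 4))
			return 1;
	}

	/* Completion path: only the recorded vectors need unmapping. */
	for (unsigned int i = 0; i < ctx.nr_vecs; i++)
		printf("unmap addr=%#llx len=%u\n", ctx.vecs[i].addr, ctx.vecs[i].len);
	free(ctx.vecs);
	return 0;
}

Because allocation is keyed off the saved-vector count, the completion side
can rely on that counter alone; this is also why the patch zeroes
iod->nr_dma_vecs in nvme_prep_rq().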
drivers/nvme/host/pci.c

index c2bee32332fee39b239828342edb552e4c5a798b..d86f2565a92caca6f93c9b6a3e9342580c7414af 100644
@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
                nvme_free_descriptors(req);
 }
 
+static bool nvme_pci_prp_save_mapping(struct request *req,
+                                     struct device *dma_dev,
+                                     struct blk_dma_iter *iter)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+               return true;
+
+       if (!iod->nr_dma_vecs) {
+               struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+               iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+                               GFP_ATOMIC);
+               if (!iod->dma_vecs) {
+                       iter->status = BLK_STS_RESOURCE;
+                       return false;
+               }
+       }
+
+       iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+       iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+       iod->nr_dma_vecs++;
+       return true;
+}
+
 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
                struct blk_dma_iter *iter)
 {
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
                return true;
        if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
                return false;
-       if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
-               iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
-               iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
-               iod->nr_dma_vecs++;
-       }
-       return true;
+       return nvme_pci_prp_save_mapping(req, dma_dev, iter);
 }
 
 static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
        unsigned int prp_len, i;
        __le64 *prp_list;
 
-       if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
-               iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
-                               GFP_ATOMIC);
-               if (!iod->dma_vecs)
-                       return BLK_STS_RESOURCE;
-               iod->dma_vecs[0].addr = iter->addr;
-               iod->dma_vecs[0].len = iter->len;
-               iod->nr_dma_vecs = 1;
-       }
+       if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+               return iter->status;
 
        /*
         * PRP1 always points to the start of the DMA transfers.
         *
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
        iod->nr_descriptors = 0;
        iod->total_len = 0;
        iod->meta_total_len = 0;
+       iod->nr_dma_vecs = 0;
 
        ret = nvme_setup_cmd(req->q->queuedata, req);
        if (ret)