From: Keith Busch
Date: Wed, 4 Feb 2026 14:29:11 +0000 (-0800)
Subject: nvme-pci: handle changing device dma map requirements
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=071be3b0b6575d45be9df9c5b612f5882bfc5e88;p=thirdparty%2Fkernel%2Flinux.git

nvme-pci: handle changing device dma map requirements

The initial state of dma_need_unmap() may be false, but change to true
while mapping the data iterator. Enabling swiotlb is one such case that
can change the result. The nvme driver needs to save the mapped dma
vectors to be unmapped later, so allocate them as needed during
iteration rather than assuming they were allocated at the beginning.

This fixes a NULL dereference from accessing an uninitialized dma_vecs
when the device dma unmapping requirements change mid-iteration.

Fixes: b8b7570a7ec8 ("nvme-pci: fix dma unmapping when using PRPs and not using the IOVA mapping")
Link: https://lore.kernel.org/linux-nvme/20260202125738.1194899-1-pradeep.pragallapati@oss.qualcomm.com/
Reported-by: Pradeep P V K
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c2bee32332fe..d86f2565a92c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
 	nvme_free_descriptors(req);
 }
 
+static bool nvme_pci_prp_save_mapping(struct request *req,
+					struct device *dma_dev,
+					struct blk_dma_iter *iter)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+		return true;
+
+	if (!iod->nr_dma_vecs) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+				GFP_ATOMIC);
+		if (!iod->dma_vecs) {
+			iter->status = BLK_STS_RESOURCE;
+			return false;
+		}
+	}
+
+	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+	iod->nr_dma_vecs++;
+	return true;
+}
+
 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter)
 {
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 		return true;
 	if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
 		return false;
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
-		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
-		iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
-		iod->nr_dma_vecs++;
-	}
-	return true;
+	return nvme_pci_prp_save_mapping(req, dma_dev, iter);
 }
 
 static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
 	unsigned int prp_len, i;
 	__le64 *prp_list;
 
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
-		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
-				GFP_ATOMIC);
-		if (!iod->dma_vecs)
-			return BLK_STS_RESOURCE;
-		iod->dma_vecs[0].addr = iter->addr;
-		iod->dma_vecs[0].len = iter->len;
-		iod->nr_dma_vecs = 1;
-	}
+	if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+		return iter->status;
 
 	/*
 	 * PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
 	iod->nr_descriptors = 0;
 	iod->total_len = 0;
 	iod->meta_total_len = 0;
+	iod->nr_dma_vecs = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
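
For readers unfamiliar with the pattern, below is a minimal standalone C
sketch (not part of the patch) of the allocate-on-first-use idea the new
nvme_pci_prp_save_mapping() helper applies: the "needs unmap" predicate is
re-evaluated per segment, and the save array is only allocated when the
first segment that actually needs recording shows up. All names here
(struct iter_state, segment_needs_unmap, save_mapping) are invented for
illustration and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long addr;
	unsigned int len;
};

struct iter_state {
	struct mapping *vecs;	/* NULL until a segment needs saving */
	unsigned int nr_vecs;
	unsigned int max_vecs;
};

/*
 * The predicate may start out false and turn true partway through the
 * iteration (the commit cites swiotlb bouncing as one such case), so it
 * must be checked for every segment rather than once up front.
 */
static bool segment_needs_unmap(unsigned int seg)
{
	return seg >= 2;	/* pretend bouncing starts at the third segment */
}

static bool save_mapping(struct iter_state *st, unsigned long addr,
			 unsigned int len)
{
	if (!st->vecs) {
		/* First segment that needs saving: allocate now, not earlier. */
		st->vecs = calloc(st->max_vecs, sizeof(*st->vecs));
		if (!st->vecs)
			return false;	/* caller maps this to a resource error */
	}
	st->vecs[st->nr_vecs].addr = addr;
	st->vecs[st->nr_vecs].len = len;
	st->nr_vecs++;
	return true;
}

int main(void)
{
	struct iter_state st = { .vecs = NULL, .nr_vecs = 0, .max_vecs = 8 };

	for (unsigned int seg = 0; seg < 5; seg++) {
		if (!segment_needs_unmap(seg))
			continue;	/* nothing to remember for this segment */
		if (!save_mapping(&st, 0x1000ul * seg, 4096))
			return 1;
	}
	printf("saved %u mappings for later unmap\n", st.nr_vecs);
	free(st.vecs);
	return 0;
}

Compared with deciding once at the start based on the first segment's
answer, deferring the allocation means a later flip of the predicate
cannot leave the array NULL when a segment finally has to be recorded,
which is the NULL dereference the patch fixes.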