@@ ... @@
 	nvme_free_descriptors(req);
 }
 
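+/*
+ * Save the DMA address and length of each mapped PRP segment so that it
+ * can be unmapped again when the request completes.  Nothing needs to be
+ * recorded when the segments were coalesced into a single IOVA mapping,
+ * or when the DMA API does not require an unmap at all.
+ */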
+static bool nvme_pci_prp_save_mapping(struct request *req,
+		struct device *dma_dev, struct blk_dma_iter *iter)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+		return true;
+
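+	/* allocate the vector array from the mempool on the first saved segment */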
+	if (!iod->nr_dma_vecs) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+				GFP_ATOMIC);
+		if (!iod->dma_vecs) {
+			iter->status = BLK_STS_RESOURCE;
+			return false;
+		}
+	}
+
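+	/* remember the DMA address and length of this segment */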
+	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+	iod->nr_dma_vecs++;
+	return true;
+}
+
 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
 	if (iter->len)
 		return true;
 	if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
 		return false;
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
-		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
-		iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
-		iod->nr_dma_vecs++;
-	}
-	return true;
+	return nvme_pci_prp_save_mapping(req, dma_dev, iter);
 }
@@ ... @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
 	unsigned int prp_len, i;
 	__le64 *prp_list;
 
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
-		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
-				GFP_ATOMIC);
-		if (!iod->dma_vecs)
-			return BLK_STS_RESOURCE;
-		iod->dma_vecs[0].addr = iter->addr;
-		iod->dma_vecs[0].len = iter->len;
-		iod->nr_dma_vecs = 1;
-	}
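+	/* record the first segment; on failure the reason is in iter->status */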
+	if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+		return iter->status;
 
 	/*
 	 * PRP1 always points to the start of the DMA transfers.
@@ ... @@
 	iod->nr_descriptors = 0;
 	iod->total_len = 0;
 	iod->meta_total_len = 0;
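+	/* no unmap vectors recorded yet; iod->dma_vecs is allocated on first use */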
+	iod->nr_dma_vecs = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)