* blk_rq_dma_map_iter_next - map the next DMA segment for a request
* @req: request to map
* @dma_dev: device to map to
- * @state: DMA IOVA state
* @iter: block layer DMA iterator
*
* Iterate to the next mapping after a previous call to
* blk_rq_dma_map_iter_start().  On failure, the mapping error is
* returned in @iter.status.
*/
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter)
+ struct blk_dma_iter *iter)
{
struct phys_vec vec;
if (iter->len)
return true;
- if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
+ if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
return false;
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
}
nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
iod->total_len += iter->len;
- } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state,
- iter));
+ } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
if (unlikely(iter->status))
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter);
+ struct blk_dma_iter *iter);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?