 {
 	struct page **pages;
 	int nr_pages, ret;
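+	/* set once page_sg_table is DMA mapped, so the error path can unwind */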
+	bool mapped = false;

 	if (area_reg->dmabuf_fd)
 		return -EINVAL;
@@ ... @@
 	if (ret)
 		goto out_err;
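+
+	/*
+	 * Map the sg table for the device at import time; previously this
+	 * was deferred to io_zcrx_map_area() at page pool init.
+	 */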
+	ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
+			      DMA_FROM_DEVICE, IO_DMA_ATTR);
+	if (ret < 0)
+		goto out_err;
+	mapped = true;
+
 	mem->account_pages = io_count_account_pages(pages, nr_pages);
 	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
 	if (ret < 0)
 		goto out_err;

 	mem->size = area_reg->len;
 	return 0;
 out_err:
+	if (mapped)
+		dma_unmap_sgtable(ifq->dev, &mem->page_sg_table,
+				  DMA_FROM_DEVICE, IO_DMA_ATTR);
 	sg_free_table(&mem->page_sg_table);
 	unpin_user_pages(pages, nr_pages);
 	kvfree(pages);
 	return ret;
 }
@@ ... @@
-static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
-{
-	int ret;
-
-	guard(mutex)(&ifq->pp_lock);
-	if (area->is_mapped)
-		return 0;
-
-	if (!area->mem.is_dmabuf) {
-		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
-				      DMA_FROM_DEVICE, IO_DMA_ATTR);
-		if (ret < 0)
-			return ret;
-	}
-
-	ret = io_populate_area_dma(ifq, area);
-	if (ret && !area->mem.is_dmabuf)
-		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
-				  DMA_FROM_DEVICE, IO_DMA_ATTR);
-	if (ret == 0)
-		area->is_mapped = true;
-	return ret;
-}
-
 static void io_zcrx_sync_for_device(struct page_pool *pool,
 				    struct net_iov *niov)
 {
@@ ... @@
 	ret = io_import_area(ifq, &area->mem, area_reg);
 	if (ret)
 		goto err;
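+	/* io_import_area() now DMA maps the area's backing memory */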
+	area->is_mapped = true;

 	if (buf_size_shift > io_area_max_shift(&area->mem)) {
 		ret = -ERANGE;
 		goto err;
 	}
@@ ... @@
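+	/* publish the DMA addresses of the mapped pages into the net_iovs */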
+	ret = io_populate_area_dma(ifq, area);
+	if (ret)
+		goto err;
+
 	area->free_count = nr_iovs;
 	/* we're only supporting one area per ifq for now */
 	area->area_id = 0;
@@ ... @@
 static int io_pp_zc_init(struct page_pool *pp)
 {
 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
-	int ret;

 	if (WARN_ON_ONCE(!ifq))
 		return -EINVAL;
 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
 		return -EOPNOTSUPP;

-	ret = io_zcrx_map_area(ifq, ifq->area);
-	if (ret)
-		return ret;
-
 	refcount_inc(&ifq->refs);
 	return 0;
 }