int dmabuf_fd = area_reg->dmabuf_fd;
int i, ret;
+ if (!ifq->dev)
+ return -EINVAL;
if (off)
return -EINVAL;
- if (WARN_ON_ONCE(!ifq->dev))
- return -EFAULT;
if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
return -EINVAL;
if (ret)
goto out_err;
- ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
- DMA_FROM_DEVICE, IO_DMA_ATTR);
- if (ret < 0)
- goto out_err;
- mapped = true;
+ if (ifq->dev) {
+ ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
+ DMA_FROM_DEVICE, IO_DMA_ATTR);
+ if (ret < 0)
+ goto out_err;
+ mapped = true;
+ }
mem->account_pages = io_count_account_pages(pages, nr_pages);
ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
ret = io_import_area(ifq, &area->mem, area_reg);
if (ret)
goto err;
- area->is_mapped = true;
+ if (ifq->dev)
+ area->is_mapped = true;
if (buf_size_shift > io_area_max_shift(&area->mem)) {
ret = -ERANGE;
niov->type = NET_IOV_IOURING;
}
- ret = io_populate_area_dma(ifq, area);
- if (ret)
- goto err;
+ if (ifq->dev) {
+ ret = io_populate_area_dma(ifq, area);
+ if (ret)
+ goto err;
+ }
area->free_count = nr_iovs;
/* we're only supporting one area per ifq for now */
return -EFAULT;
if (reg.if_rxq == -1 || !reg.rq_entries)
return -EINVAL;
+ if ((reg.if_rxq || reg.if_idx) && (reg.flags & ZCRX_REG_NODEV))
+ return -EINVAL;
if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
if (!(ctx->flags & IORING_SETUP_CLAMP))
return -EINVAL;
if (ret)
goto err;
- ret = zcrx_register_netdev(ifq, &reg, &area);
- if (ret)
- goto err;
+ if (!(reg.flags & ZCRX_REG_NODEV)) {
+ ret = zcrx_register_netdev(ifq, &reg, &area);
+ if (ret)
+ goto err;
+ } else {
+ ret = io_zcrx_create_area(ifq, &area, &reg);
+ if (ret)
+ goto err;
+ }
reg.zcrx_id = id;