return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}
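+/*
+ * Return the largest buffer shift the area's DMA mapping can support:
+ * take the minimum power-of-two alignment over all sg entries, so that
+ * every entry is a whole multiple of the resulting buffer size.
+ */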
+static int io_area_max_shift(struct io_zcrx_mem *mem)
+{
+ struct sg_table *sgt = mem->sgt;
+ struct scatterlist *sg;
+ unsigned shift = -1U;
+ unsigned i;
+
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ shift = min(shift, __ffs(sg_dma_len(sg)));
+ return shift;
+}
+
static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
struct io_zcrx_area *area)
{
}
static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
- struct io_uring_zcrx_area_reg *area_reg)
+ struct io_uring_zcrx_area_reg *area_reg,
+ struct io_uring_zcrx_ifq_reg *reg)
{
+ int buf_size_shift = PAGE_SHIFT;
struct io_zcrx_area *area;
unsigned nr_iovs;
int i, ret;
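+ /*
+ * rx_buf_len == 0 keeps the default page sized buffers; a non-zero
+ * value must be a power of two and at least PAGE_SIZE.
+ */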
+ if (reg->rx_buf_len) {
+ if (!is_power_of_2(reg->rx_buf_len) ||
+ reg->rx_buf_len < PAGE_SIZE)
+ return -EINVAL;
+ buf_size_shift = ilog2(reg->rx_buf_len);
+ }
+
ret = -ENOMEM;
area = kzalloc(sizeof(*area), GFP_KERNEL);
if (!area)
if (ret)
goto err;
- ifq->niov_shift = PAGE_SHIFT;
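+ /*
+ * The requested buffer size must not exceed what the area's sg
+ * layout allows, otherwise a buffer would straddle a segment boundary.
+ */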
+ if (buf_size_shift > io_area_max_shift(&area->mem)) {
+ ret = -ERANGE;
+ goto err;
+ }
+
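+ /* niov_shift is the granularity at which the area is carved into net_iovs */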
+ ifq->niov_shift = buf_size_shift;
nr_iovs = area->mem.size >> ifq->niov_shift;
area->nia.num_niovs = nr_iovs;
return -EINVAL;
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
- if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
- reg.__resv2 || reg.zcrx_id)
+ if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
return -EINVAL;
if (reg.flags & ZCRX_REG_IMPORT)
return import_zcrx(ctx, arg, &reg);
}
get_device(ifq->dev);
- ret = io_zcrx_create_area(ifq, &area);
+ ret = io_zcrx_create_area(ifq, &area, &reg);
if (ret)
goto netdev_put_unlock;
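+ /* pass the chosen buffer size to the page pool via the memory provider params */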
+ mp_param.rx_page_size = 1U << ifq->niov_shift;
mp_param.mp_ops = &io_uring_pp_zc_ops;
mp_param.mp_priv = ifq;
ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
goto err;
}
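+ /* report the buffer size actually used back to userspace */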
+ reg.rx_buf_len = 1U << ifq->niov_shift;
+
if (copy_to_user(arg, &reg, sizeof(reg)) ||
copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {