}
mem->account_pages = io_count_account_pages(pages, nr_pages);
- ret = io_account_mem(ifq->ctx->user, ifq->ctx->mm_account, mem->account_pages);
+ ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
if (ret < 0)
mem->account_pages = 0;
}
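/*
 * The accounting call above now charges the user and mm cached on the ifq
 * itself instead of reaching through ifq->ctx. That assumes two fields were
 * added to struct io_zcrx_ifq elsewhere in this patch (sketch, exact
 * placement hypothetical):
 *
 *	struct user_struct	*user;		- counted ref from ctx->user
 *	struct mm_struct	*mm_account;	- mm_count ref from ctx->mm_account
 */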
-static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
+static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
+ struct io_zcrx_ifq *ifq,
struct io_uring_zcrx_ifq_reg *reg,
struct io_uring_region_desc *rd,
u32 id)
mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
mmap_offset += id << IORING_OFF_PBUF_SHIFT;
- ret = io_create_region(ifq->ctx, &ifq->region, rd, mmap_offset);
+ ret = io_create_region(ctx, &ifq->region, rd, mmap_offset);
if (ret < 0)
return ret;
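/*
 * io_create_region() operates on the ring ctx, which was previously reached
 * via ifq->ctx. Passing ctx explicitly keeps this path free of that
 * back-reference; the register path further below supplies it at the call
 * site.
 */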
static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
- io_free_region(ifq->ctx->user, &ifq->region);
+ io_free_region(ifq->user, &ifq->region);
ifq->rq_ring = NULL;
ifq->rqes = NULL;
}
io_release_area_mem(&area->mem);
if (area->mem.account_pages)
- io_unaccount_mem(area->ifq->ctx->user, area->ifq->ctx->mm_account,
+ io_unaccount_mem(ifq->user, ifq->mm_account,
area->mem.account_pages);
kvfree(area->freelist);
if (ifq->area)
io_zcrx_free_area(ifq, ifq->area);
+ free_uid(ifq->user);
+ if (ifq->mm_account)
+ mmdrop(ifq->mm_account);
if (ifq->dev)
put_device(ifq->dev);
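/*
 * The drops above mirror the get_uid()/mmgrab() references taken at
 * registration time (see below). free_uid() is a no-op on NULL, so
 * ifq->user needs no guard, while mmdrop() dereferences its argument
 * unconditionally, hence the explicit ifq->mm_account check.
 */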
ifq = io_zcrx_ifq_alloc(ctx);
if (!ifq)
return -ENOMEM;
+ if (ctx->user) {
+ get_uid(ctx->user);
+ ifq->user = ctx->user;
+ }
+ if (ctx->mm_account) {
+ mmgrab(ctx->mm_account);
+ ifq->mm_account = ctx->mm_account;
+ }
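/*
 * get_uid() returns its argument after bumping the refcount, so the first
 * branch could equally be written as a one-liner (equivalent sketch):
 *
 *	ifq->user = ctx->user ? get_uid(ctx->user) : NULL;
 *
 * mmgrab() pins the mm_struct itself (mm_count) rather than the address
 * space (mm_users), which is the appropriate reference for memory
 * accounting that may outlive the task's mappings.
 */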
ifq->rq_entries = reg.rq_entries;
scoped_guard(mutex, &ctx->mmap_lock) {
goto ifq_free;
}
- ret = io_allocate_rbuf_ring(ifq, &reg, &rd, id);
+ ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
if (ret)
goto err;