	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
				SLAB_TYPESAFE_BY_RCU);
-	io_buf_cachep = KMEM_CACHE(io_buffer,
-				   SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
	BUG_ON(!iou_wq);
}
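This hunk drops the dedicated io_buffer slab cache from ring setup: these objects are small and allocated infrequently, so the generic kmalloc caches serve them just as well, and one fewer dedicated cache means less slab metadata to carry. The following is a minimal, self-contained module sketch of the two allocation strategies being traded here; demo_buf and demo_cache are hypothetical names, not io_uring code.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_buf {
	struct list_head list;
	u64 addr;
	u32 len;
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_buf *a, *b;

	/* Dedicated cache: a slab sized exactly for struct demo_buf;
	 * SLAB_ACCOUNT charges every object to the caller's memcg. */
	demo_cache = KMEM_CACHE(demo_buf, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
	if (!demo_cache)
		return -ENOMEM;
	a = kmem_cache_alloc(demo_cache, GFP_KERNEL);

	/* Generic path: rounds up to the nearest kmalloc-<size> bucket;
	 * memcg accounting must be requested per call site instead. */
	b = kmalloc(sizeof(*b), GFP_KERNEL_ACCOUNT);

	/* Frees must match the allocator that produced the object. */
	if (a)
		kmem_cache_free(demo_cache, a);
	kfree(b);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The two hunks below remove the now-unused extern declaration and the global definition of io_buf_cachep.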
extern struct kmem_cache *req_cachep;
-extern struct kmem_cache *io_buf_cachep;
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
-struct kmem_cache *io_buf_cachep;
-
struct io_provide_buf {
	struct file *file;
	__u64 addr;
	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
-		kmem_cache_free(io_buf_cachep, buf);
+		kfree(buf);
	}
}
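The teardown loop above now frees cached buffers with kfree(), which pairs with the kmalloc() call introduced further down; freeing a kmalloc'd object through kmem_cache_free() (or the reverse) corrupts slab state, so the alloc and free sites have to flip together in one patch. For reference, a hedged sketch of the same drain pattern written with list_for_each_entry_safe(), which folds the list_entry() step into the iterator; demo_buf stands in for struct io_buffer.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_buf {
	struct list_head list;
};

/* Free every entry on the list; the _safe variant caches the next
 * pointer so entries may be unlinked while iterating. */
static void demo_free_all(struct list_head *head)
{
	struct demo_buf *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, head, list) {
		list_del(&buf->list);
		kfree(buf);	/* pairs with kmalloc(), not a kmem cache */
	}
}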
	spin_unlock(&ctx->completion_lock);
}
-	buf = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
+	buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;
	list_add_tail(&buf->list, &ctx->io_buffers_cache);
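Note the gfp flag change riding along with the allocator switch in this last hunk: the old cache was created with SLAB_ACCOUNT, so a plain GFP_KERNEL kmem_cache_alloc() was still charged to the caller's memory cgroup. Plain kmalloc() has no per-cache flag to inherit from, so accounting must be requested at each call site, which is what GFP_KERNEL_ACCOUNT (defined as GFP_KERNEL | __GFP_ACCOUNT) does. A minimal sketch of the refill step under that assumption; demo_refill_one() and its cache list parameter are hypothetical names.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_buf {
	struct list_head list;
};

/* Add one buffer to the free cache, keeping the memcg charge that the
 * SLAB_ACCOUNT cache used to apply implicitly. */
static int demo_refill_one(struct list_head *cache)
{
	struct demo_buf *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;
	list_add_tail(&buf->list, cache);
	return 0;
}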