}

static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
- u64 user_data, s32 res, u32 cflags,
- u64 extra1, u64 extra2, gfp_t gfp)
+ struct io_cqe *cqe, u64 extra1,
+ u64 extra2, gfp_t gfp)
{
struct io_overflow_cqe *ocqe;
size_t ocq_size = sizeof(struct io_overflow_cqe);
bool is_cqe32 = ctx->flags & IORING_SETUP_CQE32;

if (is_cqe32)
ocq_size += sizeof(struct io_uring_cqe);

ocqe = kmalloc(ocq_size, gfp | __GFP_ACCOUNT);
- trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
+ trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
if (ocqe) {
- ocqe->cqe.user_data = user_data;
- ocqe->cqe.res = res;
- ocqe->cqe.flags = cflags;
+ ocqe->cqe.user_data = cqe->user_data;
+ ocqe->cqe.res = cqe->res;
+ ocqe->cqe.flags = cqe->flags;
if (is_cqe32) {
ocqe->cqe.big_cqe[0] = extra1;
ocqe->cqe.big_cqe[1] = extra2;
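For reference, the conditional size bump in this hunk exists because a CQE32 ring keeps its two extra completion words in the flexible big_cqe[] array at the tail of struct io_uring_cqe, so an overflow entry needs one extra CQE-sized chunk (16 bytes) to hold them. Below is a stand-alone user-space sketch of that sizing logic; sketch_cqe and alloc_overflow_cqe are hypothetical stand-ins, not the kernel's types or API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct io_uring_cqe: 16 bytes, with the CQE32 payload in a
 * flexible array so one type describes both ring formats. */
struct sketch_cqe {
        uint64_t user_data;
        int32_t  res;
        uint32_t flags;
        uint64_t big_cqe[];
};

/* Allocate an overflow entry; on a CQE32 ring it needs one extra
 * sizeof(struct sketch_cqe) of storage for big_cqe[0] and big_cqe[1]. */
static struct sketch_cqe *alloc_overflow_cqe(int is_cqe32, uint64_t user_data,
                                             int32_t res, uint32_t flags,
                                             uint64_t extra1, uint64_t extra2)
{
        size_t size = sizeof(struct sketch_cqe);
        struct sketch_cqe *cqe;

        if (is_cqe32)
                size += sizeof(struct sketch_cqe);

        cqe = malloc(size);
        if (!cqe)
                return NULL;
        cqe->user_data = user_data;
        cqe->res = res;
        cqe->flags = flags;
        if (is_cqe32) {
                cqe->big_cqe[0] = extra1;
                cqe->big_cqe[1] = extra2;
        }
        return cqe;
}

int main(void)
{
        struct sketch_cqe *cqe = alloc_overflow_cqe(1, 42, 0, 0, 7, 9);

        if (cqe) {
                printf("user_data=%llu big=[%llu, %llu]\n",
                       (unsigned long long)cqe->user_data,
                       (unsigned long long)cqe->big_cqe[0],
                       (unsigned long long)cqe->big_cqe[1]);
                free(cqe);
        }
        return 0;
}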
@@ ... @@
return false;
}

+static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
+{
+ return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
+}
+
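The io_init_cqe() helper added above exists so the aux-CQE paths, which only have the three loose scalars in hand, can still call the pointer-based io_alloc_ocqe(); request-based callers simply pass &req->cqe. A minimal user-space sketch of the same call-site pattern follows; sketch_io_cqe, init_cqe and consume_cqe are hypothetical stand-ins, assuming a simplified three-field struct rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct io_cqe: the three scalars every completion carries. */
struct sketch_io_cqe {
        uint64_t user_data;
        int32_t  res;
        uint32_t flags;
};

/* Stand-in for a request that already embeds its completion. */
struct sketch_req {
        struct sketch_io_cqe cqe;
};

/* Mirrors the shape of io_init_cqe(): build the struct by value. */
static inline struct sketch_io_cqe init_cqe(uint64_t user_data, int32_t res,
                                            uint32_t flags)
{
        return (struct sketch_io_cqe) {
                .user_data = user_data, .res = res, .flags = flags,
        };
}

/* The consumer now takes one pointer instead of three scalar arguments. */
static void consume_cqe(const struct sketch_io_cqe *cqe)
{
        printf("user_data=%llu res=%d flags=%u\n",
               (unsigned long long)cqe->user_data, cqe->res, cqe->flags);
}

int main(void)
{
        struct sketch_req req = { .cqe = { .user_data = 1, .res = 0, .flags = 0 } };
        /* Aux-style caller: only has scalars, so it builds a temporary. */
        struct sketch_io_cqe tmp = init_cqe(2, -11, 0);

        consume_cqe(&req.cqe);  /* request-based caller passes its own cqe */
        consume_cqe(&tmp);      /* scalar-based caller passes the temporary */
        return 0;
}

Returning the small struct by value keeps the temporary on the caller's stack, so the refactor only changes how the arguments are bundled, not where the completion data lives.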
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
bool filled;

io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
if (unlikely(!filled)) {
struct io_overflow_cqe *ocqe;
+ struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
- ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_ATOMIC);
+ ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);
filled = io_cqring_add_overflow(ctx, ocqe);
}
io_cq_unlock_post(ctx);
return filled;
}

@@ ... @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
struct io_overflow_cqe *ocqe;
+ struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
- ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_KERNEL);
+ ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_KERNEL);
spin_lock(&ctx->completion_lock);
io_cqring_add_overflow(ctx, ocqe);
spin_unlock(&ctx->completion_lock);

@@ ... @@
gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
struct io_overflow_cqe *ocqe;
- ocqe = io_alloc_ocqe(ctx, req->cqe.user_data, req->cqe.res,
- req->cqe.flags, req->big_cqe.extra1,
+ ocqe = io_alloc_ocqe(ctx, &req->cqe, req->big_cqe.extra1,
req->big_cqe.extra2, gfp);
if (ctx->lockless_cq) {
spin_lock(&ctx->completion_lock);
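The call sites keep their existing locking and allocation-flag split: the path that already holds the CQ lock allocates with GFP_ATOMIC, while paths that may sleep allocate with GFP_KERNEL and take completion_lock only around the overflow insertion itself. A loose user-space sketch of that shape follows; the overflow list, sketch_entry and post_overflow names are hypothetical, and a pthread mutex stands in for completion_lock since GFP flags have no user-space equivalent.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical overflow list; stand-ins, not the kernel's structures. */
struct sketch_entry {
        struct sketch_entry *next;
        uint64_t user_data;
};

static struct sketch_entry *overflow_head;
static pthread_mutex_t overflow_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate before taking the lock (the "may sleep" case), then hold the
 * lock only for the list insertion itself. */
static int post_overflow(uint64_t user_data)
{
        struct sketch_entry *e = malloc(sizeof(*e));

        if (!e)
                return -1;
        e->user_data = user_data;

        pthread_mutex_lock(&overflow_lock);
        e->next = overflow_head;
        overflow_head = e;
        pthread_mutex_unlock(&overflow_lock);
        return 0;
}

int main(void)
{
        post_overflow(1);
        post_overflow(2);
        for (struct sketch_entry *e = overflow_head; e; e = e->next)
                printf("overflowed user_data=%llu\n",
                       (unsigned long long)e->user_data);
        return 0;
}

Keeping the allocation outside the critical section is what allows the sleeping callers to use GFP_KERNEL in the kernel version of this code.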