if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
+ io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
}
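For context only (not part of the patch): a minimal liburing-side sketch of where the res2 value passed to io_uring_cmd_done32() above ends up, assuming the ring was created with IORING_SETUP_SQE128 | IORING_SETUP_CQE32. reap_uring_cmd() is a hypothetical helper name; io_uring_wait_cqe(), io_uring_cqe_seen() and the big_cqe[] field are existing liburing/io_uring definitions.

#include <liburing.h>

/* Sketch: reap one passthrough completion and return the extra 64-bit result. */
static int reap_uring_cmd(struct io_uring *ring, __u64 *res2)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret)
		return ret;
	ret = cqe->res;			/* s32 status handed to io_uring_cmd_done32() */
	*res2 = cqe->big_cqe[0];	/* u64 res2 lands in the extra CQE space */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}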
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
* Note: the caller should never hard code @issue_flags and is only allowed
* to pass the mask provided by the core io_uring code.
*/
-void io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
- unsigned issue_flags);
+void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32);
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
io_uring_cmd_tw_t task_work_cb,
{
return -EOPNOTSUPP;
}
-static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
- u64 ret2, unsigned issue_flags)
+static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
+ u64 ret2, unsigned issue_flags, bool is_cqe32)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
return cmd_to_io_kiocb(cmd)->ctx;
}
+static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, false);
+}
+
+static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
+}
+
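As a usage illustration only, with made-up "my_drv" names: a driver that returned -EIOCBQUEUED from its ->uring_cmd() handler would later complete the command from its task-work callback through the new wrapper, much like the nvme hunk above; io_uring_cmd_to_pdu() is the existing helper for reaching the per-command PDU.

/* Hypothetical driver-side sketch, not part of the patch. */
struct my_drv_cmd_pdu {
	s32 status;
	u64 result64;
};

static void my_drv_uring_task_cb(struct io_uring_cmd *ioucmd,
				 unsigned int issue_flags)
{
	struct my_drv_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(ioucmd, struct my_drv_cmd_pdu);

	/* status goes into cqe->res, result64 into the 32-byte CQE's extra words */
	io_uring_cmd_done32(ioucmd, pdu->status, pdu->result64, issue_flags);
}

A driver with no second result to return would simply call io_uring_cmd_done() here instead.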
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
void (*release)(void *), unsigned int index,
unsigned int issue_flags);
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
*/
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
- unsigned issue_flags)
+void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- if (req->ctx->flags & IORING_SETUP_CQE32)
+ if (is_cqe32) {
+ if (req->ctx->flags & IORING_SETUP_CQE_MIXED)
+ req->cqe.flags |= IORING_CQE_F_32;
io_req_set_cqe32_extra(req, res2, 0);
+ }
io_req_uring_cleanup(req, issue_flags);
if (req->ctx->flags & IORING_SETUP_IOPOLL) {
/* order with io_iopoll_req_issued() checking ->iopoll_complete */
io_req_task_work_add(req);
}
}
-EXPORT_SYMBOL_GPL(io_uring_cmd_done);
+EXPORT_SYMBOL_GPL(__io_uring_cmd_done);
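A consumer-facing note on the IORING_SETUP_CQE_MIXED branch above, as an illustration rather than part of the patch: on a mixed ring only the completions posted with is_cqe32 set carry IORING_CQE_F_32, whereas on a plain IORING_SETUP_CQE32 ring every CQE is already 32 bytes and needs no marker. An application could gate its use of the extra result words on that flag, along these lines:

/* Sketch only; cqe_has_extra() is a made-up name. */
static inline bool cqe_has_extra(const struct io_uring_cqe *cqe)
{
	return cqe->flags & IORING_CQE_F_32;
}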
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{