// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <uapi/asm-generic/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"
16 static void io_uring_cmd_work(struct io_kiocb
*req
, struct io_tw_state
*ts
)
18 struct io_uring_cmd
*ioucmd
= io_kiocb_to_cmd(req
, struct io_uring_cmd
);
19 unsigned issue_flags
= ts
->locked
? 0 : IO_URING_F_UNLOCKED
;
21 ioucmd
->task_work_cb(ioucmd
, issue_flags
);
24 void __io_uring_cmd_do_in_task(struct io_uring_cmd
*ioucmd
,
25 void (*task_work_cb
)(struct io_uring_cmd
*, unsigned),
28 struct io_kiocb
*req
= cmd_to_io_kiocb(ioucmd
);
30 ioucmd
->task_work_cb
= task_work_cb
;
31 req
->io_task_work
.func
= io_uring_cmd_work
;
32 __io_req_task_work_add(req
, flags
);
34 EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task
);
36 void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd
*ioucmd
,
37 void (*task_work_cb
)(struct io_uring_cmd
*, unsigned))
39 __io_uring_cmd_do_in_task(ioucmd
, task_work_cb
, IOU_F_TWQ_LAZY_WAKE
);
41 EXPORT_SYMBOL_GPL(io_uring_cmd_do_in_task_lazy
);
43 static inline void io_req_set_cqe32_extra(struct io_kiocb
*req
,
44 u64 extra1
, u64 extra2
)
46 req
->big_cqe
.extra1
= extra1
;
47 req
->big_cqe
.extra2
= extra2
;
51 * Called by consumers of io_uring_cmd, if they originally returned
52 * -EIOCBQUEUED upon receiving the command.
54 void io_uring_cmd_done(struct io_uring_cmd
*ioucmd
, ssize_t ret
, ssize_t res2
,
57 struct io_kiocb
*req
= cmd_to_io_kiocb(ioucmd
);
62 io_req_set_res(req
, ret
, 0);
63 if (req
->ctx
->flags
& IORING_SETUP_CQE32
)
64 io_req_set_cqe32_extra(req
, res2
, 0);
65 if (req
->ctx
->flags
& IORING_SETUP_IOPOLL
) {
66 /* order with io_iopoll_req_issued() checking ->iopoll_complete */
67 smp_store_release(&req
->iopoll_completed
, 1);
69 struct io_tw_state ts
= {
70 .locked
= !(issue_flags
& IO_URING_F_UNLOCKED
),
72 io_req_task_complete(req
, &ts
);
75 EXPORT_SYMBOL_GPL(io_uring_cmd_done
);
77 int io_uring_cmd_prep_async(struct io_kiocb
*req
)
79 struct io_uring_cmd
*ioucmd
= io_kiocb_to_cmd(req
, struct io_uring_cmd
);
81 memcpy(req
->async_data
, ioucmd
->sqe
, uring_sqe_size(req
->ctx
));
82 ioucmd
->sqe
= req
->async_data
;
86 int io_uring_cmd_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
88 struct io_uring_cmd
*ioucmd
= io_kiocb_to_cmd(req
, struct io_uring_cmd
);
93 ioucmd
->flags
= READ_ONCE(sqe
->uring_cmd_flags
);
94 if (ioucmd
->flags
& ~IORING_URING_CMD_FIXED
)
97 if (ioucmd
->flags
& IORING_URING_CMD_FIXED
) {
98 struct io_ring_ctx
*ctx
= req
->ctx
;
101 req
->buf_index
= READ_ONCE(sqe
->buf_index
);
102 if (unlikely(req
->buf_index
>= ctx
->nr_user_bufs
))
104 index
= array_index_nospec(req
->buf_index
, ctx
->nr_user_bufs
);
105 req
->imu
= ctx
->user_bufs
[index
];
106 io_req_set_rsrc_node(req
, ctx
, 0);
109 ioucmd
->cmd_op
= READ_ONCE(sqe
->cmd_op
);
113 int io_uring_cmd(struct io_kiocb
*req
, unsigned int issue_flags
)
115 struct io_uring_cmd
*ioucmd
= io_kiocb_to_cmd(req
, struct io_uring_cmd
);
116 struct io_ring_ctx
*ctx
= req
->ctx
;
117 struct file
*file
= req
->file
;
120 if (!file
->f_op
->uring_cmd
)
123 ret
= security_uring_cmd(ioucmd
);
127 if (ctx
->flags
& IORING_SETUP_SQE128
)
128 issue_flags
|= IO_URING_F_SQE128
;
129 if (ctx
->flags
& IORING_SETUP_CQE32
)
130 issue_flags
|= IO_URING_F_CQE32
;
131 if (ctx
->flags
& IORING_SETUP_IOPOLL
) {
132 if (!file
->f_op
->uring_cmd_iopoll
)
134 issue_flags
|= IO_URING_F_IOPOLL
;
135 req
->iopoll_completed
= 0;
136 WRITE_ONCE(ioucmd
->cookie
, NULL
);
139 ret
= file
->f_op
->uring_cmd(ioucmd
, issue_flags
);
140 if (ret
== -EAGAIN
) {
141 if (!req_has_async_data(req
)) {
142 if (io_alloc_async_data(req
))
144 io_uring_cmd_prep_async(req
);
149 if (ret
!= -EIOCBQUEUED
) {
152 io_req_set_res(req
, ret
, 0);
156 return IOU_ISSUE_SKIP_COMPLETE
;
159 int io_uring_cmd_import_fixed(u64 ubuf
, unsigned long len
, int rw
,
160 struct iov_iter
*iter
, void *ioucmd
)
162 struct io_kiocb
*req
= cmd_to_io_kiocb(ioucmd
);
164 return io_import_fixed(rw
, iter
, req
->imu
, ubuf
, len
);
166 EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed
);
168 int io_uring_cmd_sock(struct io_uring_cmd
*cmd
, unsigned int issue_flags
)
170 struct socket
*sock
= cmd
->file
->private_data
;
171 struct sock
*sk
= sock
->sk
;
172 struct proto
*prot
= READ_ONCE(sk
->sk_prot
);
175 if (!prot
|| !prot
->ioctl
)
178 switch (cmd
->sqe
->cmd_op
) {
179 case SOCKET_URING_OP_SIOCINQ
:
180 ret
= prot
->ioctl(sk
, SIOCINQ
, &arg
);
184 case SOCKET_URING_OP_SIOCOUTQ
:
185 ret
= prot
->ioctl(sk
, SIOCOUTQ
, &arg
);
193 EXPORT_SYMBOL_GPL(io_uring_cmd_sock
);