// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}
/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it's the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
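
/*
 * Usage sketch (not part of this file): a driver's ->uring_cmd() handler
 * might pair the cancelable API with -EIOCBQUEUED roughly as below; the
 * "foo_" helpers are hypothetical.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			foo_abort_hw_cmd(cmd);
 *			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
 *			return 0;
 *		}
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		foo_queue_hw_cmd(cmd);
 *		return -EIOCBQUEUED;
 *	}
 */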
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
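
/*
 * Usage sketch (hypothetical): completion paths running in IRQ or another
 * task's context can bounce the final io_uring_cmd_done() call to the
 * submitter's task work via io_uring_cmd_complete_in_task(), the wrapper
 * around __io_uring_cmd_do_in_task() from <linux/io_uring/cmd.h>:
 *
 *	static void foo_cmd_tw(struct io_uring_cmd *cmd, unsigned issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, foo_cmd_result(cmd), 0, issue_flags);
 *	}
 *
 *	// from the driver's IRQ handler:
 *	io_uring_cmd_complete_in_task(cmd, foo_cmd_tw);
 *
 * "foo_cmd_result" is made up for the example.
 */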
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}
/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
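
/*
 * Note on res2: as the code above shows, it is only surfaced when the ring
 * was created with IORING_SETUP_CQE32, where it lands in the first extra
 * field of the 32-byte CQE (cqe->big_cqe[0] from userspace); on regular
 * rings it is silently dropped.
 */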
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
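
/*
 * Return-value contract for ->uring_cmd(), as handled above: -EIOCBQUEUED
 * means the driver owns the command and completes it later through
 * io_uring_cmd_done(); -EAGAIN punts the request to io-wq after stashing a
 * stable copy of the SQE, since the inline SQE memory may be recycled once
 * issue returns; any other value completes the request inline with that
 * result.
 */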
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
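
/*
 * Usage sketch (hypothetical driver code): with IORING_URING_CMD_FIXED set,
 * prep resolved sqe->buf_index into req->imu, so the driver can build an
 * iterator over the registered buffer:
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(user_addr, user_len, ITER_DEST,
 *					&iter, cmd);
 *	if (ret)
 *		return ret;
 *	// hand &iter to the driver's normal read path
 *
 * "user_addr" and "user_len" stand in for values the driver parsed out of
 * its command payload.
 */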
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}
static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
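
/*
 * Userspace sketch (hypothetical, raw SQE setup): querying unread bytes on
 * a socket through the path above; liburing 2.5+ also ships an
 * io_uring_prep_cmd_sock() convenience helper.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = sockfd;
 *	sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;
 *	io_uring_submit(&ring);
 *	// on success the CQE's res field carries the SIOCINQ value
 */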