// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <uapi/asm-generic/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}
/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
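
/*
 * Illustrative sketch (not a real driver) of the contract documented
 * above: mark the command cancelable at issue time, and honour
 * IO_URING_F_CANCEL in ->uring_cmd(). "my_hw_submit" and "my_hw_abort"
 * are hypothetical hardware hooks.
 */
#if 0
static int my_cancelable_uring_cmd(struct io_uring_cmd *cmd,
				   unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/*
		 * Not done yet, but possibly racing with a normal
		 * completion; only complete here if the abort won.
		 */
		if (my_hw_abort(cmd))
			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}

	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	my_hw_submit(cmd);
	return -EIOCBQUEUED;
}
#endif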
struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_get_task);
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_do_in_task_lazy);
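
/*
 * Illustrative sketch (not a real driver): completion often arrives in
 * IRQ context, where io_uring_cmd_done() cannot be called with accurate
 * issue_flags, so drivers punt to the submitter's task via
 * io_uring_cmd_do_in_task_lazy(). "struct my_pdu" and its fields are
 * hypothetical; the callback runs in task context.
 */
#if 0
struct my_pdu {
	int status;			/* hardware completion status */
};

static void my_complete_tw(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct my_pdu *pdu = io_uring_cmd_to_pdu(cmd, struct my_pdu);

	io_uring_cmd_done(cmd, pdu->status, 0, issue_flags);
}

static void my_irq_complete(struct io_uring_cmd *cmd, int status)
{
	struct my_pdu *pdu = io_uring_cmd_to_pdu(cmd, struct my_pdu);

	pdu->status = status;
	io_uring_cmd_do_in_task_lazy(cmd, my_complete_tw);
}
#endif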
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}
/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
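
/*
 * Illustrative sketch (not a real driver) of the completion contract
 * described above: a hypothetical ->uring_cmd() handler either
 * completes inline by returning the result, or returns -EIOCBQUEUED
 * and later calls io_uring_cmd_done() exactly once. "my_try_inline"
 * and "my_queue_to_hw" are made-up helpers.
 */
#if 0
static int my_queued_uring_cmd(struct io_uring_cmd *cmd,
			       unsigned int issue_flags)
{
	int ret;

	if (my_try_inline(cmd, &ret))
		return ret;	/* io_uring posts the CQE itself */

	/* the completion path must call io_uring_cmd_done() once */
	my_queue_to_hw(cmd);
	return -EIOCBQUEUED;
}
#endif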
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
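
/*
 * Illustrative sketch (not a real driver): resolving a registered
 * (fixed) buffer for a command prepared with IORING_URING_CMD_FIXED.
 * Which sqe fields carry the address and length is up to the driver's
 * own command ABI; sqe->addr and sqe->len are used here only as an
 * example, and ITER_DEST assumes a read into userspace.
 */
#if 0
static int my_import_buf(struct io_uring_cmd *cmd, struct iov_iter *iter)
{
	u64 ubuf = READ_ONCE(cmd->sqe->addr);
	unsigned long len = READ_ONCE(cmd->sqe->len);

	/* iterates over the buffer registered at prep time via req->imu */
	return io_uring_cmd_import_fixed(ubuf, len, ITER_DEST, iter, cmd);
}
#endif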
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}
static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
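
/*
 * Illustrative userspace sketch (liburing assumed): issuing
 * SOCKET_URING_OP_SIOCINQ against a socket fd. "ring" and "sockfd" are
 * assumed to be set up already; the CQE res field carries the number of
 * unread bytes, or a negative errno.
 */
#if 0
struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

memset(sqe, 0, sizeof(*sqe));
sqe->opcode = IORING_OP_URING_CMD;
sqe->fd = sockfd;
sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;

io_uring_submit(&ring);
#endif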