// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)
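
/*
 * Match callback invoked by io-wq when scanning pending work for requests
 * to cancel. Returns true if @work belongs to the same ring as the cancel
 * request and matches the key: any request, a specific file, or a specific
 * user_data value.
 */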
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		; /* match any request on this ring */
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}
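
/*
 * Try to cancel a matching request on the given task's io-wq. Returns 0 if
 * the work was cancelled before it ran, -EALREADY if it is already running,
 * or -ENOENT if nothing matched.
 */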
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
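
/*
 * Attempt cancellation in order: the io-wq worker pool first, then armed
 * poll requests, and finally timeouts (unless the key is a file descriptor).
 */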
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have poll armed
	 * that need unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
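
/*
 * Prepare an IORING_OP_ASYNC_CANCEL request: read the cancel key and flags
 * from the SQE and reject unsupported fields and flag combinations.
 */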
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}
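
/*
 * Common async cancel path: try the issuing task's context first, then fall
 * back to walking every io-wq attached to the ring. With ALL/ANY set, keep
 * going and return the number of requests cancelled rather than an error.
 */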
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
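
/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. For fd-based matching, resolve
 * either a fixed or a normal file before running the common cancel path.
 */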
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
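
/*
 * Initialise every bucket (spinlock plus hlist head) of an io_uring hash
 * table of the given size.
 */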
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}
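
/*
 * Synchronous cancel helper: re-resolve a fixed file on every attempt,
 * since the uring_lock is dropped between retries.
 */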
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}
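
/*
 * Synchronous cancelation via io_uring_register(): cancel matching requests
 * and, while matches are still running, wait on cq_wait (optionally bounded
 * by a timeout) and retry until the match completes or no longer exists.
 */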
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct fd f = { };
	DEFINE_WAIT(wait);
	int ret;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
		return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		f = fdget(sc.fd);
		if (!f.file)
			return -EBADF;
		cd.file = f.file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	fdput(f);
	return ret;
}