// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"
struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}
static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}
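/*
 * Decide whether a firing timeout is done. Multishot timeouts keep
 * re-arming until their (optional) repeat count is exhausted.
 */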
static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
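/*
 * Task-work completion of a fired timeout. For a multishot timeout that
 * still has repeats left, post an -ETIME CQE with IORING_CQE_F_MORE and
 * re-arm the hrtimer instead of completing the request.
 */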
static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		bool filled;
		filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
				    false);
		if (filled) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			data->timer.function = io_timeout_fn;
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}
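/*
 * Try to cancel a pending timeout: only succeeds if the hrtimer callback
 * isn't already running. Called with ->timeout_lock held.
 */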
static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}
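/*
 * Complete all sequenced timeouts whose target CQ position has been
 * reached. The list is kept sorted, so stop at the first entry that is
 * still in the future (or at the first pure-timeout entry).
 */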
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}
static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}
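/*
 * Queue task-work to cancel the remainder of a request's link chain,
 * honouring REQ_F_SKIP_LINK_CQES when marking the links for CQE posting.
 */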
static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}
static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}
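/*
 * Disarm any linked timeout attached to @req so it can't fire once the
 * request itself completes; failed (non-hard) links are torn down too.
 */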
void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}
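/*
 * hrtimer callback for a regular timeout; runs in timer context, so the
 * actual completion is punted to task work (io_timeout_complete).
 */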
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
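/*
 * Find a pending timeout matching the cancel descriptor, stop its timer
 * and unlink it. Returns the request or an ERR_PTR on failure.
 */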
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
		    cd->data != tmp->cqe.user_data)
			continue;
		if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
			if (cd->seq == tmp->work.cancel_seq)
				continue;
			tmp->work.cancel_seq = cd->seq;
		}
		req = tmp;
		break;
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}
int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}
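/*
 * Task-work side of a fired linked timeout: try to cancel the request it
 * was guarding (unless the task is exiting) and complete with the cancel
 * result or -ETIME.
 */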
static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}
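/*
 * hrtimer callback for a linked timeout. Grab a reference to the request
 * being guarded (if it hasn't completed yet) and punt the cancellation to
 * task work.
 */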
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}
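/*
 * IORING_TIMEOUT_UPDATE for a linked timeout: look it up by user_data on
 * ->ltimeout_list and re-arm its timer with the new expiry.
 */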
static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}
static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}
/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
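/*
 * Common prep for IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT: validate
 * the SQE, copy in the timespec and set up (but don't start) the hrtimer.
 */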
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}
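/*
 * Issue a timeout request: compute its target CQ sequence (if any),
 * insert it into ->timeout_list, which is kept sorted so the entry
 * needed first sits at the head, then start the hrtimer.
 */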
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events that need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}
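/*
 * Arm a previously prepared linked timeout, unless the request it guards
 * already finished (timeout->head was cleared).
 */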
void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to setup the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}
/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock first to keep locking ordering.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}