#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};
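
/*
 * Illustration (not from this file): with IORING_SETUP_DEFER_TASKRUN, a
 * task waiting for N completions is not woken for each lazy-wake item;
 * the wakeup is deferred until N such task_work items have been queued.
 */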

enum {
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * is set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
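
/*
 * Usage sketch (hypothetical issue handler, for illustration only): a
 * handler that queued the I/O elsewhere and will post the completion
 * itself later returns IOU_ISSUE_SKIP_COMPLETE so the core posts no CQE.
 * foo_queue_async() is a made-up helper:
 *
 *	static int foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		if (!foo_queue_async(req))
 *			return -EAGAIN;
 *		// completion will be posted by the async callback
 *		return IOU_ISSUE_SKIP_COMPLETE;
 *	}
 */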

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
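
/*
 * Usage sketch (illustrative): a multishot handler posting an extra CQE
 * that is not tied to the request's own completion; if posting fails it
 * can end multishot mode:
 *
 *	if (!io_post_aux_cqe(ctx, req->cqe.user_data, res,
 *			     IORING_CQE_F_MORE))
 *		return IOU_STOP_MULTISHOT;
 */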

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
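
/*
 * Usage sketch (error handling abbreviated): pinning a user buffer, e.g.
 * for buffer registration; io_pin_pages() returns an ERR_PTR on failure:
 *
 *	int npages;
 *	struct page **pages = io_pin_pages(ubuf, len, &npages);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */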

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		lockdep_assert(in_task());				\
									\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}
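
/*
 * Sketch (illustrative): a single-CQE request that wants the batched
 * wakeup behaviour described by IOU_F_TWQ_LAZY_WAKE would queue its
 * task_work with:
 *
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 */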

#define io_for_each_link(pos, head)					\
	for (pos = (head); pos; pos = pos->link)
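
/*
 * Usage sketch (hypothetical caller): walking a request's link chain:
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head)
 *		if (cur->flags & REQ_F_INFLIGHT)
 *			...;
 */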

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	/* a 32-byte CQE occupies two regular CQE slots */
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
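
/*
 * Usage sketch (hypothetical opcode handler): bracket access to state
 * normally protected by ->uring_lock with the conditional lock pair:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...		// e.g. touch the fixed file table
 *	io_ring_submit_unlock(ctx, issue_flags);
 */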

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
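
/*
 * Sketch of the usual posting sequence under the CQ lock (illustrative;
 * the real paths live in io_uring.c):
 *
 *	io_fill_cqe_req(ctx, req);	// copy req->cqe into the ring
 *	io_commit_cqring(ctx);		// publish the new CQ tail
 *	io_cqring_wake(ctx);		// wake anyone waiting on cq_wait
 */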

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}
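
/*
 * Example of the unsigned ring arithmetic (made-up numbers): with
 * sq_entries == 8, sq.tail == 0x10002 and cached_sq_head == 0x10000,
 * the subtraction yields 2 pending SQEs; 32-bit wraparound of the
 * counters is handled for free by unsigned overflow.
 */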

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next,
			   struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}
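
/*
 * Usage sketch (illustrative): completing a request with an error from a
 * context that must not post the CQE directly, e.g. during cancellation:
 *
 *	io_req_queue_tw_complete(req, -ECANCELED);
 *
 * The completion then runs in the owning task via task_work.
 */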

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
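
/*
 * Concretely: a standard SQE is 64 bytes, so an IORING_SETUP_SQE128 ring
 * returns 128 here; the doubled slots give commands such as uring_cmd
 * based passthrough room for a larger payload.
 */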