#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, it should just not be an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_mem_alloc(size_t size);
void io_mem_free(void *ptr);

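/* Deferred eventfd operations, consumed by io_eventfd_ops() from RCU context. */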
enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
	IO_EVENTFD_OP_FREE_BIT,
};

void io_eventfd_ops(struct rcu_head *rcu);
void io_activate_pollwq(struct io_ring_ctx *ctx);

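/*
 * Assert that it is legal to post a CQE in the current context: the ring
 * lock for IOPOLL rings, the completion lock when completions aren't
 * task-private, and otherwise the submitter task itself (or deferred work
 * once that task is exiting). No-op unless CONFIG_PROVE_LOCKING is set.
 */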
#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

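/* Queue task_work for this request with no special add flags. */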
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

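/* Walk a request and every request linked behind it. */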
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

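/*
 * Reserve the next free CQE, refilling the cached CQE range from the ring
 * when it is exhausted. CQE32 rings consume two slots per completion.
 * Returns false if no entry could be reserved.
 */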
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

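/*
 * Copy the request's stashed completion into the next free CQE, including
 * the extra fields for CQE32 rings. Returns false if no CQE slot is
 * available.
 */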
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

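/*
 * Mark the request as failed. A failed request always posts a CQE, so if
 * it was flagged to skip its CQE, move the skip over to its linked
 * requests instead.
 */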
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

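/* Drop the file reference, unless it came from the fixed file table. */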
static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

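/*
 * Pairs with io_ring_submit_lock(): drop ->uring_lock only if this issue
 * path had to take it (IO_URING_F_UNLOCKED).
 */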
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

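/* Wake tasks polling the ring fd itself, if the poll waitqueue is active. */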
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

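/* True if every SQ ring slot holds an unconsumed SQE. */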
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

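/* Number of SQEs available for the kernel to consume, clamped to the ring size. */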
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

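/*
 * Run any pending task_work for the current task. Returns 1 if work was
 * run, 0 otherwise.
 */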
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

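/* Any task_work pending, either generic or on the ring's local work list? */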
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

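/* Take ->uring_lock in a task_work handler if @ts doesn't already hold it. */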
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

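/*
 * Consume @nr request references from the task's cached batch, refilling
 * the batch when it runs dry.
 */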
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

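/*
 * With IORING_SETUP_DEFER_TASKRUN, only the ring's submitter task is
 * allowed to run its task_work; these helpers check that constraint.
 */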
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

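/* Set the result and schedule completion of the request via task_work. */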
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif