1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqe (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
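/*
 * A minimal sketch of the application-side CQ reaping loop implied by the
 * ordering rules above. Illustrative only: cq_head, cq_tail, cqes, cq_mask
 * and consume() are placeholder names, and liburing provides equivalent
 * acquire/release helpers (io_uring_smp_load_acquire() and friends).
 *
 *	unsigned head = *cq_head;                  (app is the only writer of head)
 *	unsigned tail = smp_load_acquire(cq_tail); (pairs with the kernel's tail store)
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		consume(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);          (pairs with the control dependency
 *						    in io_get_cqe)
 */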
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <net/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
50
51 #include <linux/sched/signal.h>
52 #include <linux/fs.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/mm.h>
56 #include <linux/mman.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/bvec.h>
60 #include <linux/net.h>
61 #include <net/sock.h>
62 #include <net/af_unix.h>
63 #include <net/scm.h>
64 #include <linux/anon_inodes.h>
65 #include <linux/sched/mm.h>
66 #include <linux/uaccess.h>
67 #include <linux/nospec.h>
68 #include <linux/highmem.h>
69 #include <linux/fsnotify.h>
70 #include <linux/fadvise.h>
71 #include <linux/task_work.h>
72 #include <linux/io_uring.h>
73 #include <linux/audit.h>
74 #include <linux/security.h>
75 #include <asm/shmparam.h>
76
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/io_uring.h>
79
80 #include <uapi/linux/io_uring.h>
81
82 #include "io-wq.h"
83
84 #include "io_uring.h"
85 #include "opdef.h"
86 #include "refs.h"
87 #include "tctx.h"
88 #include "sqpoll.h"
89 #include "fdinfo.h"
90 #include "kbuf.h"
91 #include "rsrc.h"
92 #include "cancel.h"
93 #include "net.h"
94 #include "notif.h"
95 #include "waitid.h"
96 #include "futex.h"
97
98 #include "timeout.h"
99 #include "poll.h"
100 #include "rw.h"
101 #include "alloc_cache.h"
102
103 #define IORING_MAX_ENTRIES 32768
104 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
105
106 #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
107 IORING_REGISTER_LAST + IORING_OP_LAST)
108
109 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
110 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
111
112 #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
113 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
114
115 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
116 REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
117 REQ_F_ASYNC_DATA)
118
119 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
120 IO_REQ_CLEAN_FLAGS)
121
122 #define IO_TCTX_REFS_CACHE_NR (1U << 10)
123
124 #define IO_COMPL_BATCH 32
125 #define IO_REQ_ALLOC_BATCH 8
126
127 enum {
128 IO_CHECK_CQ_OVERFLOW_BIT,
129 IO_CHECK_CQ_DROPPED_BIT,
130 };
131
132 enum {
133 IO_EVENTFD_OP_SIGNAL_BIT,
134 IO_EVENTFD_OP_FREE_BIT,
135 };
136
137 struct io_defer_entry {
138 struct list_head list;
139 struct io_kiocb *req;
140 u32 seq;
141 };
142
143 /* requests with any of those set should undergo io_disarm_next() */
144 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
145 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
146
147 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
148 struct task_struct *task,
149 bool cancel_all);
150
151 static void io_queue_sqe(struct io_kiocb *req);
152
153 struct kmem_cache *req_cachep;
154
155 static int __read_mostly sysctl_io_uring_disabled;
156 static int __read_mostly sysctl_io_uring_group = -1;
157
158 #ifdef CONFIG_SYSCTL
159 static struct ctl_table kernel_io_uring_disabled_table[] = {
160 {
161 .procname = "io_uring_disabled",
162 .data = &sysctl_io_uring_disabled,
163 .maxlen = sizeof(sysctl_io_uring_disabled),
164 .mode = 0644,
165 .proc_handler = proc_dointvec_minmax,
166 .extra1 = SYSCTL_ZERO,
167 .extra2 = SYSCTL_TWO,
168 },
169 {
170 .procname = "io_uring_group",
171 .data = &sysctl_io_uring_group,
172 .maxlen = sizeof(gid_t),
173 .mode = 0644,
174 .proc_handler = proc_dointvec,
175 },
176 {},
177 };
178 #endif
179
180 struct sock *io_uring_get_socket(struct file *file)
181 {
182 #if defined(CONFIG_UNIX)
183 if (io_is_uring_fops(file)) {
184 struct io_ring_ctx *ctx = file->private_data;
185
186 return ctx->ring_sock->sk;
187 }
188 #endif
189 return NULL;
190 }
191 EXPORT_SYMBOL(io_uring_get_socket);
192
193 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
194 {
195 if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
196 ctx->submit_state.cqes_count)
197 __io_submit_flush_completions(ctx);
198 }
199
200 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
201 {
202 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
203 }
204
205 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
206 {
207 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
208 }
209
210 static bool io_match_linked(struct io_kiocb *head)
211 {
212 struct io_kiocb *req;
213
214 io_for_each_link(req, head) {
215 if (req->flags & REQ_F_INFLIGHT)
216 return true;
217 }
218 return false;
219 }
220
221 /*
222 * As io_match_task() but protected against racing with linked timeouts.
223 * User must not hold timeout_lock.
224 */
225 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
226 bool cancel_all)
227 {
228 bool matched;
229
230 if (task && head->task != task)
231 return false;
232 if (cancel_all)
233 return true;
234
235 if (head->flags & REQ_F_LINK_TIMEOUT) {
236 struct io_ring_ctx *ctx = head->ctx;
237
238 /* protect against races with linked timeouts */
239 spin_lock_irq(&ctx->timeout_lock);
240 matched = io_match_linked(head);
241 spin_unlock_irq(&ctx->timeout_lock);
242 } else {
243 matched = io_match_linked(head);
244 }
245 return matched;
246 }
247
248 static inline void req_fail_link_node(struct io_kiocb *req, int res)
249 {
250 req_set_fail(req);
251 io_req_set_res(req, res, 0);
252 }
253
254 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
255 {
256 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
257 }
258
259 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
260 {
261 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
262
263 complete(&ctx->ref_comp);
264 }
265
266 static __cold void io_fallback_req_func(struct work_struct *work)
267 {
268 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
269 fallback_work.work);
270 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
271 struct io_kiocb *req, *tmp;
272 struct io_tw_state ts = { .locked = true, };
273
274 mutex_lock(&ctx->uring_lock);
275 llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
276 req->io_task_work.func(req, &ts);
277 if (WARN_ON_ONCE(!ts.locked))
278 return;
279 io_submit_flush_completions(ctx);
280 mutex_unlock(&ctx->uring_lock);
281 }
282
283 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
284 {
285 unsigned hash_buckets = 1U << bits;
286 size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
287
288 table->hbs = kmalloc(hash_size, GFP_KERNEL);
289 if (!table->hbs)
290 return -ENOMEM;
291
292 table->hash_bits = bits;
293 init_hash_table(table, hash_buckets);
294 return 0;
295 }
296
297 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
298 {
299 struct io_ring_ctx *ctx;
300 int hash_bits;
301
302 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
303 if (!ctx)
304 return NULL;
305
306 xa_init(&ctx->io_bl_xa);
307
308 /*
309 * Use 5 bits less than the max cq entries, that should give us around
310 * 32 entries per hash list if totally full and uniformly spread, but
311 * don't keep too many buckets to not overconsume memory.
312 */
313 hash_bits = ilog2(p->cq_entries) - 5;
314 hash_bits = clamp(hash_bits, 1, 8);
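	/*
	 * Worked example of the sizing above (illustrative numbers): with
	 * p->cq_entries == 4096, ilog2(4096) == 12, so hash_bits ends up as
	 * 7 after subtracting 5 and clamping to [1, 8]. That gives 128
	 * buckets, i.e. 4096 / 128 == 32 entries per bucket when the CQ is
	 * completely full and hashing is uniform.
	 */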
315 if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
316 goto err;
317 if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
318 goto err;
319 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
320 0, GFP_KERNEL))
321 goto err;
322
323 ctx->flags = p->flags;
324 init_waitqueue_head(&ctx->sqo_sq_wait);
325 INIT_LIST_HEAD(&ctx->sqd_list);
326 INIT_LIST_HEAD(&ctx->cq_overflow_list);
327 INIT_LIST_HEAD(&ctx->io_buffers_cache);
328 INIT_HLIST_HEAD(&ctx->io_buf_list);
329 io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
330 sizeof(struct io_rsrc_node));
331 io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
332 sizeof(struct async_poll));
333 io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
334 sizeof(struct io_async_msghdr));
335 io_futex_cache_init(ctx);
336 init_completion(&ctx->ref_comp);
337 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
338 mutex_init(&ctx->uring_lock);
339 init_waitqueue_head(&ctx->cq_wait);
340 init_waitqueue_head(&ctx->poll_wq);
341 init_waitqueue_head(&ctx->rsrc_quiesce_wq);
342 spin_lock_init(&ctx->completion_lock);
343 spin_lock_init(&ctx->timeout_lock);
344 INIT_WQ_LIST(&ctx->iopoll_list);
345 INIT_LIST_HEAD(&ctx->io_buffers_comp);
346 INIT_LIST_HEAD(&ctx->defer_list);
347 INIT_LIST_HEAD(&ctx->timeout_list);
348 INIT_LIST_HEAD(&ctx->ltimeout_list);
349 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
350 init_llist_head(&ctx->work_llist);
351 INIT_LIST_HEAD(&ctx->tctx_list);
352 ctx->submit_state.free_list.next = NULL;
353 INIT_WQ_LIST(&ctx->locked_free_list);
354 INIT_HLIST_HEAD(&ctx->waitid_list);
355 #ifdef CONFIG_FUTEX
356 INIT_HLIST_HEAD(&ctx->futex_list);
357 #endif
358 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
359 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
360 INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
361 return ctx;
362 err:
363 kfree(ctx->cancel_table.hbs);
364 kfree(ctx->cancel_table_locked.hbs);
365 kfree(ctx->io_bl);
366 xa_destroy(&ctx->io_bl_xa);
367 kfree(ctx);
368 return NULL;
369 }
370
371 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
372 {
373 struct io_rings *r = ctx->rings;
374
375 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
376 ctx->cq_extra--;
377 }
378
379 static bool req_need_defer(struct io_kiocb *req, u32 seq)
380 {
381 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
382 struct io_ring_ctx *ctx = req->ctx;
383
384 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
385 }
386
387 return false;
388 }
389
390 static void io_clean_op(struct io_kiocb *req)
391 {
392 if (req->flags & REQ_F_BUFFER_SELECTED) {
393 spin_lock(&req->ctx->completion_lock);
394 io_put_kbuf_comp(req);
395 spin_unlock(&req->ctx->completion_lock);
396 }
397
398 if (req->flags & REQ_F_NEED_CLEANUP) {
399 const struct io_cold_def *def = &io_cold_defs[req->opcode];
400
401 if (def->cleanup)
402 def->cleanup(req);
403 }
404 if ((req->flags & REQ_F_POLLED) && req->apoll) {
405 kfree(req->apoll->double_poll);
406 kfree(req->apoll);
407 req->apoll = NULL;
408 }
409 if (req->flags & REQ_F_INFLIGHT) {
410 struct io_uring_task *tctx = req->task->io_uring;
411
412 atomic_dec(&tctx->inflight_tracked);
413 }
414 if (req->flags & REQ_F_CREDS)
415 put_cred(req->creds);
416 if (req->flags & REQ_F_ASYNC_DATA) {
417 kfree(req->async_data);
418 req->async_data = NULL;
419 }
420 req->flags &= ~IO_REQ_CLEAN_FLAGS;
421 }
422
423 static inline void io_req_track_inflight(struct io_kiocb *req)
424 {
425 if (!(req->flags & REQ_F_INFLIGHT)) {
426 req->flags |= REQ_F_INFLIGHT;
427 atomic_inc(&req->task->io_uring->inflight_tracked);
428 }
429 }
430
431 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
432 {
433 if (WARN_ON_ONCE(!req->link))
434 return NULL;
435
436 req->flags &= ~REQ_F_ARM_LTIMEOUT;
437 req->flags |= REQ_F_LINK_TIMEOUT;
438
439 /* linked timeouts should have two refs once prep'ed */
440 io_req_set_refcount(req);
441 __io_req_set_refcount(req->link, 2);
442 return req->link;
443 }
444
445 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
446 {
447 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
448 return NULL;
449 return __io_prep_linked_timeout(req);
450 }
451
452 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
453 {
454 io_queue_linked_timeout(__io_prep_linked_timeout(req));
455 }
456
457 static inline void io_arm_ltimeout(struct io_kiocb *req)
458 {
459 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
460 __io_arm_ltimeout(req);
461 }
462
463 static void io_prep_async_work(struct io_kiocb *req)
464 {
465 const struct io_issue_def *def = &io_issue_defs[req->opcode];
466 struct io_ring_ctx *ctx = req->ctx;
467
468 if (!(req->flags & REQ_F_CREDS)) {
469 req->flags |= REQ_F_CREDS;
470 req->creds = get_current_cred();
471 }
472
473 req->work.list.next = NULL;
474 req->work.flags = 0;
475 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
476 if (req->flags & REQ_F_FORCE_ASYNC)
477 req->work.flags |= IO_WQ_WORK_CONCURRENT;
478
479 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
480 req->flags |= io_file_get_flags(req->file);
481
482 if (req->file && (req->flags & REQ_F_ISREG)) {
483 bool should_hash = def->hash_reg_file;
484
485 /* don't serialize this request if the fs doesn't need it */
486 if (should_hash && (req->file->f_flags & O_DIRECT) &&
487 (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
488 should_hash = false;
489 if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
490 io_wq_hash_work(&req->work, file_inode(req->file));
491 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
492 if (def->unbound_nonreg_file)
493 req->work.flags |= IO_WQ_WORK_UNBOUND;
494 }
495 }
496
497 static void io_prep_async_link(struct io_kiocb *req)
498 {
499 struct io_kiocb *cur;
500
501 if (req->flags & REQ_F_LINK_TIMEOUT) {
502 struct io_ring_ctx *ctx = req->ctx;
503
504 spin_lock_irq(&ctx->timeout_lock);
505 io_for_each_link(cur, req)
506 io_prep_async_work(cur);
507 spin_unlock_irq(&ctx->timeout_lock);
508 } else {
509 io_for_each_link(cur, req)
510 io_prep_async_work(cur);
511 }
512 }
513
514 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
515 {
516 struct io_kiocb *link = io_prep_linked_timeout(req);
517 struct io_uring_task *tctx = req->task->io_uring;
518
519 BUG_ON(!tctx);
520 BUG_ON(!tctx->io_wq);
521
522 /* init ->work of the whole link before punting */
523 io_prep_async_link(req);
524
525 /*
526 * Not expected to happen, but if we do have a bug where this _can_
527 * happen, catch it here and ensure the request is marked as
528 * canceled. That will make io-wq go through the usual work cancel
529 * procedure rather than attempt to run this request (or create a new
530 * worker for it).
531 */
532 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
533 req->work.flags |= IO_WQ_WORK_CANCEL;
534
535 trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
536 io_wq_enqueue(tctx->io_wq, &req->work);
537 if (link)
538 io_queue_linked_timeout(link);
539 }
540
541 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
542 {
543 while (!list_empty(&ctx->defer_list)) {
544 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
545 struct io_defer_entry, list);
546
547 if (req_need_defer(de->req, de->seq))
548 break;
549 list_del_init(&de->list);
550 io_req_task_queue(de->req);
551 kfree(de);
552 }
553 }
554
555
556 static void io_eventfd_ops(struct rcu_head *rcu)
557 {
558 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
559 int ops = atomic_xchg(&ev_fd->ops, 0);
560
561 if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
562 eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
563
564 	/* IO_EVENTFD_OP_FREE_BIT may not be set here, depending on callback
565 	 * ordering in a race, but if references are 0 we know we have to free
566 * it regardless.
567 */
568 if (atomic_dec_and_test(&ev_fd->refs)) {
569 eventfd_ctx_put(ev_fd->cq_ev_fd);
570 kfree(ev_fd);
571 }
572 }
573
574 static void io_eventfd_signal(struct io_ring_ctx *ctx)
575 {
576 struct io_ev_fd *ev_fd = NULL;
577
578 rcu_read_lock();
579 /*
580 	 * rcu_dereference ctx->io_ev_fd once and use it both for checking
581 	 * and for eventfd_signal
582 */
583 ev_fd = rcu_dereference(ctx->io_ev_fd);
584
585 /*
586 	 * Check again if ev_fd exists in case an io_eventfd_unregister call
587 * completed between the NULL check of ctx->io_ev_fd at the start of
588 * the function and rcu_read_lock.
589 */
590 if (unlikely(!ev_fd))
591 goto out;
592 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
593 goto out;
594 if (ev_fd->eventfd_async && !io_wq_current_is_worker())
595 goto out;
596
597 if (likely(eventfd_signal_allowed())) {
598 eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
599 } else {
600 atomic_inc(&ev_fd->refs);
601 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
602 call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
603 else
604 atomic_dec(&ev_fd->refs);
605 }
606
607 out:
608 rcu_read_unlock();
609 }
610
611 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
612 {
613 bool skip;
614
615 spin_lock(&ctx->completion_lock);
616
617 /*
618 * Eventfd should only get triggered when at least one event has been
619 * posted. Some applications rely on the eventfd notification count
620 * only changing IFF a new CQE has been added to the CQ ring. There's
621 	 * no dependency on a 1:1 relationship between how many times this
622 	 * function is called (and hence the eventfd count) and the number of
623 	 * CQEs posted to the CQ ring.
624 */
625 skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
626 ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
627 spin_unlock(&ctx->completion_lock);
628 if (skip)
629 return;
630
631 io_eventfd_signal(ctx);
632 }
633
634 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
635 {
636 if (ctx->poll_activated)
637 io_poll_wq_wake(ctx);
638 if (ctx->off_timeout_used)
639 io_flush_timeouts(ctx);
640 if (ctx->drain_active) {
641 spin_lock(&ctx->completion_lock);
642 io_queue_deferred(ctx);
643 spin_unlock(&ctx->completion_lock);
644 }
645 if (ctx->has_evfd)
646 io_eventfd_flush_signal(ctx);
647 }
648
649 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
650 {
651 if (!ctx->lockless_cq)
652 spin_lock(&ctx->completion_lock);
653 }
654
655 static inline void io_cq_lock(struct io_ring_ctx *ctx)
656 __acquires(ctx->completion_lock)
657 {
658 spin_lock(&ctx->completion_lock);
659 }
660
661 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
662 {
663 io_commit_cqring(ctx);
664 if (!ctx->task_complete) {
665 if (!ctx->lockless_cq)
666 spin_unlock(&ctx->completion_lock);
667 /* IOPOLL rings only need to wake up if it's also SQPOLL */
668 if (!ctx->syscall_iopoll)
669 io_cqring_wake(ctx);
670 }
671 io_commit_cqring_flush(ctx);
672 }
673
674 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
675 __releases(ctx->completion_lock)
676 {
677 io_commit_cqring(ctx);
678 spin_unlock(&ctx->completion_lock);
679 io_cqring_wake(ctx);
680 io_commit_cqring_flush(ctx);
681 }
682
683 /* Drop all backlogged overflow entries without posting them to the CQ ring */
684 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
685 {
686 struct io_overflow_cqe *ocqe;
687 LIST_HEAD(list);
688
689 spin_lock(&ctx->completion_lock);
690 list_splice_init(&ctx->cq_overflow_list, &list);
691 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
692 spin_unlock(&ctx->completion_lock);
693
694 while (!list_empty(&list)) {
695 ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
696 list_del(&ocqe->list);
697 kfree(ocqe);
698 }
699 }
700
701 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
702 {
703 size_t cqe_size = sizeof(struct io_uring_cqe);
704
705 if (__io_cqring_events(ctx) == ctx->cq_entries)
706 return;
707
708 if (ctx->flags & IORING_SETUP_CQE32)
709 cqe_size <<= 1;
710
711 io_cq_lock(ctx);
712 while (!list_empty(&ctx->cq_overflow_list)) {
713 struct io_uring_cqe *cqe;
714 struct io_overflow_cqe *ocqe;
715
716 if (!io_get_cqe_overflow(ctx, &cqe, true))
717 break;
718 ocqe = list_first_entry(&ctx->cq_overflow_list,
719 struct io_overflow_cqe, list);
720 memcpy(cqe, &ocqe->cqe, cqe_size);
721 list_del(&ocqe->list);
722 kfree(ocqe);
723 }
724
725 if (list_empty(&ctx->cq_overflow_list)) {
726 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
727 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
728 }
729 io_cq_unlock_post(ctx);
730 }
731
732 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
733 {
734 /* iopoll syncs against uring_lock, not completion_lock */
735 if (ctx->flags & IORING_SETUP_IOPOLL)
736 mutex_lock(&ctx->uring_lock);
737 __io_cqring_overflow_flush(ctx);
738 if (ctx->flags & IORING_SETUP_IOPOLL)
739 mutex_unlock(&ctx->uring_lock);
740 }
741
742 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
743 {
744 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
745 io_cqring_do_overflow_flush(ctx);
746 }
747
748 /* can be called by any task */
749 static void io_put_task_remote(struct task_struct *task)
750 {
751 struct io_uring_task *tctx = task->io_uring;
752
753 percpu_counter_sub(&tctx->inflight, 1);
754 if (unlikely(atomic_read(&tctx->in_cancel)))
755 wake_up(&tctx->wait);
756 put_task_struct(task);
757 }
758
759 /* used by a task to put its own references */
760 static void io_put_task_local(struct task_struct *task)
761 {
762 task->io_uring->cached_refs++;
763 }
764
765 /* must be called somewhat shortly after putting a request */
766 static inline void io_put_task(struct task_struct *task)
767 {
768 if (likely(task == current))
769 io_put_task_local(task);
770 else
771 io_put_task_remote(task);
772 }
773
774 void io_task_refs_refill(struct io_uring_task *tctx)
775 {
776 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
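	/*
	 * Illustrative example: if cached_refs has dropped to -3 (three more
	 * refs handed out than were cached), refill is 3 + 1024, so after
	 * the update below cached_refs sits at exactly IO_TCTX_REFS_CACHE_NR.
	 */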
777
778 percpu_counter_add(&tctx->inflight, refill);
779 refcount_add(refill, &current->usage);
780 tctx->cached_refs += refill;
781 }
782
783 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
784 {
785 struct io_uring_task *tctx = task->io_uring;
786 unsigned int refs = tctx->cached_refs;
787
788 if (refs) {
789 tctx->cached_refs = 0;
790 percpu_counter_sub(&tctx->inflight, refs);
791 put_task_struct_many(task, refs);
792 }
793 }
794
795 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
796 s32 res, u32 cflags, u64 extra1, u64 extra2)
797 {
798 struct io_overflow_cqe *ocqe;
799 size_t ocq_size = sizeof(struct io_overflow_cqe);
800 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
801
802 lockdep_assert_held(&ctx->completion_lock);
803
804 if (is_cqe32)
805 ocq_size += sizeof(struct io_uring_cqe);
806
807 ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
808 trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
809 if (!ocqe) {
810 /*
811 * If we're in ring overflow flush mode, or in task cancel mode,
812 * or cannot allocate an overflow entry, then we need to drop it
813 * on the floor.
814 */
815 io_account_cq_overflow(ctx);
816 set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
817 return false;
818 }
819 if (list_empty(&ctx->cq_overflow_list)) {
820 set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
821 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
822
823 }
824 ocqe->cqe.user_data = user_data;
825 ocqe->cqe.res = res;
826 ocqe->cqe.flags = cflags;
827 if (is_cqe32) {
828 ocqe->cqe.big_cqe[0] = extra1;
829 ocqe->cqe.big_cqe[1] = extra2;
830 }
831 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
832 return true;
833 }
834
835 void io_req_cqe_overflow(struct io_kiocb *req)
836 {
837 io_cqring_event_overflow(req->ctx, req->cqe.user_data,
838 req->cqe.res, req->cqe.flags,
839 req->big_cqe.extra1, req->big_cqe.extra2);
840 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
841 }
842
843 /*
844 * writes to the cq entry need to come after reading head; the
845 * control dependency is enough as we're using WRITE_ONCE to
846 * fill the cq entry
847 */
848 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
849 {
850 struct io_rings *rings = ctx->rings;
851 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
852 unsigned int free, queued, len;
853
854 /*
855 * Posting into the CQ when there are pending overflowed CQEs may break
856 * ordering guarantees, which will affect links, F_MORE users and more.
857 * Force overflow the completion.
858 */
859 if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
860 return false;
861
862 	/* userspace may cheat by modifying the tail, be safe and do min */
863 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
864 free = ctx->cq_entries - queued;
865 /* we need a contiguous range, limit based on the current array offset */
866 len = min(free, ctx->cq_entries - off);
867 if (!len)
868 return false;
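	/*
	 * Worked example of the math above (illustrative numbers): with
	 * cq_entries == 8, cached_cq_tail == 13 and cq.head == 10, off is
	 * 13 & 7 == 5, queued == 3, free == 5, and len == min(5, 8 - 5) == 3,
	 * so the cache below covers cqes[5..7] and refills again once the
	 * tail wraps around to the start of the array.
	 */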
869
870 if (ctx->flags & IORING_SETUP_CQE32) {
871 off <<= 1;
872 len <<= 1;
873 }
874
875 ctx->cqe_cached = &rings->cqes[off];
876 ctx->cqe_sentinel = ctx->cqe_cached + len;
877 return true;
878 }
879
880 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
881 u32 cflags)
882 {
883 struct io_uring_cqe *cqe;
884
885 ctx->cq_extra++;
886
887 /*
888 * If we can't get a cq entry, userspace overflowed the
889 * submission (by quite a lot). Increment the overflow count in
890 * the ring.
891 */
892 if (likely(io_get_cqe(ctx, &cqe))) {
893 trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
894
895 WRITE_ONCE(cqe->user_data, user_data);
896 WRITE_ONCE(cqe->res, res);
897 WRITE_ONCE(cqe->flags, cflags);
898
899 if (ctx->flags & IORING_SETUP_CQE32) {
900 WRITE_ONCE(cqe->big_cqe[0], 0);
901 WRITE_ONCE(cqe->big_cqe[1], 0);
902 }
903 return true;
904 }
905 return false;
906 }
907
908 static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
909 __must_hold(&ctx->uring_lock)
910 {
911 struct io_submit_state *state = &ctx->submit_state;
912 unsigned int i;
913
914 lockdep_assert_held(&ctx->uring_lock);
915 for (i = 0; i < state->cqes_count; i++) {
916 struct io_uring_cqe *cqe = &ctx->completion_cqes[i];
917
918 if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
919 if (ctx->lockless_cq) {
920 spin_lock(&ctx->completion_lock);
921 io_cqring_event_overflow(ctx, cqe->user_data,
922 cqe->res, cqe->flags, 0, 0);
923 spin_unlock(&ctx->completion_lock);
924 } else {
925 io_cqring_event_overflow(ctx, cqe->user_data,
926 cqe->res, cqe->flags, 0, 0);
927 }
928 }
929 }
930 state->cqes_count = 0;
931 }
932
933 static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
934 bool allow_overflow)
935 {
936 bool filled;
937
938 io_cq_lock(ctx);
939 filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
940 if (!filled && allow_overflow)
941 filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
942
943 io_cq_unlock_post(ctx);
944 return filled;
945 }
946
947 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
948 {
949 return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
950 }
951
952 /*
953 * A helper for multishot requests posting additional CQEs.
954 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
955 */
956 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
957 {
958 struct io_ring_ctx *ctx = req->ctx;
959 u64 user_data = req->cqe.user_data;
960 struct io_uring_cqe *cqe;
961
962 if (!defer)
963 return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
964
965 lockdep_assert_held(&ctx->uring_lock);
966
967 if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) {
968 __io_cq_lock(ctx);
969 __io_flush_post_cqes(ctx);
970 /* no need to flush - flush is deferred */
971 __io_cq_unlock_post(ctx);
972 }
973
974 	/* For deferred completions this is not as strict as it is otherwise,
975 	 * however its main job is to prevent unbounded posted completions,
976 * and in that it works just as well.
977 */
978 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
979 return false;
980
981 cqe = &ctx->completion_cqes[ctx->submit_state.cqes_count++];
982 cqe->user_data = user_data;
983 cqe->res = res;
984 cqe->flags = cflags;
985 return true;
986 }
987
988 static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
989 {
990 struct io_ring_ctx *ctx = req->ctx;
991 struct io_rsrc_node *rsrc_node = NULL;
992
993 io_cq_lock(ctx);
994 if (!(req->flags & REQ_F_CQE_SKIP)) {
995 if (!io_fill_cqe_req(ctx, req))
996 io_req_cqe_overflow(req);
997 }
998
999 /*
1000 * If we're the last reference to this request, add to our locked
1001 * free_list cache.
1002 */
1003 if (req_ref_put_and_test(req)) {
1004 if (req->flags & IO_REQ_LINK_FLAGS) {
1005 if (req->flags & IO_DISARM_MASK)
1006 io_disarm_next(req);
1007 if (req->link) {
1008 io_req_task_queue(req->link);
1009 req->link = NULL;
1010 }
1011 }
1012 io_put_kbuf_comp(req);
1013 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1014 io_clean_op(req);
1015 io_put_file(req);
1016
1017 rsrc_node = req->rsrc_node;
1018 /*
1019 * Selected buffer deallocation in io_clean_op() assumes that
1020 * we don't hold ->completion_lock. Clean them here to avoid
1021 * deadlocks.
1022 */
1023 io_put_task_remote(req->task);
1024 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
1025 ctx->locked_free_nr++;
1026 }
1027 io_cq_unlock_post(ctx);
1028
1029 if (rsrc_node) {
1030 io_ring_submit_lock(ctx, issue_flags);
1031 io_put_rsrc_node(ctx, rsrc_node);
1032 io_ring_submit_unlock(ctx, issue_flags);
1033 }
1034 }
1035
1036 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
1037 {
1038 if (req->ctx->task_complete && req->ctx->submitter_task != current) {
1039 req->io_task_work.func = io_req_task_complete;
1040 io_req_task_work_add(req);
1041 } else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
1042 !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
1043 __io_req_complete_post(req, issue_flags);
1044 } else {
1045 struct io_ring_ctx *ctx = req->ctx;
1046
1047 mutex_lock(&ctx->uring_lock);
1048 __io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
1049 mutex_unlock(&ctx->uring_lock);
1050 }
1051 }
1052
1053 void io_req_defer_failed(struct io_kiocb *req, s32 res)
1054 __must_hold(&ctx->uring_lock)
1055 {
1056 const struct io_cold_def *def = &io_cold_defs[req->opcode];
1057
1058 lockdep_assert_held(&req->ctx->uring_lock);
1059
1060 req_set_fail(req);
1061 io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
1062 if (def->fail)
1063 def->fail(req);
1064 io_req_complete_defer(req);
1065 }
1066
1067 /*
1068 * Don't initialise the fields below on every allocation, but do that in
1069 * advance and keep them valid across allocations.
1070 */
1071 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1072 {
1073 req->ctx = ctx;
1074 req->link = NULL;
1075 req->async_data = NULL;
1076 /* not necessary, but safer to zero */
1077 memset(&req->cqe, 0, sizeof(req->cqe));
1078 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
1079 }
1080
1081 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1082 struct io_submit_state *state)
1083 {
1084 spin_lock(&ctx->completion_lock);
1085 wq_list_splice(&ctx->locked_free_list, &state->free_list);
1086 ctx->locked_free_nr = 0;
1087 spin_unlock(&ctx->completion_lock);
1088 }
1089
1090 /*
1091 * A request might get retired back into the request caches even before opcode
1092 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1093 * Because of that, io_alloc_req() should be called only under ->uring_lock
1094 * and with extra caution to not get a request that is still worked on.
1095 */
1096 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
1097 __must_hold(&ctx->uring_lock)
1098 {
1099 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1100 void *reqs[IO_REQ_ALLOC_BATCH];
1101 int ret, i;
1102
1103 /*
1104 * If we have more than a batch's worth of requests in our IRQ side
1105 * locked cache, grab the lock and move them over to our submission
1106 * side cache.
1107 */
1108 if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
1109 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
1110 if (!io_req_cache_empty(ctx))
1111 return true;
1112 }
1113
1114 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
1115
1116 /*
1117 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1118 * retry single alloc to be on the safe side.
1119 */
1120 if (unlikely(ret <= 0)) {
1121 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1122 if (!reqs[0])
1123 return false;
1124 ret = 1;
1125 }
1126
1127 percpu_ref_get_many(&ctx->refs, ret);
1128 for (i = 0; i < ret; i++) {
1129 struct io_kiocb *req = reqs[i];
1130
1131 io_preinit_req(req, ctx);
1132 io_req_add_to_cache(req, ctx);
1133 }
1134 return true;
1135 }
1136
1137 __cold void io_free_req(struct io_kiocb *req)
1138 {
1139 /* refs were already put, restore them for io_req_task_complete() */
1140 req->flags &= ~REQ_F_REFCOUNT;
1141 /* we only want to free it, don't post CQEs */
1142 req->flags |= REQ_F_CQE_SKIP;
1143 req->io_task_work.func = io_req_task_complete;
1144 io_req_task_work_add(req);
1145 }
1146
1147 static void __io_req_find_next_prep(struct io_kiocb *req)
1148 {
1149 struct io_ring_ctx *ctx = req->ctx;
1150
1151 spin_lock(&ctx->completion_lock);
1152 io_disarm_next(req);
1153 spin_unlock(&ctx->completion_lock);
1154 }
1155
1156 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1157 {
1158 struct io_kiocb *nxt;
1159
1160 /*
1161 * If LINK is set, we have dependent requests in this chain. If we
1162 * didn't fail this request, queue the first one up, moving any other
1163 * dependencies to the next request. In case of failure, fail the rest
1164 * of the chain.
1165 */
1166 if (unlikely(req->flags & IO_DISARM_MASK))
1167 __io_req_find_next_prep(req);
1168 nxt = req->link;
1169 req->link = NULL;
1170 return nxt;
1171 }
1172
1173 static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1174 {
1175 if (!ctx)
1176 return;
1177 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1178 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1179 if (ts->locked) {
1180 io_submit_flush_completions(ctx);
1181 mutex_unlock(&ctx->uring_lock);
1182 ts->locked = false;
1183 }
1184 percpu_ref_put(&ctx->refs);
1185 }
1186
1187 static unsigned int handle_tw_list(struct llist_node *node,
1188 struct io_ring_ctx **ctx,
1189 struct io_tw_state *ts,
1190 struct llist_node *last)
1191 {
1192 unsigned int count = 0;
1193
1194 while (node && node != last) {
1195 struct llist_node *next = node->next;
1196 struct io_kiocb *req = container_of(node, struct io_kiocb,
1197 io_task_work.node);
1198
1199 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1200
1201 if (req->ctx != *ctx) {
1202 ctx_flush_and_put(*ctx, ts);
1203 *ctx = req->ctx;
1204 /* if not contended, grab and improve batching */
1205 ts->locked = mutex_trylock(&(*ctx)->uring_lock);
1206 percpu_ref_get(&(*ctx)->refs);
1207 }
1208 INDIRECT_CALL_2(req->io_task_work.func,
1209 io_poll_task_func, io_req_rw_complete,
1210 req, ts);
1211 node = next;
1212 count++;
1213 if (unlikely(need_resched())) {
1214 ctx_flush_and_put(*ctx, ts);
1215 *ctx = NULL;
1216 cond_resched();
1217 }
1218 }
1219
1220 return count;
1221 }
1222
1223 /**
1224 * io_llist_xchg - swap all entries in a lock-less list
1225  * @head: the head of the lock-less list from which all entries are detached
1226  * @new: new entry as the head of the list
1227  *
1228  * If the list is empty, return NULL; otherwise, return a pointer to the first entry.
1229 * The order of entries returned is from the newest to the oldest added one.
1230 */
1231 static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1232 struct llist_node *new)
1233 {
1234 return xchg(&head->first, new);
1235 }
1236
1237 /**
1238 * io_llist_cmpxchg - possibly swap all entries in a lock-less list
1239  * @head: the head of the lock-less list whose first entry may be replaced
1240  * @old: expected old value of the first entry of the list
1241  * @new: new entry as the head of the list
1242  *
1243  * Perform a cmpxchg on the first entry of the list.
1244 */
1245
1246 static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
1247 struct llist_node *old,
1248 struct llist_node *new)
1249 {
1250 return cmpxchg(&head->first, old, new);
1251 }
1252
1253 static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
1254 {
1255 struct llist_node *node = llist_del_all(&tctx->task_list);
1256 struct io_ring_ctx *last_ctx = NULL;
1257 struct io_kiocb *req;
1258
1259 while (node) {
1260 req = container_of(node, struct io_kiocb, io_task_work.node);
1261 node = node->next;
1262 if (sync && last_ctx != req->ctx) {
1263 if (last_ctx) {
1264 flush_delayed_work(&last_ctx->fallback_work);
1265 percpu_ref_put(&last_ctx->refs);
1266 }
1267 last_ctx = req->ctx;
1268 percpu_ref_get(&last_ctx->refs);
1269 }
1270 if (llist_add(&req->io_task_work.node,
1271 &req->ctx->fallback_llist))
1272 schedule_delayed_work(&req->ctx->fallback_work, 1);
1273 }
1274
1275 if (last_ctx) {
1276 flush_delayed_work(&last_ctx->fallback_work);
1277 percpu_ref_put(&last_ctx->refs);
1278 }
1279 }
1280
1281 void tctx_task_work(struct callback_head *cb)
1282 {
1283 struct io_tw_state ts = {};
1284 struct io_ring_ctx *ctx = NULL;
1285 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1286 task_work);
1287 struct llist_node fake = {};
1288 struct llist_node *node;
1289 unsigned int loops = 0;
1290 unsigned int count = 0;
1291
1292 if (unlikely(current->flags & PF_EXITING)) {
1293 io_fallback_tw(tctx, true);
1294 return;
1295 }
1296
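	/*
	 * Note on the fake node below: it is swapped in as the list head so
	 * that task_work arriving while this batch runs keeps queueing in
	 * front of it, and the cmpxchg at the bottom of the loop only
	 * detaches it once no new entries have shown up.
	 */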
1297 do {
1298 loops++;
1299 node = io_llist_xchg(&tctx->task_list, &fake);
1300 count += handle_tw_list(node, &ctx, &ts, &fake);
1301
1302 /* skip expensive cmpxchg if there are items in the list */
1303 if (READ_ONCE(tctx->task_list.first) != &fake)
1304 continue;
1305 if (ts.locked && !wq_list_empty(&ctx->submit_state.compl_reqs)) {
1306 io_submit_flush_completions(ctx);
1307 if (READ_ONCE(tctx->task_list.first) != &fake)
1308 continue;
1309 }
1310 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1311 } while (node != &fake);
1312
1313 ctx_flush_and_put(ctx, &ts);
1314
1315 /* relaxed read is enough as only the task itself sets ->in_cancel */
1316 if (unlikely(atomic_read(&tctx->in_cancel)))
1317 io_uring_drop_tctx_refs(current);
1318
1319 trace_io_uring_task_work_run(tctx, count, loops);
1320 }
1321
1322 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
1323 {
1324 struct io_ring_ctx *ctx = req->ctx;
1325 unsigned nr_wait, nr_tw, nr_tw_prev;
1326 struct llist_node *first;
1327
1328 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
1329 flags &= ~IOU_F_TWQ_LAZY_WAKE;
1330
1331 first = READ_ONCE(ctx->work_llist.first);
1332 do {
1333 nr_tw_prev = 0;
1334 if (first) {
1335 struct io_kiocb *first_req = container_of(first,
1336 struct io_kiocb,
1337 io_task_work.node);
1338 /*
1339 * Might be executed at any moment, rely on
1340 * SLAB_TYPESAFE_BY_RCU to keep it alive.
1341 */
1342 nr_tw_prev = READ_ONCE(first_req->nr_tw);
1343 }
1344 nr_tw = nr_tw_prev + 1;
1345 /* Large enough to fail the nr_wait comparison below */
1346 if (!(flags & IOU_F_TWQ_LAZY_WAKE))
1347 nr_tw = -1U;
1348
1349 req->nr_tw = nr_tw;
1350 req->io_task_work.node.next = first;
1351 } while (!try_cmpxchg(&ctx->work_llist.first, &first,
1352 &req->io_task_work.node));
1353
1354 if (!first) {
1355 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1356 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1357 if (ctx->has_evfd)
1358 io_eventfd_signal(ctx);
1359 }
1360
1361 nr_wait = atomic_read(&ctx->cq_wait_nr);
1362 /* no one is waiting */
1363 if (!nr_wait)
1364 return;
1365 /* either not enough or the previous add has already woken it up */
1366 if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
1367 return;
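	/*
	 * Illustrative example of the lazy-wake accounting (numbers made
	 * up): with cq_wait_nr set to 4 by the waiter, lazily queued items
	 * carrying nr_tw of 1, 2 or 3 all return above, and the add that
	 * brings nr_tw to 4 falls through to the wakeup below, since the
	 * previous add (nr_tw_prev == 3) had not reached the target yet.
	 */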
1368 /* pairs with set_current_state() in io_cqring_wait() */
1369 smp_mb__after_atomic();
1370 wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
1371 }
1372
1373 static void io_req_normal_work_add(struct io_kiocb *req)
1374 {
1375 struct io_uring_task *tctx = req->task->io_uring;
1376 struct io_ring_ctx *ctx = req->ctx;
1377
1378 /* task_work already pending, we're done */
1379 if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1380 return;
1381
1382 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1383 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1384
1385 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1386 return;
1387
1388 io_fallback_tw(tctx, false);
1389 }
1390
1391 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1392 {
1393 if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1394 rcu_read_lock();
1395 io_req_local_work_add(req, flags);
1396 rcu_read_unlock();
1397 } else {
1398 io_req_normal_work_add(req);
1399 }
1400 }
1401
1402 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1403 {
1404 struct llist_node *node;
1405
1406 node = llist_del_all(&ctx->work_llist);
1407 while (node) {
1408 struct io_kiocb *req = container_of(node, struct io_kiocb,
1409 io_task_work.node);
1410
1411 node = node->next;
1412 io_req_normal_work_add(req);
1413 }
1414 }
1415
1416 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1417 {
1418 struct llist_node *node;
1419 unsigned int loops = 0;
1420 int ret = 0;
1421
1422 if (WARN_ON_ONCE(ctx->submitter_task != current))
1423 return -EEXIST;
1424 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1425 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1426 again:
1427 /*
1428 * llists are in reverse order, flip it back the right way before
1429 * running the pending items.
1430 */
1431 node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
1432 while (node) {
1433 struct llist_node *next = node->next;
1434 struct io_kiocb *req = container_of(node, struct io_kiocb,
1435 io_task_work.node);
1436 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1437 INDIRECT_CALL_2(req->io_task_work.func,
1438 io_poll_task_func, io_req_rw_complete,
1439 req, ts);
1440 ret++;
1441 node = next;
1442 }
1443 loops++;
1444
1445 if (!llist_empty(&ctx->work_llist))
1446 goto again;
1447 if (ts->locked) {
1448 io_submit_flush_completions(ctx);
1449 if (!llist_empty(&ctx->work_llist))
1450 goto again;
1451 }
1452 trace_io_uring_local_work_run(ctx, ret, loops);
1453 return ret;
1454 }
1455
1456 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
1457 {
1458 struct io_tw_state ts = { .locked = true, };
1459 int ret;
1460
1461 if (llist_empty(&ctx->work_llist))
1462 return 0;
1463
1464 ret = __io_run_local_work(ctx, &ts);
1465 /* shouldn't happen! */
1466 if (WARN_ON_ONCE(!ts.locked))
1467 mutex_lock(&ctx->uring_lock);
1468 return ret;
1469 }
1470
1471 static int io_run_local_work(struct io_ring_ctx *ctx)
1472 {
1473 struct io_tw_state ts = {};
1474 int ret;
1475
1476 ts.locked = mutex_trylock(&ctx->uring_lock);
1477 ret = __io_run_local_work(ctx, &ts);
1478 if (ts.locked)
1479 mutex_unlock(&ctx->uring_lock);
1480
1481 return ret;
1482 }
1483
1484 static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
1485 {
1486 io_tw_lock(req->ctx, ts);
1487 io_req_defer_failed(req, req->cqe.res);
1488 }
1489
1490 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
1491 {
1492 io_tw_lock(req->ctx, ts);
1493 /* req->task == current here, checking PF_EXITING is safe */
1494 if (unlikely(req->task->flags & PF_EXITING))
1495 io_req_defer_failed(req, -EFAULT);
1496 else if (req->flags & REQ_F_FORCE_ASYNC)
1497 io_queue_iowq(req, ts);
1498 else
1499 io_queue_sqe(req);
1500 }
1501
1502 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1503 {
1504 io_req_set_res(req, ret, 0);
1505 req->io_task_work.func = io_req_task_cancel;
1506 io_req_task_work_add(req);
1507 }
1508
1509 void io_req_task_queue(struct io_kiocb *req)
1510 {
1511 req->io_task_work.func = io_req_task_submit;
1512 io_req_task_work_add(req);
1513 }
1514
1515 void io_queue_next(struct io_kiocb *req)
1516 {
1517 struct io_kiocb *nxt = io_req_find_next(req);
1518
1519 if (nxt)
1520 io_req_task_queue(nxt);
1521 }
1522
1523 static void io_free_batch_list(struct io_ring_ctx *ctx,
1524 struct io_wq_work_node *node)
1525 __must_hold(&ctx->uring_lock)
1526 {
1527 do {
1528 struct io_kiocb *req = container_of(node, struct io_kiocb,
1529 comp_list);
1530
1531 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1532 if (req->flags & REQ_F_REFCOUNT) {
1533 node = req->comp_list.next;
1534 if (!req_ref_put_and_test(req))
1535 continue;
1536 }
1537 if ((req->flags & REQ_F_POLLED) && req->apoll) {
1538 struct async_poll *apoll = req->apoll;
1539
1540 if (apoll->double_poll)
1541 kfree(apoll->double_poll);
1542 if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1543 kfree(apoll);
1544 req->flags &= ~REQ_F_POLLED;
1545 }
1546 if (req->flags & IO_REQ_LINK_FLAGS)
1547 io_queue_next(req);
1548 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1549 io_clean_op(req);
1550 }
1551 io_put_file(req);
1552
1553 io_req_put_rsrc_locked(req, ctx);
1554
1555 io_put_task(req->task);
1556 node = req->comp_list.next;
1557 io_req_add_to_cache(req, ctx);
1558 } while (node);
1559 }
1560
1561 void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1562 __must_hold(&ctx->uring_lock)
1563 {
1564 struct io_submit_state *state = &ctx->submit_state;
1565 struct io_wq_work_node *node;
1566
1567 __io_cq_lock(ctx);
1568 /* must come first to preserve CQE ordering in failure cases */
1569 if (state->cqes_count)
1570 __io_flush_post_cqes(ctx);
1571 __wq_list_for_each(node, &state->compl_reqs) {
1572 struct io_kiocb *req = container_of(node, struct io_kiocb,
1573 comp_list);
1574
1575 if (!(req->flags & REQ_F_CQE_SKIP) &&
1576 unlikely(!io_fill_cqe_req(ctx, req))) {
1577 if (ctx->lockless_cq) {
1578 spin_lock(&ctx->completion_lock);
1579 io_req_cqe_overflow(req);
1580 spin_unlock(&ctx->completion_lock);
1581 } else {
1582 io_req_cqe_overflow(req);
1583 }
1584 }
1585 }
1586 __io_cq_unlock_post(ctx);
1587
1588 if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
1589 io_free_batch_list(ctx, state->compl_reqs.first);
1590 INIT_WQ_LIST(&state->compl_reqs);
1591 }
1592 }
1593
1594 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1595 {
1596 /* See comment at the top of this file */
1597 smp_rmb();
1598 return __io_cqring_events(ctx);
1599 }
1600
1601 /*
1602 * We can't just wait for polled events to come to us, we have to actively
1603 * find and complete them.
1604 */
1605 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1606 {
1607 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1608 return;
1609
1610 mutex_lock(&ctx->uring_lock);
1611 while (!wq_list_empty(&ctx->iopoll_list)) {
1612 /* let it sleep and repeat later if can't complete a request */
1613 if (io_do_iopoll(ctx, true) == 0)
1614 break;
1615 /*
1616 		 * Ensure we allow local-to-the-cpu processing to take place;
1617 		 * in this case we need to ensure that we reap all events.
1618 		 * Also let task_work, etc. progress by releasing the mutex.
1619 */
1620 if (need_resched()) {
1621 mutex_unlock(&ctx->uring_lock);
1622 cond_resched();
1623 mutex_lock(&ctx->uring_lock);
1624 }
1625 }
1626 mutex_unlock(&ctx->uring_lock);
1627 }
1628
1629 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1630 {
1631 unsigned int nr_events = 0;
1632 unsigned long check_cq;
1633
1634 if (!io_allowed_run_tw(ctx))
1635 return -EEXIST;
1636
1637 check_cq = READ_ONCE(ctx->check_cq);
1638 if (unlikely(check_cq)) {
1639 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1640 __io_cqring_overflow_flush(ctx);
1641 /*
1642 * Similarly do not spin if we have not informed the user of any
1643 * dropped CQE.
1644 */
1645 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1646 return -EBADR;
1647 }
1648 /*
1649 * Don't enter poll loop if we already have events pending.
1650 * If we do, we can potentially be spinning for commands that
1651 * already triggered a CQE (eg in error).
1652 */
1653 if (io_cqring_events(ctx))
1654 return 0;
1655
1656 do {
1657 int ret = 0;
1658
1659 /*
1660 * If a submit got punted to a workqueue, we can have the
1661 * application entering polling for a command before it gets
1662 * issued. That app will hold the uring_lock for the duration
1663 * of the poll right here, so we need to take a breather every
1664 * now and then to ensure that the issue has a chance to add
1665 * the poll to the issued list. Otherwise we can spin here
1666 * forever, while the workqueue is stuck trying to acquire the
1667 * very same mutex.
1668 */
1669 if (wq_list_empty(&ctx->iopoll_list) ||
1670 io_task_work_pending(ctx)) {
1671 u32 tail = ctx->cached_cq_tail;
1672
1673 (void) io_run_local_work_locked(ctx);
1674
1675 if (task_work_pending(current) ||
1676 wq_list_empty(&ctx->iopoll_list)) {
1677 mutex_unlock(&ctx->uring_lock);
1678 io_run_task_work();
1679 mutex_lock(&ctx->uring_lock);
1680 }
1681 /* some requests don't go through iopoll_list */
1682 if (tail != ctx->cached_cq_tail ||
1683 wq_list_empty(&ctx->iopoll_list))
1684 break;
1685 }
1686 ret = io_do_iopoll(ctx, !min);
1687 if (unlikely(ret < 0))
1688 return ret;
1689
1690 if (task_sigpending(current))
1691 return -EINTR;
1692 if (need_resched())
1693 break;
1694
1695 nr_events += ret;
1696 } while (nr_events < min);
1697
1698 return 0;
1699 }
1700
1701 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
1702 {
1703 if (ts->locked)
1704 io_req_complete_defer(req);
1705 else
1706 io_req_complete_post(req, IO_URING_F_UNLOCKED);
1707 }
1708
1709 /*
1710 * After the iocb has been issued, it's safe to be found on the poll list.
1711 * Adding the kiocb to the list AFTER submission ensures that we don't
1712  * find it from an io_do_iopoll() thread before the issuer is done
1713 * accessing the kiocb cookie.
1714 */
1715 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1716 {
1717 struct io_ring_ctx *ctx = req->ctx;
1718 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1719
1720 /* workqueue context doesn't hold uring_lock, grab it now */
1721 if (unlikely(needs_lock))
1722 mutex_lock(&ctx->uring_lock);
1723
1724 /*
1725 	 * Track whether we have multiple files in our lists. This will impact
1726 	 * how we do polling later on: we don't spin if the requests may be
1727 	 * on different devices.
1728 */
1729 if (wq_list_empty(&ctx->iopoll_list)) {
1730 ctx->poll_multi_queue = false;
1731 } else if (!ctx->poll_multi_queue) {
1732 struct io_kiocb *list_req;
1733
1734 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1735 comp_list);
1736 if (list_req->file != req->file)
1737 ctx->poll_multi_queue = true;
1738 }
1739
1740 /*
1741 * For fast devices, IO may have already completed. If it has, add
1742 * it to the front so we find it first.
1743 */
1744 if (READ_ONCE(req->iopoll_completed))
1745 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1746 else
1747 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1748
1749 if (unlikely(needs_lock)) {
1750 /*
1751 		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
1752 		 * in sq thread task context or in io worker task context. If
1753 		 * the current task context is the sq thread, we don't need to
1754 		 * check whether we should wake up the sq thread.
1755 */
1756 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1757 wq_has_sleeper(&ctx->sq_data->wait))
1758 wake_up(&ctx->sq_data->wait);
1759
1760 mutex_unlock(&ctx->uring_lock);
1761 }
1762 }
1763
1764 unsigned int io_file_get_flags(struct file *file)
1765 {
1766 unsigned int res = 0;
1767
1768 if (S_ISREG(file_inode(file)->i_mode))
1769 res |= REQ_F_ISREG;
1770 if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
1771 res |= REQ_F_SUPPORT_NOWAIT;
1772 return res;
1773 }
1774
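/*
 * Returns true if allocating ->async_data failed (callers typically turn
 * that into -EAGAIN), false if it was allocated and attached successfully.
 */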
1775 bool io_alloc_async_data(struct io_kiocb *req)
1776 {
1777 WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
1778 req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
1779 if (req->async_data) {
1780 req->flags |= REQ_F_ASYNC_DATA;
1781 return false;
1782 }
1783 return true;
1784 }
1785
1786 int io_req_prep_async(struct io_kiocb *req)
1787 {
1788 const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
1789 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1790
1791 /* assign early for deferred execution for non-fixed file */
1792 if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
1793 req->file = io_file_get_normal(req, req->cqe.fd);
1794 if (!cdef->prep_async)
1795 return 0;
1796 if (WARN_ON_ONCE(req_has_async_data(req)))
1797 return -EFAULT;
1798 if (!def->manual_alloc) {
1799 if (io_alloc_async_data(req))
1800 return -EAGAIN;
1801 }
1802 return cdef->prep_async(req);
1803 }
1804
1805 static u32 io_get_sequence(struct io_kiocb *req)
1806 {
1807 u32 seq = req->ctx->cached_sq_head;
1808 struct io_kiocb *cur;
1809
1810 /* need original cached_sq_head, but it was increased for each req */
1811 io_for_each_link(cur, req)
1812 seq--;
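	/*
	 * Illustrative example: if cached_sq_head was 10 when a chain of
	 * three linked SQEs started being consumed, it is 13 by the time we
	 * get here, and walking the three links above brings seq back to 10,
	 * the sequence the head of the link was submitted at.
	 */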
1813 return seq;
1814 }
1815
1816 static __cold void io_drain_req(struct io_kiocb *req)
1817 __must_hold(&ctx->uring_lock)
1818 {
1819 struct io_ring_ctx *ctx = req->ctx;
1820 struct io_defer_entry *de;
1821 int ret;
1822 u32 seq = io_get_sequence(req);
1823
1824 /* Still need defer if there is pending req in defer list. */
1825 spin_lock(&ctx->completion_lock);
1826 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1827 spin_unlock(&ctx->completion_lock);
1828 queue:
1829 ctx->drain_active = false;
1830 io_req_task_queue(req);
1831 return;
1832 }
1833 spin_unlock(&ctx->completion_lock);
1834
1835 io_prep_async_link(req);
1836 de = kmalloc(sizeof(*de), GFP_KERNEL);
1837 if (!de) {
1838 ret = -ENOMEM;
1839 io_req_defer_failed(req, ret);
1840 return;
1841 }
1842
1843 spin_lock(&ctx->completion_lock);
1844 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1845 spin_unlock(&ctx->completion_lock);
1846 kfree(de);
1847 goto queue;
1848 }
1849
1850 trace_io_uring_defer(req);
1851 de->req = req;
1852 de->seq = seq;
1853 list_add_tail(&de->list, &ctx->defer_list);
1854 spin_unlock(&ctx->completion_lock);
1855 }
1856
1857 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1858 unsigned int issue_flags)
1859 {
1860 if (req->file || !def->needs_file)
1861 return true;
1862
1863 if (req->flags & REQ_F_FIXED_FILE)
1864 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1865 else
1866 req->file = io_file_get_normal(req, req->cqe.fd);
1867
1868 return !!req->file;
1869 }
1870
1871 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1872 {
1873 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1874 const struct cred *creds = NULL;
1875 int ret;
1876
1877 if (unlikely(!io_assign_file(req, def, issue_flags)))
1878 return -EBADF;
1879
1880 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1881 creds = override_creds(req->creds);
1882
1883 if (!def->audit_skip)
1884 audit_uring_entry(req->opcode);
1885
1886 ret = def->issue(req, issue_flags);
1887
1888 if (!def->audit_skip)
1889 audit_uring_exit(!ret, ret);
1890
1891 if (creds)
1892 revert_creds(creds);
1893
1894 if (ret == IOU_OK) {
1895 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1896 io_req_complete_defer(req);
1897 else
1898 io_req_complete_post(req, issue_flags);
1899 } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
1900 return ret;
1901
1902 /* If the op doesn't have a file, we're not polling for it */
1903 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1904 io_iopoll_req_issued(req, issue_flags);
1905
1906 return 0;
1907 }
1908
1909 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1910 {
1911 io_tw_lock(req->ctx, ts);
1912 return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1913 IO_URING_F_COMPLETE_DEFER);
1914 }
1915
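/*
 * io-wq calls this when a work item is done: drop the io-wq reference
 * and, if the request is part of a link, hand the next request back to
 * io-wq for execution.
 */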
1916 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1917 {
1918 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1919 struct io_kiocb *nxt = NULL;
1920
1921 if (req_ref_put_and_test(req)) {
1922 if (req->flags & IO_REQ_LINK_FLAGS)
1923 nxt = io_req_find_next(req);
1924 io_free_req(req);
1925 }
1926 return nxt ? &nxt->work : NULL;
1927 }
1928
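/*
 * io-wq worker entry point. Issues the request from a worker thread,
 * looping on -EAGAIN for blocking retries, and for pollable files tries
 * to arm poll-driven retry instead of blocking the worker.
 */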
1929 void io_wq_submit_work(struct io_wq_work *work)
1930 {
1931 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1932 const struct io_issue_def *def = &io_issue_defs[req->opcode];
1933 unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1934 bool needs_poll = false;
1935 int ret = 0, err = -ECANCELED;
1936
1937 /* one will be dropped by ->io_wq_free_work() after returning to io-wq */
1938 if (!(req->flags & REQ_F_REFCOUNT))
1939 __io_req_set_refcount(req, 2);
1940 else
1941 req_ref_get(req);
1942
1943 io_arm_ltimeout(req);
1944
1945 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1946 if (work->flags & IO_WQ_WORK_CANCEL) {
1947 fail:
1948 io_req_task_queue_fail(req, err);
1949 return;
1950 }
1951 if (!io_assign_file(req, def, issue_flags)) {
1952 err = -EBADF;
1953 work->flags |= IO_WQ_WORK_CANCEL;
1954 goto fail;
1955 }
1956
1957 if (req->flags & REQ_F_FORCE_ASYNC) {
1958 bool opcode_poll = def->pollin || def->pollout;
1959
1960 if (opcode_poll && file_can_poll(req->file)) {
1961 needs_poll = true;
1962 issue_flags |= IO_URING_F_NONBLOCK;
1963 }
1964 }
1965
1966 do {
1967 ret = io_issue_sqe(req, issue_flags);
1968 if (ret != -EAGAIN)
1969 break;
1970
1971 /*
1972 * If REQ_F_NOWAIT is set, then don't wait or retry with
1973 * poll. -EAGAIN is final for that case.
1974 */
1975 if (req->flags & REQ_F_NOWAIT)
1976 break;
1977
1978 /*
1979 * We can get EAGAIN for iopolled IO even though we're
1980 * forcing a sync submission from here, since we can't
1981 * wait for request slots on the block side.
1982 */
1983 if (!needs_poll) {
1984 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1985 break;
1986 if (io_wq_worker_stopped())
1987 break;
1988 cond_resched();
1989 continue;
1990 }
1991
1992 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1993 return;
1994 /* aborted or ready, in either case retry blocking */
1995 needs_poll = false;
1996 issue_flags &= ~IO_URING_F_NONBLOCK;
1997 } while (1);
1998
1999 /* avoid locking problems by failing it from a clean context */
2000 if (ret < 0)
2001 io_req_task_queue_fail(req, ret);
2002 }
2003
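/*
 * Look up @fd in the registered (fixed) file table. Also grabs a
 * reference to the table's rsrc node so the file stays pinned until the
 * request completes.
 */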
2004 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
2005 unsigned int issue_flags)
2006 {
2007 struct io_ring_ctx *ctx = req->ctx;
2008 struct io_fixed_file *slot;
2009 struct file *file = NULL;
2010
2011 io_ring_submit_lock(ctx, issue_flags);
2012
2013 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
2014 goto out;
2015 fd = array_index_nospec(fd, ctx->nr_user_files);
2016 slot = io_fixed_file_slot(&ctx->file_table, fd);
2017 file = io_slot_file(slot);
2018 req->flags |= io_slot_flags(slot);
2019 io_req_set_rsrc_node(req, ctx, 0);
2020 out:
2021 io_ring_submit_unlock(ctx, issue_flags);
2022 return file;
2023 }
2024
2025 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
2026 {
2027 struct file *file = fget(fd);
2028
2029 trace_io_uring_file_get(req, fd);
2030
2031 /* io_uring fds can't be registered as fixed files; track them as inflight */
2032 if (file && io_is_uring_fops(file))
2033 io_req_track_inflight(req);
2034 return file;
2035 }
2036
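/*
 * Slow path for io_queue_sqe(): the nonblocking issue returned an error.
 * On -EAGAIN, try to arm poll so the request is retried on readiness;
 * if poll can't be armed, punt the request to io-wq instead.
 */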
2037 static void io_queue_async(struct io_kiocb *req, int ret)
2038 __must_hold(&req->ctx->uring_lock)
2039 {
2040 struct io_kiocb *linked_timeout;
2041
2042 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
2043 io_req_defer_failed(req, ret);
2044 return;
2045 }
2046
2047 linked_timeout = io_prep_linked_timeout(req);
2048
2049 switch (io_arm_poll_handler(req, 0)) {
2050 case IO_APOLL_READY:
2051 io_kbuf_recycle(req, 0);
2052 io_req_task_queue(req);
2053 break;
2054 case IO_APOLL_ABORTED:
2055 io_kbuf_recycle(req, 0);
2056 io_queue_iowq(req, NULL);
2057 break;
2058 case IO_APOLL_OK:
2059 break;
2060 }
2061
2062 if (linked_timeout)
2063 io_queue_linked_timeout(linked_timeout);
2064 }
2065
2066 static inline void io_queue_sqe(struct io_kiocb *req)
2067 __must_hold(&req->ctx->uring_lock)
2068 {
2069 int ret;
2070
2071 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
2072
2073 /*
2074 * We async punt it if the file wasn't marked NOWAIT, or if the file
2075 * doesn't support non-blocking read/write attempts
2076 */
2077 if (likely(!ret))
2078 io_arm_ltimeout(req);
2079 else
2080 io_queue_async(req, ret);
2081 }
2082
2083 static void io_queue_sqe_fallback(struct io_kiocb *req)
2084 __must_hold(&req->ctx->uring_lock)
2085 {
2086 if (unlikely(req->flags & REQ_F_FAIL)) {
2087 /*
2088 * We don't submit; fail them all. To do that, replace hardlinks
2089 * with normal links. An extra REQ_F_LINK is tolerated.
2090 */
2091 req->flags &= ~REQ_F_HARDLINK;
2092 req->flags |= REQ_F_LINK;
2093 io_req_defer_failed(req, req->cqe.res);
2094 } else {
2095 int ret = io_req_prep_async(req);
2096
2097 if (unlikely(ret)) {
2098 io_req_defer_failed(req, ret);
2099 return;
2100 }
2101
2102 if (unlikely(req->ctx->drain_active))
2103 io_drain_req(req);
2104 else
2105 io_queue_iowq(req, NULL);
2106 }
2107 }
2108
2109 /*
2110 * Check SQE restrictions (opcode and flags).
2111 *
2112 * Returns 'true' if SQE is allowed, 'false' otherwise.
2113 */
2114 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
2115 struct io_kiocb *req,
2116 unsigned int sqe_flags)
2117 {
2118 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
2119 return false;
2120
2121 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
2122 ctx->restrictions.sqe_flags_required)
2123 return false;
2124
2125 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
2126 ctx->restrictions.sqe_flags_required))
2127 return false;
2128
2129 return true;
2130 }
2131
2132 static void io_init_req_drain(struct io_kiocb *req)
2133 {
2134 struct io_ring_ctx *ctx = req->ctx;
2135 struct io_kiocb *head = ctx->submit_state.link.head;
2136
2137 ctx->drain_active = true;
2138 if (head) {
2139 /*
2140 * If we need to drain a request in the middle of a link, drain
2141 * the head request and the next request/link after the current
2142 * link. Considering sequential execution of links,
2143 * REQ_F_IO_DRAIN will be maintained for every request of our
2144 * link.
2145 */
2146 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2147 ctx->drain_next = true;
2148 }
2149 }
2150
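/*
 * Initialise a request from its SQE: validate the opcode and flags,
 * apply drain/link and restriction state, resolve personality creds,
 * and finally call the opcode's ->prep() handler.
 */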
2151 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2152 const struct io_uring_sqe *sqe)
2153 __must_hold(&ctx->uring_lock)
2154 {
2155 const struct io_issue_def *def;
2156 unsigned int sqe_flags;
2157 int personality;
2158 u8 opcode;
2159
2160 /* req is partially pre-initialised, see io_preinit_req() */
2161 req->opcode = opcode = READ_ONCE(sqe->opcode);
2162 /* same numerical values with corresponding REQ_F_*, safe to copy */
2163 req->flags = sqe_flags = READ_ONCE(sqe->flags);
2164 req->cqe.user_data = READ_ONCE(sqe->user_data);
2165 req->file = NULL;
2166 req->rsrc_node = NULL;
2167 req->task = current;
2168
2169 if (unlikely(opcode >= IORING_OP_LAST)) {
2170 req->opcode = 0;
2171 return -EINVAL;
2172 }
2173 def = &io_issue_defs[opcode];
2174 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2175 /* enforce forwards compatibility on users */
2176 if (sqe_flags & ~SQE_VALID_FLAGS)
2177 return -EINVAL;
2178 if (sqe_flags & IOSQE_BUFFER_SELECT) {
2179 if (!def->buffer_select)
2180 return -EOPNOTSUPP;
2181 req->buf_index = READ_ONCE(sqe->buf_group);
2182 }
2183 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2184 ctx->drain_disabled = true;
2185 if (sqe_flags & IOSQE_IO_DRAIN) {
2186 if (ctx->drain_disabled)
2187 return -EOPNOTSUPP;
2188 io_init_req_drain(req);
2189 }
2190 }
2191 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2192 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2193 return -EACCES;
2194 /* knock it to the slow queue path, will be drained there */
2195 if (ctx->drain_active)
2196 req->flags |= REQ_F_FORCE_ASYNC;
2197 /* if there is no link, we're at "next" request and need to drain */
2198 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2199 ctx->drain_next = false;
2200 ctx->drain_active = true;
2201 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2202 }
2203 }
2204
2205 if (!def->ioprio && sqe->ioprio)
2206 return -EINVAL;
2207 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2208 return -EINVAL;
2209
2210 if (def->needs_file) {
2211 struct io_submit_state *state = &ctx->submit_state;
2212
2213 req->cqe.fd = READ_ONCE(sqe->fd);
2214
2215 /*
2216 * Plug now if we have more than two IOs left after this, and the
2217 * target is potentially a read/write to block-based storage.
2218 */
2219 if (state->need_plug && def->plug) {
2220 state->plug_started = true;
2221 state->need_plug = false;
2222 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2223 }
2224 }
2225
2226 personality = READ_ONCE(sqe->personality);
2227 if (personality) {
2228 int ret;
2229
2230 req->creds = xa_load(&ctx->personalities, personality);
2231 if (!req->creds)
2232 return -EINVAL;
2233 get_cred(req->creds);
2234 ret = security_uring_override_creds(req->creds);
2235 if (ret) {
2236 put_cred(req->creds);
2237 return ret;
2238 }
2239 req->flags |= REQ_F_CREDS;
2240 }
2241
2242 return def->prep(req, sqe);
2243 }
2244
2245 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2246 struct io_kiocb *req, int ret)
2247 {
2248 struct io_ring_ctx *ctx = req->ctx;
2249 struct io_submit_link *link = &ctx->submit_state.link;
2250 struct io_kiocb *head = link->head;
2251
2252 trace_io_uring_req_failed(sqe, req, ret);
2253
2254 /*
2255 * Avoid breaking links in the middle as it renders links with SQPOLL
2256 * unusable. Instead of failing eagerly, continue assembling the link if
2257 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2258 * should find the flag and handle the rest.
2259 */
2260 req_fail_link_node(req, ret);
2261 if (head && !(head->flags & REQ_F_FAIL))
2262 req_fail_link_node(head, -ECANCELED);
2263
2264 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2265 if (head) {
2266 link->last->link = req;
2267 link->head = NULL;
2268 req = head;
2269 }
2270 io_queue_sqe_fallback(req);
2271 return ret;
2272 }
2273
2274 if (head)
2275 link->last->link = req;
2276 else
2277 link->head = req;
2278 link->last = req;
2279 return 0;
2280 }
2281
2282 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2283 const struct io_uring_sqe *sqe)
2284 __must_hold(&ctx->uring_lock)
2285 {
2286 struct io_submit_link *link = &ctx->submit_state.link;
2287 int ret;
2288
2289 ret = io_init_req(ctx, req, sqe);
2290 if (unlikely(ret))
2291 return io_submit_fail_init(sqe, req, ret);
2292
2293 trace_io_uring_submit_req(req);
2294
2295 /*
2296 * If we already have a head request, queue this one for async
2297 * submittal once the head completes. If we don't have a head but
2298 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2299 * submitted sync once the chain is complete. If none of those
2300 * conditions are true (normal request), then just queue it.
2301 */
2302 if (unlikely(link->head)) {
2303 ret = io_req_prep_async(req);
2304 if (unlikely(ret))
2305 return io_submit_fail_init(sqe, req, ret);
2306
2307 trace_io_uring_link(req, link->head);
2308 link->last->link = req;
2309 link->last = req;
2310
2311 if (req->flags & IO_REQ_LINK_FLAGS)
2312 return 0;
2313 /* last request of the link, flush it */
2314 req = link->head;
2315 link->head = NULL;
2316 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2317 goto fallback;
2318
2319 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2320 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2321 if (req->flags & IO_REQ_LINK_FLAGS) {
2322 link->head = req;
2323 link->last = req;
2324 } else {
2325 fallback:
2326 io_queue_sqe_fallback(req);
2327 }
2328 return 0;
2329 }
2330
2331 io_queue_sqe(req);
2332 return 0;
2333 }
2334
2335 /*
2336 * Batched submission is done, ensure local IO is flushed out.
2337 */
2338 static void io_submit_state_end(struct io_ring_ctx *ctx)
2339 {
2340 struct io_submit_state *state = &ctx->submit_state;
2341
2342 if (unlikely(state->link.head))
2343 io_queue_sqe_fallback(state->link.head);
2344 /* flush only after queuing links as they can generate completions */
2345 io_submit_flush_completions(ctx);
2346 if (state->plug_started)
2347 blk_finish_plug(&state->plug);
2348 }
2349
2350 /*
2351 * Start submission side cache.
2352 */
2353 static void io_submit_state_start(struct io_submit_state *state,
2354 unsigned int max_ios)
2355 {
2356 state->plug_started = false;
2357 state->need_plug = max_ios > 2;
2358 state->submit_nr = max_ios;
2359 /* set only head, no need to init link_last in advance */
2360 state->link.head = NULL;
2361 }
2362
2363 static void io_commit_sqring(struct io_ring_ctx *ctx)
2364 {
2365 struct io_rings *rings = ctx->rings;
2366
2367 /*
2368 * Ensure any loads from the SQEs are done at this point,
2369 * since once we write the new head, the application could
2370 * write new data to them.
2371 */
2372 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2373 }
2374
2375 /*
2376 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2377 * that is mapped by userspace. This means that care needs to be taken to
2378 * ensure that reads are stable, as we cannot rely on userspace always
2379 * being a good citizen. If members of the sqe are validated and then later
2380 * used, it's important that those reads are done through READ_ONCE() to
2381 * prevent a re-load down the line.
2382 */
2383 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
2384 {
2385 unsigned mask = ctx->sq_entries - 1;
2386 unsigned head = ctx->cached_sq_head++ & mask;
2387
2388 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
2389 head = READ_ONCE(ctx->sq_array[head]);
2390 if (unlikely(head >= ctx->sq_entries)) {
2391 /* drop invalid entries */
2392 spin_lock(&ctx->completion_lock);
2393 ctx->cq_extra--;
2394 spin_unlock(&ctx->completion_lock);
2395 WRITE_ONCE(ctx->rings->sq_dropped,
2396 READ_ONCE(ctx->rings->sq_dropped) + 1);
2397 return false;
2398 }
2399 }
2400
2401 /*
2402 * The cached sq head (or cq tail) serves two purposes:
2403 *
2404 * 1) allows us to batch the cost of updating the user-visible
2405 * head.
2406 * 2) allows the kernel side to track the head on its own, even
2407 * though the application is the one updating it.
2408 */
2409
2410 /* double index for 128-byte SQEs, twice as long */
2411 if (ctx->flags & IORING_SETUP_SQE128)
2412 head <<= 1;
2413 *sqe = &ctx->sq_sqes[head];
2414 return true;
2415 }
2416
2417 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2418 __must_hold(&ctx->uring_lock)
2419 {
2420 unsigned int entries = io_sqring_entries(ctx);
2421 unsigned int left;
2422 int ret;
2423
2424 if (unlikely(!entries))
2425 return 0;
2426 /* make sure SQ entry isn't read before tail */
2427 ret = left = min(nr, entries);
2428 io_get_task_refs(left);
2429 io_submit_state_start(&ctx->submit_state, left);
2430
2431 do {
2432 const struct io_uring_sqe *sqe;
2433 struct io_kiocb *req;
2434
2435 if (unlikely(!io_alloc_req(ctx, &req)))
2436 break;
2437 if (unlikely(!io_get_sqe(ctx, &sqe))) {
2438 io_req_add_to_cache(req, ctx);
2439 break;
2440 }
2441
2442 /*
2443 * Continue submitting even for sqe failure if the
2444 * ring was setup with IORING_SETUP_SUBMIT_ALL
2445 */
2446 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2447 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2448 left--;
2449 break;
2450 }
2451 } while (--left);
2452
2453 if (unlikely(left)) {
2454 ret -= left;
2455 /* try again if it submitted nothing and can't allocate a req */
2456 if (!ret && io_req_cache_empty(ctx))
2457 ret = -EAGAIN;
2458 current->io_uring->cached_refs += left;
2459 }
2460
2461 io_submit_state_end(ctx);
2462 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2463 io_commit_sqring(ctx);
2464 return ret;
2465 }
2466
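/*
 * Per-waiter state for io_cqring_wait(): the CQ tail we're waiting to
 * reach, the number of timeouts seen when waiting started, and an
 * absolute timeout.
 */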
2467 struct io_wait_queue {
2468 struct wait_queue_entry wq;
2469 struct io_ring_ctx *ctx;
2470 unsigned cq_tail;
2471 unsigned nr_timeouts;
2472 ktime_t timeout;
2473 };
2474
2475 static inline bool io_has_work(struct io_ring_ctx *ctx)
2476 {
2477 return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
2478 !llist_empty(&ctx->work_llist);
2479 }
2480
2481 static inline bool io_should_wake(struct io_wait_queue *iowq)
2482 {
2483 struct io_ring_ctx *ctx = iowq->ctx;
2484 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2485
2486 /*
2487 * Wake up if we have enough events, or if a timeout occurred since we
2488 * started waiting. For timeouts, we always want to return to userspace,
2489 * regardless of event count.
2490 */
2491 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2492 }
2493
2494 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2495 int wake_flags, void *key)
2496 {
2497 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
2498
2499 /*
2500 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2501 * the task, and the next invocation will do it.
2502 */
2503 if (io_should_wake(iowq) || io_has_work(iowq->ctx))
2504 return autoremove_wake_function(curr, mode, wake_flags, key);
2505 return -1;
2506 }
2507
2508 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2509 {
2510 if (!llist_empty(&ctx->work_llist)) {
2511 __set_current_state(TASK_RUNNING);
2512 if (io_run_local_work(ctx) > 0)
2513 return 0;
2514 }
2515 if (io_run_task_work() > 0)
2516 return 0;
2517 if (task_sigpending(current))
2518 return -EINTR;
2519 return 0;
2520 }
2521
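/*
 * Best-effort check for whether this task has io_uring requests in
 * flight, used to decide if waiting should be accounted as iowait.
 */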
2522 static bool current_pending_io(void)
2523 {
2524 struct io_uring_task *tctx = current->io_uring;
2525
2526 if (!tctx)
2527 return false;
2528 return percpu_counter_read_positive(&tctx->inflight);
2529 }
2530
2531 /* when this returns > 0, the caller should retry */
2532 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2533 struct io_wait_queue *iowq)
2534 {
2535 int io_wait, ret;
2536
2537 if (unlikely(READ_ONCE(ctx->check_cq)))
2538 return 1;
2539 if (unlikely(!llist_empty(&ctx->work_llist)))
2540 return 1;
2541 if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
2542 return 1;
2543 if (unlikely(task_sigpending(current)))
2544 return -EINTR;
2545 if (unlikely(io_should_wake(iowq)))
2546 return 0;
2547
2548 /*
2549 * Mark us as being in io_wait if we have pending requests, so cpufreq
2550 * can take into account that the task is waiting for IO - turns out
2551 * to be important for low queue depth (QD) IO.
2552 */
2553 io_wait = current->in_iowait;
2554 if (current_pending_io())
2555 current->in_iowait = 1;
2556 ret = 0;
2557 if (iowq->timeout == KTIME_MAX)
2558 schedule();
2559 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
2560 ret = -ETIME;
2561 current->in_iowait = io_wait;
2562 return ret;
2563 }
2564
2565 /*
2566 * Wait until events become available, if we don't already have some. The
2567 * application must reap them itself, as they reside on the shared cq ring.
2568 */
2569 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2570 const sigset_t __user *sig, size_t sigsz,
2571 struct __kernel_timespec __user *uts)
2572 {
2573 struct io_wait_queue iowq;
2574 struct io_rings *rings = ctx->rings;
2575 int ret;
2576
2577 if (!io_allowed_run_tw(ctx))
2578 return -EEXIST;
2579 if (!llist_empty(&ctx->work_llist))
2580 io_run_local_work(ctx);
2581 io_run_task_work();
2582 io_cqring_overflow_flush(ctx);
2583 /* if user messes with these they will just get an early return */
2584 if (__io_cqring_events_user(ctx) >= min_events)
2585 return 0;
2586
2587 if (sig) {
2588 #ifdef CONFIG_COMPAT
2589 if (in_compat_syscall())
2590 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2591 sigsz);
2592 else
2593 #endif
2594 ret = set_user_sigmask(sig, sigsz);
2595
2596 if (ret)
2597 return ret;
2598 }
2599
2600 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2601 iowq.wq.private = current;
2602 INIT_LIST_HEAD(&iowq.wq.entry);
2603 iowq.ctx = ctx;
2604 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2605 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2606 iowq.timeout = KTIME_MAX;
2607
2608 if (uts) {
2609 struct timespec64 ts;
2610
2611 if (get_timespec64(&ts, uts))
2612 return -EFAULT;
2613 iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2614 }
2615
2616 trace_io_uring_cqring_wait(ctx, min_events);
2617 do {
2618 unsigned long check_cq;
2619
2620 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2621 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
2622
2623 atomic_set(&ctx->cq_wait_nr, nr_wait);
2624 set_current_state(TASK_INTERRUPTIBLE);
2625 } else {
2626 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2627 TASK_INTERRUPTIBLE);
2628 }
2629
2630 ret = io_cqring_wait_schedule(ctx, &iowq);
2631 __set_current_state(TASK_RUNNING);
2632 atomic_set(&ctx->cq_wait_nr, 0);
2633
2634 if (ret < 0)
2635 break;
2636 /*
2637 * Run task_work after scheduling and before io_should_wake().
2638 * If we got woken because of task_work being processed, run it
2639 * now rather than let the caller do another wait loop.
2640 */
2641 io_run_task_work();
2642 if (!llist_empty(&ctx->work_llist))
2643 io_run_local_work(ctx);
2644
2645 check_cq = READ_ONCE(ctx->check_cq);
2646 if (unlikely(check_cq)) {
2647 /* let the caller flush overflows, retry */
2648 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2649 io_cqring_do_overflow_flush(ctx);
2650 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
2651 ret = -EBADR;
2652 break;
2653 }
2654 }
2655
2656 if (io_should_wake(&iowq)) {
2657 ret = 0;
2658 break;
2659 }
2660 cond_resched();
2661 } while (1);
2662
2663 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
2664 finish_wait(&ctx->cq_wait, &iowq.wq);
2665 restore_saved_sigmask_unless(ret == -EINTR);
2666
2667 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2668 }
2669
2670 void io_mem_free(void *ptr)
2671 {
2672 if (!ptr)
2673 return;
2674
2675 folio_put(virt_to_folio(ptr));
2676 }
2677
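/* Unpin and free a user page array set up by __io_uaddr_map() */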
2678 static void io_pages_free(struct page ***pages, int npages)
2679 {
2680 struct page **page_array;
2681 int i;
2682
2683 if (!pages)
2684 return;
2685
2686 page_array = *pages;
2687 if (!page_array)
2688 return;
2689
2690 for (i = 0; i < npages; i++)
2691 unpin_user_page(page_array[i]);
2692 kvfree(page_array);
2693 *pages = NULL;
2694 }
2695
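/*
 * Pin user-provided memory for rings/SQEs (IORING_SETUP_NO_MMAP). The
 * region must be page aligned and backed by pages that are virtually
 * contiguous in the kernel direct map (a single normal or huge page).
 */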
2696 static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
2697 unsigned long uaddr, size_t size)
2698 {
2699 struct page **page_array;
2700 unsigned int nr_pages;
2701 void *page_addr;
2702 int ret, i;
2703
2704 *npages = 0;
2705
2706 if (uaddr & (PAGE_SIZE - 1) || !size)
2707 return ERR_PTR(-EINVAL);
2708
2709 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2710 if (nr_pages > USHRT_MAX)
2711 return ERR_PTR(-EINVAL);
2712 page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
2713 if (!page_array)
2714 return ERR_PTR(-ENOMEM);
2715
2716 ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
2717 page_array);
2718 if (ret != nr_pages) {
2719 err:
2720 io_pages_free(&page_array, ret > 0 ? ret : 0);
2721 return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
2722 }
2723
2724 page_addr = page_address(page_array[0]);
2725 for (i = 0; i < nr_pages; i++) {
2726 ret = -EINVAL;
2727
2728 /*
2729 * Can't support mapping user allocated ring memory on 32-bit
2730 * archs where it could potentially reside in highmem. Just
2731 * fail those with -EINVAL, just like we did on kernels that
2732 * didn't support this feature.
2733 */
2734 if (PageHighMem(page_array[i]))
2735 goto err;
2736
2737 /*
2738 * No support for discontig pages for now; it should either be a
2739 * single normal page or a huge page. Later on we can add
2740 * support for remapping discontig pages; for now we will
2741 * just fail them with -EINVAL.
2742 */
2743 if (page_address(page_array[i]) != page_addr)
2744 goto err;
2745 page_addr += PAGE_SIZE;
2746 }
2747
2748 *pages = page_array;
2749 *npages = nr_pages;
2750 return page_to_virt(page_array[0]);
2751 }
2752
2753 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2754 size_t size)
2755 {
2756 return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
2757 size);
2758 }
2759
2760 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2761 size_t size)
2762 {
2763 return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
2764 size);
2765 }
2766
2767 static void io_rings_free(struct io_ring_ctx *ctx)
2768 {
2769 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
2770 io_mem_free(ctx->rings);
2771 io_mem_free(ctx->sq_sqes);
2772 ctx->rings = NULL;
2773 ctx->sq_sqes = NULL;
2774 } else {
2775 io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
2776 ctx->n_ring_pages = 0;
2777 io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
2778 ctx->n_sqe_pages = 0;
2779 }
2780 }
2781
2782 void *io_mem_alloc(size_t size)
2783 {
2784 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2785 void *ret;
2786
2787 ret = (void *) __get_free_pages(gfp, get_order(size));
2788 if (ret)
2789 return ret;
2790 return ERR_PTR(-ENOMEM);
2791 }
2792
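/*
 * Compute the allocation size for the rings: the io_rings struct with
 * cq_entries CQEs (doubled for CQE32), then, unless
 * IORING_SETUP_NO_SQARRAY is set, the u32 SQ index array whose offset
 * is returned through *sq_offset.
 */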
2793 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2794 unsigned int cq_entries, size_t *sq_offset)
2795 {
2796 struct io_rings *rings;
2797 size_t off, sq_array_size;
2798
2799 off = struct_size(rings, cqes, cq_entries);
2800 if (off == SIZE_MAX)
2801 return SIZE_MAX;
2802 if (ctx->flags & IORING_SETUP_CQE32) {
2803 if (check_shl_overflow(off, 1, &off))
2804 return SIZE_MAX;
2805 }
2806
2807 #ifdef CONFIG_SMP
2808 off = ALIGN(off, SMP_CACHE_BYTES);
2809 if (off == 0)
2810 return SIZE_MAX;
2811 #endif
2812
2813 if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
2814 if (sq_offset)
2815 *sq_offset = SIZE_MAX;
2816 return off;
2817 }
2818
2819 if (sq_offset)
2820 *sq_offset = off;
2821
2822 sq_array_size = array_size(sizeof(u32), sq_entries);
2823 if (sq_array_size == SIZE_MAX)
2824 return SIZE_MAX;
2825
2826 if (check_add_overflow(off, sq_array_size, &off))
2827 return SIZE_MAX;
2828
2829 return off;
2830 }
2831
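/*
 * Register an eventfd to be signalled when CQEs are posted. With
 * @eventfd_async set, only completions that happen out of the direct
 * submission path trigger a notification.
 */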
2832 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2833 unsigned int eventfd_async)
2834 {
2835 struct io_ev_fd *ev_fd;
2836 __s32 __user *fds = arg;
2837 int fd;
2838
2839 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2840 lockdep_is_held(&ctx->uring_lock));
2841 if (ev_fd)
2842 return -EBUSY;
2843
2844 if (copy_from_user(&fd, fds, sizeof(*fds)))
2845 return -EFAULT;
2846
2847 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
2848 if (!ev_fd)
2849 return -ENOMEM;
2850
2851 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
2852 if (IS_ERR(ev_fd->cq_ev_fd)) {
2853 int ret = PTR_ERR(ev_fd->cq_ev_fd);
2854 kfree(ev_fd);
2855 return ret;
2856 }
2857
2858 spin_lock(&ctx->completion_lock);
2859 ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
2860 spin_unlock(&ctx->completion_lock);
2861
2862 ev_fd->eventfd_async = eventfd_async;
2863 ctx->has_evfd = true;
2864 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
2865 atomic_set(&ev_fd->refs, 1);
2866 atomic_set(&ev_fd->ops, 0);
2867 return 0;
2868 }
2869
2870 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2871 {
2872 struct io_ev_fd *ev_fd;
2873
2874 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2875 lockdep_is_held(&ctx->uring_lock));
2876 if (ev_fd) {
2877 ctx->has_evfd = false;
2878 rcu_assign_pointer(ctx->io_ev_fd, NULL);
2879 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
2880 call_rcu(&ev_fd->rcu, io_eventfd_ops);
2881 return 0;
2882 }
2883
2884 return -ENXIO;
2885 }
2886
2887 static void io_req_caches_free(struct io_ring_ctx *ctx)
2888 {
2889 struct io_kiocb *req;
2890 int nr = 0;
2891
2892 mutex_lock(&ctx->uring_lock);
2893 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2894
2895 while (!io_req_cache_empty(ctx)) {
2896 req = io_extract_req(ctx);
2897 kmem_cache_free(req_cachep, req);
2898 nr++;
2899 }
2900 if (nr)
2901 percpu_ref_put_many(&ctx->refs, nr);
2902 mutex_unlock(&ctx->uring_lock);
2903 }
2904
2905 static void io_rsrc_node_cache_free(struct io_cache_entry *entry)
2906 {
2907 kfree(container_of(entry, struct io_rsrc_node, cache));
2908 }
2909
2910 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2911 {
2912 io_sq_thread_finish(ctx);
2913 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2914 if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
2915 return;
2916
2917 mutex_lock(&ctx->uring_lock);
2918 if (ctx->buf_data)
2919 __io_sqe_buffers_unregister(ctx);
2920 if (ctx->file_data)
2921 __io_sqe_files_unregister(ctx);
2922 io_cqring_overflow_kill(ctx);
2923 io_eventfd_unregister(ctx);
2924 io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2925 io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2926 io_futex_cache_free(ctx);
2927 io_destroy_buffers(ctx);
2928 mutex_unlock(&ctx->uring_lock);
2929 if (ctx->sq_creds)
2930 put_cred(ctx->sq_creds);
2931 if (ctx->submitter_task)
2932 put_task_struct(ctx->submitter_task);
2933
2934 /* there are no registered resources left, nobody uses it */
2935 if (ctx->rsrc_node)
2936 io_rsrc_node_destroy(ctx, ctx->rsrc_node);
2937
2938 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2939
2940 #if defined(CONFIG_UNIX)
2941 if (ctx->ring_sock) {
2942 ctx->ring_sock->file = NULL; /* so that iput() is called */
2943 sock_release(ctx->ring_sock);
2944 }
2945 #endif
2946 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2947
2948 io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
2949 if (ctx->mm_account) {
2950 mmdrop(ctx->mm_account);
2951 ctx->mm_account = NULL;
2952 }
2953 io_rings_free(ctx);
2954 io_kbuf_mmap_list_free(ctx);
2955
2956 percpu_ref_exit(&ctx->refs);
2957 free_uid(ctx->user);
2958 io_req_caches_free(ctx);
2959 if (ctx->hash_map)
2960 io_wq_put_hash(ctx->hash_map);
2961 kfree(ctx->cancel_table.hbs);
2962 kfree(ctx->cancel_table_locked.hbs);
2963 kfree(ctx->io_bl);
2964 xa_destroy(&ctx->io_bl_xa);
2965 kfree(ctx);
2966 }
2967
2968 static __cold void io_activate_pollwq_cb(struct callback_head *cb)
2969 {
2970 struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2971 poll_wq_task_work);
2972
2973 mutex_lock(&ctx->uring_lock);
2974 ctx->poll_activated = true;
2975 mutex_unlock(&ctx->uring_lock);
2976
2977 /*
2978 * Wake ups for some events between start of polling and activation
2979 * might've been lost due to loose synchronisation.
2980 */
2981 wake_up_all(&ctx->poll_wq);
2982 percpu_ref_put(&ctx->refs);
2983 }
2984
2985 static __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2986 {
2987 spin_lock(&ctx->completion_lock);
2988 /* already activated or in progress */
2989 if (ctx->poll_activated || ctx->poll_wq_task_work.func)
2990 goto out;
2991 if (WARN_ON_ONCE(!ctx->task_complete))
2992 goto out;
2993 if (!ctx->submitter_task)
2994 goto out;
2995 /*
2996 * With ->submitter_task, only the submitter task completes requests; we
2997 * only need to sync with it, which is done by injecting a task_work
2998 */
2999 init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
3000 percpu_ref_get(&ctx->refs);
3001 if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
3002 percpu_ref_put(&ctx->refs);
3003 out:
3004 spin_unlock(&ctx->completion_lock);
3005 }
3006
3007 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3008 {
3009 struct io_ring_ctx *ctx = file->private_data;
3010 __poll_t mask = 0;
3011
3012 if (unlikely(!ctx->poll_activated))
3013 io_activate_pollwq(ctx);
3014
3015 poll_wait(file, &ctx->poll_wq, wait);
3016 /*
3017 * synchronizes with barrier from wq_has_sleeper call in
3018 * io_commit_cqring
3019 */
3020 smp_rmb();
3021 if (!io_sqring_full(ctx))
3022 mask |= EPOLLOUT | EPOLLWRNORM;
3023
3024 /*
3025 * Don't flush cqring overflow list here, just do a simple check.
3026 * Otherwise there could possibly be an ABBA deadlock:
3027 * CPU0 CPU1
3028 * ---- ----
3029 * lock(&ctx->uring_lock);
3030 * lock(&ep->mtx);
3031 * lock(&ctx->uring_lock);
3032 * lock(&ep->mtx);
3033 *
3034 * Users may get EPOLLIN while seeing nothing in the cqring; this
3035 * pushes them to do the flush.
3036 */
3037
3038 if (__io_cqring_events_user(ctx) || io_has_work(ctx))
3039 mask |= EPOLLIN | EPOLLRDNORM;
3040
3041 return mask;
3042 }
3043
3044 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
3045 {
3046 const struct cred *creds;
3047
3048 creds = xa_erase(&ctx->personalities, id);
3049 if (creds) {
3050 put_cred(creds);
3051 return 0;
3052 }
3053
3054 return -EINVAL;
3055 }
3056
3057 struct io_tctx_exit {
3058 struct callback_head task_work;
3059 struct completion completion;
3060 struct io_ring_ctx *ctx;
3061 };
3062
3063 static __cold void io_tctx_exit_cb(struct callback_head *cb)
3064 {
3065 struct io_uring_task *tctx = current->io_uring;
3066 struct io_tctx_exit *work;
3067
3068 work = container_of(cb, struct io_tctx_exit, task_work);
3069 /*
3070 * When @in_cancel, we're in cancellation and it's racy to remove the
3071 * node. It'll be removed by the end of cancellation, just ignore it.
3072 * tctx can be NULL if the queueing of this task_work raced with
3073 * work cancelation off the exec path.
3074 */
3075 if (tctx && !atomic_read(&tctx->in_cancel))
3076 io_uring_del_tctx_node((unsigned long)work->ctx);
3077 complete(&work->completion);
3078 }
3079
3080 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
3081 {
3082 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3083
3084 return req->ctx == data;
3085 }
3086
3087 static __cold void io_ring_exit_work(struct work_struct *work)
3088 {
3089 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
3090 unsigned long timeout = jiffies + HZ * 60 * 5;
3091 unsigned long interval = HZ / 20;
3092 struct io_tctx_exit exit;
3093 struct io_tctx_node *node;
3094 int ret;
3095
3096 /*
3097 * If we're doing polled IO and end up having requests being
3098 * submitted async (out-of-line), then completions can come in while
3099 * we're waiting for refs to drop. We need to reap these manually,
3100 * as nobody else will be looking for them.
3101 */
3102 do {
3103 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
3104 mutex_lock(&ctx->uring_lock);
3105 io_cqring_overflow_kill(ctx);
3106 mutex_unlock(&ctx->uring_lock);
3107 }
3108
3109 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3110 io_move_task_work_from_local(ctx);
3111
3112 while (io_uring_try_cancel_requests(ctx, NULL, true))
3113 cond_resched();
3114
3115 if (ctx->sq_data) {
3116 struct io_sq_data *sqd = ctx->sq_data;
3117 struct task_struct *tsk;
3118
3119 io_sq_thread_park(sqd);
3120 tsk = sqd->thread;
3121 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
3122 io_wq_cancel_cb(tsk->io_uring->io_wq,
3123 io_cancel_ctx_cb, ctx, true);
3124 io_sq_thread_unpark(sqd);
3125 }
3126
3127 io_req_caches_free(ctx);
3128
3129 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
3130 /* there is little hope left, don't run it too often */
3131 interval = HZ * 60;
3132 }
3133 /*
3134 * This is really an uninterruptible wait, as it has to be
3135 * complete. But it's also run from a kworker, which doesn't
3136 * take signals, so it's fine to make it interruptible. This
3137 * avoids scenarios where we knowingly can wait much longer
3138 * on completions, for example if someone does a SIGSTOP on
3139 * a task that needs to finish task_work to make this loop
3140 * complete. That's a synthetic situation that should not
3141 * cause a stuck task backtrace, and hence a potential panic
3142 * on stuck tasks if that is enabled.
3143 */
3144 } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
3145
3146 init_completion(&exit.completion);
3147 init_task_work(&exit.task_work, io_tctx_exit_cb);
3148 exit.ctx = ctx;
3149 /*
3150 * Some may use the context even when all refs and requests have been put,
3151 * and they are free to do so while still holding uring_lock or
3152 * completion_lock, see io_req_task_submit(). Apart from other work,
3153 * this lock/unlock section also waits for them to finish.
3154 */
3155 mutex_lock(&ctx->uring_lock);
3156 while (!list_empty(&ctx->tctx_list)) {
3157 WARN_ON_ONCE(time_after(jiffies, timeout));
3158
3159 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
3160 ctx_node);
3161 /* don't spin on a single task if cancellation failed */
3162 list_rotate_left(&ctx->tctx_list);
3163 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
3164 if (WARN_ON_ONCE(ret))
3165 continue;
3166
3167 mutex_unlock(&ctx->uring_lock);
3168 /*
3169 * See comment above for
3170 * wait_for_completion_interruptible_timeout() on why this
3171 * wait is marked as interruptible.
3172 */
3173 wait_for_completion_interruptible(&exit.completion);
3174 mutex_lock(&ctx->uring_lock);
3175 }
3176 mutex_unlock(&ctx->uring_lock);
3177 spin_lock(&ctx->completion_lock);
3178 spin_unlock(&ctx->completion_lock);
3179
3180 /* pairs with RCU read section in io_req_local_work_add() */
3181 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3182 synchronize_rcu();
3183
3184 io_ring_ctx_free(ctx);
3185 }
3186
3187 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3188 {
3189 unsigned long index;
3190 struct creds *creds;
3191
3192 mutex_lock(&ctx->uring_lock);
3193 percpu_ref_kill(&ctx->refs);
3194 xa_for_each(&ctx->personalities, index, creds)
3195 io_unregister_personality(ctx, index);
3196 if (ctx->rings)
3197 io_poll_remove_all(ctx, NULL, true);
3198 mutex_unlock(&ctx->uring_lock);
3199
3200 /*
3201 * If we failed setting up the ctx, we might not have any rings
3202 * and therefore did not submit any requests
3203 */
3204 if (ctx->rings)
3205 io_kill_timeouts(ctx, NULL, true);
3206
3207 flush_delayed_work(&ctx->fallback_work);
3208
3209 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
3210 /*
3211 * Use system_unbound_wq to avoid spawning tons of event kworkers
3212 * if we're exiting a ton of rings at the same time. It just adds
3213 * noise and overhead; there's no discernible change in runtime
3214 * over using system_wq.
3215 */
3216 queue_work(system_unbound_wq, &ctx->exit_work);
3217 }
3218
3219 static int io_uring_release(struct inode *inode, struct file *file)
3220 {
3221 struct io_ring_ctx *ctx = file->private_data;
3222
3223 file->private_data = NULL;
3224 io_ring_ctx_wait_and_kill(ctx);
3225 return 0;
3226 }
3227
3228 struct io_task_cancel {
3229 struct task_struct *task;
3230 bool all;
3231 };
3232
3233 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
3234 {
3235 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3236 struct io_task_cancel *cancel = data;
3237
3238 return io_match_task_safe(req, cancel->task, cancel->all);
3239 }
3240
3241 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3242 struct task_struct *task,
3243 bool cancel_all)
3244 {
3245 struct io_defer_entry *de;
3246 LIST_HEAD(list);
3247
3248 spin_lock(&ctx->completion_lock);
3249 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3250 if (io_match_task_safe(de->req, task, cancel_all)) {
3251 list_cut_position(&list, &ctx->defer_list, &de->list);
3252 break;
3253 }
3254 }
3255 spin_unlock(&ctx->completion_lock);
3256 if (list_empty(&list))
3257 return false;
3258
3259 while (!list_empty(&list)) {
3260 de = list_first_entry(&list, struct io_defer_entry, list);
3261 list_del_init(&de->list);
3262 io_req_task_queue_fail(de->req, -ECANCELED);
3263 kfree(de);
3264 }
3265 return true;
3266 }
3267
3268 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3269 {
3270 struct io_tctx_node *node;
3271 enum io_wq_cancel cret;
3272 bool ret = false;
3273
3274 mutex_lock(&ctx->uring_lock);
3275 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3276 struct io_uring_task *tctx = node->task->io_uring;
3277
3278 /*
3279 * io_wq will stay alive while we hold uring_lock, because it's
3280 * killed after ctx nodes, which requires to take the lock.
3281 */
3282 if (!tctx || !tctx->io_wq)
3283 continue;
3284 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3285 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3286 }
3287 mutex_unlock(&ctx->uring_lock);
3288
3289 return ret;
3290 }
3291
3292 static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
3293 struct task_struct *task, bool cancel_all)
3294 {
3295 struct hlist_node *tmp;
3296 struct io_kiocb *req;
3297 bool ret = false;
3298
3299 lockdep_assert_held(&ctx->uring_lock);
3300
3301 hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
3302 hash_node) {
3303 struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
3304 struct io_uring_cmd);
3305 struct file *file = req->file;
3306
3307 if (!cancel_all && req->task != task)
3308 continue;
3309
3310 if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
3311 /* ->sqe isn't available if no async data */
3312 if (!req_has_async_data(req))
3313 cmd->sqe = NULL;
3314 file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
3315 ret = true;
3316 }
3317 }
3318 io_submit_flush_completions(ctx);
3319
3320 return ret;
3321 }
3322
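/*
 * Try to cancel requests matching @task (or all requests if @task is
 * NULL): io-wq work, deferred/drain entries, poll, waitid, futex,
 * cancelable uring_cmds and timeouts. Returns true if any progress was
 * made, so callers loop until this returns false.
 */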
3323 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3324 struct task_struct *task,
3325 bool cancel_all)
3326 {
3327 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
3328 struct io_uring_task *tctx = task ? task->io_uring : NULL;
3329 enum io_wq_cancel cret;
3330 bool ret = false;
3331
3332 /* set it so io_req_local_work_add() would wake us up */
3333 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
3334 atomic_set(&ctx->cq_wait_nr, 1);
3335 smp_mb();
3336 }
3337
3338 /* failed during ring init, it couldn't have issued any requests */
3339 if (!ctx->rings)
3340 return false;
3341
3342 if (!task) {
3343 ret |= io_uring_try_cancel_iowq(ctx);
3344 } else if (tctx && tctx->io_wq) {
3345 /*
3346 * Cancels requests of all rings, not only @ctx, but
3347 * it's fine as the task is in exit/exec.
3348 */
3349 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3350 &cancel, true);
3351 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3352 }
3353
3354 /* SQPOLL thread does its own polling */
3355 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3356 (ctx->sq_data && ctx->sq_data->thread == current)) {
3357 while (!wq_list_empty(&ctx->iopoll_list)) {
3358 io_iopoll_try_reap_events(ctx);
3359 ret = true;
3360 cond_resched();
3361 }
3362 }
3363
3364 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3365 io_allowed_defer_tw_run(ctx))
3366 ret |= io_run_local_work(ctx) > 0;
3367 ret |= io_cancel_defer_files(ctx, task, cancel_all);
3368 mutex_lock(&ctx->uring_lock);
3369 ret |= io_poll_remove_all(ctx, task, cancel_all);
3370 ret |= io_waitid_remove_all(ctx, task, cancel_all);
3371 ret |= io_futex_remove_all(ctx, task, cancel_all);
3372 ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
3373 mutex_unlock(&ctx->uring_lock);
3374 ret |= io_kill_timeouts(ctx, task, cancel_all);
3375 if (task)
3376 ret |= io_run_task_work() > 0;
3377 return ret;
3378 }
3379
3380 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3381 {
3382 if (tracked)
3383 return atomic_read(&tctx->inflight_tracked);
3384 return percpu_counter_sum(&tctx->inflight);
3385 }
3386
3387 /*
3388 * Find any io_uring ctx that this task has registered or done IO on, and cancel
3389 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
3390 */
3391 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
3392 {
3393 struct io_uring_task *tctx = current->io_uring;
3394 struct io_ring_ctx *ctx;
3395 struct io_tctx_node *node;
3396 unsigned long index;
3397 s64 inflight;
3398 DEFINE_WAIT(wait);
3399
3400 WARN_ON_ONCE(sqd && sqd->thread != current);
3401
3402 if (!current->io_uring)
3403 return;
3404 if (tctx->io_wq)
3405 io_wq_exit_start(tctx->io_wq);
3406
3407 atomic_inc(&tctx->in_cancel);
3408 do {
3409 bool loop = false;
3410
3411 io_uring_drop_tctx_refs(current);
3412 /* read completions before cancelations */
3413 inflight = tctx_inflight(tctx, !cancel_all);
3414 if (!inflight)
3415 break;
3416
3417 if (!sqd) {
3418 xa_for_each(&tctx->xa, index, node) {
3419 /* sqpoll task will cancel all its requests */
3420 if (node->ctx->sq_data)
3421 continue;
3422 loop |= io_uring_try_cancel_requests(node->ctx,
3423 current, cancel_all);
3424 }
3425 } else {
3426 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3427 loop |= io_uring_try_cancel_requests(ctx,
3428 current,
3429 cancel_all);
3430 }
3431
3432 if (loop) {
3433 cond_resched();
3434 continue;
3435 }
3436
3437 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3438 io_run_task_work();
3439 io_uring_drop_tctx_refs(current);
3440 xa_for_each(&tctx->xa, index, node) {
3441 if (!llist_empty(&node->ctx->work_llist)) {
3442 WARN_ON_ONCE(node->ctx->submitter_task &&
3443 node->ctx->submitter_task != current);
3444 goto end_wait;
3445 }
3446 }
3447 /*
3448 * If we've seen completions, retry without waiting. This
3449 * avoids a race where a completion comes in before we did
3450 * prepare_to_wait().
3451 */
3452 if (inflight == tctx_inflight(tctx, !cancel_all))
3453 schedule();
3454 end_wait:
3455 finish_wait(&tctx->wait, &wait);
3456 } while (1);
3457
3458 io_uring_clean_tctx(tctx);
3459 if (cancel_all) {
3460 /*
3461 * We shouldn't run task_works after cancel, so just leave
3462 * ->in_cancel set for normal exit.
3463 */
3464 atomic_dec(&tctx->in_cancel);
3465 /* for exec all current's requests should be gone, kill tctx */
3466 __io_uring_free(current);
3467 }
3468 }
3469
3470 void __io_uring_cancel(bool cancel_all)
3471 {
3472 io_uring_cancel_generic(cancel_all, NULL);
3473 }
3474
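/*
 * Translate an mmap offset (IORING_OFF_*) into the kernel address that
 * backs the region, checking that the requested size fits the backing
 * allocation.
 */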
3475 static void *io_uring_validate_mmap_request(struct file *file,
3476 loff_t pgoff, size_t sz)
3477 {
3478 struct io_ring_ctx *ctx = file->private_data;
3479 loff_t offset = pgoff << PAGE_SHIFT;
3480 struct page *page;
3481 void *ptr;
3482
3483 switch (offset & IORING_OFF_MMAP_MASK) {
3484 case IORING_OFF_SQ_RING:
3485 case IORING_OFF_CQ_RING:
3486 /* Don't allow mmap if the ring was setup without it */
3487 if (ctx->flags & IORING_SETUP_NO_MMAP)
3488 return ERR_PTR(-EINVAL);
3489 ptr = ctx->rings;
3490 break;
3491 case IORING_OFF_SQES:
3492 /* Don't allow mmap if the ring was setup without it */
3493 if (ctx->flags & IORING_SETUP_NO_MMAP)
3494 return ERR_PTR(-EINVAL);
3495 ptr = ctx->sq_sqes;
3496 break;
3497 case IORING_OFF_PBUF_RING: {
3498 unsigned int bgid;
3499
3500 bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
3501 rcu_read_lock();
3502 ptr = io_pbuf_get_address(ctx, bgid);
3503 rcu_read_unlock();
3504 if (!ptr)
3505 return ERR_PTR(-EINVAL);
3506 break;
3507 }
3508 default:
3509 return ERR_PTR(-EINVAL);
3510 }
3511
3512 page = virt_to_head_page(ptr);
3513 if (sz > page_size(page))
3514 return ERR_PTR(-EINVAL);
3515
3516 return ptr;
3517 }
3518
3519 #ifdef CONFIG_MMU
3520
3521 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3522 {
3523 size_t sz = vma->vm_end - vma->vm_start;
3524 unsigned long pfn;
3525 void *ptr;
3526
3527 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3528 if (IS_ERR(ptr))
3529 return PTR_ERR(ptr);
3530
3531 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3532 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3533 }
3534
3535 static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
3536 unsigned long addr, unsigned long len,
3537 unsigned long pgoff, unsigned long flags)
3538 {
3539 void *ptr;
3540
3541 /*
3542 * Do not allow mapping to a user-provided address, to avoid breaking the
3543 * aliasing rules. Userspace is not able to guess the offset address of
3544 * the kernel kmalloc()ed memory area.
3545 */
3546 if (addr)
3547 return -EINVAL;
3548
3549 ptr = io_uring_validate_mmap_request(filp, pgoff, len);
3550 if (IS_ERR(ptr))
3551 return -ENOMEM;
3552
3553 /*
3554 * Some architectures have strong cache aliasing requirements.
3555 * For such architectures we need a coherent mapping which aliases
3556 * kernel memory *and* userspace memory. To achieve that:
3557 * - use a NULL file pointer to reference physical memory, and
3558 * - use the kernel virtual address of the shared io_uring context
3559 * (instead of the userspace-provided address, which has to be 0UL
3560 * anyway).
3561 * - use the same pgoff which the get_unmapped_area() uses to
3562 * calculate the page colouring.
3563 * For architectures without such aliasing requirements, the
3564 * architecture will return any suitable mapping because addr is 0.
3565 */
3566 filp = NULL;
3567 flags |= MAP_SHARED;
3568 pgoff = 0; /* has been translated to ptr above */
3569 #ifdef SHM_COLOUR
3570 addr = (uintptr_t) ptr;
3571 pgoff = addr >> PAGE_SHIFT;
3572 #else
3573 addr = 0UL;
3574 #endif
3575 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
3576 }
3577
3578 #else /* !CONFIG_MMU */
3579
3580 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3581 {
3582 return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
3583 }
3584
3585 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3586 {
3587 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
3588 }
3589
3590 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
3591 unsigned long addr, unsigned long len,
3592 unsigned long pgoff, unsigned long flags)
3593 {
3594 void *ptr;
3595
3596 ptr = io_uring_validate_mmap_request(file, pgoff, len);
3597 if (IS_ERR(ptr))
3598 return PTR_ERR(ptr);
3599
3600 return (unsigned long) ptr;
3601 }
3602
3603 #endif /* !CONFIG_MMU */
3604
3605 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3606 {
3607 if (flags & IORING_ENTER_EXT_ARG) {
3608 struct io_uring_getevents_arg arg;
3609
3610 if (argsz != sizeof(arg))
3611 return -EINVAL;
3612 if (copy_from_user(&arg, argp, sizeof(arg)))
3613 return -EFAULT;
3614 }
3615 return 0;
3616 }
3617
3618 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3619 struct __kernel_timespec __user **ts,
3620 const sigset_t __user **sig)
3621 {
3622 struct io_uring_getevents_arg arg;
3623
3624 /*
3625 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3626 * is just a pointer to the sigset_t.
3627 */
3628 if (!(flags & IORING_ENTER_EXT_ARG)) {
3629 *sig = (const sigset_t __user *) argp;
3630 *ts = NULL;
3631 return 0;
3632 }
3633
3634 /*
3635 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3636 * timespec and sigset_t pointers if good.
3637 */
3638 if (*argsz != sizeof(arg))
3639 return -EINVAL;
3640 if (copy_from_user(&arg, argp, sizeof(arg)))
3641 return -EFAULT;
3642 if (arg.pad)
3643 return -EINVAL;
3644 *sig = u64_to_user_ptr(arg.sigmask);
3645 *argsz = arg.sigmask_sz;
3646 *ts = u64_to_user_ptr(arg.ts);
3647 return 0;
3648 }
3649
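/*
 * The io_uring_enter(2) syscall: submit up to @to_submit SQEs and/or
 * wait for @min_complete completions. A typical userspace call looks
 * like this (illustrative sketch only):
 *
 *	io_uring_enter(ring_fd, to_submit, min_complete,
 *		       IORING_ENTER_GETEVENTS, NULL, 0);
 */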
3650 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3651 u32, min_complete, u32, flags, const void __user *, argp,
3652 size_t, argsz)
3653 {
3654 struct io_ring_ctx *ctx;
3655 struct file *file;
3656 long ret;
3657
3658 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3659 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3660 IORING_ENTER_REGISTERED_RING)))
3661 return -EINVAL;
3662
3663 /*
3664 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3665 * need only dereference our task private array to find it.
3666 */
3667 if (flags & IORING_ENTER_REGISTERED_RING) {
3668 struct io_uring_task *tctx = current->io_uring;
3669
3670 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3671 return -EINVAL;
3672 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3673 file = tctx->registered_rings[fd];
3674 if (unlikely(!file))
3675 return -EBADF;
3676 } else {
3677 file = fget(fd);
3678 if (unlikely(!file))
3679 return -EBADF;
3680 ret = -EOPNOTSUPP;
3681 if (unlikely(!io_is_uring_fops(file)))
3682 goto out;
3683 }
3684
3685 ctx = file->private_data;
3686 ret = -EBADFD;
3687 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3688 goto out;
3689
3690 /*
3691 * For SQ polling, the thread will do all submissions and completions.
3692 * Just return the requested submit count, and wake the thread if
3693 * we were asked to.
3694 */
3695 ret = 0;
3696 if (ctx->flags & IORING_SETUP_SQPOLL) {
3697 io_cqring_overflow_flush(ctx);
3698
3699 if (unlikely(ctx->sq_data->thread == NULL)) {
3700 ret = -EOWNERDEAD;
3701 goto out;
3702 }
3703 if (flags & IORING_ENTER_SQ_WAKEUP)
3704 wake_up(&ctx->sq_data->wait);
3705 if (flags & IORING_ENTER_SQ_WAIT)
3706 io_sqpoll_wait_sq(ctx);
3707
3708 ret = to_submit;
3709 } else if (to_submit) {
3710 ret = io_uring_add_tctx_node(ctx);
3711 if (unlikely(ret))
3712 goto out;
3713
3714 mutex_lock(&ctx->uring_lock);
3715 ret = io_submit_sqes(ctx, to_submit);
3716 if (ret != to_submit) {
3717 mutex_unlock(&ctx->uring_lock);
3718 goto out;
3719 }
3720 if (flags & IORING_ENTER_GETEVENTS) {
3721 if (ctx->syscall_iopoll)
3722 goto iopoll_locked;
3723 /*
3724 * Ignore errors, we'll soon call io_cqring_wait() and
3725 * it should handle ownership problems if any.
3726 */
3727 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3728 (void)io_run_local_work_locked(ctx);
3729 }
3730 mutex_unlock(&ctx->uring_lock);
3731 }
3732
3733 if (flags & IORING_ENTER_GETEVENTS) {
3734 int ret2;
3735
3736 if (ctx->syscall_iopoll) {
3737 /*
3738 * We disallow the app entering submit/complete with
3739 * polling, but we still need to lock the ring to
3740 * prevent racing with polled issue that got punted to
3741 * a workqueue.
3742 */
3743 mutex_lock(&ctx->uring_lock);
3744 iopoll_locked:
3745 ret2 = io_validate_ext_arg(flags, argp, argsz);
3746 if (likely(!ret2)) {
3747 min_complete = min(min_complete,
3748 ctx->cq_entries);
3749 ret2 = io_iopoll_check(ctx, min_complete);
3750 }
3751 mutex_unlock(&ctx->uring_lock);
3752 } else {
3753 const sigset_t __user *sig;
3754 struct __kernel_timespec __user *ts;
3755
3756 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3757 if (likely(!ret2)) {
3758 min_complete = min(min_complete,
3759 ctx->cq_entries);
3760 ret2 = io_cqring_wait(ctx, min_complete, sig,
3761 argsz, ts);
3762 }
3763 }
3764
3765 if (!ret) {
3766 ret = ret2;
3767
3768 /*
3769 * EBADR indicates that one or more CQE were dropped.
3770 * Once the user has been informed we can clear the bit
3771 * as they are obviously ok with those drops.
3772 */
3773 if (unlikely(ret2 == -EBADR))
3774 clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3775 &ctx->check_cq);
3776 }
3777 }
3778 out:
3779 if (!(flags & IORING_ENTER_REGISTERED_RING))
3780 fput(file);
3781 return ret;
3782 }
3783
3784 static const struct file_operations io_uring_fops = {
3785 .release = io_uring_release,
3786 .mmap = io_uring_mmap,
3787 #ifndef CONFIG_MMU
3788 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
3789 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
3790 #else
3791 .get_unmapped_area = io_uring_mmu_get_unmapped_area,
3792 #endif
3793 .poll = io_uring_poll,
3794 #ifdef CONFIG_PROC_FS
3795 .show_fdinfo = io_uring_show_fdinfo,
3796 #endif
3797 };
3798
3799 bool io_is_uring_fops(struct file *file)
3800 {
3801 return file->f_op == &io_uring_fops;
3802 }
3803
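/*
 * Allocate (or, with IORING_SETUP_NO_MMAP, map user-provided) memory for
 * the SQ/CQ rings and the SQE array, and fill in the ring masks and
 * entry counts.
 */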
3804 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3805 struct io_uring_params *p)
3806 {
3807 struct io_rings *rings;
3808 size_t size, sq_array_offset;
3809 void *ptr;
3810
3811 /* make sure these are sane, as we already accounted them */
3812 ctx->sq_entries = p->sq_entries;
3813 ctx->cq_entries = p->cq_entries;
3814
3815 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3816 if (size == SIZE_MAX)
3817 return -EOVERFLOW;
3818
3819 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3820 rings = io_mem_alloc(size);
3821 else
3822 rings = io_rings_map(ctx, p->cq_off.user_addr, size);
3823
3824 if (IS_ERR(rings))
3825 return PTR_ERR(rings);
3826
3827 ctx->rings = rings;
3828 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
3829 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3830 rings->sq_ring_mask = p->sq_entries - 1;
3831 rings->cq_ring_mask = p->cq_entries - 1;
3832 rings->sq_ring_entries = p->sq_entries;
3833 rings->cq_ring_entries = p->cq_entries;
3834
3835 if (p->flags & IORING_SETUP_SQE128)
3836 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3837 else
3838 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3839 if (size == SIZE_MAX) {
3840 io_rings_free(ctx);
3841 return -EOVERFLOW;
3842 }
3843
3844 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3845 ptr = io_mem_alloc(size);
3846 else
3847 ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
3848
3849 if (IS_ERR(ptr)) {
3850 io_rings_free(ctx);
3851 return PTR_ERR(ptr);
3852 }
3853
3854 ctx->sq_sqes = ptr;
3855 return 0;
3856 }
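
/*
 * Worked example of the SQE array sizing above: with p->sq_entries == 128
 * and IORING_SETUP_SQE128 set, the array needs
 * 2 * sizeof(struct io_uring_sqe) * 128 == 2 * 64 * 128 == 16384 bytes,
 * since 128-byte SQEs simply double the per-entry footprint.
 */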
3857
3858 static int io_uring_install_fd(struct file *file)
3859 {
3860 int fd;
3861
3862 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3863 if (fd < 0)
3864 return fd;
3865 fd_install(fd, file);
3866 return fd;
3867 }
3868
3869 /*
3870 * Allocate an anonymous fd; this is what constitutes the application-
3871 * visible backing of an io_uring instance. The application mmaps this
3872 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3873 * we have to tie this fd to a socket for file garbage collection purposes.
3874 */
3875 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3876 {
3877 struct file *file;
3878 #if defined(CONFIG_UNIX)
3879 int ret;
3880
3881 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3882 &ctx->ring_sock);
3883 if (ret)
3884 return ERR_PTR(ret);
3885 #endif
3886
3887 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
3888 O_RDWR | O_CLOEXEC, NULL);
3889 #if defined(CONFIG_UNIX)
3890 if (IS_ERR(file)) {
3891 sock_release(ctx->ring_sock);
3892 ctx->ring_sock = NULL;
3893 } else {
3894 ctx->ring_sock->file = file;
3895 }
3896 #endif
3897 return file;
3898 }
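
/*
 * Illustrative sketch (not kernel code) of how an application maps the
 * memory backed by this file, assuming 'fd' was returned by
 * io_uring_setup() and 'p' is the io_uring_params it filled in. With
 * IORING_FEAT_SINGLE_MMAP, one mapping at IORING_OFF_SQ_RING covers both
 * the SQ and CQ rings; the SQE array is always a separate mapping:
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	size_t ring_sz = sq_sz > cq_sz ? sq_sz : cq_sz;
 *
 *	void *rings = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  fd, IORING_OFF_SQES);
 */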
3899
3900 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3901 struct io_uring_params __user *params)
3902 {
3903 struct io_ring_ctx *ctx;
3904 struct io_uring_task *tctx;
3905 struct file *file;
3906 int ret;
3907
3908 if (!entries)
3909 return -EINVAL;
3910 if (entries > IORING_MAX_ENTRIES) {
3911 if (!(p->flags & IORING_SETUP_CLAMP))
3912 return -EINVAL;
3913 entries = IORING_MAX_ENTRIES;
3914 }
3915
3916 if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3917 && !(p->flags & IORING_SETUP_NO_MMAP))
3918 return -EINVAL;
3919
3920 /*
3921 * Use twice as many entries for the CQ ring. It's possible for the
3922 * application to drive a higher depth than the size of the SQ ring,
3923 * since the sqes are only used at submission time. This allows for
3924 * some flexibility in overcommitting. If the application has set
3925 * IORING_SETUP_CQSIZE, it will have passed in the desired number
3926 * of CQ ring entries manually.
3927 */
3928 p->sq_entries = roundup_pow_of_two(entries);
3929 if (p->flags & IORING_SETUP_CQSIZE) {
3930 /*
3931 * If IORING_SETUP_CQSIZE is set, we do the same roundup to a
3932 * power-of-two, if it isn't already. Beyond requiring cq_entries
3933 * >= sq_entries, we do NOT impose any cq vs sq ring sizing.
3934 */
3935 if (!p->cq_entries)
3936 return -EINVAL;
3937 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3938 if (!(p->flags & IORING_SETUP_CLAMP))
3939 return -EINVAL;
3940 p->cq_entries = IORING_MAX_CQ_ENTRIES;
3941 }
3942 p->cq_entries = roundup_pow_of_two(p->cq_entries);
3943 if (p->cq_entries < p->sq_entries)
3944 return -EINVAL;
3945 } else {
3946 p->cq_entries = 2 * p->sq_entries;
3947 }
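
/*
 * Worked example of the rounding above: io_uring_setup(entries = 100, ...)
 * without IORING_SETUP_CQSIZE yields p->sq_entries == 128 (the next power
 * of two) and p->cq_entries == 256 (twice the SQ size).
 */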
3948
3949 ctx = io_ring_ctx_alloc(p);
3950 if (!ctx)
3951 return -ENOMEM;
3952
3953 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3954 !(ctx->flags & IORING_SETUP_IOPOLL) &&
3955 !(ctx->flags & IORING_SETUP_SQPOLL))
3956 ctx->task_complete = true;
3957
3958 if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
3959 ctx->lockless_cq = true;
3960
3961 /*
3962 * lazy poll_wq activation relies on ->task_complete for synchronisation
3963 * purposes, see io_activate_pollwq()
3964 */
3965 if (!ctx->task_complete)
3966 ctx->poll_activated = true;
3967
3968 /*
3969 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
3970 * applications don't need to poll for IO completion events
3971 * themselves; they can rely on io_sq_thread to do the polling
3972 * work, which reduces cpu usage and uring_lock contention.
3973 */
3974 if (ctx->flags & IORING_SETUP_IOPOLL &&
3975 !(ctx->flags & IORING_SETUP_SQPOLL))
3976 ctx->syscall_iopoll = 1;
3977
3978 ctx->compat = in_compat_syscall();
3979 if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
3980 ctx->user = get_uid(current_user());
3981
3982 /*
3983 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3984 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3985 */
3986 ret = -EINVAL;
3987 if (ctx->flags & IORING_SETUP_SQPOLL) {
3988 /* IPI related flags don't make sense with SQPOLL */
3989 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3990 IORING_SETUP_TASKRUN_FLAG |
3991 IORING_SETUP_DEFER_TASKRUN))
3992 goto err;
3993 ctx->notify_method = TWA_SIGNAL_NO_IPI;
3994 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3995 ctx->notify_method = TWA_SIGNAL_NO_IPI;
3996 } else {
3997 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3998 !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3999 goto err;
4000 ctx->notify_method = TWA_SIGNAL;
4001 }
4002
4003 /*
4004 * For DEFER_TASKRUN we require the completion task to be the same as the
4005 * submission task. This implies that there is only one submitter, so enforce
4006 * that.
4007 */
4008 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
4009 !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
4010 goto err;
4011 }
4012
4013 /*
4014 * This is just grabbed for accounting purposes. When a process exits,
4015 * the mm is exited and dropped before the files, hence we need to hang
4016 * on to this mm purely for the purposes of being able to unaccount
4017 * memory (locked/pinned vm). It's not used for anything else.
4018 */
4019 mmgrab(current->mm);
4020 ctx->mm_account = current->mm;
4021
4022 ret = io_allocate_scq_urings(ctx, p);
4023 if (ret)
4024 goto err;
4025
4026 ret = io_sq_offload_create(ctx, p);
4027 if (ret)
4028 goto err;
4029
4030 ret = io_rsrc_init(ctx);
4031 if (ret)
4032 goto err;
4033
4034 p->sq_off.head = offsetof(struct io_rings, sq.head);
4035 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
4036 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
4037 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
4038 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
4039 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
4040 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
4041 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
4042 p->sq_off.resv1 = 0;
4043 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
4044 p->sq_off.user_addr = 0;
4045
4046 p->cq_off.head = offsetof(struct io_rings, cq.head);
4047 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
4048 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
4049 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
4050 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
4051 p->cq_off.cqes = offsetof(struct io_rings, cqes);
4052 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
4053 p->cq_off.resv1 = 0;
4054 if (!(ctx->flags & IORING_SETUP_NO_MMAP))
4055 p->cq_off.user_addr = 0;
4056
4057 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
4058 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
4059 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
4060 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
4061 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
4062 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
4063 IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;
4064
4065 if (copy_to_user(params, p, sizeof(*p))) {
4066 ret = -EFAULT;
4067 goto err;
4068 }
4069
4070 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
4071 && !(ctx->flags & IORING_SETUP_R_DISABLED))
4072 WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
4073
4074 file = io_uring_get_file(ctx);
4075 if (IS_ERR(file)) {
4076 ret = PTR_ERR(file);
4077 goto err;
4078 }
4079
4080 ret = __io_uring_add_tctx_node(ctx);
4081 if (ret)
4082 goto err_fput;
4083 tctx = current->io_uring;
4084
4085 /*
4086 * Install ring fd as the very last thing, so we don't risk someone
4087 * having closed it before we finish setup
4088 */
4089 if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
4090 ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
4091 else
4092 ret = io_uring_install_fd(file);
4093 if (ret < 0)
4094 goto err_fput;
4095
4096 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
4097 return ret;
4098 err:
4099 io_ring_ctx_wait_and_kill(ctx);
4100 return ret;
4101 err_fput:
4102 fput(file);
4103 return ret;
4104 }
4105
4106 /*
4107 * Sets up an io_uring context, and returns the fd. The application asks for
4108 * a ring size; we return the actual sq/cq ring sizes (among other things) in
4109 * the params structure passed in.
4110 */
4111 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
4112 {
4113 struct io_uring_params p;
4114 int i;
4115
4116 if (copy_from_user(&p, params, sizeof(p)))
4117 return -EFAULT;
4118 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
4119 if (p.resv[i])
4120 return -EINVAL;
4121 }
4122
4123 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
4124 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
4125 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
4126 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
4127 IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
4128 IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
4129 IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
4130 IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
4131 IORING_SETUP_NO_SQARRAY))
4132 return -EINVAL;
4133
4134 return io_uring_create(entries, &p, params);
4135 }
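
/*
 * Illustrative sketch (not kernel code): invoking the setup path from
 * userspace. There is no libc wrapper, so applications (or liburing on
 * their behalf) go through syscall(2); on return, p.features carries the
 * IORING_FEAT_* bits advertised above:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 *	if (ring_fd < 0)
 *		perror("io_uring_setup");
 */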
4136
4137 static inline bool io_uring_allowed(void)
4138 {
4139 int disabled = READ_ONCE(sysctl_io_uring_disabled);
4140 kgid_t io_uring_group;
4141
4142 if (disabled == 2)
4143 return false;
4144
4145 if (disabled == 0 || capable(CAP_SYS_ADMIN))
4146 return true;
4147
4148 io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
4149 if (!gid_valid(io_uring_group))
4150 return false;
4151
4152 return in_group_p(io_uring_group);
4153 }
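
/*
 * Administration note: the checks above are driven by the sysctls
 * kernel.io_uring_disabled (0 == enabled for all, 1 == restricted to
 * CAP_SYS_ADMIN and members of kernel.io_uring_group, 2 == disabled)
 * and kernel.io_uring_group. For example:
 *
 *	# sysctl -w kernel.io_uring_disabled=1
 *	# sysctl -w kernel.io_uring_group=1000
 */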
4154
4155 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
4156 struct io_uring_params __user *, params)
4157 {
4158 if (!io_uring_allowed())
4159 return -EPERM;
4160
4161 return io_uring_setup(entries, params);
4162 }
4163
4164 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
4165 unsigned nr_args)
4166 {
4167 struct io_uring_probe *p;
4168 size_t size;
4169 int i, ret;
4170
4171 size = struct_size(p, ops, nr_args);
4172 if (size == SIZE_MAX)
4173 return -EOVERFLOW;
4174 p = kzalloc(size, GFP_KERNEL);
4175 if (!p)
4176 return -ENOMEM;
4177
4178 ret = -EFAULT;
4179 if (copy_from_user(p, arg, size))
4180 goto out;
4181 ret = -EINVAL;
4182 if (memchr_inv(p, 0, size))
4183 goto out;
4184
4185 p->last_op = IORING_OP_LAST - 1;
4186 if (nr_args > IORING_OP_LAST)
4187 nr_args = IORING_OP_LAST;
4188
4189 for (i = 0; i < nr_args; i++) {
4190 p->ops[i].op = i;
4191 if (!io_issue_defs[i].not_supported)
4192 p->ops[i].flags = IO_URING_OP_SUPPORTED;
4193 }
4194 p->ops_len = i;
4195
4196 ret = 0;
4197 if (copy_to_user(arg, p, size))
4198 ret = -EFAULT;
4199 out:
4200 kfree(p);
4201 return ret;
4202 }
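
/*
 * Illustrative sketch (not kernel code): probing supported opcodes from
 * userspace. The probe structure must be zeroed and sized for the number
 * of ops being queried:
 *
 *	struct io_uring_probe *probe;
 *	size_t len = sizeof(*probe) + IORING_OP_LAST * sizeof(probe->ops[0]);
 *
 *	probe = calloc(1, len);
 *	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		    probe, IORING_OP_LAST) == 0 &&
 *	    (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED))
 *		puts("IORING_OP_READV is supported");
 *	free(probe);
 */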
4203
4204 static int io_register_personality(struct io_ring_ctx *ctx)
4205 {
4206 const struct cred *creds;
4207 u32 id;
4208 int ret;
4209
4210 creds = get_current_cred();
4211
4212 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
4213 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
4214 if (ret < 0) {
4215 put_cred(creds);
4216 return ret;
4217 }
4218 return id;
4219 }
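
/*
 * Illustrative sketch (not kernel code): the id returned above is what an
 * application later places in sqe->personality, so that the request is
 * issued with the credentials captured at registration time:
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	if (id >= 0)
 *		sqe->personality = id;
 */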
4220
4221 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
4222 void __user *arg, unsigned int nr_args)
4223 {
4224 struct io_uring_restriction *res;
4225 size_t size;
4226 int i, ret;
4227
4228 /* Restrictions allowed only if rings started disabled */
4229 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
4230 return -EBADFD;
4231
4232 /* We allow only a single restrictions registration */
4233 if (ctx->restrictions.registered)
4234 return -EBUSY;
4235
4236 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
4237 return -EINVAL;
4238
4239 size = array_size(nr_args, sizeof(*res));
4240 if (size == SIZE_MAX)
4241 return -EOVERFLOW;
4242
4243 res = memdup_user(arg, size);
4244 if (IS_ERR(res))
4245 return PTR_ERR(res);
4246
4247 ret = 0;
4248
4249 for (i = 0; i < nr_args; i++) {
4250 switch (res[i].opcode) {
4251 case IORING_RESTRICTION_REGISTER_OP:
4252 if (res[i].register_op >= IORING_REGISTER_LAST) {
4253 ret = -EINVAL;
4254 goto out;
4255 }
4256
4257 __set_bit(res[i].register_op,
4258 ctx->restrictions.register_op);
4259 break;
4260 case IORING_RESTRICTION_SQE_OP:
4261 if (res[i].sqe_op >= IORING_OP_LAST) {
4262 ret = -EINVAL;
4263 goto out;
4264 }
4265
4266 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
4267 break;
4268 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
4269 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
4270 break;
4271 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
4272 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
4273 break;
4274 default:
4275 ret = -EINVAL;
4276 goto out;
4277 }
4278 }
4279
4280 out:
4281 /* Reset all restrictions if an error happened */
4282 if (ret != 0)
4283 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
4284 else
4285 ctx->restrictions.registered = true;
4286
4287 kfree(res);
4288 return ret;
4289 }
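
/*
 * Illustrative sketch (not kernel code): restricting a ring created with
 * IORING_SETUP_R_DISABLED so that only readv/writev SQEs are accepted:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITEV },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 */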
4290
4291 static int io_register_enable_rings(struct io_ring_ctx *ctx)
4292 {
4293 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
4294 return -EBADFD;
4295
4296 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
4297 WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
4298 /*
4299 * Lazy activation attempts would fail if the ring was polled
4300 * before submitter_task is set.
4301 */
4302 if (wq_has_sleeper(&ctx->poll_wq))
4303 io_activate_pollwq(ctx);
4304 }
4305
4306 if (ctx->restrictions.registered)
4307 ctx->restricted = 1;
4308
4309 ctx->flags &= ~IORING_SETUP_R_DISABLED;
4310 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
4311 wake_up(&ctx->sq_data->wait);
4312 return 0;
4313 }
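
/*
 * Illustrative sketch (not kernel code): once any restrictions are in
 * place, a ring created with IORING_SETUP_R_DISABLED is enabled via:
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */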
4314
4315 static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
4316 cpumask_var_t new_mask)
4317 {
4318 int ret;
4319
4320 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
4321 ret = io_wq_cpu_affinity(current->io_uring, new_mask);
4322 } else {
4323 mutex_unlock(&ctx->uring_lock);
4324 ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
4325 mutex_lock(&ctx->uring_lock);
4326 }
4327
4328 return ret;
4329 }
4330
4331 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
4332 void __user *arg, unsigned len)
4333 {
4334 cpumask_var_t new_mask;
4335 int ret;
4336
4337 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4338 return -ENOMEM;
4339
4340 cpumask_clear(new_mask);
4341 if (len > cpumask_size())
4342 len = cpumask_size();
4343
4344 if (in_compat_syscall()) {
4345 ret = compat_get_bitmap(cpumask_bits(new_mask),
4346 (const compat_ulong_t __user *)arg,
4347 len * 8 /* CHAR_BIT */);
4348 } else {
4349 ret = copy_from_user(new_mask, arg, len);
4350 }
4351
4352 if (ret) {
4353 free_cpumask_var(new_mask);
4354 return -EFAULT;
4355 }
4356
4357 ret = __io_register_iowq_aff(ctx, new_mask);
4358 free_cpumask_var(new_mask);
4359 return ret;
4360 }
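
/*
 * Illustrative sketch (not kernel code): pinning io-wq workers (or, for
 * SQPOLL rings, the SQPOLL thread's workers) to CPUs 0-1. nr_args is the
 * byte length of the mask being passed in:
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */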
4361
4362 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
4363 {
4364 return __io_register_iowq_aff(ctx, NULL);
4365 }
4366
4367 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
4368 void __user *arg)
4369 __must_hold(&ctx->uring_lock)
4370 {
4371 struct io_tctx_node *node;
4372 struct io_uring_task *tctx = NULL;
4373 struct io_sq_data *sqd = NULL;
4374 __u32 new_count[2];
4375 int i, ret;
4376
4377 if (copy_from_user(new_count, arg, sizeof(new_count)))
4378 return -EFAULT;
4379 for (i = 0; i < ARRAY_SIZE(new_count); i++)
4380 if (new_count[i] > INT_MAX)
4381 return -EINVAL;
4382
4383 if (ctx->flags & IORING_SETUP_SQPOLL) {
4384 sqd = ctx->sq_data;
4385 if (sqd) {
4386 /*
4387 * Observe the correct sqd->lock -> ctx->uring_lock
4388 * ordering. It's fine to drop uring_lock here; we
4389 * hold a ref to the ctx.
4390 */
4391 refcount_inc(&sqd->refs);
4392 mutex_unlock(&ctx->uring_lock);
4393 mutex_lock(&sqd->lock);
4394 mutex_lock(&ctx->uring_lock);
4395 if (sqd->thread)
4396 tctx = sqd->thread->io_uring;
4397 }
4398 } else {
4399 tctx = current->io_uring;
4400 }
4401
4402 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
4403
4404 for (i = 0; i < ARRAY_SIZE(new_count); i++)
4405 if (new_count[i])
4406 ctx->iowq_limits[i] = new_count[i];
4407 ctx->iowq_limits_set = true;
4408
4409 if (tctx && tctx->io_wq) {
4410 ret = io_wq_max_workers(tctx->io_wq, new_count);
4411 if (ret)
4412 goto err;
4413 } else {
4414 memset(new_count, 0, sizeof(new_count));
4415 }
4416
4417 if (sqd) {
4418 mutex_unlock(&sqd->lock);
4419 io_put_sq_data(sqd);
4420 }
4421
4422 if (copy_to_user(arg, new_count, sizeof(new_count)))
4423 return -EFAULT;
4424
4425 /* that's it for SQPOLL, only the SQPOLL task creates requests */
4426 if (sqd)
4427 return 0;
4428
4429 /* now propagate the restriction to all registered users */
4430 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
4431 struct io_uring_task *tctx = node->task->io_uring;
4432
4433 if (WARN_ON_ONCE(!tctx->io_wq))
4434 continue;
4435
4436 for (i = 0; i < ARRAY_SIZE(new_count); i++)
4437 new_count[i] = ctx->iowq_limits[i];
4438 /* ignore errors, it always returns zero anyway */
4439 (void)io_wq_max_workers(tctx->io_wq, new_count);
4440 }
4441 return 0;
4442 err:
4443 if (sqd) {
4444 mutex_unlock(&sqd->lock);
4445 io_put_sq_data(sqd);
4446 }
4447 return ret;
4448 }
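
/*
 * Illustrative sketch (not kernel code): capping io-wq worker counts.
 * Index 0 is the bounded pool, index 1 the unbounded pool; a zero leaves
 * that limit unchanged, and the previous values are copied back:
 *
 *	__u32 counts[2] = { 4, 16 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 */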
4449
4450 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4451 void __user *arg, unsigned nr_args)
4452 __releases(ctx->uring_lock)
4453 __acquires(ctx->uring_lock)
4454 {
4455 int ret;
4456
4457 /*
4458 * We no longer quiesce the refs for register, so the ctx can't be
4459 * dying while we're holding a file ref here.
4460 */
4461 if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
4462 return -ENXIO;
4463
4464 if (ctx->submitter_task && ctx->submitter_task != current)
4465 return -EEXIST;
4466
4467 if (ctx->restricted) {
4468 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
4469 if (!test_bit(opcode, ctx->restrictions.register_op))
4470 return -EACCES;
4471 }
4472
4473 switch (opcode) {
4474 case IORING_REGISTER_BUFFERS:
4475 ret = -EFAULT;
4476 if (!arg)
4477 break;
4478 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
4479 break;
4480 case IORING_UNREGISTER_BUFFERS:
4481 ret = -EINVAL;
4482 if (arg || nr_args)
4483 break;
4484 ret = io_sqe_buffers_unregister(ctx);
4485 break;
4486 case IORING_REGISTER_FILES:
4487 ret = -EFAULT;
4488 if (!arg)
4489 break;
4490 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
4491 break;
4492 case IORING_UNREGISTER_FILES:
4493 ret = -EINVAL;
4494 if (arg || nr_args)
4495 break;
4496 ret = io_sqe_files_unregister(ctx);
4497 break;
4498 case IORING_REGISTER_FILES_UPDATE:
4499 ret = io_register_files_update(ctx, arg, nr_args);
4500 break;
4501 case IORING_REGISTER_EVENTFD:
4502 ret = -EINVAL;
4503 if (nr_args != 1)
4504 break;
4505 ret = io_eventfd_register(ctx, arg, 0);
4506 break;
4507 case IORING_REGISTER_EVENTFD_ASYNC:
4508 ret = -EINVAL;
4509 if (nr_args != 1)
4510 break;
4511 ret = io_eventfd_register(ctx, arg, 1);
4512 break;
4513 case IORING_UNREGISTER_EVENTFD:
4514 ret = -EINVAL;
4515 if (arg || nr_args)
4516 break;
4517 ret = io_eventfd_unregister(ctx);
4518 break;
4519 case IORING_REGISTER_PROBE:
4520 ret = -EINVAL;
4521 if (!arg || nr_args > 256)
4522 break;
4523 ret = io_probe(ctx, arg, nr_args);
4524 break;
4525 case IORING_REGISTER_PERSONALITY:
4526 ret = -EINVAL;
4527 if (arg || nr_args)
4528 break;
4529 ret = io_register_personality(ctx);
4530 break;
4531 case IORING_UNREGISTER_PERSONALITY:
4532 ret = -EINVAL;
4533 if (arg)
4534 break;
4535 ret = io_unregister_personality(ctx, nr_args);
4536 break;
4537 case IORING_REGISTER_ENABLE_RINGS:
4538 ret = -EINVAL;
4539 if (arg || nr_args)
4540 break;
4541 ret = io_register_enable_rings(ctx);
4542 break;
4543 case IORING_REGISTER_RESTRICTIONS:
4544 ret = io_register_restrictions(ctx, arg, nr_args);
4545 break;
4546 case IORING_REGISTER_FILES2:
4547 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
4548 break;
4549 case IORING_REGISTER_FILES_UPDATE2:
4550 ret = io_register_rsrc_update(ctx, arg, nr_args,
4551 IORING_RSRC_FILE);
4552 break;
4553 case IORING_REGISTER_BUFFERS2:
4554 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
4555 break;
4556 case IORING_REGISTER_BUFFERS_UPDATE:
4557 ret = io_register_rsrc_update(ctx, arg, nr_args,
4558 IORING_RSRC_BUFFER);
4559 break;
4560 case IORING_REGISTER_IOWQ_AFF:
4561 ret = -EINVAL;
4562 if (!arg || !nr_args)
4563 break;
4564 ret = io_register_iowq_aff(ctx, arg, nr_args);
4565 break;
4566 case IORING_UNREGISTER_IOWQ_AFF:
4567 ret = -EINVAL;
4568 if (arg || nr_args)
4569 break;
4570 ret = io_unregister_iowq_aff(ctx);
4571 break;
4572 case IORING_REGISTER_IOWQ_MAX_WORKERS:
4573 ret = -EINVAL;
4574 if (!arg || nr_args != 2)
4575 break;
4576 ret = io_register_iowq_max_workers(ctx, arg);
4577 break;
4578 case IORING_REGISTER_RING_FDS:
4579 ret = io_ringfd_register(ctx, arg, nr_args);
4580 break;
4581 case IORING_UNREGISTER_RING_FDS:
4582 ret = io_ringfd_unregister(ctx, arg, nr_args);
4583 break;
4584 case IORING_REGISTER_PBUF_RING:
4585 ret = -EINVAL;
4586 if (!arg || nr_args != 1)
4587 break;
4588 ret = io_register_pbuf_ring(ctx, arg);
4589 break;
4590 case IORING_UNREGISTER_PBUF_RING:
4591 ret = -EINVAL;
4592 if (!arg || nr_args != 1)
4593 break;
4594 ret = io_unregister_pbuf_ring(ctx, arg);
4595 break;
4596 case IORING_REGISTER_SYNC_CANCEL:
4597 ret = -EINVAL;
4598 if (!arg || nr_args != 1)
4599 break;
4600 ret = io_sync_cancel(ctx, arg);
4601 break;
4602 case IORING_REGISTER_FILE_ALLOC_RANGE:
4603 ret = -EINVAL;
4604 if (!arg || nr_args)
4605 break;
4606 ret = io_register_file_alloc_range(ctx, arg);
4607 break;
4608 default:
4609 ret = -EINVAL;
4610 break;
4611 }
4612
4613 return ret;
4614 }
4615
4616 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4617 void __user *, arg, unsigned int, nr_args)
4618 {
4619 struct io_ring_ctx *ctx;
4620 long ret = -EBADF;
4621 struct file *file;
4622 bool use_registered_ring;
4623
4624 use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
4625 opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;
4626
4627 if (opcode >= IORING_REGISTER_LAST)
4628 return -EINVAL;
4629
4630 if (use_registered_ring) {
4631 /*
4632 * The ring fd has been registered via IORING_REGISTER_RING_FDS; we
4633 * need only dereference our task-private array to find it.
4634 */
4635 struct io_uring_task *tctx = current->io_uring;
4636
4637 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
4638 return -EINVAL;
4639 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
4640 file = tctx->registered_rings[fd];
4641 if (unlikely(!file))
4642 return -EBADF;
4643 } else {
4644 file = fget(fd);
4645 if (unlikely(!file))
4646 return -EBADF;
4647 ret = -EOPNOTSUPP;
4648 if (!io_is_uring_fops(file))
4649 goto out_fput;
4650 }
4651
4652 ctx = file->private_data;
4653
4654 mutex_lock(&ctx->uring_lock);
4655 ret = __io_uring_register(ctx, opcode, arg, nr_args);
4656 mutex_unlock(&ctx->uring_lock);
4657 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
4658 out_fput:
4659 if (!use_registered_ring)
4660 fput(file);
4661 return ret;
4662 }
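
/*
 * Illustrative sketch (not kernel code): with a ring fd previously
 * registered via IORING_REGISTER_RING_FDS, 'fd' becomes the registered
 * index and the flag is OR'ed into the opcode, skipping the fget():
 *
 *	syscall(__NR_io_uring_register, registered_index,
 *		IORING_REGISTER_PROBE | IORING_REGISTER_USE_REGISTERED_RING,
 *		probe, IORING_OP_LAST);
 */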
4663
4664 static int __init io_uring_init(void)
4665 {
4666 #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
4667 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
4668 BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
4669 } while (0)
4670
4671 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
4672 __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
4673 #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
4674 __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
4675 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
4676 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
4677 BUILD_BUG_SQE_ELEM(1, __u8, flags);
4678 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
4679 BUILD_BUG_SQE_ELEM(4, __s32, fd);
4680 BUILD_BUG_SQE_ELEM(8, __u64, off);
4681 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
4682 BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
4683 BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
4684 BUILD_BUG_SQE_ELEM(16, __u64, addr);
4685 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
4686 BUILD_BUG_SQE_ELEM(24, __u32, len);
4687 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
4688 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
4689 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
4690 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
4691 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
4692 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
4693 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
4694 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
4695 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
4696 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
4697 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
4698 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
4699 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
4700 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
4701 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
4702 BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
4703 BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
4704 BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
4705 BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
4706 BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
4707 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
4708 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
4709 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
4710 BUILD_BUG_SQE_ELEM(42, __u16, personality);
4711 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
4712 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
4713 BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
4714 BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
4715 BUILD_BUG_SQE_ELEM(48, __u64, addr3);
4716 BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
4717 BUILD_BUG_SQE_ELEM(56, __u64, __pad2);
4718
4719 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
4720 sizeof(struct io_uring_rsrc_update));
4721 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
4722 sizeof(struct io_uring_rsrc_update2));
4723
4724 /* ->buf_index is u16 */
4725 BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
4726 BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
4727 offsetof(struct io_uring_buf_ring, tail));
4728
4729 /* should fit into one byte */
4730 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
4731 BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
4732 BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
4733
4734 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
4735
4736 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
4737
4738 /* top 8 bits are for internal use */
4739 BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);
4740
4741 io_uring_optable_init();
4742
4743 /*
4744 * Allow user copy in the per-command field, which starts after the
4745 * file field in io_kiocb and runs until the opcode field. The openat2
4746 * handling requires copying user memory into the io_kiocb object in
4747 * that range, and HARDENED_USERCOPY will complain if we haven't
4748 * correctly annotated this range.
4749 */
4750 req_cachep = kmem_cache_create_usercopy("io_kiocb",
4751 sizeof(struct io_kiocb), 0,
4752 SLAB_HWCACHE_ALIGN | SLAB_PANIC |
4753 SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
4754 offsetof(struct io_kiocb, cmd.data),
4755 sizeof_field(struct io_kiocb, cmd.data), NULL);
4756 io_buf_cachep = kmem_cache_create("io_buffer", sizeof(struct io_buffer), 0,
4757 SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
4758 NULL);
4759
4760 #ifdef CONFIG_SYSCTL
4761 register_sysctl_init("kernel", kernel_io_uring_disabled_table);
4762 #endif
4763
4764 return 0;
4765 }
4766 __initcall(io_uring_init);