git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
io_uring: import 5.15-stable io_uring
author Jens Axboe <axboe@kernel.dk>
Thu, 22 Dec 2022 21:30:11 +0000 (14:30 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 4 Jan 2023 10:39:23 +0000 (11:39 +0100)
No upstream commit exists.

This imports the io_uring codebase from 5.15.85, wholesale. Changes
from that code base:

- Drop IOCB_ALLOC_CACHE, we don't have that in 5.10.
- Drop MKDIRAT/SYMLINKAT/LINKAT. Would require further VFS backports,
  and we don't support these in 5.10 to begin with.
- sock_from_file() keeps the old-style calling convention (see the sketch
  below).
- Use compat_get_bitmap() only for CONFIG_COMPAT=y.
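
The sock_from_file() item is only about the helper's prototype: in 5.10 it
still reports the error through a second argument, while the imported 5.15
code expects the single-argument form that returns NULL for non-sockets. A
minimal sketch of the 5.10-style call site the backport keeps (the function
and variable names below are illustrative, not taken from the patch):

    #include <linux/fs.h>
    #include <linux/net.h>

    /*
     * Sketch only: the 5.10 prototype of sock_from_file() reports the error
     * (-ENOTSOCK) through its second argument; the imported 5.15 code calls
     * sock_from_file(file) and returns -ENOTSOCK itself when it gets NULL.
     */
    static struct socket *req_socket(struct file *file, int *error)
    {
            struct socket *sock;

            sock = sock_from_file(file, error);
            if (!sock)
                    return NULL;    /* *error now holds -ENOTSOCK */
            return sock;
    }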

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
15 files changed:
Makefile
fs/Makefile
fs/io-wq.c [deleted file]
include/linux/io_uring.h
include/linux/sched.h
include/linux/syscalls.h
include/trace/events/io_uring.h
include/uapi/linux/io_uring.h
io_uring/Makefile [new file with mode: 0644]
io_uring/io-wq.c [new file with mode: 0644]
io_uring/io-wq.h [moved from fs/io-wq.h with 81% similarity]
io_uring/io_uring.c [moved from fs/io_uring.c with 51% similarity]
kernel/exit.c
kernel/fork.c
kernel/sched/core.c

index 68f8efa0cc3019c3eb380db60acd65de0b3ce7ef..14bb1bb37770cd60f2aff29d3923cda9a88d39c0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1128,7 +1128,7 @@ export MODORDER := $(extmod-prefix)modules.order
 export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y         += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y         += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ io_uring/
 
 vmlinux-dirs   := $(patsubst %/,%,$(filter %/, \
                     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
index 999d1a23f036c9f96a06e056d333e2e3832cdc37..c660ce28f14980b00f284f8f4493b8b12cd47dc7 100644 (file)
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -32,8 +32,6 @@ obj-$(CONFIG_TIMERFD)         += timerfd.o
 obj-$(CONFIG_EVENTFD)          += eventfd.o
 obj-$(CONFIG_USERFAULTFD)      += userfaultfd.o
 obj-$(CONFIG_AIO)               += aio.o
-obj-$(CONFIG_IO_URING)         += io_uring.o
-obj-$(CONFIG_IO_WQ)            += io-wq.o
 obj-$(CONFIG_FS_DAX)           += dax.o
 obj-$(CONFIG_FS_ENCRYPTION)    += crypto/
 obj-$(CONFIG_FS_VERITY)                += verity/
diff --git a/fs/io-wq.c b/fs/io-wq.c
deleted file mode 100644 (file)
index 3d5fc76..0000000
--- a/fs/io-wq.c
+++ /dev/null
@@ -1,1242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Basic worker thread pool for io_uring
- *
- * Copyright (C) 2019 Jens Axboe
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched/signal.h>
-#include <linux/mm.h>
-#include <linux/sched/mm.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/kthread.h>
-#include <linux/rculist_nulls.h>
-#include <linux/fs_struct.h>
-#include <linux/task_work.h>
-#include <linux/blk-cgroup.h>
-#include <linux/audit.h>
-#include <linux/cpu.h>
-
-#include "../kernel/sched/sched.h"
-#include "io-wq.h"
-
-#define WORKER_IDLE_TIMEOUT    (5 * HZ)
-
-enum {
-       IO_WORKER_F_UP          = 1,    /* up and active */
-       IO_WORKER_F_RUNNING     = 2,    /* account as running */
-       IO_WORKER_F_FREE        = 4,    /* worker on free list */
-       IO_WORKER_F_FIXED       = 8,    /* static idle worker */
-       IO_WORKER_F_BOUND       = 16,   /* is doing bounded work */
-};
-
-enum {
-       IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
-       IO_WQ_BIT_CANCEL        = 1,    /* cancel work on list */
-       IO_WQ_BIT_ERROR         = 2,    /* error on setup */
-};
-
-enum {
-       IO_WQE_FLAG_STALLED     = 1,    /* stalled on hash */
-};
-
-/*
- * One for each thread in a wqe pool
- */
-struct io_worker {
-       refcount_t ref;
-       unsigned flags;
-       struct hlist_nulls_node nulls_node;
-       struct list_head all_list;
-       struct task_struct *task;
-       struct io_wqe *wqe;
-
-       struct io_wq_work *cur_work;
-       spinlock_t lock;
-
-       struct rcu_head rcu;
-       struct mm_struct *mm;
-#ifdef CONFIG_BLK_CGROUP
-       struct cgroup_subsys_state *blkcg_css;
-#endif
-       const struct cred *cur_creds;
-       const struct cred *saved_creds;
-       struct files_struct *restore_files;
-       struct nsproxy *restore_nsproxy;
-       struct fs_struct *restore_fs;
-};
-
-#if BITS_PER_LONG == 64
-#define IO_WQ_HASH_ORDER       6
-#else
-#define IO_WQ_HASH_ORDER       5
-#endif
-
-#define IO_WQ_NR_HASH_BUCKETS  (1u << IO_WQ_HASH_ORDER)
-
-struct io_wqe_acct {
-       unsigned nr_workers;
-       unsigned max_workers;
-       atomic_t nr_running;
-};
-
-enum {
-       IO_WQ_ACCT_BOUND,
-       IO_WQ_ACCT_UNBOUND,
-};
-
-/*
- * Per-node worker thread pool
- */
-struct io_wqe {
-       struct {
-               raw_spinlock_t lock;
-               struct io_wq_work_list work_list;
-               unsigned long hash_map;
-               unsigned flags;
-       } ____cacheline_aligned_in_smp;
-
-       int node;
-       struct io_wqe_acct acct[2];
-
-       struct hlist_nulls_head free_list;
-       struct list_head all_list;
-
-       struct io_wq *wq;
-       struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
-};
-
-/*
- * Per io_wq state
-  */
-struct io_wq {
-       struct io_wqe **wqes;
-       unsigned long state;
-
-       free_work_fn *free_work;
-       io_wq_work_fn *do_work;
-
-       struct task_struct *manager;
-       struct user_struct *user;
-       refcount_t refs;
-       struct completion done;
-
-       struct hlist_node cpuhp_node;
-
-       refcount_t use_refs;
-};
-
-static enum cpuhp_state io_wq_online;
-
-static bool io_worker_get(struct io_worker *worker)
-{
-       return refcount_inc_not_zero(&worker->ref);
-}
-
-static void io_worker_release(struct io_worker *worker)
-{
-       if (refcount_dec_and_test(&worker->ref))
-               wake_up_process(worker->task);
-}
-
-/*
- * Note: drops the wqe->lock if returning true! The caller must re-acquire
- * the lock in that case. Some callers need to restart handling if this
- * happens, so we can't just re-acquire the lock on behalf of the caller.
- */
-static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
-{
-       bool dropped_lock = false;
-
-       if (worker->saved_creds) {
-               revert_creds(worker->saved_creds);
-               worker->cur_creds = worker->saved_creds = NULL;
-       }
-
-       if (current->files != worker->restore_files) {
-               __acquire(&wqe->lock);
-               raw_spin_unlock_irq(&wqe->lock);
-               dropped_lock = true;
-
-               task_lock(current);
-               current->files = worker->restore_files;
-               current->nsproxy = worker->restore_nsproxy;
-               task_unlock(current);
-       }
-
-       if (current->fs != worker->restore_fs)
-               current->fs = worker->restore_fs;
-
-       /*
-        * If we have an active mm, we need to drop the wq lock before unusing
-        * it. If we do, return true and let the caller retry the idle loop.
-        */
-       if (worker->mm) {
-               if (!dropped_lock) {
-                       __acquire(&wqe->lock);
-                       raw_spin_unlock_irq(&wqe->lock);
-                       dropped_lock = true;
-               }
-               __set_current_state(TASK_RUNNING);
-               kthread_unuse_mm(worker->mm);
-               mmput(worker->mm);
-               worker->mm = NULL;
-       }
-
-#ifdef CONFIG_BLK_CGROUP
-       if (worker->blkcg_css) {
-               kthread_associate_blkcg(NULL);
-               worker->blkcg_css = NULL;
-       }
-#endif
-       if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
-               current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-       return dropped_lock;
-}
-
-static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
-                                                  struct io_wq_work *work)
-{
-       if (work->flags & IO_WQ_WORK_UNBOUND)
-               return &wqe->acct[IO_WQ_ACCT_UNBOUND];
-
-       return &wqe->acct[IO_WQ_ACCT_BOUND];
-}
-
-static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
-                                                 struct io_worker *worker)
-{
-       if (worker->flags & IO_WORKER_F_BOUND)
-               return &wqe->acct[IO_WQ_ACCT_BOUND];
-
-       return &wqe->acct[IO_WQ_ACCT_UNBOUND];
-}
-
-static void io_worker_exit(struct io_worker *worker)
-{
-       struct io_wqe *wqe = worker->wqe;
-       struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-
-       /*
-        * If we're not at zero, someone else is holding a brief reference
-        * to the worker. Wait for that to go away.
-        */
-       set_current_state(TASK_INTERRUPTIBLE);
-       if (!refcount_dec_and_test(&worker->ref))
-               schedule();
-       __set_current_state(TASK_RUNNING);
-
-       preempt_disable();
-       current->flags &= ~PF_IO_WORKER;
-       if (worker->flags & IO_WORKER_F_RUNNING)
-               atomic_dec(&acct->nr_running);
-       if (!(worker->flags & IO_WORKER_F_BOUND))
-               atomic_dec(&wqe->wq->user->processes);
-       worker->flags = 0;
-       preempt_enable();
-
-       raw_spin_lock_irq(&wqe->lock);
-       hlist_nulls_del_rcu(&worker->nulls_node);
-       list_del_rcu(&worker->all_list);
-       if (__io_worker_unuse(wqe, worker)) {
-               __release(&wqe->lock);
-               raw_spin_lock_irq(&wqe->lock);
-       }
-       acct->nr_workers--;
-       raw_spin_unlock_irq(&wqe->lock);
-
-       kfree_rcu(worker, rcu);
-       if (refcount_dec_and_test(&wqe->wq->refs))
-               complete(&wqe->wq->done);
-}
-
-static inline bool io_wqe_run_queue(struct io_wqe *wqe)
-       __must_hold(wqe->lock)
-{
-       if (!wq_list_empty(&wqe->work_list) &&
-           !(wqe->flags & IO_WQE_FLAG_STALLED))
-               return true;
-       return false;
-}
-
-/*
- * Check head of free list for an available worker. If one isn't available,
- * caller must wake up the wq manager to create one.
- */
-static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
-       __must_hold(RCU)
-{
-       struct hlist_nulls_node *n;
-       struct io_worker *worker;
-
-       n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
-       if (is_a_nulls(n))
-               return false;
-
-       worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
-       if (io_worker_get(worker)) {
-               wake_up_process(worker->task);
-               io_worker_release(worker);
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * We need a worker. If we find a free one, we're good. If not, and we're
- * below the max number of workers, wake up the manager to create one.
- */
-static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
-{
-       bool ret;
-
-       /*
-        * Most likely an attempt to queue unbounded work on an io_wq that
-        * wasn't setup with any unbounded workers.
-        */
-       if (unlikely(!acct->max_workers))
-               pr_warn_once("io-wq is not configured for unbound workers");
-
-       rcu_read_lock();
-       ret = io_wqe_activate_free_worker(wqe);
-       rcu_read_unlock();
-
-       if (!ret && acct->nr_workers < acct->max_workers)
-               wake_up_process(wqe->wq->manager);
-}
-
-static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
-{
-       struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-
-       atomic_inc(&acct->nr_running);
-}
-
-static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
-       __must_hold(wqe->lock)
-{
-       struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-
-       if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
-               io_wqe_wake_worker(wqe, acct);
-}
-
-static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
-{
-       allow_kernel_signal(SIGINT);
-
-       current->flags |= PF_IO_WORKER;
-
-       worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
-       worker->restore_files = current->files;
-       worker->restore_nsproxy = current->nsproxy;
-       worker->restore_fs = current->fs;
-       io_wqe_inc_running(wqe, worker);
-}
-
-/*
- * Worker will start processing some work. Move it to the busy list, if
- * it's currently on the freelist
- */
-static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
-                            struct io_wq_work *work)
-       __must_hold(wqe->lock)
-{
-       bool worker_bound, work_bound;
-
-       if (worker->flags & IO_WORKER_F_FREE) {
-               worker->flags &= ~IO_WORKER_F_FREE;
-               hlist_nulls_del_init_rcu(&worker->nulls_node);
-       }
-
-       /*
-        * If worker is moving from bound to unbound (or vice versa), then
-        * ensure we update the running accounting.
-        */
-       worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
-       work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
-       if (worker_bound != work_bound) {
-               io_wqe_dec_running(wqe, worker);
-               if (work_bound) {
-                       worker->flags |= IO_WORKER_F_BOUND;
-                       wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
-                       wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
-                       atomic_dec(&wqe->wq->user->processes);
-               } else {
-                       worker->flags &= ~IO_WORKER_F_BOUND;
-                       wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
-                       wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
-                       atomic_inc(&wqe->wq->user->processes);
-               }
-               io_wqe_inc_running(wqe, worker);
-        }
-}
-
-/*
- * No work, worker going to sleep. Move to freelist, and unuse mm if we
- * have one attached. Dropping the mm may potentially sleep, so we drop
- * the lock in that case and return success. Since the caller has to
- * retry the loop in that case (we changed task state), we don't regrab
- * the lock if we return success.
- */
-static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
-       __must_hold(wqe->lock)
-{
-       if (!(worker->flags & IO_WORKER_F_FREE)) {
-               worker->flags |= IO_WORKER_F_FREE;
-               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
-       }
-
-       return __io_worker_unuse(wqe, worker);
-}
-
-static inline unsigned int io_get_work_hash(struct io_wq_work *work)
-{
-       return work->flags >> IO_WQ_HASH_SHIFT;
-}
-
-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
-       __must_hold(wqe->lock)
-{
-       struct io_wq_work_node *node, *prev;
-       struct io_wq_work *work, *tail;
-       unsigned int hash;
-
-       wq_list_for_each(node, prev, &wqe->work_list) {
-               work = container_of(node, struct io_wq_work, list);
-
-               /* not hashed, can run anytime */
-               if (!io_wq_is_hashed(work)) {
-                       wq_list_del(&wqe->work_list, node, prev);
-                       return work;
-               }
-
-               /* hashed, can run if not already running */
-               hash = io_get_work_hash(work);
-               if (!(wqe->hash_map & BIT(hash))) {
-                       wqe->hash_map |= BIT(hash);
-                       /* all items with this hash lie in [work, tail] */
-                       tail = wqe->hash_tail[hash];
-                       wqe->hash_tail[hash] = NULL;
-                       wq_list_cut(&wqe->work_list, &tail->list, prev);
-                       return work;
-               }
-       }
-
-       return NULL;
-}
-
-static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
-{
-       if (worker->mm) {
-               kthread_unuse_mm(worker->mm);
-               mmput(worker->mm);
-               worker->mm = NULL;
-       }
-
-       if (mmget_not_zero(work->identity->mm)) {
-               kthread_use_mm(work->identity->mm);
-               worker->mm = work->identity->mm;
-               return;
-       }
-
-       /* failed grabbing mm, ensure work gets cancelled */
-       work->flags |= IO_WQ_WORK_CANCEL;
-}
-
-static inline void io_wq_switch_blkcg(struct io_worker *worker,
-                                     struct io_wq_work *work)
-{
-#ifdef CONFIG_BLK_CGROUP
-       if (!(work->flags & IO_WQ_WORK_BLKCG))
-               return;
-       if (work->identity->blkcg_css != worker->blkcg_css) {
-               kthread_associate_blkcg(work->identity->blkcg_css);
-               worker->blkcg_css = work->identity->blkcg_css;
-       }
-#endif
-}
-
-static void io_wq_switch_creds(struct io_worker *worker,
-                              struct io_wq_work *work)
-{
-       const struct cred *old_creds = override_creds(work->identity->creds);
-
-       worker->cur_creds = work->identity->creds;
-       if (worker->saved_creds)
-               put_cred(old_creds); /* creds set by previous switch */
-       else
-               worker->saved_creds = old_creds;
-}
-
-static void io_impersonate_work(struct io_worker *worker,
-                               struct io_wq_work *work)
-{
-       if ((work->flags & IO_WQ_WORK_FILES) &&
-           current->files != work->identity->files) {
-               task_lock(current);
-               current->files = work->identity->files;
-               current->nsproxy = work->identity->nsproxy;
-               task_unlock(current);
-               if (!work->identity->files) {
-                       /* failed grabbing files, ensure work gets cancelled */
-                       work->flags |= IO_WQ_WORK_CANCEL;
-               }
-       }
-       if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
-               current->fs = work->identity->fs;
-       if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
-               io_wq_switch_mm(worker, work);
-       if ((work->flags & IO_WQ_WORK_CREDS) &&
-           worker->cur_creds != work->identity->creds)
-               io_wq_switch_creds(worker, work);
-       if (work->flags & IO_WQ_WORK_FSIZE)
-               current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
-       else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
-               current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-       io_wq_switch_blkcg(worker, work);
-#ifdef CONFIG_AUDIT
-       current->loginuid = work->identity->loginuid;
-       current->sessionid = work->identity->sessionid;
-#endif
-}
-
-static void io_assign_current_work(struct io_worker *worker,
-                                  struct io_wq_work *work)
-{
-       if (work) {
-               /* flush pending signals before assigning new work */
-               if (signal_pending(current))
-                       flush_signals(current);
-               cond_resched();
-       }
-
-#ifdef CONFIG_AUDIT
-       current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
-       current->sessionid = AUDIT_SID_UNSET;
-#endif
-
-       spin_lock_irq(&worker->lock);
-       worker->cur_work = work;
-       spin_unlock_irq(&worker->lock);
-}
-
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
-
-static void io_worker_handle_work(struct io_worker *worker)
-       __releases(wqe->lock)
-{
-       struct io_wqe *wqe = worker->wqe;
-       struct io_wq *wq = wqe->wq;
-
-       do {
-               struct io_wq_work *work;
-get_next:
-               /*
-                * If we got some work, mark us as busy. If we didn't, but
-                * the list isn't empty, it means we stalled on hashed work.
-                * Mark us stalled so we don't keep looking for work when we
-                * can't make progress, any work completion or insertion will
-                * clear the stalled flag.
-                */
-               work = io_get_next_work(wqe);
-               if (work)
-                       __io_worker_busy(wqe, worker, work);
-               else if (!wq_list_empty(&wqe->work_list))
-                       wqe->flags |= IO_WQE_FLAG_STALLED;
-
-               raw_spin_unlock_irq(&wqe->lock);
-               if (!work)
-                       break;
-               io_assign_current_work(worker, work);
-
-               /* handle a whole dependent link */
-               do {
-                       struct io_wq_work *old_work, *next_hashed, *linked;
-                       unsigned int hash = io_get_work_hash(work);
-
-                       next_hashed = wq_next_work(work);
-                       io_impersonate_work(worker, work);
-                       /*
-                        * OK to set IO_WQ_WORK_CANCEL even for uncancellable
-                        * work, the worker function will do the right thing.
-                        */
-                       if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
-                               work->flags |= IO_WQ_WORK_CANCEL;
-
-                       old_work = work;
-                       linked = wq->do_work(work);
-
-                       work = next_hashed;
-                       if (!work && linked && !io_wq_is_hashed(linked)) {
-                               work = linked;
-                               linked = NULL;
-                       }
-                       io_assign_current_work(worker, work);
-                       wq->free_work(old_work);
-
-                       if (linked)
-                               io_wqe_enqueue(wqe, linked);
-
-                       if (hash != -1U && !next_hashed) {
-                               raw_spin_lock_irq(&wqe->lock);
-                               wqe->hash_map &= ~BIT_ULL(hash);
-                               wqe->flags &= ~IO_WQE_FLAG_STALLED;
-                               /* skip unnecessary unlock-lock wqe->lock */
-                               if (!work)
-                                       goto get_next;
-                               raw_spin_unlock_irq(&wqe->lock);
-                       }
-               } while (work);
-
-               raw_spin_lock_irq(&wqe->lock);
-       } while (1);
-}
-
-static int io_wqe_worker(void *data)
-{
-       struct io_worker *worker = data;
-       struct io_wqe *wqe = worker->wqe;
-       struct io_wq *wq = wqe->wq;
-
-       io_worker_start(wqe, worker);
-
-       while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-loop:
-               raw_spin_lock_irq(&wqe->lock);
-               if (io_wqe_run_queue(wqe)) {
-                       __set_current_state(TASK_RUNNING);
-                       io_worker_handle_work(worker);
-                       goto loop;
-               }
-               /* drops the lock on success, retry */
-               if (__io_worker_idle(wqe, worker)) {
-                       __release(&wqe->lock);
-                       goto loop;
-               }
-               raw_spin_unlock_irq(&wqe->lock);
-               if (signal_pending(current))
-                       flush_signals(current);
-               if (schedule_timeout(WORKER_IDLE_TIMEOUT))
-                       continue;
-               /* timed out, exit unless we're the fixed worker */
-               if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
-                   !(worker->flags & IO_WORKER_F_FIXED))
-                       break;
-       }
-
-       if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-               raw_spin_lock_irq(&wqe->lock);
-               if (!wq_list_empty(&wqe->work_list))
-                       io_worker_handle_work(worker);
-               else
-                       raw_spin_unlock_irq(&wqe->lock);
-       }
-
-       io_worker_exit(worker);
-       return 0;
-}
-
-/*
- * Called when a worker is scheduled in. Mark us as currently running.
- */
-void io_wq_worker_running(struct task_struct *tsk)
-{
-       struct io_worker *worker = kthread_data(tsk);
-       struct io_wqe *wqe = worker->wqe;
-
-       if (!(worker->flags & IO_WORKER_F_UP))
-               return;
-       if (worker->flags & IO_WORKER_F_RUNNING)
-               return;
-       worker->flags |= IO_WORKER_F_RUNNING;
-       io_wqe_inc_running(wqe, worker);
-}
-
-/*
- * Called when worker is going to sleep. If there are no workers currently
- * running and we have work pending, wake up a free one or have the manager
- * set one up.
- */
-void io_wq_worker_sleeping(struct task_struct *tsk)
-{
-       struct io_worker *worker = kthread_data(tsk);
-       struct io_wqe *wqe = worker->wqe;
-
-       if (!(worker->flags & IO_WORKER_F_UP))
-               return;
-       if (!(worker->flags & IO_WORKER_F_RUNNING))
-               return;
-
-       worker->flags &= ~IO_WORKER_F_RUNNING;
-
-       raw_spin_lock_irq(&wqe->lock);
-       io_wqe_dec_running(wqe, worker);
-       raw_spin_unlock_irq(&wqe->lock);
-}
-
-static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
-{
-       struct io_wqe_acct *acct = &wqe->acct[index];
-       struct io_worker *worker;
-
-       worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
-       if (!worker)
-               return false;
-
-       refcount_set(&worker->ref, 1);
-       worker->nulls_node.pprev = NULL;
-       worker->wqe = wqe;
-       spin_lock_init(&worker->lock);
-
-       worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
-                               "io_wqe_worker-%d/%d", index, wqe->node);
-       if (IS_ERR(worker->task)) {
-               kfree(worker);
-               return false;
-       }
-       kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
-
-       raw_spin_lock_irq(&wqe->lock);
-       hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
-       list_add_tail_rcu(&worker->all_list, &wqe->all_list);
-       worker->flags |= IO_WORKER_F_FREE;
-       if (index == IO_WQ_ACCT_BOUND)
-               worker->flags |= IO_WORKER_F_BOUND;
-       if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
-               worker->flags |= IO_WORKER_F_FIXED;
-       acct->nr_workers++;
-       raw_spin_unlock_irq(&wqe->lock);
-
-       if (index == IO_WQ_ACCT_UNBOUND)
-               atomic_inc(&wq->user->processes);
-
-       refcount_inc(&wq->refs);
-       wake_up_process(worker->task);
-       return true;
-}
-
-static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
-       __must_hold(wqe->lock)
-{
-       struct io_wqe_acct *acct = &wqe->acct[index];
-
-       /* if we have available workers or no work, no need */
-       if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
-               return false;
-       return acct->nr_workers < acct->max_workers;
-}
-
-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
-{
-       send_sig(SIGINT, worker->task, 1);
-       return false;
-}
-
-/*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
-                                 bool (*func)(struct io_worker *, void *),
-                                 void *data)
-{
-       struct io_worker *worker;
-       bool ret = false;
-
-       list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
-               if (io_worker_get(worker)) {
-                       /* no task if node is/was offline */
-                       if (worker->task)
-                               ret = func(worker, data);
-                       io_worker_release(worker);
-                       if (ret)
-                               break;
-               }
-       }
-
-       return ret;
-}
-
-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
-{
-       wake_up_process(worker->task);
-       return false;
-}
-
-/*
- * Manager thread. Tasked with creating new workers, if we need them.
- */
-static int io_wq_manager(void *data)
-{
-       struct io_wq *wq = data;
-       int node;
-
-       /* create fixed workers */
-       refcount_set(&wq->refs, 1);
-       for_each_node(node) {
-               if (!node_online(node))
-                       continue;
-               if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
-                       continue;
-               set_bit(IO_WQ_BIT_ERROR, &wq->state);
-               set_bit(IO_WQ_BIT_EXIT, &wq->state);
-               goto out;
-       }
-
-       complete(&wq->done);
-
-       while (!kthread_should_stop()) {
-               if (current->task_works)
-                       task_work_run();
-
-               for_each_node(node) {
-                       struct io_wqe *wqe = wq->wqes[node];
-                       bool fork_worker[2] = { false, false };
-
-                       if (!node_online(node))
-                               continue;
-
-                       raw_spin_lock_irq(&wqe->lock);
-                       if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
-                               fork_worker[IO_WQ_ACCT_BOUND] = true;
-                       if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
-                               fork_worker[IO_WQ_ACCT_UNBOUND] = true;
-                       raw_spin_unlock_irq(&wqe->lock);
-                       if (fork_worker[IO_WQ_ACCT_BOUND])
-                               create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
-                       if (fork_worker[IO_WQ_ACCT_UNBOUND])
-                               create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ);
-       }
-
-       if (current->task_works)
-               task_work_run();
-
-out:
-       if (refcount_dec_and_test(&wq->refs)) {
-               complete(&wq->done);
-               return 0;
-       }
-       /* if ERROR is set and we get here, we have workers to wake */
-       if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
-               rcu_read_lock();
-               for_each_node(node)
-                       io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
-               rcu_read_unlock();
-       }
-       return 0;
-}
-
-static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
-                           struct io_wq_work *work)
-{
-       bool free_worker;
-
-       if (!(work->flags & IO_WQ_WORK_UNBOUND))
-               return true;
-       if (atomic_read(&acct->nr_running))
-               return true;
-
-       rcu_read_lock();
-       free_worker = !hlist_nulls_empty(&wqe->free_list);
-       rcu_read_unlock();
-       if (free_worker)
-               return true;
-
-       if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
-           !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
-               return false;
-
-       return true;
-}
-
-static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
-{
-       struct io_wq *wq = wqe->wq;
-
-       do {
-               struct io_wq_work *old_work = work;
-
-               work->flags |= IO_WQ_WORK_CANCEL;
-               work = wq->do_work(work);
-               wq->free_work(old_work);
-       } while (work);
-}
-
-static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
-{
-       unsigned int hash;
-       struct io_wq_work *tail;
-
-       if (!io_wq_is_hashed(work)) {
-append:
-               wq_list_add_tail(&work->list, &wqe->work_list);
-               return;
-       }
-
-       hash = io_get_work_hash(work);
-       tail = wqe->hash_tail[hash];
-       wqe->hash_tail[hash] = work;
-       if (!tail)
-               goto append;
-
-       wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
-}
-
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
-{
-       struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
-       bool do_wake;
-       unsigned long flags;
-
-       /*
-        * Do early check to see if we need a new unbound worker, and if we do,
-        * if we're allowed to do so. This isn't 100% accurate as there's a
-        * gap between this check and incrementing the value, but that's OK.
-        * It's close enough to not be an issue, fork() has the same delay.
-        */
-       if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
-               io_run_cancel(work, wqe);
-               return;
-       }
-
-       raw_spin_lock_irqsave(&wqe->lock, flags);
-       io_wqe_insert_work(wqe, work);
-       wqe->flags &= ~IO_WQE_FLAG_STALLED;
-       do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) ||
-                       !atomic_read(&acct->nr_running);
-       raw_spin_unlock_irqrestore(&wqe->lock, flags);
-
-       if (do_wake)
-               io_wqe_wake_worker(wqe, acct);
-}
-
-void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
-{
-       struct io_wqe *wqe = wq->wqes[numa_node_id()];
-
-       io_wqe_enqueue(wqe, work);
-}
-
-/*
- * Work items that hash to the same value will not be done in parallel.
- * Used to limit concurrent writes, generally hashed by inode.
- */
-void io_wq_hash_work(struct io_wq_work *work, void *val)
-{
-       unsigned int bit;
-
-       bit = hash_ptr(val, IO_WQ_HASH_ORDER);
-       work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
-}
-
-void io_wq_cancel_all(struct io_wq *wq)
-{
-       int node;
-
-       set_bit(IO_WQ_BIT_CANCEL, &wq->state);
-
-       rcu_read_lock();
-       for_each_node(node) {
-               struct io_wqe *wqe = wq->wqes[node];
-
-               io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
-       }
-       rcu_read_unlock();
-}
-
-struct io_cb_cancel_data {
-       work_cancel_fn *fn;
-       void *data;
-       int nr_running;
-       int nr_pending;
-       bool cancel_all;
-};
-
-static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
-{
-       struct io_cb_cancel_data *match = data;
-       unsigned long flags;
-
-       /*
-        * Hold the lock to avoid ->cur_work going out of scope, caller
-        * may dereference the passed in work.
-        */
-       spin_lock_irqsave(&worker->lock, flags);
-       if (worker->cur_work &&
-           !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
-           match->fn(worker->cur_work, match->data)) {
-               send_sig(SIGINT, worker->task, 1);
-               match->nr_running++;
-       }
-       spin_unlock_irqrestore(&worker->lock, flags);
-
-       return match->nr_running && !match->cancel_all;
-}
-
-static inline void io_wqe_remove_pending(struct io_wqe *wqe,
-                                        struct io_wq_work *work,
-                                        struct io_wq_work_node *prev)
-{
-       unsigned int hash = io_get_work_hash(work);
-       struct io_wq_work *prev_work = NULL;
-
-       if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
-               if (prev)
-                       prev_work = container_of(prev, struct io_wq_work, list);
-               if (prev_work && io_get_work_hash(prev_work) == hash)
-                       wqe->hash_tail[hash] = prev_work;
-               else
-                       wqe->hash_tail[hash] = NULL;
-       }
-       wq_list_del(&wqe->work_list, &work->list, prev);
-}
-
-static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
-                                      struct io_cb_cancel_data *match)
-{
-       struct io_wq_work_node *node, *prev;
-       struct io_wq_work *work;
-       unsigned long flags;
-
-retry:
-       raw_spin_lock_irqsave(&wqe->lock, flags);
-       wq_list_for_each(node, prev, &wqe->work_list) {
-               work = container_of(node, struct io_wq_work, list);
-               if (!match->fn(work, match->data))
-                       continue;
-               io_wqe_remove_pending(wqe, work, prev);
-               raw_spin_unlock_irqrestore(&wqe->lock, flags);
-               io_run_cancel(work, wqe);
-               match->nr_pending++;
-               if (!match->cancel_all)
-                       return;
-
-               /* not safe to continue after unlock */
-               goto retry;
-       }
-       raw_spin_unlock_irqrestore(&wqe->lock, flags);
-}
-
-static void io_wqe_cancel_running_work(struct io_wqe *wqe,
-                                      struct io_cb_cancel_data *match)
-{
-       rcu_read_lock();
-       io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
-       rcu_read_unlock();
-}
-
-enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
-                                 void *data, bool cancel_all)
-{
-       struct io_cb_cancel_data match = {
-               .fn             = cancel,
-               .data           = data,
-               .cancel_all     = cancel_all,
-       };
-       int node;
-
-       /*
-        * First check pending list, if we're lucky we can just remove it
-        * from there. CANCEL_OK means that the work is returned as-new,
-        * no completion will be posted for it.
-        */
-       for_each_node(node) {
-               struct io_wqe *wqe = wq->wqes[node];
-
-               io_wqe_cancel_pending_work(wqe, &match);
-               if (match.nr_pending && !match.cancel_all)
-                       return IO_WQ_CANCEL_OK;
-       }
-
-       /*
-        * Now check if a free (going busy) or busy worker has the work
-        * currently running. If we find it there, we'll return CANCEL_RUNNING
-        * as an indication that we attempt to signal cancellation. The
-        * completion will run normally in this case.
-        */
-       for_each_node(node) {
-               struct io_wqe *wqe = wq->wqes[node];
-
-               io_wqe_cancel_running_work(wqe, &match);
-               if (match.nr_running && !match.cancel_all)
-                       return IO_WQ_CANCEL_RUNNING;
-       }
-
-       if (match.nr_running)
-               return IO_WQ_CANCEL_RUNNING;
-       if (match.nr_pending)
-               return IO_WQ_CANCEL_OK;
-       return IO_WQ_CANCEL_NOTFOUND;
-}
-
-struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
-{
-       int ret = -ENOMEM, node;
-       struct io_wq *wq;
-
-       if (WARN_ON_ONCE(!data->free_work || !data->do_work))
-               return ERR_PTR(-EINVAL);
-       if (WARN_ON_ONCE(!bounded))
-               return ERR_PTR(-EINVAL);
-
-       wq = kzalloc(sizeof(*wq), GFP_KERNEL);
-       if (!wq)
-               return ERR_PTR(-ENOMEM);
-
-       wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
-       if (!wq->wqes)
-               goto err_wq;
-
-       ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-       if (ret)
-               goto err_wqes;
-
-       wq->free_work = data->free_work;
-       wq->do_work = data->do_work;
-
-       /* caller must already hold a reference to this */
-       wq->user = data->user;
-
-       ret = -ENOMEM;
-       for_each_node(node) {
-               struct io_wqe *wqe;
-               int alloc_node = node;
-
-               if (!node_online(alloc_node))
-                       alloc_node = NUMA_NO_NODE;
-               wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
-               if (!wqe)
-                       goto err;
-               wq->wqes[node] = wqe;
-               wqe->node = alloc_node;
-               wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
-               atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
-               if (wq->user) {
-                       wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
-                                       task_rlimit(current, RLIMIT_NPROC);
-               }
-               atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
-               wqe->wq = wq;
-               raw_spin_lock_init(&wqe->lock);
-               INIT_WQ_LIST(&wqe->work_list);
-               INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-               INIT_LIST_HEAD(&wqe->all_list);
-       }
-
-       init_completion(&wq->done);
-
-       wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
-       if (!IS_ERR(wq->manager)) {
-               wake_up_process(wq->manager);
-               wait_for_completion(&wq->done);
-               if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-               refcount_set(&wq->use_refs, 1);
-               reinit_completion(&wq->done);
-               return wq;
-       }
-
-       ret = PTR_ERR(wq->manager);
-       complete(&wq->done);
-err:
-       cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-       for_each_node(node)
-               kfree(wq->wqes[node]);
-err_wqes:
-       kfree(wq->wqes);
-err_wq:
-       kfree(wq);
-       return ERR_PTR(ret);
-}
-
-bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
-{
-       if (data->free_work != wq->free_work || data->do_work != wq->do_work)
-               return false;
-
-       return refcount_inc_not_zero(&wq->use_refs);
-}
-
-static void __io_wq_destroy(struct io_wq *wq)
-{
-       int node;
-
-       cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-
-       set_bit(IO_WQ_BIT_EXIT, &wq->state);
-       if (wq->manager)
-               kthread_stop(wq->manager);
-
-       rcu_read_lock();
-       for_each_node(node)
-               io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
-       rcu_read_unlock();
-
-       wait_for_completion(&wq->done);
-
-       for_each_node(node)
-               kfree(wq->wqes[node]);
-       kfree(wq->wqes);
-       kfree(wq);
-}
-
-void io_wq_destroy(struct io_wq *wq)
-{
-       if (refcount_dec_and_test(&wq->use_refs))
-               __io_wq_destroy(wq);
-}
-
-struct task_struct *io_wq_get_task(struct io_wq *wq)
-{
-       return wq->manager;
-}
-
-static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
-{
-       struct task_struct *task = worker->task;
-       struct rq_flags rf;
-       struct rq *rq;
-
-       rq = task_rq_lock(task, &rf);
-       do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
-       task->flags |= PF_NO_SETAFFINITY;
-       task_rq_unlock(rq, task, &rf);
-       return false;
-}
-
-static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
-       struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
-       int i;
-
-       rcu_read_lock();
-       for_each_node(i)
-               io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
-       rcu_read_unlock();
-       return 0;
-}
-
-static __init int io_wq_init(void)
-{
-       int ret;
-
-       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
-                                       io_wq_cpu_online, NULL);
-       if (ret < 0)
-               return ret;
-       io_wq_online = ret;
-       return 0;
-}
-subsys_initcall(io_wq_init);
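
The comment above io_wq_hash_work() in the file deleted here states the whole
serialization contract, and the replacement io_uring/io-wq.c keeps the same
interface: work items that hash to the same value are never run in parallel.
A minimal sketch of the submitter side, paraphrasing the io_prep_async_work()
pattern in io_uring.c (the helper name is illustrative):

    #include <linux/fs.h>
    #include "io-wq.h"

    /*
     * Hash buffered writes to a regular file on its inode so that io-wq runs
     * them one at a time per file; unhashed work remains fully parallel.
     * Illustrative helper, not lifted verbatim from the patch.
     */
    static void example_serialize_write(struct io_wq_work *work,
                                        struct file *file)
    {
            if (S_ISREG(file_inode(file)->i_mode))
                    io_wq_hash_work(work, file_inode(file));
    }
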
index 35b2d845704d9175466d979138d413d85423664b..649a4d7c241bccb14099cbcbc427f6163ea8dc5c 100644 (file)
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -5,50 +5,20 @@
 #include <linux/sched.h>
 #include <linux/xarray.h>
 
-struct io_identity {
-       struct files_struct             *files;
-       struct mm_struct                *mm;
-#ifdef CONFIG_BLK_CGROUP
-       struct cgroup_subsys_state      *blkcg_css;
-#endif
-       const struct cred               *creds;
-       struct nsproxy                  *nsproxy;
-       struct fs_struct                *fs;
-       unsigned long                   fsize;
-#ifdef CONFIG_AUDIT
-       kuid_t                          loginuid;
-       unsigned int                    sessionid;
-#endif
-       refcount_t                      count;
-};
-
-struct io_uring_task {
-       /* submission side */
-       struct xarray           xa;
-       struct wait_queue_head  wait;
-       struct file             *last;
-       struct percpu_counter   inflight;
-       struct io_identity      __identity;
-       struct io_identity      *identity;
-       atomic_t                in_idle;
-       bool                    sqpoll;
-};
-
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_task_cancel(void);
-void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
 
-static inline void io_uring_task_cancel(void)
+static inline void io_uring_files_cancel(void)
 {
-       if (current->io_uring && !xa_empty(&current->io_uring->xa))
-               __io_uring_task_cancel();
+       if (current->io_uring)
+               __io_uring_cancel(false);
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_task_cancel(void)
 {
-       if (current->io_uring && !xa_empty(&current->io_uring->xa))
-               __io_uring_files_cancel(files);
+       if (current->io_uring)
+               __io_uring_cancel(true);
 }
 static inline void io_uring_free(struct task_struct *tsk)
 {
@@ -63,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file)
 static inline void io_uring_task_cancel(void)
 {
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
 {
 }
 static inline void io_uring_free(struct task_struct *tsk)
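
The two wrappers above now only select the cancel_all flag for a single
__io_uring_cancel() entry point; callers such as the one in kernel/exit.c
(in the file list above) switch to the argument-less form. A small
illustrative use (the helper name is made up for the example):

    #include <linux/types.h>
    #include <linux/io_uring.h>

    /*
     * Illustrative only: io_uring_task_cancel() cancels every request the
     * task owns (__io_uring_cancel(true)), io_uring_files_cancel() is the
     * narrower files-related variant (__io_uring_cancel(false)). Both are
     * no-ops for tasks that never touched io_uring.
     */
    static void example_cancel(bool cancel_everything)
    {
            if (cancel_everything)
                    io_uring_task_cancel();
            else
                    io_uring_files_cancel();
    }
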
index b055c217eb0be25453847eb04f2e3e8619b46420..5da4b3c89f6368f213bfe0b32b8f526b2b13e196 100644 (file)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -885,6 +885,9 @@ struct task_struct {
        /* CLONE_CHILD_CLEARTID: */
        int __user                      *clear_child_tid;
 
+       /* PF_IO_WORKER */
+       void                            *pf_io_worker;
+
        u64                             utime;
        u64                             stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
index aea0ce9f3b745ad8f8bfeed1b74ad9c1307f1c28..a058c96cf21386ca1d0b5e5280db2101c3e13c43 100644 (file)
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -341,7 +341,7 @@ asmlinkage long sys_io_uring_setup(u32 entries,
                                struct io_uring_params __user *p);
 asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
                                u32 min_complete, u32 flags,
-                               const sigset_t __user *sig, size_t sigsz);
+                               const void __user *argp, size_t argsz);
 asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
                                void __user *arg, unsigned int nr_args);
 
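The io_uring_enter() prototype changes because the last two arguments are no
longer necessarily a signal mask: with IORING_ENTER_EXT_ARG they describe an
extended argument structure that also carries a timeout. A sketch of the
userspace side, assuming the 5.15 uapi definitions (struct
io_uring_getevents_arg and the IORING_ENTER_* flags are not shown in this
excerpt):

    #include <signal.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>
    #include <linux/time_types.h>

    /*
     * Wait for completions with both a signal mask and a timeout, which is
     * what the void __user *argp/argsz form makes possible.
     */
    static int wait_cqes(int ring_fd, unsigned int to_wait, sigset_t *mask,
                         struct __kernel_timespec *ts)
    {
            struct io_uring_getevents_arg arg = {
                    .sigmask    = (unsigned long) mask,
                    .sigmask_sz = _NSIG / 8,
                    .ts         = (unsigned long) ts,
            };

            return syscall(__NR_io_uring_enter, ring_fd, 0, to_wait,
                           IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
                           &arg, sizeof(arg));
    }
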
index 9f0d3b7d56b0f31351e0d498be643524b3e895dc..0dd30de00e5b44ce0c0d8090299fc1ac8a22fa15 100644 (file)
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -12,11 +12,11 @@ struct io_wq_work;
 /**
  * io_uring_create - called after a new io_uring context was prepared
  *
- * @fd:                        corresponding file descriptor
- * @ctx:               pointer to a ring context structure
+ * @fd:                corresponding file descriptor
+ * @ctx:       pointer to a ring context structure
  * @sq_entries:        actual SQ size
  * @cq_entries:        actual CQ size
- * @flags:             SQ ring flags, provided to io_uring_setup(2)
+ * @flags:     SQ ring flags, provided to io_uring_setup(2)
  *
  * Allows to trace io_uring creation and provide pointer to a context, that can
  * be used later to find correlated events.
@@ -49,15 +49,15 @@ TRACE_EVENT(io_uring_create,
 );
 
 /**
- * io_uring_register - called after a buffer/file/eventfd was succesfully
+ * io_uring_register - called after a buffer/file/eventfd was successfully
  *                                        registered for a ring
  *
- * @ctx:                       pointer to a ring context structure
- * @opcode:                    describes which operation to perform
+ * @ctx:               pointer to a ring context structure
+ * @opcode:            describes which operation to perform
  * @nr_user_files:     number of registered files
  * @nr_user_bufs:      number of registered buffers
  * @cq_ev_fd:          whether eventfs registered or not
- * @ret:                       return code
+ * @ret:               return code
  *
  * Allows to trace fixed files/buffers/eventfds, that could be registered to
  * avoid an overhead of getting references to them for every operation. This
@@ -142,16 +142,16 @@ TRACE_EVENT(io_uring_queue_async_work,
        TP_ARGS(ctx, rw, req, work, flags),
 
        TP_STRUCT__entry (
-               __field(  void *,                               ctx             )
-               __field(  int,                                  rw              )
-               __field(  void *,                               req             )
+               __field(  void *,                       ctx     )
+               __field(  int,                          rw      )
+               __field(  void *,                       req     )
                __field(  struct io_wq_work *,          work    )
                __field(  unsigned int,                 flags   )
        ),
 
        TP_fast_assign(
                __entry->ctx    = ctx;
-               __entry->rw             = rw;
+               __entry->rw     = rw;
                __entry->req    = req;
                __entry->work   = work;
                __entry->flags  = flags;
@@ -196,10 +196,10 @@ TRACE_EVENT(io_uring_defer,
 
 /**
  * io_uring_link - called before the io_uring request added into link_list of
- *                                another request
+ *                another request
  *
- * @ctx:                       pointer to a ring context structure
- * @req:                       pointer to a linked request
+ * @ctx:               pointer to a ring context structure
+ * @req:               pointer to a linked request
  * @target_req:                pointer to a previous request, that would contain @req
  *
  * Allows to track linked requests, to understand dependencies between requests
@@ -212,8 +212,8 @@ TRACE_EVENT(io_uring_link,
        TP_ARGS(ctx, req, target_req),
 
        TP_STRUCT__entry (
-               __field(  void *,       ctx                     )
-               __field(  void *,       req                     )
+               __field(  void *,       ctx             )
+               __field(  void *,       req             )
                __field(  void *,       target_req      )
        ),
 
@@ -244,7 +244,7 @@ TRACE_EVENT(io_uring_cqring_wait,
        TP_ARGS(ctx, min_events),
 
        TP_STRUCT__entry (
-               __field(  void *,       ctx                     )
+               __field(  void *,       ctx             )
                __field(  int,          min_events      )
        ),
 
@@ -272,7 +272,7 @@ TRACE_EVENT(io_uring_fail_link,
        TP_ARGS(req, link),
 
        TP_STRUCT__entry (
-               __field(  void *,       req             )
+               __field(  void *,       req     )
                __field(  void *,       link    )
        ),
 
@@ -290,38 +290,42 @@ TRACE_EVENT(io_uring_fail_link,
  * @ctx:               pointer to a ring context structure
  * @user_data:         user data associated with the request
  * @res:               result of the request
+ * @cflags:            completion flags
  *
  */
 TRACE_EVENT(io_uring_complete,
 
-       TP_PROTO(void *ctx, u64 user_data, long res),
+       TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),
 
-       TP_ARGS(ctx, user_data, res),
+       TP_ARGS(ctx, user_data, res, cflags),
 
        TP_STRUCT__entry (
                __field(  void *,       ctx             )
                __field(  u64,          user_data       )
-               __field(  long,         res             )
+               __field(  int,          res             )
+               __field(  unsigned,     cflags          )
        ),
 
        TP_fast_assign(
                __entry->ctx            = ctx;
                __entry->user_data      = user_data;
                __entry->res            = res;
+               __entry->cflags         = cflags;
        ),
 
-       TP_printk("ring %p, user_data 0x%llx, result %ld",
+       TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
                          __entry->ctx, (unsigned long long)__entry->user_data,
-                         __entry->res)
+                         __entry->res, __entry->cflags)
 );
 
-
 /**
  * io_uring_submit_sqe - called before submitting one SQE
  *
  * @ctx:               pointer to a ring context structure
+ * @req:               pointer to a submitted request
  * @opcode:            opcode of request
  * @user_data:         user data associated with the request
+ * @flags              request flags
  * @force_nonblock:    whether a context blocking or not
  * @sq_thread:         true if sq_thread has submitted this SQE
  *
@@ -330,41 +334,60 @@ TRACE_EVENT(io_uring_complete,
  */
 TRACE_EVENT(io_uring_submit_sqe,
 
-       TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
-                bool sq_thread),
+       TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
+                bool force_nonblock, bool sq_thread),
 
-       TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
+       TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),
 
        TP_STRUCT__entry (
                __field(  void *,       ctx             )
+               __field(  void *,       req             )
                __field(  u8,           opcode          )
                __field(  u64,          user_data       )
+               __field(  u32,          flags           )
                __field(  bool,         force_nonblock  )
                __field(  bool,         sq_thread       )
        ),
 
        TP_fast_assign(
                __entry->ctx            = ctx;
+               __entry->req            = req;
                __entry->opcode         = opcode;
                __entry->user_data      = user_data;
+               __entry->flags          = flags;
                __entry->force_nonblock = force_nonblock;
                __entry->sq_thread      = sq_thread;
        ),
 
-       TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
-                         __entry->ctx, __entry->opcode,
-                         (unsigned long long) __entry->user_data,
-                         __entry->force_nonblock, __entry->sq_thread)
+       TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
+                 "non block %d, sq_thread %d", __entry->ctx, __entry->req,
+                 __entry->opcode, (unsigned long long)__entry->user_data,
+                 __entry->flags, __entry->force_nonblock, __entry->sq_thread)
 );
 
+/*
+ * io_uring_poll_arm - called after arming a poll wait if successful
+ *
+ * @ctx:               pointer to a ring context structure
+ * @req:               pointer to the armed request
+ * @opcode:            opcode of request
+ * @user_data:         user data associated with the request
+ * @mask:              request poll events mask
+ * @events:            registered events of interest
+ *
+ * Allows to track which fds are waiting for and what are the events of
+ * interest.
+ */
 TRACE_EVENT(io_uring_poll_arm,
 
-       TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
+       TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
+                int mask, int events),
 
-       TP_ARGS(ctx, opcode, user_data, mask, events),
+       TP_ARGS(ctx, req, opcode, user_data, mask, events),
 
        TP_STRUCT__entry (
                __field(  void *,       ctx             )
+               __field(  void *,       req             )
                __field(  u8,           opcode          )
                __field(  u64,          user_data       )
                __field(  int,          mask            )
@@ -373,16 +396,17 @@ TRACE_EVENT(io_uring_poll_arm,
 
        TP_fast_assign(
                __entry->ctx            = ctx;
+               __entry->req            = req;
                __entry->opcode         = opcode;
                __entry->user_data      = user_data;
                __entry->mask           = mask;
                __entry->events         = events;
        ),
 
-       TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
-                         __entry->ctx, __entry->opcode,
-                         (unsigned long long) __entry->user_data,
-                         __entry->mask, __entry->events)
+       TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
+                 __entry->ctx, __entry->req, __entry->opcode,
+                 (unsigned long long) __entry->user_data,
+                 __entry->mask, __entry->events)
 );
 
 TRACE_EVENT(io_uring_poll_wake,
@@ -437,27 +461,40 @@ TRACE_EVENT(io_uring_task_add,
                          __entry->mask)
 );
 
+/*
+ * io_uring_task_run - called when task_work_run() executes the poll events
+ *                     notification callbacks
+ *
+ * @ctx:               pointer to a ring context structure
+ * @req:               pointer to the armed request
+ * @opcode:            opcode of request
+ * @user_data:         user data associated with the request
+ *
+ * Allows tracking when notified poll events are processed
+ */
 TRACE_EVENT(io_uring_task_run,
 
-       TP_PROTO(void *ctx, u8 opcode, u64 user_data),
+       TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),
 
-       TP_ARGS(ctx, opcode, user_data),
+       TP_ARGS(ctx, req, opcode, user_data),
 
        TP_STRUCT__entry (
                __field(  void *,       ctx             )
+               __field(  void *,       req             )
                __field(  u8,           opcode          )
                __field(  u64,          user_data       )
        ),
 
        TP_fast_assign(
                __entry->ctx            = ctx;
+               __entry->req            = req;
                __entry->opcode         = opcode;
                __entry->user_data      = user_data;
        ),
 
-       TP_printk("ring %p, op %d, data 0x%llx",
-                         __entry->ctx, __entry->opcode,
-                         (unsigned long long) __entry->user_data)
+       TP_printk("ring %p, req %p, op %d, data 0x%llx",
+                 __entry->ctx, __entry->req, __entry->opcode,
+                 (unsigned long long) __entry->user_data)
 );
 
 #endif /* _TRACE_IO_URING_H */
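For reference, a minimal sketch of how the widened tracepoints above would be
emitted from io_uring.c, using the trace_<event>() wrappers that TRACE_EVENT()
generates; the req/mask/poll variable names are illustrative, not taken from
this patch:

	/* after successfully arming a poll wait */
	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
				mask, poll->events);

	/* when task_work_run() processes the poll notification */
	trace_io_uring_task_run(ctx, req, req->opcode, req->user_data);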
index 98d8e06dea220cfa799569ccefb58c075000f50f..6481db93700287afd050a1cac3b4d0058956cba7 100644 (file)
@@ -42,23 +42,25 @@ struct io_uring_sqe {
                __u32           statx_flags;
                __u32           fadvise_advice;
                __u32           splice_flags;
+               __u32           rename_flags;
+               __u32           unlink_flags;
+               __u32           hardlink_flags;
        };
        __u64   user_data;      /* data to be passed back at completion time */
+       /* pack this to avoid bogus arm OABI complaints */
        union {
-               struct {
-                       /* pack this to avoid bogus arm OABI complaints */
-                       union {
-                               /* index into fixed buffers, if used */
-                               __u16   buf_index;
-                               /* for grouped buffer selection */
-                               __u16   buf_group;
-                       } __attribute__((packed));
-                       /* personality to use, if used */
-                       __u16   personality;
-                       __s32   splice_fd_in;
-               };
-               __u64   __pad2[3];
+               /* index into fixed buffers, if used */
+               __u16   buf_index;
+               /* for grouped buffer selection */
+               __u16   buf_group;
+       } __attribute__((packed));
+       /* personality to use, if used */
+       __u16   personality;
+       union {
+               __s32   splice_fd_in;
+               __u32   file_index;
        };
+       __u64   __pad2[2];
 };
 
 enum {
@@ -132,6 +134,9 @@ enum {
        IORING_OP_PROVIDE_BUFFERS,
        IORING_OP_REMOVE_BUFFERS,
        IORING_OP_TEE,
+       IORING_OP_SHUTDOWN,
+       IORING_OP_RENAMEAT,
+       IORING_OP_UNLINKAT,
 
        /* this goes last, obviously */
        IORING_OP_LAST,
@@ -145,14 +150,34 @@ enum {
 /*
  * sqe->timeout_flags
  */
-#define IORING_TIMEOUT_ABS     (1U << 0)
-
+#define IORING_TIMEOUT_ABS             (1U << 0)
+#define IORING_TIMEOUT_UPDATE          (1U << 1)
+#define IORING_TIMEOUT_BOOTTIME                (1U << 2)
+#define IORING_TIMEOUT_REALTIME                (1U << 3)
+#define IORING_LINK_TIMEOUT_UPDATE     (1U << 4)
+#define IORING_TIMEOUT_CLOCK_MASK      (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
+#define IORING_TIMEOUT_UPDATE_MASK     (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
 /*
  * sqe->splice_flags
  * extends splice(2) flags
  */
 #define SPLICE_F_FD_IN_FIXED   (1U << 31) /* the last bit of __u32 */
 
+/*
+ * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
+ * command flags for POLL_ADD are stored in sqe->len.
+ *
+ * IORING_POLL_ADD_MULTI       Multishot poll. Sets IORING_CQE_F_MORE if
+ *                             the poll handler will continue to report
+ *                             CQEs on behalf of the same SQE.
+ *
+ * IORING_POLL_UPDATE          Update existing poll request, matching
+ *                             sqe->addr as the old user_data field.
+ */
+#define IORING_POLL_ADD_MULTI  (1U << 0)
+#define IORING_POLL_UPDATE_EVENTS      (1U << 1)
+#define IORING_POLL_UPDATE_USER_DATA   (1U << 2)
+
 /*
  * IO completion data structure (Completion Queue Entry)
  */
@@ -166,8 +191,10 @@ struct io_uring_cqe {
  * cqe->flags
  *
  * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
+ * IORING_CQE_F_MORE   If set, parent SQE will generate more CQE entries
  */
 #define IORING_CQE_F_BUFFER            (1U << 0)
+#define IORING_CQE_F_MORE              (1U << 1)
 
 enum {
        IORING_CQE_BUFFER_SHIFT         = 16,
@@ -226,6 +253,7 @@ struct io_cqring_offsets {
 #define IORING_ENTER_GETEVENTS (1U << 0)
 #define IORING_ENTER_SQ_WAKEUP (1U << 1)
 #define IORING_ENTER_SQ_WAIT   (1U << 2)
+#define IORING_ENTER_EXT_ARG   (1U << 3)
 
 /*
  * Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -253,6 +281,10 @@ struct io_uring_params {
 #define IORING_FEAT_CUR_PERSONALITY    (1U << 4)
 #define IORING_FEAT_FAST_POLL          (1U << 5)
 #define IORING_FEAT_POLL_32BITS        (1U << 6)
+#define IORING_FEAT_SQPOLL_NONFIXED    (1U << 7)
+#define IORING_FEAT_EXT_ARG            (1U << 8)
+#define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
+#define IORING_FEAT_RSRC_TAGS          (1U << 10)
 
 /*
  * io_uring_register(2) opcodes and arguments
@@ -272,16 +304,62 @@ enum {
        IORING_REGISTER_RESTRICTIONS            = 11,
        IORING_REGISTER_ENABLE_RINGS            = 12,
 
+       /* extended with tagging */
+       IORING_REGISTER_FILES2                  = 13,
+       IORING_REGISTER_FILES_UPDATE2           = 14,
+       IORING_REGISTER_BUFFERS2                = 15,
+       IORING_REGISTER_BUFFERS_UPDATE          = 16,
+
+       /* set/clear io-wq thread affinities */
+       IORING_REGISTER_IOWQ_AFF                = 17,
+       IORING_UNREGISTER_IOWQ_AFF              = 18,
+
+       /* set/get max number of io-wq workers */
+       IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
+
        /* this goes last */
        IORING_REGISTER_LAST
 };
 
+/* io-wq worker categories */
+enum {
+       IO_WQ_BOUND,
+       IO_WQ_UNBOUND,
+};
+
+/* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
        __u32 offset;
        __u32 resv;
        __aligned_u64 /* __s32 * */ fds;
 };
 
+struct io_uring_rsrc_register {
+       __u32 nr;
+       __u32 resv;
+       __u64 resv2;
+       __aligned_u64 data;
+       __aligned_u64 tags;
+};
+
+struct io_uring_rsrc_update {
+       __u32 offset;
+       __u32 resv;
+       __aligned_u64 data;
+};
+
+struct io_uring_rsrc_update2 {
+       __u32 offset;
+       __u32 resv;
+       __aligned_u64 data;
+       __aligned_u64 tags;
+       __u32 nr;
+       __u32 resv2;
+};
+
+/* Skip updating fd indexes set to this value in the fd table */
+#define IORING_REGISTER_FILES_SKIP     (-2)
+
 #define IO_URING_OP_SUPPORTED  (1U << 0)
 
 struct io_uring_probe_op {
@@ -329,4 +407,11 @@ enum {
        IORING_RESTRICTION_LAST
 };
 
+struct io_uring_getevents_arg {
+       __u64   sigmask;
+       __u32   sigmask_sz;
+       __u32   pad;
+       __u64   ts;
+};
+
 #endif
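Two of the uapi additions above are easier to read with a short usage sketch.
First, arming a multishot poll by filling a raw struct io_uring_sqe; per the
comment above, the POLL_ADD command flags go into sqe->len (sockfd, my_cookie
and how the sqe was obtained from the SQ ring are assumptions, not part of
this patch):

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode      = IORING_OP_POLL_ADD;
	sqe->fd          = sockfd;                 /* fd to watch */
	sqe->poll_events = POLLIN;                 /* events of interest */
	sqe->len         = IORING_POLL_ADD_MULTI;  /* keep posting CQEs */
	sqe->user_data   = (__u64)(uintptr_t)my_cookie;
	/* completions carry IORING_CQE_F_MORE while the poll stays armed */

Second, waiting with a timeout via IORING_ENTER_EXT_ARG, where the final two
io_uring_enter(2) arguments become a struct io_uring_getevents_arg and its
size (ring_fd and to_wait are assumptions):

	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_getevents_arg arg = {
		.sigmask    = 0,   /* no signal mask */
		.sigmask_sz = 0,
		.ts         = (__u64)(uintptr_t)&ts,
	};

	syscall(__NR_io_uring_enter, ring_fd, 0, to_wait,
		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		&arg, sizeof(arg));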
diff --git a/io_uring/Makefile b/io_uring/Makefile
new file mode 100644 (file)
index 0000000..3680425
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for io_uring
+
+obj-$(CONFIG_IO_URING)         += io_uring.o
+obj-$(CONFIG_IO_WQ)            += io-wq.o
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
new file mode 100644 (file)
index 0000000..6031fb3
--- /dev/null
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic worker thread pool for io_uring
+ *
+ * Copyright (C) 2019 Jens Axboe
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched/signal.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rculist_nulls.h>
+#include <linux/cpu.h>
+#include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
+
+#include "io-wq.h"
+
+#define WORKER_IDLE_TIMEOUT    (5 * HZ)
+
+enum {
+       IO_WORKER_F_UP          = 1,    /* up and active */
+       IO_WORKER_F_RUNNING     = 2,    /* account as running */
+       IO_WORKER_F_FREE        = 4,    /* worker on free list */
+       IO_WORKER_F_BOUND       = 8,    /* is doing bounded work */
+};
+
+enum {
+       IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
+};
+
+enum {
+       IO_ACCT_STALLED_BIT     = 0,    /* stalled on hash */
+};
+
+/*
+ * One for each thread in a wqe pool
+ */
+struct io_worker {
+       refcount_t ref;
+       unsigned flags;
+       struct hlist_nulls_node nulls_node;
+       struct list_head all_list;
+       struct task_struct *task;
+       struct io_wqe *wqe;
+
+       struct io_wq_work *cur_work;
+       spinlock_t lock;
+
+       struct completion ref_done;
+
+       unsigned long create_state;
+       struct callback_head create_work;
+       int create_index;
+
+       union {
+               struct rcu_head rcu;
+               struct work_struct work;
+       };
+};
+
+#if BITS_PER_LONG == 64
+#define IO_WQ_HASH_ORDER       6
+#else
+#define IO_WQ_HASH_ORDER       5
+#endif
+
+#define IO_WQ_NR_HASH_BUCKETS  (1u << IO_WQ_HASH_ORDER)
+
+struct io_wqe_acct {
+       unsigned nr_workers;
+       unsigned max_workers;
+       int index;
+       atomic_t nr_running;
+       struct io_wq_work_list work_list;
+       unsigned long flags;
+};
+
+enum {
+       IO_WQ_ACCT_BOUND,
+       IO_WQ_ACCT_UNBOUND,
+       IO_WQ_ACCT_NR,
+};
+
+/*
+ * Per-node worker thread pool
+ */
+struct io_wqe {
+       raw_spinlock_t lock;
+       struct io_wqe_acct acct[2];
+
+       int node;
+
+       struct hlist_nulls_head free_list;
+       struct list_head all_list;
+
+       struct wait_queue_entry wait;
+
+       struct io_wq *wq;
+       struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
+
+       cpumask_var_t cpu_mask;
+};
+
+/*
+ * Per io_wq state
+ */
+struct io_wq {
+       unsigned long state;
+
+       free_work_fn *free_work;
+       io_wq_work_fn *do_work;
+
+       struct io_wq_hash *hash;
+
+       atomic_t worker_refs;
+       struct completion worker_done;
+
+       struct hlist_node cpuhp_node;
+
+       struct task_struct *task;
+
+       struct io_wqe *wqes[];
+};
+
+static enum cpuhp_state io_wq_online;
+
+struct io_cb_cancel_data {
+       work_cancel_fn *fn;
+       void *data;
+       int nr_running;
+       int nr_pending;
+       bool cancel_all;
+};
+
+static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void io_wqe_dec_running(struct io_worker *worker);
+static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
+                                       struct io_wqe_acct *acct,
+                                       struct io_cb_cancel_data *match);
+static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
+
+static bool io_worker_get(struct io_worker *worker)
+{
+       return refcount_inc_not_zero(&worker->ref);
+}
+
+static void io_worker_release(struct io_worker *worker)
+{
+       if (refcount_dec_and_test(&worker->ref))
+               complete(&worker->ref_done);
+}
+
+static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
+{
+       return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
+}
+
+static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
+                                                  struct io_wq_work *work)
+{
+       return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
+}
+
+static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
+{
+       return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
+}
+
+static void io_worker_ref_put(struct io_wq *wq)
+{
+       if (atomic_dec_and_test(&wq->worker_refs))
+               complete(&wq->worker_done);
+}
+
+static void io_worker_cancel_cb(struct io_worker *worker)
+{
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+
+       atomic_dec(&acct->nr_running);
+       raw_spin_lock(&worker->wqe->lock);
+       acct->nr_workers--;
+       raw_spin_unlock(&worker->wqe->lock);
+       io_worker_ref_put(wq);
+       clear_bit_unlock(0, &worker->create_state);
+       io_worker_release(worker);
+}
+
+static bool io_task_worker_match(struct callback_head *cb, void *data)
+{
+       struct io_worker *worker;
+
+       if (cb->func != create_worker_cb)
+               return false;
+       worker = container_of(cb, struct io_worker, create_work);
+       return worker == data;
+}
+
+static void io_worker_exit(struct io_worker *worker)
+{
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+
+       while (1) {
+               struct callback_head *cb = task_work_cancel_match(wq->task,
+                                               io_task_worker_match, worker);
+
+               if (!cb)
+                       break;
+               io_worker_cancel_cb(worker);
+       }
+
+       if (refcount_dec_and_test(&worker->ref))
+               complete(&worker->ref_done);
+       wait_for_completion(&worker->ref_done);
+
+       raw_spin_lock(&wqe->lock);
+       if (worker->flags & IO_WORKER_F_FREE)
+               hlist_nulls_del_rcu(&worker->nulls_node);
+       list_del_rcu(&worker->all_list);
+       preempt_disable();
+       io_wqe_dec_running(worker);
+       worker->flags = 0;
+       current->flags &= ~PF_IO_WORKER;
+       preempt_enable();
+       raw_spin_unlock(&wqe->lock);
+
+       kfree_rcu(worker, rcu);
+       io_worker_ref_put(wqe->wq);
+       do_exit(0);
+}
+
+static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
+{
+       if (!wq_list_empty(&acct->work_list) &&
+           !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
+               return true;
+       return false;
+}
+
+/*
+ * Check head of free list for an available worker. If one isn't available,
+ * caller must create one.
+ */
+static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
+                                       struct io_wqe_acct *acct)
+       __must_hold(RCU)
+{
+       struct hlist_nulls_node *n;
+       struct io_worker *worker;
+
+       /*
+        * Iterate free_list and see if we can find an idle worker to
+        * activate. If a given worker is on the free_list but in the process
+        * of exiting, keep trying.
+        */
+       hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+               if (!io_worker_get(worker))
+                       continue;
+               if (io_wqe_get_acct(worker) != acct) {
+                       io_worker_release(worker);
+                       continue;
+               }
+               if (wake_up_process(worker->task)) {
+                       io_worker_release(worker);
+                       return true;
+               }
+               io_worker_release(worker);
+       }
+
+       return false;
+}
+
+/*
+ * We need a worker. If we find a free one, we're good. If not, and we're
+ * below the max number of workers, create one.
+ */
+static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+{
+       /*
+        * Most likely an attempt to queue unbounded work on an io_wq that
+        * wasn't setup with any unbounded workers.
+        */
+       if (unlikely(!acct->max_workers))
+               pr_warn_once("io-wq is not configured for unbound workers");
+
+       raw_spin_lock(&wqe->lock);
+       if (acct->nr_workers >= acct->max_workers) {
+               raw_spin_unlock(&wqe->lock);
+               return true;
+       }
+       acct->nr_workers++;
+       raw_spin_unlock(&wqe->lock);
+       atomic_inc(&acct->nr_running);
+       atomic_inc(&wqe->wq->worker_refs);
+       return create_io_worker(wqe->wq, wqe, acct->index);
+}
+
+static void io_wqe_inc_running(struct io_worker *worker)
+{
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+
+       atomic_inc(&acct->nr_running);
+}
+
+static void create_worker_cb(struct callback_head *cb)
+{
+       struct io_worker *worker;
+       struct io_wq *wq;
+       struct io_wqe *wqe;
+       struct io_wqe_acct *acct;
+       bool do_create = false;
+
+       worker = container_of(cb, struct io_worker, create_work);
+       wqe = worker->wqe;
+       wq = wqe->wq;
+       acct = &wqe->acct[worker->create_index];
+       raw_spin_lock(&wqe->lock);
+       if (acct->nr_workers < acct->max_workers) {
+               acct->nr_workers++;
+               do_create = true;
+       }
+       raw_spin_unlock(&wqe->lock);
+       if (do_create) {
+               create_io_worker(wq, wqe, worker->create_index);
+       } else {
+               atomic_dec(&acct->nr_running);
+               io_worker_ref_put(wq);
+       }
+       clear_bit_unlock(0, &worker->create_state);
+       io_worker_release(worker);
+}
+
+static bool io_queue_worker_create(struct io_worker *worker,
+                                  struct io_wqe_acct *acct,
+                                  task_work_func_t func)
+{
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+
+       /* raced with exit, just ignore create call */
+       if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+               goto fail;
+       if (!io_worker_get(worker))
+               goto fail;
+       /*
+        * create_state manages ownership of create_work/index. We should
+        * only need one entry per worker, as the worker going to sleep
+        * will trigger the condition, and waking will clear it once it
+        * runs the task_work.
+        */
+       if (test_bit(0, &worker->create_state) ||
+           test_and_set_bit_lock(0, &worker->create_state))
+               goto fail_release;
+
+       atomic_inc(&wq->worker_refs);
+       init_task_work(&worker->create_work, func);
+       worker->create_index = acct->index;
+       if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
+               /*
+                * EXIT may have been set after checking it above, check after
+                * adding the task_work and remove any creation item if it is
+                * now set. wq exit does that too, but we can have added this
+                * work item after we canceled in io_wq_exit_workers().
+                */
+               if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+                       io_wq_cancel_tw_create(wq);
+               io_worker_ref_put(wq);
+               return true;
+       }
+       io_worker_ref_put(wq);
+       clear_bit_unlock(0, &worker->create_state);
+fail_release:
+       io_worker_release(worker);
+fail:
+       atomic_dec(&acct->nr_running);
+       io_worker_ref_put(wq);
+       return false;
+}
+
+static void io_wqe_dec_running(struct io_worker *worker)
+       __must_hold(wqe->lock)
+{
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+       struct io_wqe *wqe = worker->wqe;
+
+       if (!(worker->flags & IO_WORKER_F_UP))
+               return;
+
+       if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
+               atomic_inc(&acct->nr_running);
+               atomic_inc(&wqe->wq->worker_refs);
+               raw_spin_unlock(&wqe->lock);
+               io_queue_worker_create(worker, acct, create_worker_cb);
+               raw_spin_lock(&wqe->lock);
+       }
+}
+
+/*
+ * Worker will start processing some work. Move it to the busy list, if
+ * it's currently on the freelist
+ */
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
+                            struct io_wq_work *work)
+       __must_hold(wqe->lock)
+{
+       if (worker->flags & IO_WORKER_F_FREE) {
+               worker->flags &= ~IO_WORKER_F_FREE;
+               hlist_nulls_del_init_rcu(&worker->nulls_node);
+       }
+}
+
+/*
+ * No work, worker going to sleep. Move to freelist, and unuse mm if we
+ * have one attached. Dropping the mm may potentially sleep, so we drop
+ * the lock in that case and return success. Since the caller has to
+ * retry the loop in that case (we changed task state), we don't regrab
+ * the lock if we return success.
+ */
+static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+       __must_hold(wqe->lock)
+{
+       if (!(worker->flags & IO_WORKER_F_FREE)) {
+               worker->flags |= IO_WORKER_F_FREE;
+               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+       }
+}
+
+static inline unsigned int io_get_work_hash(struct io_wq_work *work)
+{
+       return work->flags >> IO_WQ_HASH_SHIFT;
+}
+
+static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+{
+       struct io_wq *wq = wqe->wq;
+       bool ret = false;
+
+       spin_lock_irq(&wq->hash->wait.lock);
+       if (list_empty(&wqe->wait.entry)) {
+               __add_wait_queue(&wq->hash->wait, &wqe->wait);
+               if (!test_bit(hash, &wq->hash->map)) {
+                       __set_current_state(TASK_RUNNING);
+                       list_del_init(&wqe->wait.entry);
+                       ret = true;
+               }
+       }
+       spin_unlock_irq(&wq->hash->wait.lock);
+       return ret;
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
+                                          struct io_worker *worker)
+       __must_hold(wqe->lock)
+{
+       struct io_wq_work_node *node, *prev;
+       struct io_wq_work *work, *tail;
+       unsigned int stall_hash = -1U;
+       struct io_wqe *wqe = worker->wqe;
+
+       wq_list_for_each(node, prev, &acct->work_list) {
+               unsigned int hash;
+
+               work = container_of(node, struct io_wq_work, list);
+
+               /* not hashed, can run anytime */
+               if (!io_wq_is_hashed(work)) {
+                       wq_list_del(&acct->work_list, node, prev);
+                       return work;
+               }
+
+               hash = io_get_work_hash(work);
+               /* all items with this hash lie in [work, tail] */
+               tail = wqe->hash_tail[hash];
+
+               /* hashed, can run if not already running */
+               if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
+                       wqe->hash_tail[hash] = NULL;
+                       wq_list_cut(&acct->work_list, &tail->list, prev);
+                       return work;
+               }
+               if (stall_hash == -1U)
+                       stall_hash = hash;
+               /* fast forward to a next hash, for-each will fix up @prev */
+               node = &tail->list;
+       }
+
+       if (stall_hash != -1U) {
+               bool unstalled;
+
+               /*
+                * Set this before dropping the lock to avoid racing with new
+                * work being added and clearing the stalled bit.
+                */
+               set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+               raw_spin_unlock(&wqe->lock);
+               unstalled = io_wait_on_hash(wqe, stall_hash);
+               raw_spin_lock(&wqe->lock);
+               if (unstalled) {
+                       clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+                       if (wq_has_sleeper(&wqe->wq->hash->wait))
+                               wake_up(&wqe->wq->hash->wait);
+               }
+       }
+
+       return NULL;
+}
+
+static bool io_flush_signals(void)
+{
+       if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
+               __set_current_state(TASK_RUNNING);
+               tracehook_notify_signal();
+               return true;
+       }
+       return false;
+}
+
+static void io_assign_current_work(struct io_worker *worker,
+                                  struct io_wq_work *work)
+{
+       if (work) {
+               io_flush_signals();
+               cond_resched();
+       }
+
+       spin_lock(&worker->lock);
+       worker->cur_work = work;
+       spin_unlock(&worker->lock);
+}
+
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
+
+static void io_worker_handle_work(struct io_worker *worker)
+       __releases(wqe->lock)
+{
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+       bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
+
+       do {
+               struct io_wq_work *work;
+get_next:
+               /*
+                * If we got some work, mark us as busy. If we didn't, but
+                * the list isn't empty, it means we stalled on hashed work.
+                * Mark us stalled so we don't keep looking for work when we
+                * can't make progress, any work completion or insertion will
+                * clear the stalled flag.
+                */
+               work = io_get_next_work(acct, worker);
+               if (work)
+                       __io_worker_busy(wqe, worker, work);
+
+               raw_spin_unlock(&wqe->lock);
+               if (!work)
+                       break;
+               io_assign_current_work(worker, work);
+               __set_current_state(TASK_RUNNING);
+
+               /* handle a whole dependent link */
+               do {
+                       struct io_wq_work *next_hashed, *linked;
+                       unsigned int hash = io_get_work_hash(work);
+
+                       next_hashed = wq_next_work(work);
+
+                       if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+                               work->flags |= IO_WQ_WORK_CANCEL;
+                       wq->do_work(work);
+                       io_assign_current_work(worker, NULL);
+
+                       linked = wq->free_work(work);
+                       work = next_hashed;
+                       if (!work && linked && !io_wq_is_hashed(linked)) {
+                               work = linked;
+                               linked = NULL;
+                       }
+                       io_assign_current_work(worker, work);
+                       if (linked)
+                               io_wqe_enqueue(wqe, linked);
+
+                       if (hash != -1U && !next_hashed) {
+                               /* serialize hash clear with wake_up() */
+                               spin_lock_irq(&wq->hash->wait.lock);
+                               clear_bit(hash, &wq->hash->map);
+                               clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+                               spin_unlock_irq(&wq->hash->wait.lock);
+                               if (wq_has_sleeper(&wq->hash->wait))
+                                       wake_up(&wq->hash->wait);
+                               raw_spin_lock(&wqe->lock);
+                               /* skip unnecessary unlock-lock wqe->lock */
+                               if (!work)
+                                       goto get_next;
+                               raw_spin_unlock(&wqe->lock);
+                       }
+               } while (work);
+
+               raw_spin_lock(&wqe->lock);
+       } while (1);
+}
+
+static int io_wqe_worker(void *data)
+{
+       struct io_worker *worker = data;
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+       bool last_timeout = false;
+       char buf[TASK_COMM_LEN];
+
+       worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+
+       snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
+       set_task_comm(current, buf);
+
+       while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+               long ret;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+loop:
+               raw_spin_lock(&wqe->lock);
+               if (io_acct_run_queue(acct)) {
+                       io_worker_handle_work(worker);
+                       goto loop;
+               }
+               /* timed out, exit unless we're the last worker */
+               if (last_timeout && acct->nr_workers > 1) {
+                       acct->nr_workers--;
+                       raw_spin_unlock(&wqe->lock);
+                       __set_current_state(TASK_RUNNING);
+                       break;
+               }
+               last_timeout = false;
+               __io_worker_idle(wqe, worker);
+               raw_spin_unlock(&wqe->lock);
+               if (io_flush_signals())
+                       continue;
+               ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
+               if (signal_pending(current)) {
+                       struct ksignal ksig;
+
+                       if (!get_signal(&ksig))
+                               continue;
+                       break;
+               }
+               last_timeout = !ret;
+       }
+
+       if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+               raw_spin_lock(&wqe->lock);
+               io_worker_handle_work(worker);
+       }
+
+       io_worker_exit(worker);
+       return 0;
+}
+
+/*
+ * Called when a worker is scheduled in. Mark us as currently running.
+ */
+void io_wq_worker_running(struct task_struct *tsk)
+{
+       struct io_worker *worker = tsk->pf_io_worker;
+
+       if (!worker)
+               return;
+       if (!(worker->flags & IO_WORKER_F_UP))
+               return;
+       if (worker->flags & IO_WORKER_F_RUNNING)
+               return;
+       worker->flags |= IO_WORKER_F_RUNNING;
+       io_wqe_inc_running(worker);
+}
+
+/*
+ * Called when worker is going to sleep. If there are no workers currently
+ * running and we have work pending, wake up a free one or create a new one.
+ */
+void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+       struct io_worker *worker = tsk->pf_io_worker;
+
+       if (!worker)
+               return;
+       if (!(worker->flags & IO_WORKER_F_UP))
+               return;
+       if (!(worker->flags & IO_WORKER_F_RUNNING))
+               return;
+
+       worker->flags &= ~IO_WORKER_F_RUNNING;
+
+       raw_spin_lock(&worker->wqe->lock);
+       io_wqe_dec_running(worker);
+       raw_spin_unlock(&worker->wqe->lock);
+}
+
+static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
+                              struct task_struct *tsk)
+{
+       tsk->pf_io_worker = worker;
+       worker->task = tsk;
+       set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
+       tsk->flags |= PF_NO_SETAFFINITY;
+
+       raw_spin_lock(&wqe->lock);
+       hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+       list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+       worker->flags |= IO_WORKER_F_FREE;
+       raw_spin_unlock(&wqe->lock);
+       wake_up_new_task(tsk);
+}
+
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+       return true;
+}
+
+static inline bool io_should_retry_thread(long err)
+{
+       /*
+        * Prevent perpetual task_work retry, if the task (or its group) is
+        * exiting.
+        */
+       if (fatal_signal_pending(current))
+               return false;
+
+       switch (err) {
+       case -EAGAIN:
+       case -ERESTARTSYS:
+       case -ERESTARTNOINTR:
+       case -ERESTARTNOHAND:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void create_worker_cont(struct callback_head *cb)
+{
+       struct io_worker *worker;
+       struct task_struct *tsk;
+       struct io_wqe *wqe;
+
+       worker = container_of(cb, struct io_worker, create_work);
+       clear_bit_unlock(0, &worker->create_state);
+       wqe = worker->wqe;
+       tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+       if (!IS_ERR(tsk)) {
+               io_init_new_worker(wqe, worker, tsk);
+               io_worker_release(worker);
+               return;
+       } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+               struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+
+               atomic_dec(&acct->nr_running);
+               raw_spin_lock(&wqe->lock);
+               acct->nr_workers--;
+               if (!acct->nr_workers) {
+                       struct io_cb_cancel_data match = {
+                               .fn             = io_wq_work_match_all,
+                               .cancel_all     = true,
+                       };
+
+                       while (io_acct_cancel_pending_work(wqe, acct, &match))
+                               raw_spin_lock(&wqe->lock);
+               }
+               raw_spin_unlock(&wqe->lock);
+               io_worker_ref_put(wqe->wq);
+               kfree(worker);
+               return;
+       }
+
+       /* re-create attempts grab a new worker ref, drop the existing one */
+       io_worker_release(worker);
+       schedule_work(&worker->work);
+}
+
+static void io_workqueue_create(struct work_struct *work)
+{
+       struct io_worker *worker = container_of(work, struct io_worker, work);
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+
+       if (!io_queue_worker_create(worker, acct, create_worker_cont))
+               kfree(worker);
+}
+
+static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+{
+       struct io_wqe_acct *acct = &wqe->acct[index];
+       struct io_worker *worker;
+       struct task_struct *tsk;
+
+       __set_current_state(TASK_RUNNING);
+
+       worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
+       if (!worker) {
+fail:
+               atomic_dec(&acct->nr_running);
+               raw_spin_lock(&wqe->lock);
+               acct->nr_workers--;
+               raw_spin_unlock(&wqe->lock);
+               io_worker_ref_put(wq);
+               return false;
+       }
+
+       refcount_set(&worker->ref, 1);
+       worker->wqe = wqe;
+       spin_lock_init(&worker->lock);
+       init_completion(&worker->ref_done);
+
+       if (index == IO_WQ_ACCT_BOUND)
+               worker->flags |= IO_WORKER_F_BOUND;
+
+       tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+       if (!IS_ERR(tsk)) {
+               io_init_new_worker(wqe, worker, tsk);
+       } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+               kfree(worker);
+               goto fail;
+       } else {
+               INIT_WORK(&worker->work, io_workqueue_create);
+               schedule_work(&worker->work);
+       }
+
+       return true;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+                                 bool (*func)(struct io_worker *, void *),
+                                 void *data)
+{
+       struct io_worker *worker;
+       bool ret = false;
+
+       list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+               if (io_worker_get(worker)) {
+                       /* no task if node is/was offline */
+                       if (worker->task)
+                               ret = func(worker, data);
+                       io_worker_release(worker);
+                       if (ret)
+                               break;
+               }
+       }
+
+       return ret;
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+       set_notify_signal(worker->task);
+       wake_up_process(worker->task);
+       return false;
+}
+
+static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
+{
+       struct io_wq *wq = wqe->wq;
+
+       do {
+               work->flags |= IO_WQ_WORK_CANCEL;
+               wq->do_work(work);
+               work = wq->free_work(work);
+       } while (work);
+}
+
+static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
+{
+       struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+       unsigned int hash;
+       struct io_wq_work *tail;
+
+       if (!io_wq_is_hashed(work)) {
+append:
+               wq_list_add_tail(&work->list, &acct->work_list);
+               return;
+       }
+
+       hash = io_get_work_hash(work);
+       tail = wqe->hash_tail[hash];
+       wqe->hash_tail[hash] = work;
+       if (!tail)
+               goto append;
+
+       wq_list_add_after(&work->list, &tail->list, &acct->work_list);
+}
+
+static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
+{
+       return work == data;
+}
+
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+{
+       struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+       unsigned work_flags = work->flags;
+       bool do_create;
+
+       /*
+        * If io-wq is exiting for this task, or if the request has explicitly
+        * been marked as one that should not get executed, cancel it here.
+        */
+       if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
+           (work->flags & IO_WQ_WORK_CANCEL)) {
+               io_run_cancel(work, wqe);
+               return;
+       }
+
+       raw_spin_lock(&wqe->lock);
+       io_wqe_insert_work(wqe, work);
+       clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+
+       rcu_read_lock();
+       do_create = !io_wqe_activate_free_worker(wqe, acct);
+       rcu_read_unlock();
+
+       raw_spin_unlock(&wqe->lock);
+
+       if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+           !atomic_read(&acct->nr_running))) {
+               bool did_create;
+
+               did_create = io_wqe_create_worker(wqe, acct);
+               if (likely(did_create))
+                       return;
+
+               raw_spin_lock(&wqe->lock);
+               /* fatal condition, failed to create the first worker */
+               if (!acct->nr_workers) {
+                       struct io_cb_cancel_data match = {
+                               .fn             = io_wq_work_match_item,
+                               .data           = work,
+                               .cancel_all     = false,
+                       };
+
+                       if (io_acct_cancel_pending_work(wqe, acct, &match))
+                               raw_spin_lock(&wqe->lock);
+               }
+               raw_spin_unlock(&wqe->lock);
+       }
+}
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+{
+       struct io_wqe *wqe = wq->wqes[numa_node_id()];
+
+       io_wqe_enqueue(wqe, work);
+}
+
+/*
+ * Work items that hash to the same value will not be done in parallel.
+ * Used to limit concurrent writes, generally hashed by inode.
+ */
+void io_wq_hash_work(struct io_wq_work *work, void *val)
+{
+       unsigned int bit;
+
+       bit = hash_ptr(val, IO_WQ_HASH_ORDER);
+       work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+}
+
+static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+{
+       struct io_cb_cancel_data *match = data;
+
+       /*
+        * Hold the lock to avoid ->cur_work going out of scope, caller
+        * may dereference the passed in work.
+        */
+       spin_lock(&worker->lock);
+       if (worker->cur_work &&
+           match->fn(worker->cur_work, match->data)) {
+               set_notify_signal(worker->task);
+               match->nr_running++;
+       }
+       spin_unlock(&worker->lock);
+
+       return match->nr_running && !match->cancel_all;
+}
+
+static inline void io_wqe_remove_pending(struct io_wqe *wqe,
+                                        struct io_wq_work *work,
+                                        struct io_wq_work_node *prev)
+{
+       struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+       unsigned int hash = io_get_work_hash(work);
+       struct io_wq_work *prev_work = NULL;
+
+       if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
+               if (prev)
+                       prev_work = container_of(prev, struct io_wq_work, list);
+               if (prev_work && io_get_work_hash(prev_work) == hash)
+                       wqe->hash_tail[hash] = prev_work;
+               else
+                       wqe->hash_tail[hash] = NULL;
+       }
+       wq_list_del(&acct->work_list, &work->list, prev);
+}
+
+static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
+                                       struct io_wqe_acct *acct,
+                                       struct io_cb_cancel_data *match)
+       __releases(wqe->lock)
+{
+       struct io_wq_work_node *node, *prev;
+       struct io_wq_work *work;
+
+       wq_list_for_each(node, prev, &acct->work_list) {
+               work = container_of(node, struct io_wq_work, list);
+               if (!match->fn(work, match->data))
+                       continue;
+               io_wqe_remove_pending(wqe, work, prev);
+               raw_spin_unlock(&wqe->lock);
+               io_run_cancel(work, wqe);
+               match->nr_pending++;
+               /* not safe to continue after unlock */
+               return true;
+       }
+
+       return false;
+}
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
+{
+       int i;
+retry:
+       raw_spin_lock(&wqe->lock);
+       for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+               struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
+
+               if (io_acct_cancel_pending_work(wqe, acct, match)) {
+                       if (match->cancel_all)
+                               goto retry;
+                       return;
+               }
+       }
+       raw_spin_unlock(&wqe->lock);
+}
+
+static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
+{
+       rcu_read_lock();
+       io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+       rcu_read_unlock();
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+                                 void *data, bool cancel_all)
+{
+       struct io_cb_cancel_data match = {
+               .fn             = cancel,
+               .data           = data,
+               .cancel_all     = cancel_all,
+       };
+       int node;
+
+       /*
+        * First check pending list, if we're lucky we can just remove it
+        * from there. CANCEL_OK means that the work is returned as-new,
+        * no completion will be posted for it.
+        */
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+
+               io_wqe_cancel_pending_work(wqe, &match);
+               if (match.nr_pending && !match.cancel_all)
+                       return IO_WQ_CANCEL_OK;
+       }
+
+       /*
+        * Now check if a free (going busy) or busy worker has the work
+        * currently running. If we find it there, we'll return CANCEL_RUNNING
+        * as an indication that we attempt to signal cancellation. The
+        * completion will run normally in this case.
+        */
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+
+               io_wqe_cancel_running_work(wqe, &match);
+               if (match.nr_running && !match.cancel_all)
+                       return IO_WQ_CANCEL_RUNNING;
+       }
+
+       if (match.nr_running)
+               return IO_WQ_CANCEL_RUNNING;
+       if (match.nr_pending)
+               return IO_WQ_CANCEL_OK;
+       return IO_WQ_CANCEL_NOTFOUND;
+}
+
+static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
+                           int sync, void *key)
+{
+       struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
+       int i;
+
+       list_del_init(&wait->entry);
+
+       rcu_read_lock();
+       for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+               struct io_wqe_acct *acct = &wqe->acct[i];
+
+               if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
+                       io_wqe_activate_free_worker(wqe, acct);
+       }
+       rcu_read_unlock();
+       return 1;
+}
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+{
+       int ret, node, i;
+       struct io_wq *wq;
+
+       if (WARN_ON_ONCE(!data->free_work || !data->do_work))
+               return ERR_PTR(-EINVAL);
+       if (WARN_ON_ONCE(!bounded))
+               return ERR_PTR(-EINVAL);
+
+       wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
+       if (!wq)
+               return ERR_PTR(-ENOMEM);
+       ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+       if (ret)
+               goto err_wq;
+
+       refcount_inc(&data->hash->refs);
+       wq->hash = data->hash;
+       wq->free_work = data->free_work;
+       wq->do_work = data->do_work;
+
+       ret = -ENOMEM;
+       for_each_node(node) {
+               struct io_wqe *wqe;
+               int alloc_node = node;
+
+               if (!node_online(alloc_node))
+                       alloc_node = NUMA_NO_NODE;
+               wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
+               if (!wqe)
+                       goto err;
+               wq->wqes[node] = wqe;
+               if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
+                       goto err;
+               cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
+               wqe->node = alloc_node;
+               wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+               wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+                                       task_rlimit(current, RLIMIT_NPROC);
+               INIT_LIST_HEAD(&wqe->wait.entry);
+               wqe->wait.func = io_wqe_hash_wake;
+               for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+                       struct io_wqe_acct *acct = &wqe->acct[i];
+
+                       acct->index = i;
+                       atomic_set(&acct->nr_running, 0);
+                       INIT_WQ_LIST(&acct->work_list);
+               }
+               wqe->wq = wq;
+               raw_spin_lock_init(&wqe->lock);
+               INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+               INIT_LIST_HEAD(&wqe->all_list);
+       }
+
+       wq->task = get_task_struct(data->task);
+       atomic_set(&wq->worker_refs, 1);
+       init_completion(&wq->worker_done);
+       return wq;
+err:
+       io_wq_put_hash(data->hash);
+       cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+       for_each_node(node) {
+               if (!wq->wqes[node])
+                       continue;
+               free_cpumask_var(wq->wqes[node]->cpu_mask);
+               kfree(wq->wqes[node]);
+       }
+err_wq:
+       kfree(wq);
+       return ERR_PTR(ret);
+}
+
+static bool io_task_work_match(struct callback_head *cb, void *data)
+{
+       struct io_worker *worker;
+
+       if (cb->func != create_worker_cb && cb->func != create_worker_cont)
+               return false;
+       worker = container_of(cb, struct io_worker, create_work);
+       return worker->wqe->wq == data;
+}
+
+void io_wq_exit_start(struct io_wq *wq)
+{
+       set_bit(IO_WQ_BIT_EXIT, &wq->state);
+}
+
+static void io_wq_cancel_tw_create(struct io_wq *wq)
+{
+       struct callback_head *cb;
+
+       while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
+               struct io_worker *worker;
+
+               worker = container_of(cb, struct io_worker, create_work);
+               io_worker_cancel_cb(worker);
+       }
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+       int node;
+
+       if (!wq->task)
+               return;
+
+       io_wq_cancel_tw_create(wq);
+
+       rcu_read_lock();
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+
+               io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
+       }
+       rcu_read_unlock();
+       io_worker_ref_put(wq);
+       wait_for_completion(&wq->worker_done);
+
+       for_each_node(node) {
+               spin_lock_irq(&wq->hash->wait.lock);
+               list_del_init(&wq->wqes[node]->wait.entry);
+               spin_unlock_irq(&wq->hash->wait.lock);
+       }
+       put_task_struct(wq->task);
+       wq->task = NULL;
+}
+
+static void io_wq_destroy(struct io_wq *wq)
+{
+       int node;
+
+       cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+               struct io_cb_cancel_data match = {
+                       .fn             = io_wq_work_match_all,
+                       .cancel_all     = true,
+               };
+               io_wqe_cancel_pending_work(wqe, &match);
+               free_cpumask_var(wqe->cpu_mask);
+               kfree(wqe);
+       }
+       io_wq_put_hash(wq->hash);
+       kfree(wq);
+}
+
+void io_wq_put_and_exit(struct io_wq *wq)
+{
+       WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
+       io_wq_exit_workers(wq);
+       io_wq_destroy(wq);
+}
+
+struct online_data {
+       unsigned int cpu;
+       bool online;
+};
+
+static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
+{
+       struct online_data *od = data;
+
+       if (od->online)
+               cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
+       else
+               cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
+       return false;
+}
+
+static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
+{
+       struct online_data od = {
+               .cpu = cpu,
+               .online = online
+       };
+       int i;
+
+       rcu_read_lock();
+       for_each_node(i)
+               io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
+       rcu_read_unlock();
+       return 0;
+}
+
+static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+       struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
+
+       return __io_wq_cpu_online(wq, cpu, true);
+}
+
+static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+       struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
+
+       return __io_wq_cpu_online(wq, cpu, false);
+}
+
+int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+{
+       int i;
+
+       rcu_read_lock();
+       for_each_node(i) {
+               struct io_wqe *wqe = wq->wqes[i];
+
+               if (mask)
+                       cpumask_copy(wqe->cpu_mask, mask);
+               else
+                       cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
+/*
+ * Set max number of unbounded workers, returns old value. If new_count is 0,
+ * then just return the old value.
+ */
+int io_wq_max_workers(struct io_wq *wq, int *new_count)
+{
+       int prev[IO_WQ_ACCT_NR];
+       bool first_node = true;
+       int i, node;
+
+       BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
+
+       for (i = 0; i < 2; i++) {
+               if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
+                       new_count[i] = task_rlimit(current, RLIMIT_NPROC);
+       }
+
+       for (i = 0; i < IO_WQ_ACCT_NR; i++)
+               prev[i] = 0;
+
+       rcu_read_lock();
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+               struct io_wqe_acct *acct;
+
+               raw_spin_lock(&wqe->lock);
+               for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+                       acct = &wqe->acct[i];
+                       if (first_node)
+                               prev[i] = max_t(int, acct->max_workers, prev[i]);
+                       if (new_count[i])
+                               acct->max_workers = new_count[i];
+               }
+               raw_spin_unlock(&wqe->lock);
+               first_node = false;
+       }
+       rcu_read_unlock();
+
+       for (i = 0; i < IO_WQ_ACCT_NR; i++)
+               new_count[i] = prev[i];
+
+       return 0;
+}
+
+static __init int io_wq_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
+                                       io_wq_cpu_online, io_wq_cpu_offline);
+       if (ret < 0)
+               return ret;
+       io_wq_online = ret;
+       return 0;
+}
+subsys_initcall(io_wq_init);
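For orientation, a condensed sketch of how io_uring.c is expected to drive
this pool through the io-wq.h API below; the hash/task/concurrency variables,
the two callback names and match_req_fn (a work_cancel_fn comparing a work
item against req) are illustrative placeholders, not necessarily the exact
names used elsewhere in this patch:

	struct io_wq_data data = {
		.hash      = hash,              /* shared io_wq_hash */
		.task      = tctx_task,         /* task that owns the workers */
		.do_work   = io_wq_submit_work, /* executes one request */
		.free_work = io_wq_free_work,   /* frees it, may return linked work */
	};
	struct io_wq *wq = io_wq_create(concurrency, &data);

	/* writes hashed to the same inode are serialized, then queued */
	io_wq_hash_work(&req->work, file_inode(req->file));
	io_wq_enqueue(wq, &req->work);

	/* best-effort cancellation of one queued or running request */
	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_req_fn, req, false);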
similarity index 81%
rename from fs/io-wq.h
rename to io_uring/io-wq.h
index 75113bcd5889f41d9fe891aaa033db1d6154e0f1..bf5c4c533760574f87d0cab165be2f4c60ecbff3 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef INTERNAL_IO_WQ_H
 #define INTERNAL_IO_WQ_H
 
-#include <linux/io_uring.h>
+#include <linux/refcount.h>
 
 struct io_wq;
 
@@ -9,16 +9,8 @@ enum {
        IO_WQ_WORK_CANCEL       = 1,
        IO_WQ_WORK_HASHED       = 2,
        IO_WQ_WORK_UNBOUND      = 4,
-       IO_WQ_WORK_NO_CANCEL    = 8,
        IO_WQ_WORK_CONCURRENT   = 16,
 
-       IO_WQ_WORK_FILES        = 32,
-       IO_WQ_WORK_FS           = 64,
-       IO_WQ_WORK_MM           = 128,
-       IO_WQ_WORK_CREDS        = 256,
-       IO_WQ_WORK_BLKCG        = 512,
-       IO_WQ_WORK_FSIZE        = 1024,
-
        IO_WQ_HASH_SHIFT        = 24,   /* upper 8 bits are used for hash key */
 };
 
@@ -52,6 +44,7 @@ static inline void wq_list_add_after(struct io_wq_work_node *node,
 static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
 {
+       node->next = NULL;
        if (!list->first) {
                list->last = node;
                WRITE_ONCE(list->first, node);
@@ -59,7 +52,6 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
                list->last->next = node;
                list->last = node;
        }
-       node->next = NULL;
 }
 
 static inline void wq_list_cut(struct io_wq_work_list *list,
@@ -95,7 +87,6 @@ static inline void wq_list_del(struct io_wq_work_list *list,
 
 struct io_wq_work {
        struct io_wq_work_node list;
-       struct io_identity *identity;
        unsigned flags;
 };
 
@@ -107,37 +98,48 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
        return container_of(work->list.next, struct io_wq_work, list);
 }
 
-typedef void (free_work_fn)(struct io_wq_work *);
-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
+typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work *);
 
-struct io_wq_data {
-       struct user_struct *user;
+struct io_wq_hash {
+       refcount_t refs;
+       unsigned long map;
+       struct wait_queue_head wait;
+};
+
+static inline void io_wq_put_hash(struct io_wq_hash *hash)
+{
+       if (refcount_dec_and_test(&hash->refs))
+               kfree(hash);
+}
 
+struct io_wq_data {
+       struct io_wq_hash *hash;
+       struct task_struct *task;
        io_wq_work_fn *do_work;
        free_work_fn *free_work;
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
-void io_wq_destroy(struct io_wq *wq);
+void io_wq_exit_start(struct io_wq *wq);
+void io_wq_put_and_exit(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
+int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_max_workers(struct io_wq *wq, int *new_count);
+
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
 {
        return work->flags & IO_WQ_WORK_HASHED;
 }
 
-void io_wq_cancel_all(struct io_wq *wq);
-
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                        void *data, bool cancel_all);
 
-struct task_struct *io_wq_get_task(struct io_wq *wq);
-
 #if defined(CONFIG_IO_WQ)
 extern void io_wq_worker_sleeping(struct task_struct *);
 extern void io_wq_worker_running(struct task_struct *);
@@ -152,6 +154,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
 
 static inline bool io_wq_current_is_worker(void)
 {
-       return in_task() && (current->flags & PF_IO_WORKER);
+       return in_task() && (current->flags & PF_IO_WORKER) &&
+               current->pf_io_worker;
 }
 #endif
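The io_wq_worker_sleeping()/io_wq_worker_running() hooks declared above are
driven from the scheduler for PF_IO_WORKER tasks; roughly, and not quoting the
sched/core.c hunk of this patch verbatim:

	/* about to block: maybe wake or create a replacement worker */
	if (tsk->flags & PF_IO_WORKER)
		io_wq_worker_sleeping(tsk);

	/* scheduled back in: account the worker as running again */
	if (tsk->flags & PF_IO_WORKER)
		io_wq_worker_running(tsk);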
similarity index 51%
rename from fs/io_uring.c
rename to io_uring/io_uring.c
index 1d817374131097c833046ad2ce81295604e5cc2a..473dbd1830a3b62cf7f97eb24024397fe4f55819 100644 (file)
@@ -11,7 +11,7 @@
  * before writing the tail (using smp_load_acquire to read the tail will
  * do). It also needs a smp_mb() before updating CQ head (ordering the
  * entry load(s) with the head store), pairing with an implicit barrier
- * through a control-dependency in io_get_cqring (smp_store_release to
+ * through a control-dependency in io_get_cqe (smp_store_release to
  * store head will do). Failure to do so could lead to reading invalid
  * CQ entries.
  *
@@ -57,7 +57,6 @@
 #include <linux/mman.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
 #include <linux/blkdev.h>
 #include <linux/bvec.h>
 #include <linux/net.h>
 #include <linux/fsnotify.h>
 #include <linux/fadvise.h>
 #include <linux/eventpoll.h>
-#include <linux/fs_struct.h>
 #include <linux/splice.h>
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
-#include <linux/blk-cgroup.h>
-#include <linux/audit.h>
+#include <linux/tracehook.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
 
 #include <uapi/linux/io_uring.h>
 
-#include "internal.h"
+#include "../fs/internal.h"
 #include "io-wq.h"
 
 #define IORING_MAX_ENTRIES     32768
 #define IORING_MAX_CQ_ENTRIES  (2 * IORING_MAX_ENTRIES)
+#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
 
-/*
- * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
- */
-#define IORING_FILE_TABLE_SHIFT        9
-#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
-#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
-#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
+/* only define max */
+#define IORING_MAX_FIXED_FILES (1U << 15)
 #define IORING_MAX_RESTRICTIONS        (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+#define IO_RSRC_TAG_TABLE_SHIFT        (PAGE_SHIFT - 3)
+#define IO_RSRC_TAG_TABLE_MAX  (1U << IO_RSRC_TAG_TABLE_SHIFT)
+#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
+
+#define IORING_MAX_REG_BUFFERS (1U << 14)
+
+#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+                               IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
+                               IOSQE_BUFFER_SELECT)
+#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
+                               REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)
+
+#define IO_TCTX_REFS_CACHE_NR  (1U << 10)
+
 struct io_uring {
        u32 head ____cacheline_aligned_in_smp;
        u32 tail ____cacheline_aligned_in_smp;
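
The ordering rules spelled out in the header comment above (acquire the tail, release the head) are exactly what a CQ consumer has to follow. A hedged userspace-style sketch of that pattern; the parameter names and the mmap layout are illustrative rather than the precise liburing API:

    #include <stdatomic.h>
    #include <linux/io_uring.h>     /* struct io_uring_cqe */

    static unsigned example_reap_cqes(_Atomic __u32 *cq_head, _Atomic __u32 *cq_tail,
                                      struct io_uring_cqe *cqes, unsigned ring_mask)
    {
            __u32 head = atomic_load_explicit(cq_head, memory_order_relaxed);
            __u32 tail = atomic_load_explicit(cq_tail, memory_order_acquire);
            unsigned seen = 0;

            while (head != tail) {
                    struct io_uring_cqe *cqe = &cqes[head & ring_mask];

                    (void)cqe->user_data;   /* ... consume cqe->res here ... */
                    head++;
                    seen++;
            }
            /* publish the new head only after the entries have been read */
            atomic_store_explicit(cq_head, head, memory_order_release);
            return seen;
    }
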
@@ -162,7 +169,7 @@ struct io_rings {
         * Written by the application, shouldn't be modified by the
         * kernel.
         */
-       u32                     cq_flags;
+       u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
@@ -187,36 +194,64 @@ struct io_rings {
        struct io_uring_cqe     cqes[] ____cacheline_aligned_in_smp;
 };
 
+enum io_uring_cmd_flags {
+       IO_URING_F_NONBLOCK             = 1,
+       IO_URING_F_COMPLETE_DEFER       = 2,
+};
+
 struct io_mapped_ubuf {
        u64             ubuf;
-       size_t          len;
-       struct          bio_vec *bvec;
+       u64             ubuf_end;
        unsigned int    nr_bvecs;
        unsigned long   acct_pages;
+       struct bio_vec  bvec[];
+};
+
+struct io_ring_ctx;
+
+struct io_overflow_cqe {
+       struct io_uring_cqe cqe;
+       struct list_head list;
+};
+
+struct io_fixed_file {
+       /* file * with additional FFS_* flags */
+       unsigned long file_ptr;
+};
+
+struct io_rsrc_put {
+       struct list_head list;
+       u64 tag;
+       union {
+               void *rsrc;
+               struct file *file;
+               struct io_mapped_ubuf *buf;
+       };
 };
 
-struct fixed_file_table {
-       struct file             **files;
+struct io_file_table {
+       struct io_fixed_file *files;
 };
 
-struct fixed_file_ref_node {
+struct io_rsrc_node {
        struct percpu_ref               refs;
        struct list_head                node;
-       struct list_head                file_list;
-       struct fixed_file_data          *file_data;
+       struct list_head                rsrc_list;
+       struct io_rsrc_data             *rsrc_data;
        struct llist_node               llist;
        bool                            done;
 };
 
-struct fixed_file_data {
-       struct fixed_file_table         *table;
+typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
+
+struct io_rsrc_data {
        struct io_ring_ctx              *ctx;
 
-       struct fixed_file_ref_node      *node;
-       struct percpu_ref               refs;
+       u64                             **tags;
+       unsigned int                    nr;
+       rsrc_put_fn                     *do_put;
+       atomic_t                        refs;
        struct completion               done;
-       struct list_head                ref_list;
-       spinlock_t                      lock;
        bool                            quiesce;
 };
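
The tags member is a two-level table sized with the IO_RSRC_TAG_TABLE_* constants defined earlier, so a resource index is split into an outer and an inner index. A minimal sketch of that lookup; the helper name is illustrative, upstream has an equivalent accessor:

    static u64 *example_tag_slot(struct io_rsrc_data *data, unsigned int idx)
    {
            return &data->tags[idx >> IO_RSRC_TAG_TABLE_SHIFT]
                              [idx & IO_RSRC_TAG_TABLE_MASK];
    }
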
 
@@ -235,33 +270,81 @@ struct io_restriction {
        bool registered;
 };
 
+enum {
+       IO_SQ_THREAD_SHOULD_STOP = 0,
+       IO_SQ_THREAD_SHOULD_PARK,
+};
+
 struct io_sq_data {
        refcount_t              refs;
+       atomic_t                park_pending;
        struct mutex            lock;
 
        /* ctx's that are using this sqd */
        struct list_head        ctx_list;
-       struct list_head        ctx_new_list;
-       struct mutex            ctx_lock;
 
        struct task_struct      *thread;
        struct wait_queue_head  wait;
+
+       unsigned                sq_thread_idle;
+       int                     sq_cpu;
+       pid_t                   task_pid;
+       pid_t                   task_tgid;
+
+       unsigned long           state;
+       struct completion       exited;
+};
+
+#define IO_COMPL_BATCH                 32
+#define IO_REQ_CACHE_SIZE              32
+#define IO_REQ_ALLOC_BATCH             8
+
+struct io_submit_link {
+       struct io_kiocb         *head;
+       struct io_kiocb         *last;
+};
+
+struct io_submit_state {
+       struct blk_plug         plug;
+       struct io_submit_link   link;
+
+       /*
+        * io_kiocb alloc cache
+        */
+       void                    *reqs[IO_REQ_CACHE_SIZE];
+       unsigned int            free_reqs;
+
+       bool                    plug_started;
+
+       /*
+        * Batch completion logic
+        */
+       struct io_kiocb         *compl_reqs[IO_COMPL_BATCH];
+       unsigned int            compl_nr;
+       /* inline/task_work completion list, under ->uring_lock */
+       struct list_head        free_list;
+
+       unsigned int            ios_left;
 };
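
compl_reqs/compl_nr implement batched completion posting: finished requests are staged in the fixed array and flushed in one pass. A hedged illustration of the staging step, not a copy of the real helpers; in io_uring.c the flush also happens at the end of a submit cycle, not only when the batch fills:

    static void example_stage_completion(struct io_ring_ctx *ctx,
                                         struct io_submit_state *state,
                                         struct io_kiocb *req)
    {
            state->compl_reqs[state->compl_nr++] = req;
            if (state->compl_nr == IO_COMPL_BATCH)
                    io_submit_flush_completions(ctx);       /* declared further down */
    }
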
 
 struct io_ring_ctx {
+       /* const or read-mostly hot data */
        struct {
                struct percpu_ref       refs;
-       } ____cacheline_aligned_in_smp;
 
-       struct {
+               struct io_rings         *rings;
                unsigned int            flags;
                unsigned int            compat: 1;
-               unsigned int            limit_mem: 1;
-               unsigned int            cq_overflow_flushed: 1;
                unsigned int            drain_next: 1;
                unsigned int            eventfd_async: 1;
                unsigned int            restricted: 1;
-               unsigned int            sqo_dead: 1;
+               unsigned int            off_timeout_used: 1;
+               unsigned int            drain_active: 1;
+       } ____cacheline_aligned_in_smp;
+
+       /* submission data */
+       struct {
+               struct mutex            uring_lock;
 
                /*
                 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -275,101 +358,59 @@ struct io_ring_ctx {
                 * array.
                 */
                u32                     *sq_array;
+               struct io_uring_sqe     *sq_sqes;
                unsigned                cached_sq_head;
                unsigned                sq_entries;
-               unsigned                sq_mask;
-               unsigned                sq_thread_idle;
-               unsigned                cached_sq_dropped;
-               unsigned                cached_cq_overflow;
-               unsigned long           sq_check_overflow;
-
                struct list_head        defer_list;
+
+               /*
+                * Fixed resources fast path, should be accessed only under
+                * uring_lock, and updated through io_uring_register(2)
+                */
+               struct io_rsrc_node     *rsrc_node;
+               struct io_file_table    file_table;
+               unsigned                nr_user_files;
+               unsigned                nr_user_bufs;
+               struct io_mapped_ubuf   **user_bufs;
+
+               struct io_submit_state  submit_state;
                struct list_head        timeout_list;
+               struct list_head        ltimeout_list;
                struct list_head        cq_overflow_list;
-
-               struct io_uring_sqe     *sq_sqes;
+               struct xarray           io_buffers;
+               struct xarray           personalities;
+               u32                     pers_next;
+               unsigned                sq_thread_idle;
        } ____cacheline_aligned_in_smp;
 
-       struct io_rings *rings;
-
-       /* IO offload */
-       struct io_wq            *io_wq;
-
-       /*
-        * For SQPOLL usage - we hold a reference to the parent task, so we
-        * have access to the ->files
-        */
-       struct task_struct      *sqo_task;
-
-       /* Only used for accounting purposes */
-       struct mm_struct        *mm_account;
-
-#ifdef CONFIG_BLK_CGROUP
-       struct cgroup_subsys_state      *sqo_blkcg_css;
-#endif
+       /* IRQ completion list, under ->completion_lock */
+       struct list_head        locked_free_list;
+       unsigned int            locked_free_nr;
 
+       const struct cred       *sq_creds;      /* cred used for __io_sq_thread() */
        struct io_sq_data       *sq_data;       /* if using sq thread polling */
 
        struct wait_queue_head  sqo_sq_wait;
-       struct wait_queue_entry sqo_wait_entry;
        struct list_head        sqd_list;
 
-       /*
-        * If used, fixed file set. Writers must ensure that ->refs is dead,
-        * readers must ensure that ->refs is alive as long as the file* is
-        * used. Only updated through io_uring_register(2).
-        */
-       struct fixed_file_data  *file_data;
-       unsigned                nr_user_files;
-
-       /* if used, fixed mapped user buffers */
-       unsigned                nr_user_bufs;
-       struct io_mapped_ubuf   *user_bufs;
-
-       struct user_struct      *user;
-
-       const struct cred       *creds;
-
-#ifdef CONFIG_AUDIT
-       kuid_t                  loginuid;
-       unsigned int            sessionid;
-#endif
-
-       struct completion       ref_comp;
-       struct completion       sq_thread_comp;
-
-       /* if all else fails... */
-       struct io_kiocb         *fallback_req;
-
-#if defined(CONFIG_UNIX)
-       struct socket           *ring_sock;
-#endif
-
-       struct xarray           io_buffers;
-
-       struct xarray           personalities;
-       u32                     pers_next;
+       unsigned long           check_cq_overflow;
 
        struct {
                unsigned                cached_cq_tail;
                unsigned                cq_entries;
-               unsigned                cq_mask;
+               struct eventfd_ctx      *cq_ev_fd;
+               struct wait_queue_head  poll_wait;
+               struct wait_queue_head  cq_wait;
+               unsigned                cq_extra;
                atomic_t                cq_timeouts;
                unsigned                cq_last_tm_flush;
-               unsigned long           cq_check_overflow;
-               struct wait_queue_head  cq_wait;
-               struct fasync_struct    *cq_fasync;
-               struct eventfd_ctx      *cq_ev_fd;
-       } ____cacheline_aligned_in_smp;
-
-       struct {
-               struct mutex            uring_lock;
-               wait_queue_head_t       wait;
        } ____cacheline_aligned_in_smp;
 
        struct {
                spinlock_t              completion_lock;
 
+               spinlock_t              timeout_lock;
+
                /*
                 * ->iopoll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -379,17 +420,62 @@ struct io_ring_ctx {
                struct list_head        iopoll_list;
                struct hlist_head       *cancel_hash;
                unsigned                cancel_hash_bits;
-               bool                    poll_multi_file;
-
-               spinlock_t              inflight_lock;
-               struct list_head        inflight_list;
+               bool                    poll_multi_queue;
        } ____cacheline_aligned_in_smp;
 
-       struct delayed_work             file_put_work;
-       struct llist_head               file_put_llist;
-
-       struct work_struct              exit_work;
        struct io_restriction           restrictions;
+
+       /* slow path rsrc auxiliary data, used by update/register */
+       struct {
+               struct io_rsrc_node             *rsrc_backup_node;
+               struct io_mapped_ubuf           *dummy_ubuf;
+               struct io_rsrc_data             *file_data;
+               struct io_rsrc_data             *buf_data;
+
+               struct delayed_work             rsrc_put_work;
+               struct llist_head               rsrc_put_llist;
+               struct list_head                rsrc_ref_list;
+               spinlock_t                      rsrc_ref_lock;
+       };
+
+       /* Keep this last, we don't need it for the fast path */
+       struct {
+               #if defined(CONFIG_UNIX)
+                       struct socket           *ring_sock;
+               #endif
+               /* hashed buffered write serialization */
+               struct io_wq_hash               *hash_map;
+
+               /* Only used for accounting purposes */
+               struct user_struct              *user;
+               struct mm_struct                *mm_account;
+
+               /* ctx exit and cancelation */
+               struct llist_head               fallback_llist;
+               struct delayed_work             fallback_work;
+               struct work_struct              exit_work;
+               struct list_head                tctx_list;
+               struct completion               ref_comp;
+               u32                             iowq_limits[2];
+               bool                            iowq_limits_set;
+       };
+};
+
+struct io_uring_task {
+       /* submission side */
+       int                     cached_refs;
+       struct xarray           xa;
+       struct wait_queue_head  wait;
+       const struct io_ring_ctx *last;
+       struct io_wq            *io_wq;
+       struct percpu_counter   inflight;
+       atomic_t                inflight_tracked;
+       atomic_t                in_idle;
+
+       spinlock_t              task_lock;
+       struct io_wq_work_list  task_list;
+       struct callback_head    task_work;
+       bool                    task_running;
 };
 
 /*
@@ -398,20 +484,24 @@ struct io_ring_ctx {
  */
 struct io_poll_iocb {
        struct file                     *file;
-       union {
-               struct wait_queue_head  *head;
-               u64                     addr;
-       };
+       struct wait_queue_head          *head;
        __poll_t                        events;
-       bool                            done;
-       bool                            canceled;
        struct wait_queue_entry         wait;
 };
 
+struct io_poll_update {
+       struct file                     *file;
+       u64                             old_user_data;
+       u64                             new_user_data;
+       __poll_t                        events;
+       bool                            update_events;
+       bool                            update_user_data;
+};
+
 struct io_close {
        struct file                     *file;
-       struct file                     *put_file;
        int                             fd;
+       u32                             file_slot;
 };
 
 struct io_timeout_data {
@@ -419,6 +509,7 @@ struct io_timeout_data {
        struct hrtimer                  timer;
        struct timespec64               ts;
        enum hrtimer_mode               mode;
+       u32                             flags;
 };
 
 struct io_accept {
@@ -426,6 +517,7 @@ struct io_accept {
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
+       u32                             file_slot;
        unsigned long                   nofile;
 };
 
@@ -447,11 +539,20 @@ struct io_timeout {
        u32                             off;
        u32                             target_seq;
        struct list_head                list;
+       /* head of the link, used by linked timeouts only */
+       struct io_kiocb                 *head;
+       /* for linked completions */
+       struct io_kiocb                 *prev;
 };
 
 struct io_timeout_rem {
        struct file                     *file;
        u64                             addr;
+
+       /* timeout update */
+       struct timespec64               ts;
+       u32                             flags;
+       bool                            ltimeout;
 };
 
 struct io_rw {
@@ -470,8 +571,9 @@ struct io_connect {
 struct io_sr_msg {
        struct file                     *file;
        union {
-               struct user_msghdr __user *umsg;
-               void __user             *buf;
+               struct compat_msghdr __user     *umsg_compat;
+               struct user_msghdr __user       *umsg;
+               void __user                     *buf;
        };
        int                             msg_flags;
        int                             bgid;
@@ -482,13 +584,13 @@ struct io_sr_msg {
 struct io_open {
        struct file                     *file;
        int                             dfd;
-       bool                            ignore_nonblock;
+       u32                             file_slot;
        struct filename                 *filename;
        struct open_how                 how;
        unsigned long                   nofile;
 };
 
-struct io_files_update {
+struct io_rsrc_update {
        struct file                     *file;
        u64                             arg;
        u32                             nr_args;
@@ -519,10 +621,10 @@ struct io_epoll {
 
 struct io_splice {
        struct file                     *file_out;
-       struct file                     *file_in;
        loff_t                          off_out;
        loff_t                          off_in;
        u64                             len;
+       int                             splice_fd_in;
        unsigned int                    flags;
 };
 
@@ -544,9 +646,52 @@ struct io_statx {
        struct statx __user             *buffer;
 };
 
+struct io_shutdown {
+       struct file                     *file;
+       int                             how;
+};
+
+struct io_rename {
+       struct file                     *file;
+       int                             old_dfd;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+       int                             flags;
+};
+
+struct io_unlink {
+       struct file                     *file;
+       int                             dfd;
+       int                             flags;
+       struct filename                 *filename;
+};
+
+struct io_mkdir {
+       struct file                     *file;
+       int                             dfd;
+       umode_t                         mode;
+       struct filename                 *filename;
+};
+
+struct io_symlink {
+       struct file                     *file;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+};
+
+struct io_hardlink {
+       struct file                     *file;
+       int                             old_dfd;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+       int                             flags;
+};
+
 struct io_completion {
        struct file                     *file;
-       struct list_head                list;
        u32                             cflags;
 };
 
@@ -556,7 +701,8 @@ struct io_async_connect {
 
 struct io_async_msghdr {
        struct iovec                    fast_iov[UIO_FASTIOV];
-       struct iovec                    *iov;
+       /* points to an allocated iov, if NULL we use fast_iov instead */
+       struct iovec                    *free_iov;
        struct sockaddr __user          *uaddr;
        struct msghdr                   msg;
        struct sockaddr_storage         addr;
@@ -566,6 +712,7 @@ struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        const struct iovec              *free_iovec;
        struct iov_iter                 iter;
+       struct iov_iter_state           iter_state;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -578,19 +725,24 @@ enum {
        REQ_F_FORCE_ASYNC_BIT   = IOSQE_ASYNC_BIT,
        REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
 
-       REQ_F_LINK_HEAD_BIT,
-       REQ_F_FAIL_LINK_BIT,
+       /* first byte is taken by user flags, shift it to not overlap */
+       REQ_F_FAIL_BIT          = 8,
        REQ_F_INFLIGHT_BIT,
        REQ_F_CUR_POS_BIT,
        REQ_F_NOWAIT_BIT,
        REQ_F_LINK_TIMEOUT_BIT,
-       REQ_F_ISREG_BIT,
        REQ_F_NEED_CLEANUP_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
-       REQ_F_NO_FILE_TABLE_BIT,
-       REQ_F_WORK_INITIALIZED_BIT,
-       REQ_F_LTIMEOUT_ACTIVE_BIT,
+       REQ_F_COMPLETE_INLINE_BIT,
+       REQ_F_REISSUE_BIT,
+       REQ_F_CREDS_BIT,
+       REQ_F_REFCOUNT_BIT,
+       REQ_F_ARM_LTIMEOUT_BIT,
+       /* keep async read/write and isreg together and in order */
+       REQ_F_NOWAIT_READ_BIT,
+       REQ_F_NOWAIT_WRITE_BIT,
+       REQ_F_ISREG_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -610,11 +762,9 @@ enum {
        /* IOSQE_BUFFER_SELECT */
        REQ_F_BUFFER_SELECT     = BIT(REQ_F_BUFFER_SELECT_BIT),
 
-       /* head of a link */
-       REQ_F_LINK_HEAD         = BIT(REQ_F_LINK_HEAD_BIT),
        /* fail rest of links */
-       REQ_F_FAIL_LINK         = BIT(REQ_F_FAIL_LINK_BIT),
-       /* on inflight list */
+       REQ_F_FAIL              = BIT(REQ_F_FAIL_BIT),
+       /* on inflight list, should be cancelled and waited on exit reliably */
        REQ_F_INFLIGHT          = BIT(REQ_F_INFLIGHT_BIT),
        /* read/write uses file position */
        REQ_F_CUR_POS           = BIT(REQ_F_CUR_POS_BIT),
@@ -622,20 +772,28 @@ enum {
        REQ_F_NOWAIT            = BIT(REQ_F_NOWAIT_BIT),
        /* has or had linked timeout */
        REQ_F_LINK_TIMEOUT      = BIT(REQ_F_LINK_TIMEOUT_BIT),
-       /* regular file */
-       REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
        /* already went through poll handler */
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
-       /* doesn't need file table for this request */
-       REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
-       /* io_wq_work is initialized */
-       REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
-       /* linked timeout is active, i.e. prepared by link's head */
-       REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
+       /* completion is deferred through io_comp_state */
+       REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
+       /* caller should reissue async */
+       REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
+       /* supports async reads */
+       REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
+       /* supports async writes */
+       REQ_F_NOWAIT_WRITE      = BIT(REQ_F_NOWAIT_WRITE_BIT),
+       /* regular file */
+       REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
+       /* has creds assigned */
+       REQ_F_CREDS             = BIT(REQ_F_CREDS_BIT),
+       /* skip refcounting if not set */
+       REQ_F_REFCOUNT          = BIT(REQ_F_REFCOUNT_BIT),
+       /* there is a linked timeout that has to be armed */
+       REQ_F_ARM_LTIMEOUT      = BIT(REQ_F_ARM_LTIMEOUT_BIT),
 };
 
 struct async_poll {
@@ -643,6 +801,21 @@ struct async_poll {
        struct io_poll_iocb     *double_poll;
 };
 
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
+
+struct io_task_work {
+       union {
+               struct io_wq_work_node  node;
+               struct llist_node       fallback_node;
+       };
+       io_req_tw_func_t                func;
+};
+
+enum {
+       IORING_RSRC_FILE                = 0,
+       IORING_RSRC_BUFFER              = 1,
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -654,6 +827,7 @@ struct io_kiocb {
                struct file             *file;
                struct io_rw            rw;
                struct io_poll_iocb     poll;
+               struct io_poll_update   poll_update;
                struct io_accept        accept;
                struct io_sync          sync;
                struct io_cancel        cancel;
@@ -663,13 +837,19 @@ struct io_kiocb {
                struct io_sr_msg        sr_msg;
                struct io_open          open;
                struct io_close         close;
-               struct io_files_update  files_update;
+               struct io_rsrc_update   rsrc_update;
                struct io_fadvise       fadvise;
                struct io_madvise       madvise;
                struct io_epoll         epoll;
                struct io_splice        splice;
                struct io_provide_buf   pbuf;
                struct io_statx         statx;
+               struct io_shutdown      shutdown;
+               struct io_rename        rename;
+               struct io_unlink        unlink;
+               struct io_mkdir         mkdir;
+               struct io_symlink       symlink;
+               struct io_hardlink      hardlink;
                /* use only after cleaning per-op data, see io_clean_op() */
                struct io_completion    compl;
        };
@@ -685,70 +865,44 @@ struct io_kiocb {
 
        struct io_ring_ctx              *ctx;
        unsigned int                    flags;
-       refcount_t                      refs;
+       atomic_t                        refs;
        struct task_struct              *task;
        u64                             user_data;
 
-       struct list_head                link_list;
+       struct io_kiocb                 *link;
+       struct percpu_ref               *fixed_rsrc_refs;
 
-       /*
-        * 1. used with ctx->iopoll_list with reads/writes
-        * 2. to track reqs with ->files (see io_op_def::file_table)
-        */
+       /* used with ctx->iopoll_list with reads/writes */
        struct list_head                inflight_entry;
-
-       struct list_head                iopoll_entry;
-
-       struct percpu_ref               *fixed_file_refs;
-       struct callback_head            task_work;
+       struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
        struct async_poll               *apoll;
        struct io_wq_work               work;
-};
+       const struct cred               *creds;
 
-struct io_defer_entry {
-       struct list_head        list;
-       struct io_kiocb         *req;
-       u32                     seq;
+       /* store used ubuf, so we can prevent reloading */
+       struct io_mapped_ubuf           *imu;
+       /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+       struct io_buffer                *kbuf;
+       atomic_t                        poll_refs;
 };
 
-#define IO_IOPOLL_BATCH                        8
-
-struct io_comp_state {
-       unsigned int            nr;
-       struct list_head        list;
+struct io_tctx_node {
+       struct list_head        ctx_node;
+       struct task_struct      *task;
        struct io_ring_ctx      *ctx;
 };
 
-struct io_submit_state {
-       struct blk_plug         plug;
-
-       /*
-        * io_kiocb alloc cache
-        */
-       void                    *reqs[IO_IOPOLL_BATCH];
-       unsigned int            free_reqs;
-
-       /*
-        * Batch completion logic
-        */
-       struct io_comp_state    comp;
-
-       /*
-        * File reference cache
-        */
-       struct file             *file;
-       unsigned int            fd;
-       unsigned int            has_refs;
-       unsigned int            ios_left;
+struct io_defer_entry {
+       struct list_head        list;
+       struct io_kiocb         *req;
+       u32                     seq;
 };
 
 struct io_op_def {
        /* needs req->file assigned */
        unsigned                needs_file : 1;
-       /* don't fail if file grab fails */
-       unsigned                needs_file_no_error : 1;
        /* hash wq insertion if file is a regular file */
        unsigned                hash_reg_file : 1;
        /* unbound wq insertion if file is a non-regular file */
@@ -760,11 +914,12 @@ struct io_op_def {
        unsigned                pollout : 1;
        /* op supports buffer selection */
        unsigned                buffer_select : 1;
-       /* must always have async data allocated */
-       unsigned                needs_async_data : 1;
+       /* do prep async if is going to be punted */
+       unsigned                needs_async_setup : 1;
+       /* should block plug */
+       unsigned                plug : 1;
        /* size of async data needed, if any */
        unsigned short          async_size;
-       unsigned                work_flags;
 };
 
 static const struct io_op_def io_op_defs[] = {
@@ -774,41 +929,36 @@ static const struct io_op_def io_op_defs[] = {
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
-               .needs_async_data       = 1,
+               .needs_async_setup      = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FILES,
        },
        [IORING_OP_WRITEV] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_async_data       = 1,
+               .needs_async_setup      = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
        },
        [IORING_OP_FSYNC] = {
                .needs_file             = 1,
-               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_READ_FIXED] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM |
-                                         IO_WQ_WORK_FILES,
        },
        [IORING_OP_WRITE_FIXED] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
-                                         IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
        },
        [IORING_OP_POLL_ADD] = {
                .needs_file             = 1,
@@ -817,126 +967,91 @@ static const struct io_op_def io_op_defs[] = {
        [IORING_OP_POLL_REMOVE] = {},
        [IORING_OP_SYNC_FILE_RANGE] = {
                .needs_file             = 1,
-               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_SENDMSG] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_async_data       = 1,
+               .needs_async_setup      = 1,
                .async_size             = sizeof(struct io_async_msghdr),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                               IO_WQ_WORK_FS,
        },
        [IORING_OP_RECVMSG] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
-               .needs_async_data       = 1,
+               .needs_async_setup      = 1,
                .async_size             = sizeof(struct io_async_msghdr),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                               IO_WQ_WORK_FS,
        },
        [IORING_OP_TIMEOUT] = {
-               .needs_async_data       = 1,
                .async_size             = sizeof(struct io_timeout_data),
-               .work_flags             = IO_WQ_WORK_MM,
        },
-       [IORING_OP_TIMEOUT_REMOVE] = {},
+       [IORING_OP_TIMEOUT_REMOVE] = {
+               /* used by timeout updates' prep() */
+       },
        [IORING_OP_ACCEPT] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
        },
        [IORING_OP_ASYNC_CANCEL] = {},
        [IORING_OP_LINK_TIMEOUT] = {
-               .needs_async_data       = 1,
                .async_size             = sizeof(struct io_timeout_data),
-               .work_flags             = IO_WQ_WORK_MM,
        },
        [IORING_OP_CONNECT] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_async_data       = 1,
+               .needs_async_setup      = 1,
                .async_size             = sizeof(struct io_async_connect),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_FS,
        },
        [IORING_OP_FALLOCATE] = {
                .needs_file             = 1,
-               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
-       },
-       [IORING_OP_OPENAT] = {
-               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
-                                               IO_WQ_WORK_FS,
-       },
-       [IORING_OP_CLOSE] = {
-               .needs_file             = 1,
-               .needs_file_no_error    = 1,
-               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
-       },
-       [IORING_OP_FILES_UPDATE] = {
-               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
-       },
-       [IORING_OP_STATX] = {
-               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
-                                               IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
        },
+       [IORING_OP_OPENAT] = {},
+       [IORING_OP_CLOSE] = {},
+       [IORING_OP_FILES_UPDATE] = {},
+       [IORING_OP_STATX] = {},
        [IORING_OP_READ] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FILES,
        },
        [IORING_OP_WRITE] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
+               .plug                   = 1,
                .async_size             = sizeof(struct io_async_rw),
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
        },
        [IORING_OP_FADVISE] = {
                .needs_file             = 1,
-               .work_flags             = IO_WQ_WORK_BLKCG,
-       },
-       [IORING_OP_MADVISE] = {
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
+       [IORING_OP_MADVISE] = {},
        [IORING_OP_SEND] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FS,
        },
        [IORING_OP_RECV] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
-               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
-                                         IO_WQ_WORK_FS,
        },
        [IORING_OP_OPENAT2] = {
-               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
-                                               IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_EPOLL_CTL] = {
                .unbound_nonreg_file    = 1,
-               .work_flags             = IO_WQ_WORK_FILES,
        },
        [IORING_OP_SPLICE] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
-               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FILES,
        },
        [IORING_OP_PROVIDE_BUFFERS] = {},
        [IORING_OP_REMOVE_BUFFERS] = {},
@@ -945,43 +1060,47 @@ static const struct io_op_def io_op_defs[] = {
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
        },
+       [IORING_OP_SHUTDOWN] = {
+               .needs_file             = 1,
+       },
+       [IORING_OP_RENAMEAT] = {},
+       [IORING_OP_UNLINKAT] = {},
 };
 
-enum io_mem_account {
-       ACCT_LOCKED,
-       ACCT_PINNED,
-};
+/* requests with any of those set should undergo io_disarm_next() */
+#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
 
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
-                       struct io_ring_ctx *ctx);
+static bool io_disarm_next(struct io_kiocb *req);
+static void io_uring_del_tctx_node(unsigned long index);
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+                                        struct task_struct *task,
+                                        bool cancel_all);
+static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
+
+static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
 
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
-                            struct io_comp_state *cs);
-static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
-static void io_put_req_deferred(struct io_kiocb *req, int nr);
-static void io_double_put_req(struct io_kiocb *req);
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
-static void __io_queue_linked_timeout(struct io_kiocb *req);
+static void io_put_req_deferred(struct io_kiocb *req);
+static void io_dismantle_req(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
-static int __io_sqe_files_update(struct io_ring_ctx *ctx,
-                                struct io_uring_files_update *ip,
-                                unsigned nr_args);
-static void __io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_submit_state *state,
+static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
+                                    struct io_uring_rsrc_update2 *up,
+                                    unsigned nr_args);
+static void io_clean_op(struct io_kiocb *req);
+static struct file *io_file_get(struct io_ring_ctx *ctx,
                                struct io_kiocb *req, int fd, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
-static void io_file_put_work(struct work_struct *work);
+static void __io_queue_sqe(struct io_kiocb *req);
+static void io_rsrc_put_work(struct work_struct *work);
 
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
-                              struct iovec **iovec, struct iov_iter *iter,
-                              bool needs_lock);
-static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
-                            const struct iovec *fast_iov,
-                            struct iov_iter *iter, bool force);
-static void io_req_drop_files(struct io_kiocb *req);
 static void io_req_task_queue(struct io_kiocb *req);
+static void io_submit_flush_completions(struct io_ring_ctx *ctx);
+static int io_req_prep_async(struct io_kiocb *req);
+
+static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
+                                unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
+static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 
 static struct kmem_cache *req_cachep;
 
@@ -1000,21 +1119,67 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
-static inline void io_clean_op(struct io_kiocb *req)
+static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
+{
+       if (!*locked) {
+               mutex_lock(&ctx->uring_lock);
+               *locked = true;
+       }
+}
+
+#define io_for_each_link(pos, head) \
+       for (pos = (head); pos; pos = pos->link)
+
+/*
+ * Shamelessly stolen from the mm implementation of page reference checking,
+ * see commit f958d7b528b1 for details.
+ */
+#define req_ref_zero_or_close_to_overflow(req) \
+       ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
 {
-       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
-               __io_clean_op(req);
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       return atomic_inc_not_zero(&req->refs);
 }
 
-static inline bool __io_match_files(struct io_kiocb *req,
-                                   struct files_struct *files)
+static inline bool req_ref_put_and_test(struct io_kiocb *req)
 {
-       if (req->file && req->file->f_op == &io_uring_fops)
+       if (likely(!(req->flags & REQ_F_REFCOUNT)))
                return true;
 
-       return ((req->flags & REQ_F_WORK_INITIALIZED) &&
-               (req->work.flags & IO_WQ_WORK_FILES)) &&
-               req->work.identity->files == files;
+       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+       return atomic_dec_and_test(&req->refs);
+}
+
+static inline void req_ref_get(struct io_kiocb *req)
+{
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+       atomic_inc(&req->refs);
+}
+
+static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
+{
+       if (!(req->flags & REQ_F_REFCOUNT)) {
+               req->flags |= REQ_F_REFCOUNT;
+               atomic_set(&req->refs, nr);
+       }
+}
+
+static inline void io_req_set_refcount(struct io_kiocb *req)
+{
+       __io_req_set_refcount(req, 1);
+}
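
The guard macro above relies on unsigned wrap-around: casting the count to unsigned and adding 127 maps zero and the last 127 underflowed ("negative") values into [0, 127], while any plausible live count stays above that range. A standalone check of the arithmetic:

    #include <assert.h>

    static int zero_or_close_to_overflow(int refs)
    {
            return (unsigned int)refs + 127u <= 127u;
    }

    static void demo(void)
    {
            assert(zero_or_close_to_overflow(0));           /* already dropped to zero */
            assert(zero_or_close_to_overflow(-1));          /* underflow */
            assert(zero_or_close_to_overflow(-127));        /* edge of the guard band */
            assert(!zero_or_close_to_overflow(1));          /* normal live request */
            assert(!zero_or_close_to_overflow(-128));       /* outside the band */
    }
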
+
+static inline void io_req_set_rsrc_node(struct io_kiocb *req)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+
+       if (!req->fixed_rsrc_refs) {
+               req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
+               percpu_ref_get(req->fixed_rsrc_refs);
+       }
 }
 
 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
@@ -1029,169 +1194,104 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
                percpu_ref_put(ref);
 }
 
-static bool io_match_task(struct io_kiocb *head,
-                         struct task_struct *task,
-                         struct files_struct *files)
+static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+                         bool cancel_all)
+       __must_hold(&req->ctx->timeout_lock)
 {
-       struct io_kiocb *link;
+       struct io_kiocb *req;
 
-       if (task && head->task != task) {
-               /* in terms of cancelation, always match if req task is dead */
-               if (head->task->flags & PF_EXITING)
-                       return true;
+       if (task && head->task != task)
                return false;
-       }
-       if (!files)
-               return true;
-       if (__io_match_files(head, files))
+       if (cancel_all)
                return true;
-       if (head->flags & REQ_F_LINK_HEAD) {
-               list_for_each_entry(link, &head->link_list, link_list) {
-                       if (__io_match_files(link, files))
-                               return true;
-               }
+
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
        }
        return false;
 }
 
-
-static void io_sq_thread_drop_mm(void)
+static bool io_match_linked(struct io_kiocb *head)
 {
-       struct mm_struct *mm = current->mm;
+       struct io_kiocb *req;
 
-       if (mm) {
-               kthread_unuse_mm(mm);
-               mmput(mm);
-               current->mm = NULL;
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
        }
+       return false;
 }
 
-static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+                              bool cancel_all)
 {
-       struct mm_struct *mm;
-
-       if (current->flags & PF_EXITING)
-               return -EFAULT;
-       if (current->mm)
-               return 0;
+       bool matched;
 
-       /* Should never happen */
-       if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-               return -EFAULT;
+       if (task && head->task != task)
+               return false;
+       if (cancel_all)
+               return true;
 
-       task_lock(ctx->sqo_task);
-       mm = ctx->sqo_task->mm;
-       if (unlikely(!mm || !mmget_not_zero(mm)))
-               mm = NULL;
-       task_unlock(ctx->sqo_task);
+       if (head->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = head->ctx;
 
-       if (mm) {
-               kthread_use_mm(mm);
-               return 0;
+               /* protect against races with linked timeouts */
+               spin_lock_irq(&ctx->timeout_lock);
+               matched = io_match_linked(head);
+               spin_unlock_irq(&ctx->timeout_lock);
+       } else {
+               matched = io_match_linked(head);
        }
+       return matched;
+}
 
-       return -EFAULT;
-}
-
-static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
-                                  struct io_kiocb *req)
-{
-       if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM))
-               return 0;
-       return __io_sq_thread_acquire_mm(ctx);
-}
-
-static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
-                                        struct cgroup_subsys_state **cur_css)
-
+static inline void req_set_fail(struct io_kiocb *req)
 {
-#ifdef CONFIG_BLK_CGROUP
-       /* puts the old one when swapping */
-       if (*cur_css != ctx->sqo_blkcg_css) {
-               kthread_associate_blkcg(ctx->sqo_blkcg_css);
-               *cur_css = ctx->sqo_blkcg_css;
-       }
-#endif
-}
-
-static void io_sq_thread_unassociate_blkcg(void)
-{
-#ifdef CONFIG_BLK_CGROUP
-       kthread_associate_blkcg(NULL);
-#endif
+       req->flags |= REQ_F_FAIL;
 }
 
-static inline void req_set_fail_links(struct io_kiocb *req)
+static inline void req_fail_link_node(struct io_kiocb *req, int res)
 {
-       if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       req_set_fail(req);
+       req->result = res;
 }
 
-/*
- * None of these are dereferenced, they are simply used to check if any of
- * them have changed. If we're under current and check they are still the
- * same, we're fine to grab references to them for actual out-of-line use.
- */
-static void io_init_identity(struct io_identity *id)
+static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
-       id->files = current->files;
-       id->mm = current->mm;
-#ifdef CONFIG_BLK_CGROUP
-       rcu_read_lock();
-       id->blkcg_css = blkcg_css();
-       rcu_read_unlock();
-#endif
-       id->creds = current_cred();
-       id->nsproxy = current->nsproxy;
-       id->fs = current->fs;
-       id->fsize = rlimit(RLIMIT_FSIZE);
-#ifdef CONFIG_AUDIT
-       id->loginuid = current->loginuid;
-       id->sessionid = current->sessionid;
-#endif
-       refcount_set(&id->count, 1);
-}
+       struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
 
-static inline void __io_req_init_async(struct io_kiocb *req)
-{
-       memset(&req->work, 0, sizeof(req->work));
-       req->flags |= REQ_F_WORK_INITIALIZED;
+       complete(&ctx->ref_comp);
 }
 
-/*
- * Note: must call io_req_init_async() for the first time you
- * touch any members of io_wq_work.
- */
-static inline void io_req_init_async(struct io_kiocb *req)
+static inline bool io_is_timeout_noseq(struct io_kiocb *req)
 {
-       struct io_uring_task *tctx = req->task->io_uring;
-
-       if (req->flags & REQ_F_WORK_INITIALIZED)
-               return;
-
-       __io_req_init_async(req);
-
-       /* Grab a ref if this isn't our static identity */
-       req->work.identity = tctx->identity;
-       if (tctx->identity != &tctx->__identity)
-               refcount_inc(&req->work.identity->count);
+       return !req->timeout.off;
 }
 
-static inline bool io_async_submit(struct io_ring_ctx *ctx)
+static void io_fallback_req_func(struct work_struct *work)
 {
-       return ctx->flags & IORING_SETUP_SQPOLL;
-}
+       struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+                                               fallback_work.work);
+       struct llist_node *node = llist_del_all(&ctx->fallback_llist);
+       struct io_kiocb *req, *tmp;
+       bool locked = false;
 
-static void io_ring_ctx_ref_free(struct percpu_ref *ref)
-{
-       struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
+       percpu_ref_get(&ctx->refs);
+       llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+               req->io_task_work.func(req, &locked);
 
-       complete(&ctx->ref_comp);
-}
+       if (locked) {
+               if (ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
+               mutex_unlock(&ctx->uring_lock);
+       }
+       percpu_ref_put(&ctx->refs);
 
-static inline bool io_is_timeout_noseq(struct io_kiocb *req)
-{
-       return !req->timeout.off;
 }
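
io_fallback_req_func() above is the consumer half of the fallback path. A hedged sketch of the producer side it implies (the actual call site is elsewhere in this file and the helper name here is hypothetical): when task_work delivery is not possible, the request is parked on ctx->fallback_llist and the delayed work is kicked.

    static void example_queue_fallback(struct io_kiocb *req)
    {
            struct io_ring_ctx *ctx = req->ctx;

            /* llist_add() returns true if the list was previously empty */
            if (llist_add(&req->io_task_work.fallback_node, &ctx->fallback_llist))
                    schedule_delayed_work(&ctx->fallback_work, 1);
    }
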
 
 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
@@ -1203,10 +1303,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        if (!ctx)
                return NULL;
 
-       ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
-       if (!ctx->fallback_req)
-               goto err;
-
        /*
         * Use 5 bits less than the max cq entries, that should give us around
         * 32 entries per hash list if totally full and uniformly spread.
@@ -1222,6 +1318,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
                goto err;
        __hash_init(ctx->cancel_hash, 1U << hash_bits);
 
+       ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
+       if (!ctx->dummy_ubuf)
+               goto err;
+       /* set invalid range, so io_import_fixed() fails meeting it */
+       ctx->dummy_ubuf->ubuf = -1UL;
+
        if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
                            PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
                goto err;
@@ -1229,232 +1331,109 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        ctx->flags = p->flags;
        init_waitqueue_head(&ctx->sqo_sq_wait);
        INIT_LIST_HEAD(&ctx->sqd_list);
-       init_waitqueue_head(&ctx->cq_wait);
+       init_waitqueue_head(&ctx->poll_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->ref_comp);
-       init_completion(&ctx->sq_thread_comp);
        xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
        xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
        mutex_init(&ctx->uring_lock);
-       init_waitqueue_head(&ctx->wait);
+       init_waitqueue_head(&ctx->cq_wait);
        spin_lock_init(&ctx->completion_lock);
+       spin_lock_init(&ctx->timeout_lock);
        INIT_LIST_HEAD(&ctx->iopoll_list);
        INIT_LIST_HEAD(&ctx->defer_list);
        INIT_LIST_HEAD(&ctx->timeout_list);
-       spin_lock_init(&ctx->inflight_lock);
-       INIT_LIST_HEAD(&ctx->inflight_list);
-       INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
-       init_llist_head(&ctx->file_put_llist);
+       INIT_LIST_HEAD(&ctx->ltimeout_list);
+       spin_lock_init(&ctx->rsrc_ref_lock);
+       INIT_LIST_HEAD(&ctx->rsrc_ref_list);
+       INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
+       init_llist_head(&ctx->rsrc_put_llist);
+       INIT_LIST_HEAD(&ctx->tctx_list);
+       INIT_LIST_HEAD(&ctx->submit_state.free_list);
+       INIT_LIST_HEAD(&ctx->locked_free_list);
+       INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        return ctx;
 err:
-       if (ctx->fallback_req)
-               kmem_cache_free(req_cachep, ctx->fallback_req);
+       kfree(ctx->dummy_ubuf);
        kfree(ctx->cancel_hash);
        kfree(ctx);
        return NULL;
 }
 
+static void io_account_cq_overflow(struct io_ring_ctx *ctx)
+{
+       struct io_rings *r = ctx->rings;
+
+       WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
+       ctx->cq_extra--;
+}
+
 static bool req_need_defer(struct io_kiocb *req, u32 seq)
 {
        if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
                struct io_ring_ctx *ctx = req->ctx;
 
-               return seq != ctx->cached_cq_tail
-                               + READ_ONCE(ctx->cached_cq_overflow);
+               return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
        }
 
        return false;
 }
 
-static void __io_commit_cqring(struct io_ring_ctx *ctx)
-{
-       struct io_rings *rings = ctx->rings;
-
-       /* order cqe stores with ring update */
-       smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-}
+#define FFS_ASYNC_READ         0x1UL
+#define FFS_ASYNC_WRITE                0x2UL
+#ifdef CONFIG_64BIT
+#define FFS_ISREG              0x4UL
+#else
+#define FFS_ISREG              0x0UL
+#endif
+#define FFS_MASK               ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
 
-static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
+static inline bool io_req_ffs_set(struct io_kiocb *req)
 {
-       if (req->work.identity == &tctx->__identity)
-               return;
-       if (refcount_dec_and_test(&req->work.identity->count))
-               kfree(req->work.identity);
+       return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
 }
 
-static void io_req_clean_work(struct io_kiocb *req)
+static void io_req_track_inflight(struct io_kiocb *req)
 {
-       if (!(req->flags & REQ_F_WORK_INITIALIZED))
-               return;
-
-       req->flags &= ~REQ_F_WORK_INITIALIZED;
-
-       if (req->work.flags & IO_WQ_WORK_MM) {
-               mmdrop(req->work.identity->mm);
-               req->work.flags &= ~IO_WQ_WORK_MM;
-       }
-#ifdef CONFIG_BLK_CGROUP
-       if (req->work.flags & IO_WQ_WORK_BLKCG) {
-               css_put(req->work.identity->blkcg_css);
-               req->work.flags &= ~IO_WQ_WORK_BLKCG;
-       }
-#endif
-       if (req->work.flags & IO_WQ_WORK_CREDS) {
-               put_cred(req->work.identity->creds);
-               req->work.flags &= ~IO_WQ_WORK_CREDS;
-       }
-       if (req->work.flags & IO_WQ_WORK_FS) {
-               struct fs_struct *fs = req->work.identity->fs;
-
-               spin_lock(&req->work.identity->fs->lock);
-               if (--fs->users)
-                       fs = NULL;
-               spin_unlock(&req->work.identity->fs->lock);
-               if (fs)
-                       free_fs_struct(fs);
-               req->work.flags &= ~IO_WQ_WORK_FS;
+       if (!(req->flags & REQ_F_INFLIGHT)) {
+               req->flags |= REQ_F_INFLIGHT;
+               atomic_inc(&req->task->io_uring->inflight_tracked);
        }
-       if (req->flags & REQ_F_INFLIGHT)
-               io_req_drop_files(req);
-
-       io_put_identity(req->task->io_uring, req);
 }
 
-/*
- * Create a private copy of io_identity, since some fields don't match
- * the current context.
- */
-static bool io_identity_cow(struct io_kiocb *req)
+static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
-       struct io_uring_task *tctx = req->task->io_uring;
-       const struct cred *creds = NULL;
-       struct io_identity *id;
-
-       if (req->work.flags & IO_WQ_WORK_CREDS)
-               creds = req->work.identity->creds;
-
-       id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
-       if (unlikely(!id)) {
-               req->work.flags |= IO_WQ_WORK_CANCEL;
-               return false;
-       }
-
-       /*
-        * We can safely just re-init the creds we copied  Either the field
-        * matches the current one, or we haven't grabbed it yet. The only
-        * exception is ->creds, through registered personalities, so handle
-        * that one separately.
-        */
-       io_init_identity(id);
-       if (creds)
-               id->creds = creds;
-
-       /* add one for this request */
-       refcount_inc(&id->count);
+       if (WARN_ON_ONCE(!req->link))
+               return NULL;
 
-       /* drop tctx and req identity references, if needed */
-       if (tctx->identity != &tctx->__identity &&
-           refcount_dec_and_test(&tctx->identity->count))
-               kfree(tctx->identity);
-       if (req->work.identity != &tctx->__identity &&
-           refcount_dec_and_test(&req->work.identity->count))
-               kfree(req->work.identity);
+       req->flags &= ~REQ_F_ARM_LTIMEOUT;
+       req->flags |= REQ_F_LINK_TIMEOUT;
 
-       req->work.identity = id;
-       tctx->identity = id;
-       return true;
+       /* linked timeouts should have two refs once prep'ed */
+       io_req_set_refcount(req);
+       __io_req_set_refcount(req->link, 2);
+       return req->link;
 }
 
-static bool io_grab_identity(struct io_kiocb *req)
+static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 {
-       const struct io_op_def *def = &io_op_defs[req->opcode];
-       struct io_identity *id = req->work.identity;
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (def->work_flags & IO_WQ_WORK_FSIZE) {
-               if (id->fsize != rlimit(RLIMIT_FSIZE))
-                       return false;
-               req->work.flags |= IO_WQ_WORK_FSIZE;
-       }
-#ifdef CONFIG_BLK_CGROUP
-       if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
-           (def->work_flags & IO_WQ_WORK_BLKCG)) {
-               rcu_read_lock();
-               if (id->blkcg_css != blkcg_css()) {
-                       rcu_read_unlock();
-                       return false;
-               }
-               /*
-                * This should be rare, either the cgroup is dying or the task
-                * is moving cgroups. Just punt to root for the handful of ios.
-                */
-               if (css_tryget_online(id->blkcg_css))
-                       req->work.flags |= IO_WQ_WORK_BLKCG;
-               rcu_read_unlock();
-       }
-#endif
-       if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
-               if (id->creds != current_cred())
-                       return false;
-               get_cred(id->creds);
-               req->work.flags |= IO_WQ_WORK_CREDS;
-       }
-#ifdef CONFIG_AUDIT
-       if (!uid_eq(current->loginuid, id->loginuid) ||
-           current->sessionid != id->sessionid)
-               return false;
-#endif
-       if (!(req->work.flags & IO_WQ_WORK_FS) &&
-           (def->work_flags & IO_WQ_WORK_FS)) {
-               if (current->fs != id->fs)
-                       return false;
-               spin_lock(&id->fs->lock);
-               if (!id->fs->in_exec) {
-                       id->fs->users++;
-                       req->work.flags |= IO_WQ_WORK_FS;
-               } else {
-                       req->work.flags |= IO_WQ_WORK_CANCEL;
-               }
-               spin_unlock(&current->fs->lock);
-       }
-       if (!(req->work.flags & IO_WQ_WORK_FILES) &&
-           (def->work_flags & IO_WQ_WORK_FILES) &&
-           !(req->flags & REQ_F_NO_FILE_TABLE)) {
-               if (id->files != current->files ||
-                   id->nsproxy != current->nsproxy)
-                       return false;
-               atomic_inc(&id->files->count);
-               get_nsproxy(id->nsproxy);
-
-               if (!(req->flags & REQ_F_INFLIGHT)) {
-                       req->flags |= REQ_F_INFLIGHT;
-
-                       spin_lock_irq(&ctx->inflight_lock);
-                       list_add(&req->inflight_entry, &ctx->inflight_list);
-                       spin_unlock_irq(&ctx->inflight_lock);
-               }
-               req->work.flags |= IO_WQ_WORK_FILES;
-       }
-       if (!(req->work.flags & IO_WQ_WORK_MM) &&
-           (def->work_flags & IO_WQ_WORK_MM)) {
-               if (id->mm != current->mm)
-                       return false;
-               mmgrab(id->mm);
-               req->work.flags |= IO_WQ_WORK_MM;
-       }
-
-       return true;
+       if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+               return NULL;
+       return __io_prep_linked_timeout(req);
 }
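
The REQ_F_ARM_LTIMEOUT/REQ_F_LINK_TIMEOUT handling above is what backs IORING_OP_LINK_TIMEOUT as seen from userspace. A minimal sketch, assuming liburing is available, of arming a linked timeout against a read that would otherwise never complete:

#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        char buf[64];
        int fds[2], i;

        if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
                return 1;

        /* read from an empty pipe: never completes on its own */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
        sqe->flags |= IOSQE_IO_LINK;    /* the next SQE is linked to this one */
        io_uring_sqe_set_data(sqe, (void *)1);

        /* linked timeout: cancels the read if it takes longer than 1s */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);
        io_uring_sqe_set_data(sqe, (void *)2);

        io_uring_submit(&ring);

        for (i = 0; i < 2; i++) {
                if (io_uring_wait_cqe(&ring, &cqe))
                        break;
                /* typically -ECANCELED for the read, -ETIME for the timeout */
                printf("req %lu: res %d\n",
                       (unsigned long)io_uring_cqe_get_data(cqe), cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}
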
 
 static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_identity *id;
 
-       io_req_init_async(req);
-       id = req->work.identity;
+       if (!(req->flags & REQ_F_CREDS)) {
+               req->flags |= REQ_F_CREDS;
+               req->creds = get_current_cred();
+       }
 
+       req->work.list.next = NULL;
+       req->work.flags = 0;
        if (req->flags & REQ_F_FORCE_ASYNC)
                req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
@@ -1465,92 +1444,77 @@ static void io_prep_async_work(struct io_kiocb *req)
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
-
-       /* if we fail grabbing identity, we must COW, regrab, and retry */
-       if (io_grab_identity(req))
-               return;
-
-       if (!io_identity_cow(req))
-               return;
-
-       /* can't fail at this point */
-       if (!io_grab_identity(req))
-               WARN_ON(1);
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
 {
        struct io_kiocb *cur;
 
-       io_prep_async_work(req);
-       if (req->flags & REQ_F_LINK_HEAD)
-               list_for_each_entry(cur, &req->link_list, link_list)
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->timeout_lock);
+               io_for_each_link(cur, req)
                        io_prep_async_work(cur);
+               spin_unlock_irq(&ctx->timeout_lock);
+       } else {
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+       }
 }
 
-static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
+static void io_queue_async_work(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *link = io_prep_linked_timeout(req);
+       struct io_uring_task *tctx = req->task->io_uring;
 
-       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-                                       &req->work, req->flags);
-       io_wq_enqueue(ctx->io_wq, &req->work);
-       return link;
-}
+       /* must not take the lock, NULL it as a precaution */
+       locked = NULL;
 
-static void io_queue_async_work(struct io_kiocb *req)
-{
-       struct io_kiocb *link;
+       BUG_ON(!tctx);
+       BUG_ON(!tctx->io_wq);
 
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
-       link = __io_queue_async_work(req);
 
+       /*
+        * Not expected to happen, but if we do have a bug where this _can_
+        * happen, catch it here and ensure the request is marked as
+        * canceled. That will make io-wq go through the usual work cancel
+        * procedure rather than attempt to run this request (or create a new
+        * worker for it).
+        */
+       if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+               req->work.flags |= IO_WQ_WORK_CANCEL;
+
+       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+                                       &req->work, req->flags);
+       io_wq_enqueue(tctx->io_wq, &req->work);
        if (link)
                io_queue_linked_timeout(link);
 }
 
 static void io_kill_timeout(struct io_kiocb *req, int status)
+       __must_hold(&req->ctx->completion_lock)
+       __must_hold(&req->ctx->timeout_lock)
 {
        struct io_timeout_data *io = req->async_data;
-       int ret;
 
-       ret = hrtimer_try_to_cancel(&io->timer);
-       if (ret != -1) {
+       if (hrtimer_try_to_cancel(&io->timer) != -1) {
                if (status)
-                       req_set_fail_links(req);
+                       req_set_fail(req);
                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
-               io_cqring_fill_event(req, status);
-               io_put_req_deferred(req, 1);
-       }
-}
-
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                            struct files_struct *files)
-{
-       struct io_kiocb *req, *tmp;
-       int canceled = 0;
-
-       spin_lock_irq(&ctx->completion_lock);
-       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-               if (io_match_task(req, tsk, files)) {
-                       io_kill_timeout(req, -ECANCELED);
-                       canceled++;
-               }
+               io_fill_cqe_req(req, status, 0);
+               io_put_req_deferred(req);
        }
-       spin_unlock_irq(&ctx->completion_lock);
-       return canceled != 0;
 }
 
-static void __io_queue_deferred(struct io_ring_ctx *ctx)
+static void io_queue_deferred(struct io_ring_ctx *ctx)
 {
-       do {
+       while (!list_empty(&ctx->defer_list)) {
                struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
                                                struct io_defer_entry, list);
 
@@ -1559,19 +1523,16 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
                list_del_init(&de->list);
                io_req_task_queue(de->req);
                kfree(de);
-       } while (!list_empty(&ctx->defer_list));
+       }
 }
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->completion_lock)
 {
+       u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
        struct io_kiocb *req, *tmp;
-       u32 seq;
-
-       if (list_empty(&ctx->timeout_list))
-               return;
-
-       seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
+       spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                u32 events_needed, events_got;
 
@@ -1592,441 +1553,564 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
                io_kill_timeout(req, 0);
        }
-
        ctx->cq_last_tm_flush = seq;
+       spin_unlock_irq(&ctx->timeout_lock);
 }
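
io_kill_timeout()/io_flush_timeouts() above retire IORING_OP_TIMEOUT requests (for example with -ECANCELED when a ring or task is torn down). A minimal sketch, assuming liburing is available, of arming a 100ms timeout from userspace; when the timer simply expires it completes with -ETIME:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000 };

        if (io_uring_queue_init(4, &ring, 0))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        /* count == 0: a pure timer, not tied to a number of completions */
        io_uring_prep_timeout(sqe, &ts, 0, 0);
        io_uring_submit(&ring);

        if (!io_uring_wait_cqe(&ring, &cqe)) {
                /* -ETIME is the normal "timer expired" result */
                printf("timeout res %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}
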
 
-static void io_commit_cqring(struct io_ring_ctx *ctx)
+static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
-       io_flush_timeouts(ctx);
-       __io_commit_cqring(ctx);
+       if (ctx->off_timeout_used)
+               io_flush_timeouts(ctx);
+       if (ctx->drain_active)
+               io_queue_deferred(ctx);
+}
 
-       if (unlikely(!list_empty(&ctx->defer_list)))
-               __io_queue_deferred(ctx);
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+       if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+               __io_commit_cqring_flush(ctx);
+       /* order cqe stores with ring update */
+       smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
        struct io_rings *r = ctx->rings;
 
-       return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
+       return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
 }
 
-static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+       return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
+static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
        struct io_rings *rings = ctx->rings;
-       unsigned tail;
+       unsigned tail, mask = ctx->cq_entries - 1;
 
-       tail = ctx->cached_cq_tail;
        /*
         * writes to the cq entry need to come after reading head; the
         * control dependency is enough as we're using WRITE_ONCE to
         * fill the cq entry
         */
-       if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
+       if (__io_cqring_events(ctx) == ctx->cq_entries)
                return NULL;
 
-       ctx->cached_cq_tail++;
-       return &rings->cqes[tail & ctx->cq_mask];
+       tail = ctx->cached_cq_tail++;
+       return &rings->cqes[tail & mask];
 }
 
 static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
 {
-       if (!ctx->cq_ev_fd)
+       if (likely(!ctx->cq_ev_fd))
                return false;
        if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
                return false;
-       if (!ctx->eventfd_async)
-               return true;
-       return io_wq_current_is_worker();
+       return !ctx->eventfd_async || io_wq_current_is_worker();
 }
 
+/*
+ * This should only get called when at least one event has been posted.
+ * Some applications rely on the eventfd notification count only changing
+ * IFF a new CQE has been added to the CQ ring. There's no dependency on
+ * a 1:1 relationship between how many times this function is called (and
+ * hence the eventfd count) and the number of CQEs posted to the CQ ring.
+ */
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
-       if (wq_has_sleeper(&ctx->cq_wait)) {
-               wake_up_interruptible(&ctx->cq_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
-       if (waitqueue_active(&ctx->wait))
-               wake_up(&ctx->wait);
+       /*
+        * wake_up_all() may seem excessive, but io_wake_function() and
+        * io_should_wake() handle the termination of the loop and only
+        * wake as many waiters as we need to.
+        */
+       if (wq_has_sleeper(&ctx->cq_wait))
+               wake_up_all(&ctx->cq_wait);
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->poll_wait))
+               wake_up_interruptible(&ctx->poll_wait);
 }
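
The eventfd_signal() above is what drives completion notifications for rings with a registered eventfd, per the comment before io_cqring_ev_posted(). A minimal sketch, assuming liburing is available, of registering an eventfd and blocking on it until a CQE arrives:

#include <liburing.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        uint64_t count;
        int efd;

        if (io_uring_queue_init(4, &ring, 0))
                return 1;
        efd = eventfd(0, 0);
        if (efd < 0 || io_uring_register_eventfd(&ring, efd))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_submit(&ring);

        /* blocks until io_cqring_ev_posted() signals the registered eventfd */
        if (read(efd, &count, sizeof(count)) == sizeof(count))
                printf("eventfd fired, %llu notification(s)\n",
                       (unsigned long long)count);

        if (!io_uring_wait_cqe(&ring, &cqe)) {
                printf("nop res %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        close(efd);
        return 0;
}
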
 
-static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 {
-       if (list_empty(&ctx->cq_overflow_list)) {
-               clear_bit(0, &ctx->sq_check_overflow);
-               clear_bit(0, &ctx->cq_check_overflow);
-               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+       /* see waitqueue_active() comment */
+       smp_mb();
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               if (waitqueue_active(&ctx->cq_wait))
+                       wake_up_all(&ctx->cq_wait);
        }
+       if (io_should_trigger_evfd(ctx))
+               eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->poll_wait))
+               wake_up_interruptible(&ctx->poll_wait);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
-static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
-                                      struct task_struct *tsk,
-                                      struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
-       struct io_rings *rings = ctx->rings;
-       struct io_kiocb *req, *tmp;
-       struct io_uring_cqe *cqe;
-       unsigned long flags;
-       LIST_HEAD(list);
-
-       if (!force) {
-               if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
-                   rings->cq_ring_entries))
-                       return false;
-       }
+       bool all_flushed, posted;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
+               return false;
 
-       cqe = NULL;
-       list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
-               if (!io_match_task(req, tsk, files))
-                       continue;
+       posted = false;
+       spin_lock(&ctx->completion_lock);
+       while (!list_empty(&ctx->cq_overflow_list)) {
+               struct io_uring_cqe *cqe = io_get_cqe(ctx);
+               struct io_overflow_cqe *ocqe;
 
-               cqe = io_get_cqring(ctx);
                if (!cqe && !force)
                        break;
+               ocqe = list_first_entry(&ctx->cq_overflow_list,
+                                       struct io_overflow_cqe, list);
+               if (cqe)
+                       memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
+               else
+                       io_account_cq_overflow(ctx);
 
-               list_move(&req->compl.list, &list);
-               if (cqe) {
-                       WRITE_ONCE(cqe->user_data, req->user_data);
-                       WRITE_ONCE(cqe->res, req->result);
-                       WRITE_ONCE(cqe->flags, req->compl.cflags);
-               } else {
-                       ctx->cached_cq_overflow++;
-                       WRITE_ONCE(ctx->rings->cq_overflow,
-                                  ctx->cached_cq_overflow);
-               }
+               posted = true;
+               list_del(&ocqe->list);
+               kfree(ocqe);
        }
 
-       io_commit_cqring(ctx);
-       io_cqring_mark_overflow(ctx);
-
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       io_cqring_ev_posted(ctx);
-
-       while (!list_empty(&list)) {
-               req = list_first_entry(&list, struct io_kiocb, compl.list);
-               list_del(&req->compl.list);
-               io_put_req(req);
+       all_flushed = list_empty(&ctx->cq_overflow_list);
+       if (all_flushed) {
+               clear_bit(0, &ctx->check_cq_overflow);
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
        }
 
-       return cqe != NULL;
+       if (posted)
+               io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
+       if (posted)
+               io_cqring_ev_posted(ctx);
+       return all_flushed;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
-                                    struct task_struct *tsk,
-                                    struct files_struct *files)
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-       if (test_bit(0, &ctx->cq_check_overflow)) {
+       bool ret = true;
+
+       if (test_bit(0, &ctx->check_cq_overflow)) {
                /* iopoll syncs against uring_lock, not completion_lock */
                if (ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_lock(&ctx->uring_lock);
-               __io_cqring_overflow_flush(ctx, force, tsk, files);
+               ret = __io_cqring_overflow_flush(ctx, false);
                if (ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_unlock(&ctx->uring_lock);
        }
+
+       return ret;
 }
 
-static void __io_cqring_fill_event(struct io_kiocb *req, long res,
-                                  unsigned int cflags)
+/* must be called shortly after putting a request */
+static inline void io_put_task(struct task_struct *task, int nr)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_uring_cqe *cqe;
-
-       trace_io_uring_complete(ctx, req->user_data, res);
+       struct io_uring_task *tctx = task->io_uring;
 
-       /*
-        * If we can't get a cq entry, userspace overflowed the
-        * submission (by quite a lot). Increment the overflow count in
-        * the ring.
-        */
-       cqe = io_get_cqring(ctx);
-       if (likely(cqe)) {
-               WRITE_ONCE(cqe->user_data, req->user_data);
-               WRITE_ONCE(cqe->res, res);
-               WRITE_ONCE(cqe->flags, cflags);
-       } else if (ctx->cq_overflow_flushed ||
-                  atomic_read(&req->task->io_uring->in_idle)) {
-               /*
-                * If we're in ring overflow flush mode, or in task cancel mode,
-                * then we cannot store the request for later flushing, we need
-                * to drop it on the floor.
-                */
-               ctx->cached_cq_overflow++;
-               WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
+       if (likely(task == current)) {
+               tctx->cached_refs += nr;
        } else {
-               if (list_empty(&ctx->cq_overflow_list)) {
-                       set_bit(0, &ctx->sq_check_overflow);
-                       set_bit(0, &ctx->cq_check_overflow);
-                       ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
-               }
-               io_clean_op(req);
-               req->result = res;
-               req->compl.cflags = cflags;
-               refcount_inc(&req->refs);
-               list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
+               percpu_counter_sub(&tctx->inflight, nr);
+               if (unlikely(atomic_read(&tctx->in_idle)))
+                       wake_up(&tctx->wait);
+               put_task_struct_many(task, nr);
        }
 }
 
-static void io_cqring_fill_event(struct io_kiocb *req, long res)
+static void io_task_refs_refill(struct io_uring_task *tctx)
 {
-       __io_cqring_fill_event(req, res, 0);
+       unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
+
+       percpu_counter_add(&tctx->inflight, refill);
+       refcount_add(refill, &current->usage);
+       tctx->cached_refs += refill;
 }
 
-static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
+static inline void io_get_task_refs(int nr)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ctx->completion_lock, flags);
-       __io_cqring_fill_event(req, res, cflags);
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       struct io_uring_task *tctx = current->io_uring;
 
-       io_cqring_ev_posted(ctx);
+       tctx->cached_refs -= nr;
+       if (unlikely(tctx->cached_refs < 0))
+               io_task_refs_refill(tctx);
 }
 
-static void io_submit_flush_completions(struct io_comp_state *cs)
+static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 {
-       struct io_ring_ctx *ctx = cs->ctx;
+       struct io_uring_task *tctx = task->io_uring;
+       unsigned int refs = tctx->cached_refs;
 
-       spin_lock_irq(&ctx->completion_lock);
-       while (!list_empty(&cs->list)) {
-               struct io_kiocb *req;
+       if (refs) {
+               tctx->cached_refs = 0;
+               percpu_counter_sub(&tctx->inflight, refs);
+               put_task_struct_many(task, refs);
+       }
+}
 
-               req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
-               list_del(&req->compl.list);
-               __io_cqring_fill_event(req, req->result, req->compl.cflags);
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+                                    s32 res, u32 cflags)
+{
+       struct io_overflow_cqe *ocqe;
 
+       ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
+       if (!ocqe) {
                /*
-                * io_free_req() doesn't care about completion_lock unless one
-                * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
-                * because of a potential deadlock with req->work.fs->lock
+                * If we're in ring overflow flush mode, or in task cancel mode,
+                * or cannot allocate an overflow entry, then we need to drop it
+                * on the floor.
                 */
-               if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
-                                |REQ_F_WORK_INITIALIZED)) {
-                       spin_unlock_irq(&ctx->completion_lock);
-                       io_put_req(req);
-                       spin_lock_irq(&ctx->completion_lock);
-               } else {
-                       io_put_req(req);
-               }
+               io_account_cq_overflow(ctx);
+               return false;
        }
-       io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
+       if (list_empty(&ctx->cq_overflow_list)) {
+               set_bit(0, &ctx->check_cq_overflow);
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
 
-       io_cqring_ev_posted(ctx);
-       cs->nr = 0;
+       }
+       ocqe->cqe.user_data = user_data;
+       ocqe->cqe.res = res;
+       ocqe->cqe.flags = cflags;
+       list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
+       return true;
 }
 
-static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
-                             struct io_comp_state *cs)
+static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+                                s32 res, u32 cflags)
 {
-       if (!cs) {
-               io_cqring_add_event(req, res, cflags);
-               io_put_req(req);
-       } else {
-               io_clean_op(req);
-               req->result = res;
-               req->compl.cflags = cflags;
-               list_add_tail(&req->compl.list, &cs->list);
-               if (++cs->nr >= 32)
-                       io_submit_flush_completions(cs);
+       struct io_uring_cqe *cqe;
+
+       trace_io_uring_complete(ctx, user_data, res, cflags);
+
+       /*
+        * If we can't get a cq entry, userspace overflowed the
+        * submission (by quite a lot). Increment the overflow count in
+        * the ring.
+        */
+       cqe = io_get_cqe(ctx);
+       if (likely(cqe)) {
+               WRITE_ONCE(cqe->user_data, user_data);
+               WRITE_ONCE(cqe->res, res);
+               WRITE_ONCE(cqe->flags, cflags);
+               return true;
        }
+       return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
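
When io_get_cqe() returns NULL here, the completion is either dropped (accounted via io_account_cq_overflow()) or parked on ctx->cq_overflow_list until userspace frees CQ space. Applications that expect completion bursts can make this path less likely by sizing the CQ ring explicitly; a small sketch, assuming liburing is available, using IORING_SETUP_CQSIZE:

#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_CQSIZE;
        p.cq_entries = 4096;    /* ask for a CQ ring larger than the default 2 * SQ size */

        if (io_uring_queue_init_params(64, &ring, &p) < 0)
                return 1;

        /* the kernel writes back the actual (power-of-two) ring sizes */
        printf("sq entries %u, cq entries %u\n", p.sq_entries, p.cq_entries);

        io_uring_queue_exit(&ring);
        return 0;
}
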
 
-static void io_req_complete(struct io_kiocb *req, long res)
+static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 {
-       __io_req_complete(req, res, 0, NULL);
+       __io_fill_cqe(req->ctx, req->user_data, res, cflags);
 }
 
-static inline bool io_is_fallback_req(struct io_kiocb *req)
+static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+                                    s32 res, u32 cflags)
 {
-       return req == (struct io_kiocb *)
-                       ((unsigned long) req->ctx->fallback_req & ~1UL);
+       ctx->cq_extra++;
+       return __io_fill_cqe(ctx, user_data, res, cflags);
 }
 
-static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
+static void io_req_complete_post(struct io_kiocb *req, s32 res,
+                                u32 cflags)
 {
-       struct io_kiocb *req;
-
-       req = ctx->fallback_req;
-       if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
-               return req;
+       struct io_ring_ctx *ctx = req->ctx;
 
-       return NULL;
-}
+       spin_lock(&ctx->completion_lock);
+       __io_fill_cqe(ctx, req->user_data, res, cflags);
+       /*
+        * If we're the last reference to this request, add to our locked
+        * free_list cache.
+        */
+       if (req_ref_put_and_test(req)) {
+               if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+                       if (req->flags & IO_DISARM_MASK)
+                               io_disarm_next(req);
+                       if (req->link) {
+                               io_req_task_queue(req->link);
+                               req->link = NULL;
+                       }
+               }
+               io_dismantle_req(req);
+               io_put_task(req->task, 1);
+               list_add(&req->inflight_entry, &ctx->locked_free_list);
+               ctx->locked_free_nr++;
+       } else {
+               if (!percpu_ref_tryget(&ctx->refs))
+                       req = NULL;
+       }
+       io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
 
-static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
-                                    struct io_submit_state *state)
+       if (req) {
+               io_cqring_ev_posted(ctx);
+               percpu_ref_put(&ctx->refs);
+       }
+}
+
+static inline bool io_req_needs_clean(struct io_kiocb *req)
 {
-       if (!state->free_reqs) {
-               gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-               size_t sz;
-               int ret;
+       return req->flags & IO_REQ_CLEAN_FLAGS;
+}
+
+static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
+                                        u32 cflags)
+{
+       if (io_req_needs_clean(req))
+               io_clean_op(req);
+       req->result = res;
+       req->compl.cflags = cflags;
+       req->flags |= REQ_F_COMPLETE_INLINE;
+}
 
-               sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
-               ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
+                                    s32 res, u32 cflags)
+{
+       if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+               io_req_complete_state(req, res, cflags);
+       else
+               io_req_complete_post(req, res, cflags);
+}
 
-               /*
-                * Bulk alloc is all-or-nothing. If we fail to get a batch,
-                * retry single alloc to be on the safe side.
-                */
-               if (unlikely(ret <= 0)) {
-                       state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-                       if (!state->reqs[0])
-                               goto fallback;
-                       ret = 1;
-               }
-               state->free_reqs = ret;
+static inline void io_req_complete(struct io_kiocb *req, s32 res)
+{
+       __io_req_complete(req, 0, res, 0);
+}
+
+static void io_req_complete_failed(struct io_kiocb *req, s32 res)
+{
+       req_set_fail(req);
+       io_req_complete_post(req, res, 0);
+}
+
+static void io_req_complete_fail_submit(struct io_kiocb *req)
+{
+       /*
+        * We don't submit; fail them all. For that, replace hardlinks with
+        * normal links. Extra REQ_F_LINK is tolerated.
+        */
+       req->flags &= ~REQ_F_HARDLINK;
+       req->flags |= REQ_F_LINK;
+       io_req_complete_failed(req, req->result);
+}
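
io_req_complete_fail_submit() above, together with io_fail_links() further down, implements the userspace-visible rule that once one request in an IOSQE_IO_LINK chain fails, the rest of the chain completes with -ECANCELED. A minimal sketch of that behaviour, assuming liburing is available:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[16];
        int i;

        if (io_uring_queue_init(8, &ring, 0))
                return 1;

        /* first link: read from an invalid fd, fails with -EBADF */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, -1, buf, sizeof(buf), 0);
        sqe->flags |= IOSQE_IO_LINK;
        io_uring_sqe_set_data(sqe, (void *)1);

        /* second link: never runs, completes with -ECANCELED via the fail path */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data(sqe, (void *)2);

        io_uring_submit(&ring);

        for (i = 0; i < 2; i++) {
                if (io_uring_wait_cqe(&ring, &cqe))
                        break;
                printf("req %lu: res %d\n",
                       (unsigned long)io_uring_cqe_get_data(cqe), cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}
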
+
+/*
+ * Don't initialise the fields below on every allocation, but do that in
+ * advance and keep them valid across allocations.
+ */
+static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
+{
+       req->ctx = ctx;
+       req->link = NULL;
+       req->async_data = NULL;
+       /* not necessary, but safer to zero */
+       req->result = 0;
+}
+
+static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
+                                       struct io_submit_state *state)
+{
+       spin_lock(&ctx->completion_lock);
+       list_splice_init(&ctx->locked_free_list, &state->free_list);
+       ctx->locked_free_nr = 0;
+       spin_unlock(&ctx->completion_lock);
+}
+
+/* Returns true IFF there are requests in the cache */
+static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
+{
+       struct io_submit_state *state = &ctx->submit_state;
+       int nr;
+
+       /*
+        * If we have more than a batch's worth of requests in our IRQ side
+        * locked cache, grab the lock and move them over to our submission
+        * side cache.
+        */
+       if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
+               io_flush_cached_locked_reqs(ctx, state);
+
+       nr = state->free_reqs;
+       while (!list_empty(&state->free_list)) {
+               struct io_kiocb *req = list_first_entry(&state->free_list,
+                                       struct io_kiocb, inflight_entry);
+
+               list_del(&req->inflight_entry);
+               state->reqs[nr++] = req;
+               if (nr == ARRAY_SIZE(state->reqs))
+                       break;
+       }
+
+       state->free_reqs = nr;
+       return nr != 0;
+}
+
+/*
+ * A request might get retired back into the request caches even before opcode
+ * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
+ * Because of that, io_alloc_req() should be called only under ->uring_lock
+ * and with extra caution to not get a request that is still worked on.
+ */
+static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->uring_lock)
+{
+       struct io_submit_state *state = &ctx->submit_state;
+       gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+       int ret, i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
+
+       if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+               goto got_req;
+
+       ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
+                                   state->reqs);
+
+       /*
+        * Bulk alloc is all-or-nothing. If we fail to get a batch,
+        * retry single alloc to be on the safe side.
+        */
+       if (unlikely(ret <= 0)) {
+               state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+               if (!state->reqs[0])
+                       return NULL;
+               ret = 1;
        }
 
+       for (i = 0; i < ret; i++)
+               io_preinit_req(state->reqs[i], ctx);
+       state->free_reqs = ret;
+got_req:
        state->free_reqs--;
        return state->reqs[state->free_reqs];
-fallback:
-       return io_get_fallback_req(ctx);
 }
 
-static inline void io_put_file(struct io_kiocb *req, struct file *file,
-                         bool fixed)
+static inline void io_put_file(struct file *file)
 {
-       if (fixed)
-               percpu_ref_put(req->fixed_file_refs);
-       else
+       if (file)
                fput(file);
 }
 
 static void io_dismantle_req(struct io_kiocb *req)
 {
-       io_clean_op(req);
+       unsigned int flags = req->flags;
 
-       if (req->async_data)
+       if (io_req_needs_clean(req))
+               io_clean_op(req);
+       if (!(flags & REQ_F_FIXED_FILE))
+               io_put_file(req->file);
+       if (req->fixed_rsrc_refs)
+               percpu_ref_put(req->fixed_rsrc_refs);
+       if (req->async_data) {
                kfree(req->async_data);
-       if (req->file)
-               io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-
-       io_req_clean_work(req);
+               req->async_data = NULL;
+       }
 }
 
 static void __io_free_req(struct io_kiocb *req)
 {
-       struct io_uring_task *tctx = req->task->io_uring;
        struct io_ring_ctx *ctx = req->ctx;
 
        io_dismantle_req(req);
+       io_put_task(req->task, 1);
 
-       percpu_counter_dec(&tctx->inflight);
-       if (atomic_read(&tctx->in_idle))
-               wake_up(&tctx->wait);
-       put_task_struct(req->task);
+       spin_lock(&ctx->completion_lock);
+       list_add(&req->inflight_entry, &ctx->locked_free_list);
+       ctx->locked_free_nr++;
+       spin_unlock(&ctx->completion_lock);
 
-       if (likely(!io_is_fallback_req(req)))
-               kmem_cache_free(req_cachep, req);
-       else
-               clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
        percpu_ref_put(&ctx->refs);
 }
 
-static void io_kill_linked_timeout(struct io_kiocb *req)
+static inline void io_remove_next_linked(struct io_kiocb *req)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_kiocb *link;
-       bool cancelled = false;
-       unsigned long flags;
+       struct io_kiocb *nxt = req->link;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
-       link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
-                                       link_list);
-       /*
-        * Can happen if a linked timeout fired and link had been like
-        * req -> link t-out -> link t-out [-> ...]
-        */
-       if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
-               struct io_timeout_data *io = link->async_data;
-               int ret;
-
-               list_del_init(&link->link_list);
-               ret = hrtimer_try_to_cancel(&io->timer);
-               if (ret != -1) {
-                       io_cqring_fill_event(link, -ECANCELED);
-                       io_commit_cqring(ctx);
-                       cancelled = true;
-               }
-       }
-       req->flags &= ~REQ_F_LINK_TIMEOUT;
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
-       if (cancelled) {
-               io_cqring_ev_posted(ctx);
-               io_put_req(link);
-       }
+       req->link = nxt->link;
+       nxt->link = NULL;
 }
 
-static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
+static bool io_kill_linked_timeout(struct io_kiocb *req)
+       __must_hold(&req->ctx->completion_lock)
+       __must_hold(&req->ctx->timeout_lock)
 {
-       struct io_kiocb *nxt;
+       struct io_kiocb *link = req->link;
 
-       /*
-        * The list should never be empty when we are called here. But could
-        * potentially happen if the chain is messed up, check to be on the
-        * safe side.
-        */
-       if (unlikely(list_empty(&req->link_list)))
-               return NULL;
+       if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
+               struct io_timeout_data *io = link->async_data;
 
-       nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
-       list_del_init(&req->link_list);
-       if (!list_empty(&nxt->link_list))
-               nxt->flags |= REQ_F_LINK_HEAD;
-       return nxt;
+               io_remove_next_linked(req);
+               link->timeout.head = NULL;
+               if (hrtimer_try_to_cancel(&io->timer) != -1) {
+                       list_del(&link->timeout.list);
+                       io_fill_cqe_req(link, -ECANCELED, 0);
+                       io_put_req_deferred(link);
+                       return true;
+               }
+       }
+       return false;
 }
 
-/*
- * Called if REQ_F_LINK_HEAD is set, and we fail the head request
- */
 static void io_fail_links(struct io_kiocb *req)
+       __must_hold(&req->ctx->completion_lock)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-       unsigned long flags;
+       struct io_kiocb *nxt, *link = req->link;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
-       while (!list_empty(&req->link_list)) {
-               struct io_kiocb *link = list_first_entry(&req->link_list,
-                                               struct io_kiocb, link_list);
+       req->link = NULL;
+       while (link) {
+               long res = -ECANCELED;
 
-               list_del_init(&link->link_list);
-               trace_io_uring_fail_link(req, link);
+               if (link->flags & REQ_F_FAIL)
+                       res = link->result;
 
-               io_cqring_fill_event(link, -ECANCELED);
+               nxt = link->link;
+               link->link = NULL;
 
-               /*
-                * It's ok to free under spinlock as they're not linked anymore,
-                * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
-                * work.fs->lock.
-                */
-               if (link->flags & REQ_F_WORK_INITIALIZED)
-                       io_put_req_deferred(link, 2);
-               else
-                       io_double_put_req(link);
+               trace_io_uring_fail_link(req, link);
+               io_fill_cqe_req(link, res, 0);
+               io_put_req_deferred(link);
+               link = nxt;
        }
+}
 
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+static bool io_disarm_next(struct io_kiocb *req)
+       __must_hold(&req->ctx->completion_lock)
+{
+       bool posted = false;
 
-       io_cqring_ev_posted(ctx);
+       if (req->flags & REQ_F_ARM_LTIMEOUT) {
+               struct io_kiocb *link = req->link;
+
+               req->flags &= ~REQ_F_ARM_LTIMEOUT;
+               if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
+                       io_remove_next_linked(req);
+                       io_fill_cqe_req(link, -ECANCELED, 0);
+                       io_put_req_deferred(link);
+                       posted = true;
+               }
+       } else if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->timeout_lock);
+               posted = io_kill_linked_timeout(req);
+               spin_unlock_irq(&ctx->timeout_lock);
+       }
+       if (unlikely((req->flags & REQ_F_FAIL) &&
+                    !(req->flags & REQ_F_HARDLINK))) {
+               posted |= (req->link != NULL);
+               io_fail_links(req);
+       }
+       return posted;
 }
 
 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
 {
-       req->flags &= ~REQ_F_LINK_HEAD;
-       if (req->flags & REQ_F_LINK_TIMEOUT)
-               io_kill_linked_timeout(req);
+       struct io_kiocb *nxt;
 
        /*
         * If LINK is set, we have dependent requests in this chain. If we
@@ -2034,28 +2118,112 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
         * dependencies to the next request. In case of failure, fail the rest
         * of the chain.
         */
-       if (likely(!(req->flags & REQ_F_FAIL_LINK)))
-               return io_req_link_next(req);
-       io_fail_links(req);
-       return NULL;
+       if (req->flags & IO_DISARM_MASK) {
+               struct io_ring_ctx *ctx = req->ctx;
+               bool posted;
+
+               spin_lock(&ctx->completion_lock);
+               posted = io_disarm_next(req);
+               if (posted)
+                       io_commit_cqring(req->ctx);
+               spin_unlock(&ctx->completion_lock);
+               if (posted)
+                       io_cqring_ev_posted(ctx);
+       }
+       nxt = req->link;
+       req->link = NULL;
+       return nxt;
 }
 
-static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
+static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 {
-       if (likely(!(req->flags & REQ_F_LINK_HEAD)))
+       if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
                return NULL;
        return __io_req_find_next(req);
 }
 
-static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
+static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
+{
+       if (!ctx)
+               return;
+       if (*locked) {
+               if (ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
+               mutex_unlock(&ctx->uring_lock);
+               *locked = false;
+       }
+       percpu_ref_put(&ctx->refs);
+}
+
+static void tctx_task_work(struct callback_head *cb)
+{
+       bool locked = false;
+       struct io_ring_ctx *ctx = NULL;
+       struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
+                                                 task_work);
+
+       while (1) {
+               struct io_wq_work_node *node;
+
+               if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
+
+               spin_lock_irq(&tctx->task_lock);
+               node = tctx->task_list.first;
+               INIT_WQ_LIST(&tctx->task_list);
+               if (!node)
+                       tctx->task_running = false;
+               spin_unlock_irq(&tctx->task_lock);
+               if (!node)
+                       break;
+
+               do {
+                       struct io_wq_work_node *next = node->next;
+                       struct io_kiocb *req = container_of(node, struct io_kiocb,
+                                                           io_task_work.node);
+
+                       if (req->ctx != ctx) {
+                               ctx_flush_and_put(ctx, &locked);
+                               ctx = req->ctx;
+                               /* if not contended, grab and improve batching */
+                               locked = mutex_trylock(&ctx->uring_lock);
+                               percpu_ref_get(&ctx->refs);
+                       }
+                       req->io_task_work.func(req, &locked);
+                       node = next;
+               } while (node);
+
+               cond_resched();
+       }
+
+       ctx_flush_and_put(ctx, &locked);
+
+       /* relaxed read is enough as only the task itself sets ->in_idle */
+       if (unlikely(atomic_read(&tctx->in_idle)))
+               io_uring_drop_tctx_refs(current);
+}
+
+static void io_req_task_work_add(struct io_kiocb *req)
 {
        struct task_struct *tsk = req->task;
-       struct io_ring_ctx *ctx = req->ctx;
+       struct io_uring_task *tctx = tsk->io_uring;
        enum task_work_notify_mode notify;
-       int ret;
+       struct io_wq_work_node *node;
+       unsigned long flags;
+       bool running;
+
+       WARN_ON_ONCE(!tctx);
+
+       spin_lock_irqsave(&tctx->task_lock, flags);
+       wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+       running = tctx->task_running;
+       if (!running)
+               tctx->task_running = true;
+       spin_unlock_irqrestore(&tctx->task_lock, flags);
 
-       if (tsk->flags & PF_EXITING)
-               return -ESRCH;
+       /* task_work already pending, we're done */
+       if (running)
+               return;
 
        /*
         * SQPOLL kernel thread doesn't need notification, just a wakeup. For
@@ -2063,85 +2231,68 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
         * processing task_work. There's no reliable way to tell if TWA_RESUME
         * will do the job.
         */
-       notify = TWA_NONE;
-       if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
-               notify = TWA_SIGNAL;
-
-       ret = task_work_add(tsk, &req->task_work, notify);
-       if (!ret)
+       notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
+       if (!task_work_add(tsk, &tctx->task_work, notify)) {
                wake_up_process(tsk);
+               return;
+       }
 
-       return ret;
-}
-
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-
-       spin_lock_irq(&ctx->completion_lock);
-       io_cqring_fill_event(req, error);
-       io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_lock_irqsave(&tctx->task_lock, flags);
+       tctx->task_running = false;
+       node = tctx->task_list.first;
+       INIT_WQ_LIST(&tctx->task_list);
+       spin_unlock_irqrestore(&tctx->task_lock, flags);
 
-       io_cqring_ev_posted(ctx);
-       req_set_fail_links(req);
-       io_double_put_req(req);
+       while (node) {
+               req = container_of(node, struct io_kiocb, io_task_work.node);
+               node = node->next;
+               if (llist_add(&req->io_task_work.fallback_node,
+                             &req->ctx->fallback_llist))
+                       schedule_delayed_work(&req->ctx->fallback_work, 1);
+       }
 }
 
-static void io_req_task_cancel(struct callback_head *cb)
+static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
        struct io_ring_ctx *ctx = req->ctx;
 
-       mutex_lock(&ctx->uring_lock);
-       __io_req_task_cancel(req, -ECANCELED);
-       mutex_unlock(&ctx->uring_lock);
-       percpu_ref_put(&ctx->refs);
+       /* not needed for normal modes, but SQPOLL depends on it */
+       io_tw_lock(ctx, locked);
+       io_req_complete_failed(req, req->result);
 }
 
-static void __io_req_task_submit(struct io_kiocb *req)
+static void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       mutex_lock(&ctx->uring_lock);
-       if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
-               __io_queue_sqe(req, NULL);
+       io_tw_lock(ctx, locked);
+       /* req->task == current here, checking PF_EXITING is safe */
+       if (likely(!(req->task->flags & PF_EXITING)))
+               __io_queue_sqe(req);
        else
-               __io_req_task_cancel(req, -EFAULT);
-       mutex_unlock(&ctx->uring_lock);
-
-       if (ctx->flags & IORING_SETUP_SQPOLL)
-               io_sq_thread_drop_mm();
+               io_req_complete_failed(req, -EFAULT);
 }
 
-static void io_req_task_submit(struct callback_head *cb)
+static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-       struct io_ring_ctx *ctx = req->ctx;
-
-       __io_req_task_submit(req);
-       percpu_ref_put(&ctx->refs);
+       req->result = ret;
+       req->io_task_work.func = io_req_task_cancel;
+       io_req_task_work_add(req);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
-       int ret;
-
-       init_task_work(&req->task_work, io_req_task_submit);
-       percpu_ref_get(&req->ctx->refs);
-
-       ret = io_req_task_work_add(req, true);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
+       req->io_task_work.func = io_req_task_submit;
+       io_req_task_work_add(req);
+}
 
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+       req->io_task_work.func = io_queue_async_work;
+       io_req_task_work_add(req);
 }
 
-static void io_queue_next(struct io_kiocb *req)
+static inline void io_queue_next(struct io_kiocb *req)
 {
        struct io_kiocb *nxt = io_req_find_next(req);
 
@@ -2155,153 +2306,118 @@ static void io_free_req(struct io_kiocb *req)
        __io_free_req(req);
 }
 
-struct req_batch {
-       void *reqs[IO_IOPOLL_BATCH];
-       int to_free;
+static void io_free_req_work(struct io_kiocb *req, bool *locked)
+{
+       io_free_req(req);
+}
 
+struct req_batch {
        struct task_struct      *task;
        int                     task_refs;
+       int                     ctx_refs;
 };
 
 static inline void io_init_req_batch(struct req_batch *rb)
 {
-       rb->to_free = 0;
        rb->task_refs = 0;
+       rb->ctx_refs = 0;
        rb->task = NULL;
 }
 
-static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
-                                     struct req_batch *rb)
+static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
+                                    struct req_batch *rb)
 {
-       kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
-       percpu_ref_put_many(&ctx->refs, rb->to_free);
-       rb->to_free = 0;
+       if (rb->ctx_refs)
+               percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
+       if (rb->task)
+               io_put_task(rb->task, rb->task_refs);
 }
 
-static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
-                                    struct req_batch *rb)
+static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
+                             struct io_submit_state *state)
 {
-       if (rb->to_free)
-               __io_req_free_batch_flush(ctx, rb);
-       if (rb->task) {
-               struct io_uring_task *tctx = rb->task->io_uring;
+       io_queue_next(req);
+       io_dismantle_req(req);
 
-               percpu_counter_sub(&tctx->inflight, rb->task_refs);
-               if (atomic_read(&tctx->in_idle))
-                       wake_up(&tctx->wait);
-               put_task_struct_many(rb->task, rb->task_refs);
-               rb->task = NULL;
+       if (req->task != rb->task) {
+               if (rb->task)
+                       io_put_task(rb->task, rb->task_refs);
+               rb->task = req->task;
+               rb->task_refs = 0;
        }
+       rb->task_refs++;
+       rb->ctx_refs++;
+
+       if (state->free_reqs != ARRAY_SIZE(state->reqs))
+               state->reqs[state->free_reqs++] = req;
+       else
+               list_add(&req->inflight_entry, &state->free_list);
 }
 
-static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+static void io_submit_flush_completions(struct io_ring_ctx *ctx)
+       __must_hold(&ctx->uring_lock)
 {
-       if (unlikely(io_is_fallback_req(req))) {
-               io_free_req(req);
-               return;
+       struct io_submit_state *state = &ctx->submit_state;
+       int i, nr = state->compl_nr;
+       struct req_batch rb;
+
+       spin_lock(&ctx->completion_lock);
+       for (i = 0; i < nr; i++) {
+               struct io_kiocb *req = state->compl_reqs[i];
+
+               __io_fill_cqe(ctx, req->user_data, req->result,
+                             req->compl.cflags);
        }
-       if (req->flags & REQ_F_LINK_HEAD)
-               io_queue_next(req);
+       io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
+       io_cqring_ev_posted(ctx);
 
-       if (req->task != rb->task) {
-               if (rb->task) {
-                       struct io_uring_task *tctx = rb->task->io_uring;
+       io_init_req_batch(&rb);
+       for (i = 0; i < nr; i++) {
+               struct io_kiocb *req = state->compl_reqs[i];
 
-                       percpu_counter_sub(&tctx->inflight, rb->task_refs);
-                       if (atomic_read(&tctx->in_idle))
-                               wake_up(&tctx->wait);
-                       put_task_struct_many(rb->task, rb->task_refs);
-               }
-               rb->task = req->task;
-               rb->task_refs = 0;
+               if (req_ref_put_and_test(req))
+                       io_req_free_batch(&rb, req, &ctx->submit_state);
        }
-       rb->task_refs++;
 
-       io_dismantle_req(req);
-       rb->reqs[rb->to_free++] = req;
-       if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
-               __io_req_free_batch_flush(req->ctx, rb);
+       io_req_free_batch_finish(ctx, &rb);
+       state->compl_nr = 0;
 }
 
 /*
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
  */
-static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
+static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
 {
        struct io_kiocb *nxt = NULL;
 
-       if (refcount_dec_and_test(&req->refs)) {
+       if (req_ref_put_and_test(req)) {
                nxt = io_req_find_next(req);
                __io_free_req(req);
        }
        return nxt;
 }
 
-static void io_put_req(struct io_kiocb *req)
+static inline void io_put_req(struct io_kiocb *req)
 {
-       if (refcount_dec_and_test(&req->refs))
+       if (req_ref_put_and_test(req))
                io_free_req(req);
 }
 
-static void io_put_req_deferred_cb(struct callback_head *cb)
-{
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
-       io_free_req(req);
-}
-
-static void io_free_req_deferred(struct io_kiocb *req)
+static inline void io_put_req_deferred(struct io_kiocb *req)
 {
-       int ret;
-
-       init_task_work(&req->task_work, io_put_req_deferred_cb);
-       ret = io_req_task_work_add(req, true);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
+       if (req_ref_put_and_test(req)) {
+               req->io_task_work.func = io_free_req_work;
+               io_req_task_work_add(req);
        }
 }
 
-static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
-{
-       if (refcount_sub_and_test(refs, &req->refs))
-               io_free_req_deferred(req);
-}
-
-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
-{
-       struct io_kiocb *nxt;
-
-       /*
-        * A ref is owned by io-wq in which context we're. So, if that's the
-        * last one, it's safe to steal next work. False negatives are Ok,
-        * it just will be re-punted async in io_put_work()
-        */
-       if (refcount_read(&req->refs) != 1)
-               return NULL;
-
-       nxt = io_req_find_next(req);
-       return nxt ? &nxt->work : NULL;
-}
-
-static void io_double_put_req(struct io_kiocb *req)
-{
-       /* drop both submit and complete references */
-       if (refcount_sub_and_test(2, &req->refs))
-               io_free_req(req);
-}
-
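
The old refcount_t helpers and the deferred-put-by-refs variant are gone; requests now use req_ref_put_and_test(), and deferred frees are routed through io_req_task_work_add() rather than the io-wq task. As a rough userspace analogy only (none of the kernel helpers are assumed), the put-and-test pattern with C11 atomics looks like this:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        /* payload would live here */
};

static struct obj *obj_new(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                atomic_store_explicit(&o->refs, 1, memory_order_relaxed);
        return o;
}

/* the last put frees the object, like req_ref_put_and_test() plus free */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1)
                free(o);
}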
 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
-       struct io_rings *rings = ctx->rings;
-
        /* See comment at the top of this file */
        smp_rmb();
-       return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
+       return __io_cqring_events(ctx);
 }
 
 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -2327,38 +2443,23 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 {
        struct io_buffer *kbuf;
 
+       if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+               return 0;
        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
        return io_put_kbuf(req, kbuf);
 }
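
io_put_rw_kbuf() now bails out early unless REQ_F_BUFFER_SELECTED is set, i.e. unless the request consumed a buffer from a provided-buffer group. For context, that is the feature driven from userspace by IOSQE_BUFFER_SELECT; a sketch assuming liburing, with an arbitrary test file, group id and buffer count:

#include <liburing.h>
#include <stdio.h>
#include <fcntl.h>

#define BGID   1
#define BSIZE  4096
#define NBUFS  4

static char bufs[NBUFS][BSIZE];

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int fd = open("/etc/hostname", O_RDONLY);       /* arbitrary test file */

        if (fd < 0 || io_uring_queue_init(8, &ring, 0))
                return 1;

        /* hand NBUFS buffers of BSIZE bytes to the kernel under group BGID */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_provide_buffers(sqe, bufs, BSIZE, NBUFS, BGID, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);

        /* let the kernel pick a buffer from the group at completion time */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, NULL, BSIZE, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        if (cqe->flags & IORING_CQE_F_BUFFER)
                printf("read %d bytes into buffer id %u\n", cqe->res,
                       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}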
 
 static inline bool io_run_task_work(void)
 {
-       /*
-        * Not safe to run on exiting task, and the task_work handling will
-        * not add work to such a task.
-        */
-       if (unlikely(current->flags & PF_EXITING))
-               return false;
-       if (current->task_works) {
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
                __set_current_state(TASK_RUNNING);
-               task_work_run();
+               tracehook_notify_signal();
                return true;
        }
 
        return false;
 }
 
-static void io_iopoll_queue(struct list_head *again)
-{
-       struct io_kiocb *req;
-
-       do {
-               req = list_first_entry(again, struct io_kiocb, iopoll_entry);
-               list_del(&req->iopoll_entry);
-               __io_complete_rw(req, -EAGAIN, 0, NULL);
-       } while (!list_empty(again));
-}
-
 /*
  * Find and free completed poll iocbs
  */
@@ -2367,41 +2468,25 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
        struct req_batch rb;
        struct io_kiocb *req;
-       LIST_HEAD(again);
 
        /* order with ->result store in io_complete_rw_iopoll() */
        smp_rmb();
 
        io_init_req_batch(&rb);
        while (!list_empty(done)) {
-               int cflags = 0;
-
-               req = list_first_entry(done, struct io_kiocb, iopoll_entry);
-               if (READ_ONCE(req->result) == -EAGAIN) {
-                       req->result = 0;
-                       req->iopoll_completed = 0;
-                       list_move_tail(&req->iopoll_entry, &again);
-                       continue;
-               }
-               list_del(&req->iopoll_entry);
-
-               if (req->flags & REQ_F_BUFFER_SELECTED)
-                       cflags = io_put_rw_kbuf(req);
+               req = list_first_entry(done, struct io_kiocb, inflight_entry);
+               list_del(&req->inflight_entry);
 
-               __io_cqring_fill_event(req, req->result, cflags);
+               io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req));
                (*nr_events)++;
 
-               if (refcount_dec_and_test(&req->refs))
-                       io_req_free_batch(&rb, req);
+               if (req_ref_put_and_test(req))
+                       io_req_free_batch(&rb, req, &ctx->submit_state);
        }
 
        io_commit_cqring(ctx);
-       if (ctx->flags & IORING_SETUP_SQPOLL)
-               io_cqring_ev_posted(ctx);
+       io_cqring_ev_posted_iopoll(ctx);
        io_req_free_batch_finish(ctx, &rb);
-
-       if (!list_empty(&again))
-               io_iopoll_queue(&again);
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -2410,17 +2495,16 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
        bool spin;
-       int ret;
 
        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list, and we're under the requested amount.
         */
-       spin = !ctx->poll_multi_file && *nr_events < min;
+       spin = !ctx->poll_multi_queue && *nr_events < min;
 
-       ret = 0;
-       list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) {
+       list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
                struct kiocb *kiocb = &req->rw.kiocb;
+               int ret;
 
                /*
                 * Move completed and retryable entries to our local lists.
@@ -2428,50 +2512,27 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed)) {
-                       list_move_tail(&req->iopoll_entry, &done);
+                       list_move_tail(&req->inflight_entry, &done);
                        continue;
                }
                if (!list_empty(&done))
                        break;
 
                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
-               if (ret < 0)
-                       break;
+               if (unlikely(ret < 0))
+                       return ret;
+               else if (ret)
+                       spin = false;
 
                /* iopoll may have completed current req */
                if (READ_ONCE(req->iopoll_completed))
-                       list_move_tail(&req->iopoll_entry, &done);
-
-               if (ret && spin)
-                       spin = false;
-               ret = 0;
+                       list_move_tail(&req->inflight_entry, &done);
        }
 
        if (!list_empty(&done))
                io_iopoll_complete(ctx, nr_events, &done);
 
-       return ret;
-}
-
-/*
- * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
- * non-spinning poll check - we'll still enter the driver poll loop, but only
- * as a non-spinning completion check.
- */
-static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                               long min)
-{
-       while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
-               int ret;
-
-               ret = io_do_iopoll(ctx, nr_events, min);
-               if (ret < 0)
-                       return ret;
-               if (*nr_events >= min)
-                       return 0;
-       }
-
-       return 1;
+       return 0;
 }
 
 /*
@@ -2509,7 +2570,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 {
        unsigned int nr_events = 0;
-       int iters = 0, ret = 0;
+       int ret = 0;
 
        /*
         * We disallow the app entering submit/complete with polling, but we
@@ -2517,17 +2578,16 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
         * that got punted to a workqueue.
         */
        mutex_lock(&ctx->uring_lock);
+       /*
+        * Don't enter poll loop if we already have events pending.
+        * If we do, we can potentially be spinning for commands that
+        * already triggered a CQE (eg in error).
+        */
+       if (test_bit(0, &ctx->check_cq_overflow))
+               __io_cqring_overflow_flush(ctx, false);
+       if (io_cqring_events(ctx))
+               goto out;
        do {
-               /*
-                * Don't enter poll loop if we already have events pending.
-                * If we do, we can potentially be spinning for commands that
-                * already triggered a CQE (eg in error).
-                */
-               if (test_bit(0, &ctx->cq_check_overflow))
-                       __io_cqring_overflow_flush(ctx, false, NULL, NULL);
-               if (io_cqring_events(ctx))
-                       break;
-
                /*
                 * If a submit got punted to a workqueue, we can have the
                 * application entering polling for a command before it gets
@@ -2538,18 +2598,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 * forever, while the workqueue is stuck trying to acquire the
                 * very same mutex.
                 */
-               if (!(++iters & 7)) {
+               if (list_empty(&ctx->iopoll_list)) {
+                       u32 tail = ctx->cached_cq_tail;
+
                        mutex_unlock(&ctx->uring_lock);
                        io_run_task_work();
                        mutex_lock(&ctx->uring_lock);
-               }
-
-               ret = io_iopoll_getevents(ctx, &nr_events, min);
-               if (ret <= 0)
-                       break;
-               ret = 0;
-       } while (min && !nr_events && !need_resched());
 
+                       /* some requests don't go through iopoll_list */
+                       if (tail != ctx->cached_cq_tail ||
+                           list_empty(&ctx->iopoll_list))
+                               break;
+               }
+               ret = io_do_iopoll(ctx, &nr_events, min);
+       } while (!ret && nr_events < min && !need_resched());
+out:
        mutex_unlock(&ctx->uring_lock);
        return ret;
 }
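
io_iopoll_check() is the loop that io_uring_enter(IORING_ENTER_GETEVENTS) runs for rings created with IORING_SETUP_IOPOLL: completions are found by polling the device rather than waiting for interrupts, which is why only O_DIRECT I/O is allowed on such rings. A sketch of the userspace side, assuming liburing and an O_DIRECT-capable device node (the path and buffer size are made up):

#define _GNU_SOURCE
#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        void *buf;
        int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;
        if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, 4096, 0);
        io_uring_submit(&ring);

        /* this wait spins the iopoll loop above instead of sleeping on IRQs */
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}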
@@ -2561,79 +2624,129 @@ static void kiocb_end_write(struct io_kiocb *req)
         * thread.
         */
        if (req->flags & REQ_F_ISREG) {
-               struct inode *inode = file_inode(req->file);
+               struct super_block *sb = file_inode(req->file)->i_sb;
 
-               __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+               __sb_writers_acquired(sb, SB_FREEZE_WRITE);
+               sb_end_write(sb);
        }
-       file_end_write(req->file);
-}
-
-static void io_complete_rw_common(struct kiocb *kiocb, long res,
-                                 struct io_comp_state *cs)
-{
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
-       int cflags = 0;
-
-       if (kiocb->ki_flags & IOCB_WRITE)
-               kiocb_end_write(req);
-
-       if (res != req->result)
-               req_set_fail_links(req);
-       if (req->flags & REQ_F_BUFFER_SELECTED)
-               cflags = io_put_rw_kbuf(req);
-       __io_req_complete(req, res, cflags, cs);
 }
 
 #ifdef CONFIG_BLOCK
-static bool io_resubmit_prep(struct io_kiocb *req, int error)
+static bool io_resubmit_prep(struct io_kiocb *req)
 {
-       req_set_fail_links(req);
-       return false;
+       struct io_async_rw *rw = req->async_data;
+
+       if (!rw)
+               return !io_req_prep_async(req);
+       iov_iter_restore(&rw->iter, &rw->iter_state);
+       return true;
 }
-#endif
 
-static bool io_rw_reissue(struct io_kiocb *req, long res)
+static bool io_rw_should_reissue(struct io_kiocb *req)
 {
-#ifdef CONFIG_BLOCK
        umode_t mode = file_inode(req->file)->i_mode;
-       int ret;
+       struct io_ring_ctx *ctx = req->ctx;
 
        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
-       if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+       if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
+           !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
-       if (percpu_ref_is_dying(&req->ctx->refs))
+       if (percpu_ref_is_dying(&ctx->refs))
+               return false;
+       /*
+        * Play it safe and assume not safe to re-import and reissue if we're
+        * not in the original thread group (or in task context).
+        */
+       if (!same_thread_group(req->task, current) || !in_task())
                return false;
+       return true;
+}
+#else
+static bool io_resubmit_prep(struct io_kiocb *req)
+{
+       return false;
+}
+static bool io_rw_should_reissue(struct io_kiocb *req)
+{
+       return false;
+}
+#endif
+
+static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+{
+       if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
+               kiocb_end_write(req);
+               fsnotify_modify(req->file);
+       } else {
+               fsnotify_access(req->file);
+       }
+       if (res != req->result) {
+               if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
+                   io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return true;
+               }
+               req_set_fail(req);
+               req->result = res;
+       }
+       return false;
+}
 
-       ret = io_sq_thread_acquire_mm(req->ctx, req);
+static inline int io_fixup_rw_res(struct io_kiocb *req, unsigned res)
+{
+       struct io_async_rw *io = req->async_data;
 
-       if (io_resubmit_prep(req, ret)) {
-               refcount_inc(&req->refs);
-               io_queue_async_work(req);
-               return true;
+       /* add previously done IO, if any */
+       if (io && io->bytes_done > 0) {
+               if (res < 0)
+                       res = io->bytes_done;
+               else
+                       res += io->bytes_done;
        }
+       return res;
+}
 
-#endif
-       return false;
+static void io_req_task_complete(struct io_kiocb *req, bool *locked)
+{
+       unsigned int cflags = io_put_rw_kbuf(req);
+       int res = req->result;
+
+       if (*locked) {
+               struct io_ring_ctx *ctx = req->ctx;
+               struct io_submit_state *state = &ctx->submit_state;
+
+               io_req_complete_state(req, res, cflags);
+               state->compl_reqs[state->compl_nr++] = req;
+               if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
+                       io_submit_flush_completions(ctx);
+       } else {
+               io_req_complete_post(req, res, cflags);
+       }
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
-                            struct io_comp_state *cs)
+                            unsigned int issue_flags)
 {
-       if (!io_rw_reissue(req, res))
-               io_complete_rw_common(&req->rw.kiocb, res, cs);
+       if (__io_complete_rw_common(req, res))
+               return;
+       __io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req));
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-       __io_complete_rw(req, res, res2, NULL);
+       if (__io_complete_rw_common(req, res))
+               return;
+       req->result = io_fixup_rw_res(req, res);
+       req->io_task_work.func = io_req_task_complete;
+       io_req_task_work_add(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2642,12 +2755,15 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
-
-       if (res != -EAGAIN && res != req->result)
-               req_set_fail_links(req);
+       if (unlikely(res != req->result)) {
+               if (res == -EAGAIN && io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return;
+               }
+       }
 
        WRITE_ONCE(req->result, res);
-       /* order with io_poll_complete() checking ->result */
+       /* order with io_iopoll_complete() checking ->result */
        smp_wmb();
        WRITE_ONCE(req->iopoll_completed, 1);
 }
@@ -2655,12 +2771,17 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 /*
  * After the iocb has been issued, it's safe to be found on the poll list.
  * Adding the kiocb to the list AFTER submission ensures that we don't
- * find it from a io_iopoll_getevents() thread before the issuer is done
+ * find it from a io_do_iopoll() thread before the issuer is done
  * accessing the kiocb cookie.
  */
 static void io_iopoll_req_issued(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       const bool in_async = io_wq_current_is_worker();
+
+       /* workqueue context doesn't hold uring_lock, grab it now */
+       if (unlikely(in_async))
+               mutex_lock(&ctx->uring_lock);
 
        /*
         * Track whether we have multiple files in our lists. This will impact
@@ -2668,14 +2789,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
         * different devices.
         */
        if (list_empty(&ctx->iopoll_list)) {
-               ctx->poll_multi_file = false;
-       } else if (!ctx->poll_multi_file) {
+               ctx->poll_multi_queue = false;
+       } else if (!ctx->poll_multi_queue) {
                struct io_kiocb *list_req;
+               unsigned int queue_num0, queue_num1;
 
                list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
-                                               iopoll_entry);
-               if (list_req->file != req->file)
-                       ctx->poll_multi_file = true;
+                                               inflight_entry);
+
+               if (list_req->file != req->file) {
+                       ctx->poll_multi_queue = true;
+               } else {
+                       queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
+                       queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
+                       if (queue_num0 != queue_num1)
+                               ctx->poll_multi_queue = true;
+               }
        }
 
        /*
@@ -2683,61 +2812,28 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
         * it to the front so we find it first.
         */
        if (READ_ONCE(req->iopoll_completed))
-               list_add(&req->iopoll_entry, &ctx->iopoll_list);
+               list_add(&req->inflight_entry, &ctx->iopoll_list);
        else
-               list_add_tail(&req->iopoll_entry, &ctx->iopoll_list);
-
-       if ((ctx->flags & IORING_SETUP_SQPOLL) &&
-           wq_has_sleeper(&ctx->sq_data->wait))
-               wake_up(&ctx->sq_data->wait);
-}
-
-static void __io_state_file_put(struct io_submit_state *state)
-{
-       if (state->has_refs)
-               fput_many(state->file, state->has_refs);
-       state->file = NULL;
-}
-
-static inline void io_state_file_put(struct io_submit_state *state)
-{
-       if (state->file)
-               __io_state_file_put(state);
-}
+               list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
 
-/*
- * Get as many references to a file as we have IOs left in this submission,
- * assuming most submissions are for one file, or at least that each file
- * has more than one submission.
- */
-static struct file *__io_file_get(struct io_submit_state *state, int fd)
-{
-       if (!state)
-               return fget(fd);
+       if (unlikely(in_async)) {
+               /*
+                * If IORING_SETUP_SQPOLL is enabled, sqes are either handled

+                * in sq thread task context or in io worker task context. If
+                * current task context is sq thread, we don't need to check
+                * whether should wake up sq thread.
+                */
+               if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+                   wq_has_sleeper(&ctx->sq_data->wait))
+                       wake_up(&ctx->sq_data->wait);
 
-       if (state->file) {
-               if (state->fd == fd) {
-                       state->has_refs--;
-                       return state->file;
-               }
-               __io_state_file_put(state);
+               mutex_unlock(&ctx->uring_lock);
        }
-       state->file = fget_many(fd, state->ios_left);
-       if (!state->file)
-               return NULL;
-
-       state->fd = fd;
-       state->has_refs = state->ios_left - 1;
-       return state->file;
 }
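
The SQPOLL wakeup above only applies to rings created with IORING_SETUP_SQPOLL, where a kernel thread submits on the application's behalf and sleeps once it has been idle for sq_thread_idle milliseconds. For reference, such a ring is created as below; a sketch assuming liburing (older kernels of this vintage may additionally require CAP_SYS_NICE for SQPOLL):

#include <liburing.h>
#include <string.h>

/* a minimal sketch: ring creation with a kernel submission thread */
static int setup_sqpoll_ring(struct io_uring *ring)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        p.sq_thread_idle = 2000;        /* ms before the sq thread sleeps */

        /*
         * After this, io_uring_submit() normally just fills the SQ ring;
         * the kernel thread picks entries up, and waking it back up after
         * idle is what the wq_has_sleeper()/wake_up() handling above does.
         */
        return io_uring_queue_init_params(8, ring, &p);
}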
 
 static bool io_bdev_nowait(struct block_device *bdev)
 {
-#ifdef CONFIG_BLOCK
        return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
-#else
-       return true;
-#endif
 }
 
 /*
@@ -2745,19 +2841,21 @@ static bool io_bdev_nowait(struct block_device *bdev)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file, int rw)
+static bool __io_file_supports_nowait(struct file *file, int rw)
 {
        umode_t mode = file_inode(file)->i_mode;
 
        if (S_ISBLK(mode)) {
-               if (io_bdev_nowait(file->f_inode->i_bdev))
+               if (IS_ENABLED(CONFIG_BLOCK) &&
+                   io_bdev_nowait(I_BDEV(file->f_mapping->host)))
                        return true;
                return false;
        }
        if (S_ISSOCK(mode))
                return true;
        if (S_ISREG(mode)) {
-               if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
+               if (IS_ENABLED(CONFIG_BLOCK) &&
+                   io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
                    file->f_op != &io_uring_fops)
                        return true;
                return false;
@@ -2776,20 +2874,36 @@ static bool io_file_supports_async(struct file *file, int rw)
        return file->f_op->write_iter != NULL;
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
+{
+       if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
+               return true;
+       else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
+               return true;
+
+       return __io_file_supports_nowait(req->file, rw);
+}
+
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     int rw)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
+       struct file *file = req->file;
        unsigned ioprio;
        int ret;
 
-       if (S_ISREG(file_inode(req->file)->i_mode))
+       if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
                req->flags |= REQ_F_ISREG;
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
-       if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
-               req->flags |= REQ_F_CUR_POS;
-               kiocb->ki_pos = req->file->f_pos;
+       if (kiocb->ki_pos == -1) {
+               if (!(file->f_mode & FMODE_STREAM)) {
+                       req->flags |= REQ_F_CUR_POS;
+                       kiocb->ki_pos = file->f_pos;
+               } else {
+                       kiocb->ki_pos = 0;
+               }
        }
        kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -2797,6 +2911,15 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(ret))
                return ret;
 
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
+               req->flags |= REQ_F_NOWAIT;
+
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
@@ -2807,10 +2930,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        } else
                kiocb->ki_ioprio = get_current_ioprio();
 
-       /* don't allow async punt if RWF_NOWAIT was requested */
-       if (kiocb->ki_flags & IOCB_NOWAIT)
-               req->flags |= REQ_F_NOWAIT;
-
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) ||
                    !kiocb->ki_filp->f_op->iopoll)
@@ -2825,9 +2944,24 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                kiocb->ki_complete = io_complete_rw;
        }
 
+       /* used for fixed read/write too - just read unconditionally */
+       req->buf_index = READ_ONCE(sqe->buf_index);
+       req->imu = NULL;
+
+       if (req->opcode == IORING_OP_READ_FIXED ||
+           req->opcode == IORING_OP_WRITE_FIXED) {
+               struct io_ring_ctx *ctx = req->ctx;
+               u16 index;
+
+               if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+                       return -EFAULT;
+               index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+               req->imu = ctx->user_bufs[index];
+               io_req_set_rsrc_node(req);
+       }
+
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
-       req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
 
@@ -2853,48 +2987,49 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 }
 
 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
-                      struct io_comp_state *cs)
+                      unsigned int issue_flags)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
-       struct io_async_rw *io = req->async_data;
-
-       /* add previously done IO, if any */
-       if (io && io->bytes_done > 0) {
-               if (ret < 0)
-                       ret = io->bytes_done;
-               else
-                       ret += io->bytes_done;
-       }
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
-       if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
-               __io_complete_rw(req, ret, 0, cs);
+       if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
+               __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
+
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               if (io_resubmit_prep(req)) {
+                       io_req_task_queue_reissue(req);
+               } else {
+                       unsigned int cflags = io_put_rw_kbuf(req);
+                       struct io_ring_ctx *ctx = req->ctx;
+
+                       ret = io_fixup_rw_res(req, ret);
+                       req_set_fail(req);
+                       if (!(issue_flags & IO_URING_F_NONBLOCK)) {
+                               mutex_lock(&ctx->uring_lock);
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                       }
+               }
+       }
 }
 
-static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
-                              struct iov_iter *iter)
+static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+                            struct io_mapped_ubuf *imu)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        size_t len = req->rw.len;
-       struct io_mapped_ubuf *imu;
-       u16 index, buf_index = req->buf_index;
+       u64 buf_end, buf_addr = req->rw.addr;
        size_t offset;
-       u64 buf_addr;
 
-       if (unlikely(buf_index >= ctx->nr_user_bufs))
-               return -EFAULT;
-       index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-       imu = &ctx->user_bufs[index];
-       buf_addr = req->rw.addr;
-
-       /* overflow */
-       if (buf_addr + len < buf_addr)
+       if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
                return -EFAULT;
        /* not inside the mapped region */
-       if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
+       if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
                return -EFAULT;
 
        /*
@@ -2939,7 +3074,14 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
                }
        }
 
-       return len;
+       return 0;
+}
+
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+{
+       if (WARN_ON_ONCE(!req->imu))
+               return -EFAULT;
+       return __io_import_fixed(req, rw, iter, req->imu);
 }
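
With req->imu now resolved at prep time, the fixed read/write path validates the registered buffer with an overflow-checked range test instead of doing the table lookup on every issue. From userspace this is the registered-buffer path: a sketch assuming liburing, with an arbitrary file and a single 4 KiB buffer:

#include <liburing.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <stdlib.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov;
        int fd = open("/etc/hostname", O_RDONLY);       /* arbitrary file */

        if (fd < 0 || io_uring_queue_init(8, &ring, 0))
                return 1;

        iov.iov_len = 4096;
        iov.iov_base = malloc(iov.iov_len);
        if (!iov.iov_base)
                return 1;
        /* pin the buffer once; the kernel finds it again via sqe->buf_index */
        if (io_uring_register_buffers(&ring, &iov, 1))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        /* the last argument is the registered buffer index (0 here) */
        io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}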
 
 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
@@ -3080,16 +3222,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
        return __io_iov_buffer_select(req, iov, needs_lock);
 }
 
-static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
-                                struct iovec **iovec, struct iov_iter *iter,
-                                bool needs_lock)
+static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
+                          struct iov_iter *iter, bool needs_lock)
 {
        void __user *buf = u64_to_user_ptr(req->rw.addr);
        size_t sqe_len = req->rw.len;
+       u8 opcode = req->opcode;
        ssize_t ret;
-       u8 opcode;
 
-       opcode = req->opcode;
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                *iovec = NULL;
                return io_import_fixed(req, rw, iter);
@@ -3114,10 +3254,8 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
 
        if (req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select(req, *iovec, needs_lock);
-               if (!ret) {
-                       ret = (*iovec)->iov_len;
-                       iov_iter_init(iter, rw, *iovec, 1, ret);
-               }
+               if (!ret)
+                       iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
                *iovec = NULL;
                return ret;
        }
@@ -3126,18 +3264,6 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
                              req->ctx->compat);
 }
 
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
-                              struct iovec **iovec, struct iov_iter *iter,
-                              bool needs_lock)
-{
-       struct io_async_rw *iorw = req->async_data;
-
-       if (!iorw)
-               return __io_import_iovec(rw, req, iovec, iter, needs_lock);
-       *iovec = NULL;
-       return 0;
-}
-
 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 {
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
@@ -3230,32 +3356,31 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
        }
 }
 
-static inline int __io_alloc_async_data(struct io_kiocb *req)
+static inline int io_alloc_async_data(struct io_kiocb *req)
 {
        WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
        req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
        return req->async_data == NULL;
 }
 
-static int io_alloc_async_data(struct io_kiocb *req)
-{
-       if (!io_op_defs[req->opcode].needs_async_data)
-               return 0;
-
-       return  __io_alloc_async_data(req);
-}
-
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             const struct iovec *fast_iov,
                             struct iov_iter *iter, bool force)
 {
-       if (!force && !io_op_defs[req->opcode].needs_async_data)
+       if (!force && !io_op_defs[req->opcode].needs_async_setup)
                return 0;
        if (!req->async_data) {
-               if (__io_alloc_async_data(req))
+               struct io_async_rw *iorw;
+
+               if (io_alloc_async_data(req)) {
+                       kfree(iovec);
                        return -ENOMEM;
+               }
 
                io_req_map_rw(req, iovec, fast_iov, iter);
+               iorw = req->async_data;
+               /* we've copied and mapped the iter, ensure state is saved */
+               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        }
        return 0;
 }
@@ -3264,9 +3389,9 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
        struct io_async_rw *iorw = req->async_data;
        struct iovec *iov = iorw->fast_iov;
-       ssize_t ret;
+       int ret;
 
-       ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
+       ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
        if (unlikely(ret < 0))
                return ret;
 
@@ -3274,24 +3399,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
+       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        return 0;
 }
 
 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       ssize_t ret;
-
-       ret = io_prep_rw(req, sqe);
-       if (ret)
-               return ret;
-
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
-
-       /* either don't need iovec imported or already have it */
-       if (!req->async_data)
-               return 0;
-       return io_rw_prep_async(req, READ);
+       return io_prep_rw(req, sqe, READ);
 }
 
 /*
@@ -3310,7 +3426,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct wait_page_key *key = arg;
-       int ret;
 
        wpq = container_of(wait, struct wait_page_queue, wait);
 
@@ -3319,22 +3434,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 
        req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
-
-       init_task_work(&req->task_work, io_req_task_submit);
-       percpu_ref_get(&req->ctx->refs);
-
-       /* submit ref gets dropped, acquire a new one */
-       refcount_inc(&req->refs);
-       ret = io_req_task_work_add(req, true);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               /* queue just for cancelation */
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       io_req_task_queue(req);
        return 1;
 }
 
@@ -3381,7 +3481,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
        return true;
 }
 
-static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
+static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
 {
        if (req->file->f_op->read_iter)
                return call_read_iter(req->file, &req->rw.kiocb, iter);
@@ -3391,27 +3491,40 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
                return -EINVAL;
 }
 
-static int io_read(struct io_kiocb *req, bool force_nonblock,
-                  struct io_comp_state *cs)
+static bool need_read_all(struct io_kiocb *req)
+{
+       return req->flags & REQ_F_ISREG ||
+               S_ISBLK(file_inode(req->file)->i_mode);
+}
+
+static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
-       struct iov_iter iter_cp;
        struct io_async_rw *rw = req->async_data;
-       ssize_t io_size, ret, ret2;
-       bool no_async;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
-       if (rw)
+       if (rw) {
                iter = &rw->iter;
-
-       ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
-       if (ret < 0)
-               return ret;
-       iter_cp = *iter;
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
-       ret = 0;
+               state = &rw->iter_state;
+               /*
+                * We come here from an earlier attempt, restore our state to
+                * match in case it doesn't. It's cheap enough that we don't
+                * need to make this conditional.
+                */
+               iov_iter_restore(iter, state);
+               iovec = NULL;
+       } else {
+               ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
+               if (ret < 0)
+                       return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
+       }
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3419,127 +3532,130 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
        else
                kiocb->ki_flags |= IOCB_NOWAIT;
 
-
        /* If the file doesn't support async, just async punt */
-       no_async = force_nonblock && !io_file_supports_async(req->file, READ);
-       if (no_async)
-               goto copy_iov;
+       if (force_nonblock && !io_file_supports_nowait(req, READ)) {
+               ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
+               return ret ?: -EAGAIN;
+       }
 
-       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
-       if (unlikely(ret))
-               goto out_free;
+       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
+       if (unlikely(ret)) {
+               kfree(iovec);
+               return ret;
+       }
 
        ret = io_iter_do_read(req, iter);
 
-       if (!ret) {
-               goto done;
-       } else if (ret == -EIOCBQUEUED) {
-               ret = 0;
-               goto out_free;
-       } else if (ret == -EAGAIN) {
+       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+               req->flags &= ~REQ_F_REISSUE;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
-               /* no retry on NONBLOCK marked file */
-               if (req->file->f_flags & O_NONBLOCK)
+               /* no retry on NONBLOCK nor RWF_NOWAIT */
+               if (req->flags & REQ_F_NOWAIT)
                        goto done;
-               /* some cases will consume bytes even on error returns */
-               *iter = iter_cp;
                ret = 0;
-               goto copy_iov;
-       } else if (ret < 0) {
-               /* make sure -ERESTARTSYS -> -EINTR is done */
+       } else if (ret == -EIOCBQUEUED) {
+               goto out_free;
+       } else if (ret <= 0 || ret == req->result || !force_nonblock ||
+                  (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
+               /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
 
-       /* read it all, or we did blocking attempt. no retry. */
-       if (!iov_iter_count(iter) || !force_nonblock ||
-           (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
-               goto done;
+       /*
+        * Don't depend on the iter state matching what was consumed, or being
+        * untouched in case of error. Restore it and we'll advance it
+        * manually if we need to.
+        */
+       iov_iter_restore(iter, state);
 
-       io_size -= ret;
-copy_iov:
        ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
-       if (ret2) {
-               ret = ret2;
-               goto out_free;
-       }
-       if (no_async)
-               return -EAGAIN;
-       rw = req->async_data;
-       /* it's copied and will be cleaned with ->io */
-       iovec = NULL;
-       /* now use our persistent iterator, if we aren't already */
-       iter = &rw->iter;
-retry:
-       rw->bytes_done += ret;
-       /* if we can retry, do so with the callbacks armed */
-       if (!io_rw_should_retry(req)) {
-               kiocb->ki_flags &= ~IOCB_WAITQ;
-               return -EAGAIN;
-       }
+       if (ret2)
+               return ret2;
 
+       iovec = NULL;
+       rw = req->async_data;
        /*
-        * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
-        * get -EIOCBQUEUED, then we'll get a notification when the desired
-        * page gets unlocked. We can also get a partial read here, and if we
-        * do, then just retry at the new offset.
+        * Now use our persistent iterator and state, if we aren't already.
+        * We've restored and mapped the iter to match.
         */
-       ret = io_iter_do_read(req, iter);
-       if (ret == -EIOCBQUEUED) {
-               ret = 0;
-               goto out_free;
-       } else if (ret > 0 && ret < io_size) {
+       if (iter != &rw->iter) {
+               iter = &rw->iter;
+               state = &rw->iter_state;
+       }
+
+       do {
+               /*
+                * We end up here because of a partial read, either from
+                * above or inside this loop. Advance the iter by the bytes
+                * that were consumed.
+                */
+               iov_iter_advance(iter, ret);
+               if (!iov_iter_count(iter))
+                       break;
+               rw->bytes_done += ret;
+               iov_iter_save_state(iter, state);
+
+               /* if we can retry, do so with the callbacks armed */
+               if (!io_rw_should_retry(req)) {
+                       kiocb->ki_flags &= ~IOCB_WAITQ;
+                       return -EAGAIN;
+               }
+
+               req->result = iov_iter_count(iter);
+               /*
+                * Now retry read with the IOCB_WAITQ parts set in the iocb. If
+                * we get -EIOCBQUEUED, then we'll get a notification when the
+                * desired page gets unlocked. We can also get a partial read
+                * here, and if we do, then just retry at the new offset.
+                */
+               ret = io_iter_do_read(req, iter);
+               if (ret == -EIOCBQUEUED)
+                       return 0;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
-               goto retry;
-       }
+               iov_iter_restore(iter, state);
+       } while (ret > 0);
 done:
-       kiocb_done(kiocb, ret, cs);
-       ret = 0;
+       kiocb_done(kiocb, ret, issue_flags);
 out_free:
-       /* it's reportedly faster than delegating the null check to kfree() */
+       /* it's faster to check here than to delegate to kfree */
        if (iovec)
                kfree(iovec);
-       return ret;
+       return 0;
 }
 
 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       ssize_t ret;
-
-       ret = io_prep_rw(req, sqe);
-       if (ret)
-               return ret;
-
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
-
-       /* either don't need iovec imported or already have it */
-       if (!req->async_data)
-               return 0;
-       return io_rw_prep_async(req, WRITE);
+       return io_prep_rw(req, sqe, WRITE);
 }
 
-static int io_write(struct io_kiocb *req, bool force_nonblock,
-                   struct io_comp_state *cs)
+static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
-       struct iov_iter iter_cp;
        struct io_async_rw *rw = req->async_data;
-       ssize_t ret, ret2, io_size;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
-       if (rw)
+       if (rw) {
                iter = &rw->iter;
-
-       ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
-       if (ret < 0)
-               return ret;
-       iter_cp = *iter;
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+               state = &rw->iter_state;
+               iov_iter_restore(iter, state);
+               iovec = NULL;
+       } else {
+               ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
+               if (ret < 0)
+                       return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
+       }
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3548,7 +3664,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
                kiocb->ki_flags |= IOCB_NOWAIT;
 
        /* If the file doesn't support async, just async punt */
-       if (force_nonblock && !io_file_supports_async(req->file, WRITE))
+       if (force_nonblock && !io_file_supports_nowait(req, WRITE))
                goto copy_iov;
 
        /* file path doesn't support NOWAIT for non-direct_IO */
@@ -3556,7 +3672,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret))
                goto out_free;
 
@@ -3581,28 +3697,36 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
        else
                ret2 = -EINVAL;
 
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               ret2 = -EAGAIN;
+       }
+
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
-       /* no retry on NONBLOCK marked file */
-       if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+       /* no retry on NONBLOCK nor RWF_NOWAIT */
+       if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
                        goto copy_iov;
 done:
-               kiocb_done(kiocb, ret2, cs);
+               kiocb_done(kiocb, ret2, issue_flags);
        } else {
 copy_iov:
-               /* some cases will consume bytes even on error returns */
-               *iter = iter_cp;
+               iov_iter_restore(iter, state);
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
-               if (!ret)
+               if (!ret) {
+                       if (kiocb->ki_flags & IOCB_WRITE)
+                               kiocb_end_write(req);
                        return -EAGAIN;
+               }
+               return ret;
        }
 out_free:
        /* it's reportedly faster than delegating the null check to kfree() */
@@ -3611,37 +3735,160 @@ out_free:
        return ret;
 }
 
-static int __io_splice_prep(struct io_kiocb *req,
+static int io_renameat_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       struct io_splice* sp = &req->splice;
-       unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
+       struct io_rename *ren = &req->rename;
+       const char __user *oldf, *newf;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
 
-       sp->file_in = NULL;
-       sp->len = READ_ONCE(sqe->len);
-       sp->flags = READ_ONCE(sqe->splice_flags);
+       ren->old_dfd = READ_ONCE(sqe->fd);
+       oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+       ren->new_dfd = READ_ONCE(sqe->len);
+       ren->flags = READ_ONCE(sqe->rename_flags);
 
-       if (unlikely(sp->flags & ~valid_flags))
-               return -EINVAL;
+       ren->oldpath = getname(oldf);
+       if (IS_ERR(ren->oldpath))
+               return PTR_ERR(ren->oldpath);
 
-       sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
-                                 (sp->flags & SPLICE_F_FD_IN_FIXED));
-       if (!sp->file_in)
+       ren->newpath = getname(newf);
+       if (IS_ERR(ren->newpath)) {
+               putname(ren->oldpath);
+               return PTR_ERR(ren->newpath);
+       }
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_rename *ren = &req->rename;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
+                               ren->newpath, ren->flags);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
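
IORING_OP_RENAMEAT always executes from a blocking context, which is what the -EAGAIN punt on IO_URING_F_NONBLOCK above enforces. Userspace reaches it through io_uring_prep_renameat(); a sketch assuming a liburing recent enough to provide that helper, with a hypothetical rename_async() wrapper:

#include <liburing.h>
#include <fcntl.h>

/* a sketch: rename one path to another through the ring */
static int rename_async(struct io_uring *ring, const char *from, const char *to)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int res;

        if (!sqe)
                return -1;
        io_uring_prep_renameat(sqe, AT_FDCWD, from, AT_FDCWD, to, 0);
        io_uring_submit(ring);
        if (io_uring_wait_cqe(ring, &cqe))
                return -1;
        res = cqe->res;                 /* 0 on success, -errno on failure */
        io_uring_cqe_seen(ring, cqe);
        return res;
}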
+
+static int io_unlinkat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_unlink *un = &req->unlink;
+       const char __user *fname;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
+           sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
+
+       un->dfd = READ_ONCE(sqe->fd);
+
+       un->flags = READ_ONCE(sqe->unlink_flags);
+       if (un->flags & ~AT_REMOVEDIR)
+               return -EINVAL;
+
+       fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       un->filename = getname(fname);
+       if (IS_ERR(un->filename))
+               return PTR_ERR(un->filename);
+
        req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
 
-       if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
-               /*
-                * Splice operation will be punted aync, and here need to
-                * modify io_wq_work.flags, so initialize io_wq_work firstly.
-                */
-               io_req_init_async(req);
-               req->work.flags |= IO_WQ_WORK_UNBOUND;
-       }
+static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_unlink *un = &req->unlink;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       if (un->flags & AT_REMOVEDIR)
+               ret = do_rmdir(un->dfd, un->filename);
+       else
+               ret = do_unlinkat(un->dfd, un->filename);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
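
io_unlinkat() follows the same blocking-only pattern. Since each request is punted to io-wq anyway, queueing several unlinks in one submission amortizes the enter syscall; a sketch with made-up paths and a hypothetical unlink_batch() helper (pass AT_REMOVEDIR instead of 0 to remove directories, matching the flag check in the prep handler above):

#include <liburing.h>
#include <fcntl.h>

static int unlink_batch(struct io_uring *ring, const char **paths, int nr)
{
        struct io_uring_cqe *cqe;
        int i;

        for (i = 0; i < nr; i++) {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                if (!sqe)
                        return -1;
                io_uring_prep_unlinkat(sqe, AT_FDCWD, paths[i], 0);
        }
        io_uring_submit(ring);
        for (i = 0; i < nr; i++) {
                if (io_uring_wait_cqe(ring, &cqe))
                        return -1;
                io_uring_cqe_seen(ring, cqe);
        }
        return 0;
}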
+
+static int io_shutdown_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_NET)
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
+                    sqe->buf_index || sqe->splice_fd_in))
+               return -EINVAL;
+
+       req->shutdown.how = READ_ONCE(sqe->len);
+       return 0;
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
+static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
+{
+#if defined(CONFIG_NET)
+       struct socket *sock;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       sock = sock_from_file(req->file, &ret);
+       if (unlikely(!sock))
+               return ret;
+
+       ret = __sys_shutdown_sock(sock, req->shutdown.how);
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+#else
+       return -EOPNOTSUPP;
+#endif
+}
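
IORING_OP_SHUTDOWN wraps __sys_shutdown_sock() and is likewise blocking-only. A common use is tearing a connection down in order with linked SQEs; a sketch assuming liburing and an already-connected socket, with an illustrative helper name:

#include <liburing.h>
#include <sys/socket.h>

/* shut down the write side, then close, as a linked pair (sketch) */
static int shutdown_and_close(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i, res = 0;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -1;
        io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
        sqe->flags |= IOSQE_IO_LINK;    /* close only runs after shutdown */

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -1;
        io_uring_prep_close(sqe, sockfd);

        io_uring_submit(ring);
        for (i = 0; i < 2; i++) {
                if (io_uring_wait_cqe(ring, &cqe))
                        return -1;
                if (cqe->res < 0)
                        res = cqe->res;
                io_uring_cqe_seen(ring, cqe);
        }
        return res;
}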
+
+static int __io_splice_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_splice *sp = &req->splice;
+       unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
 
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+
+       sp->len = READ_ONCE(sqe->len);
+       sp->flags = READ_ONCE(sqe->splice_flags);
+       if (unlikely(sp->flags & ~valid_flags))
+               return -EINVAL;
+       sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
        return 0;
 }
 
@@ -3653,60 +3900,75 @@ static int io_tee_prep(struct io_kiocb *req,
        return __io_splice_prep(req, sqe);
 }
 
-static int io_tee(struct io_kiocb *req, bool force_nonblock)
+static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+       struct file *in;
        long ret = 0;
 
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
+
+       in = io_file_get(req->ctx, req, sp->splice_fd_in,
+                                 (sp->flags & SPLICE_F_FD_IN_FIXED));
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        if (sp->len)
                ret = do_tee(in, out, sp->len, flags);
 
-       io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+       if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+               io_put_file(in);
+done:
        if (ret != sp->len)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
 
 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_splice* sp = &req->splice;
+       struct io_splice *sp = &req->splice;
 
        sp->off_in = READ_ONCE(sqe->splice_off_in);
        sp->off_out = READ_ONCE(sqe->off);
        return __io_splice_prep(req, sqe);
 }
 
-static int io_splice(struct io_kiocb *req, bool force_nonblock)
+static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
        loff_t *poff_in, *poff_out;
+       struct file *in;
        long ret = 0;
 
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
+       in = io_file_get(req->ctx, req, sp->splice_fd_in,
+                                 (sp->flags & SPLICE_F_FD_IN_FIXED));
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
        if (sp->len)
                ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
 
-       io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+       if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+               io_put_file(in);
+done:
        if (ret != sp->len)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3714,24 +3976,21 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
 /*
  * IORING_OP_NOP just posts a completion event, nothing else.
  */
-static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
+static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       __io_req_complete(req, 0, 0, cs);
+       __io_req_complete(req, issue_flags, 0, 0);
        return 0;
 }
 
-static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!req->file)
-               return -EBADF;
-
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
@@ -3747,20 +4006,20 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 {
        loff_t end = req->sync.off + req->sync.len;
        int ret;
 
        /* fsync always requires a blocking context */
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
        ret = vfs_fsync_range(req->file, req->sync.off,
                                end > 0 ? end : LLONG_MAX,
                                req->sync.flags & IORING_FSYNC_DATASYNC);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3780,17 +4039,19 @@ static int io_fallocate_prep(struct io_kiocb *req,
        return 0;
 }
 
-static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
+static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
 {
        int ret;
 
        /* fallocate always requires a blocking context */
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
        ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
                                req->sync.len);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
+       else
+               fsnotify_modify(req->file);
        io_req_complete(req, ret);
        return 0;
 }
@@ -3800,7 +4061,9 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
        const char __user *fname;
        int ret;
 
-       if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (unlikely(sqe->ioprio || sqe->buf_index))
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
@@ -3817,20 +4080,21 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                req->open.filename = NULL;
                return ret;
        }
+
+       req->open.file_slot = READ_ONCE(sqe->file_index);
+       if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
+               return -EINVAL;
+
        req->open.nofile = rlimit(RLIMIT_NOFILE);
-       req->open.ignore_nonblock = false;
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
 
 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       u64 flags, mode;
+       u64 mode = READ_ONCE(sqe->len);
+       u64 flags = READ_ONCE(sqe->open_flags);
 
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-               return -EINVAL;
-       mode = READ_ONCE(sqe->len);
-       flags = READ_ONCE(sqe->open_flags);
        req->open.how = build_open_how(flags, mode);
        return __io_openat_prep(req, sqe);
 }
@@ -3841,8 +4105,6 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        size_t len;
        int ret;
 
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-               return -EINVAL;
        how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        len = READ_ONCE(sqe->len);
        if (len < OPEN_HOW_SIZE_VER0)
@@ -3856,58 +4118,75 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return __io_openat_prep(req, sqe);
 }
 
-static int io_openat2(struct io_kiocb *req, bool force_nonblock)
+static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct open_flags op;
        struct file *file;
+       bool resolve_nonblock, nonblock_set;
+       bool fixed = !!req->open.file_slot;
        int ret;
 
-       if (force_nonblock && !req->open.ignore_nonblock)
-               return -EAGAIN;
-
        ret = build_open_flags(&req->open.how, &op);
        if (ret)
                goto err;
+       nonblock_set = op.open_flag & O_NONBLOCK;
+       resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
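+       /*
+        * For a nonblocking issue, only attempt a cached lookup and force
+        * O_NONBLOCK; the latter is cleared again below if the application
+        * didn't ask for it itself.
+        */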
+       if (issue_flags & IO_URING_F_NONBLOCK) {
+               /*
+                * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
+                * it'll always return -EAGAIN
+                */
+               if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+                       return -EAGAIN;
+               op.lookup_flags |= LOOKUP_CACHED;
+               op.open_flag |= O_NONBLOCK;
+       }
 
-       ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
-       if (ret < 0)
-               goto err;
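+       /* only reserve a regular fd if we're not targeting a fixed file slot */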
+       if (!fixed) {
+               ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
+               if (ret < 0)
+                       goto err;
+       }
 
        file = do_filp_open(req->open.dfd, req->open.filename, &op);
        if (IS_ERR(file)) {
-               put_unused_fd(ret);
-               ret = PTR_ERR(file);
                /*
-                * A work-around to ensure that /proc/self works that way
-                * that it should - if we get -EOPNOTSUPP back, then assume
-                * that proc_self_get_link() failed us because we're in async
-                * context. We should be safe to retry this from the task
-                * itself with force_nonblock == false set, as it should not
-                * block on lookup. Would be nice to know this upfront and
-                * avoid the async dance, but doesn't seem feasible.
+                * We could hang on to this 'fd' on retrying, but it seems like a
+                * marginal gain for something that is now known to be a slower
+                * path. So just put it, and we'll get a new one when we retry.
                 */
-               if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
-                       req->open.ignore_nonblock = true;
-                       refcount_inc(&req->refs);
-                       io_req_task_queue(req);
-                       return 0;
-               }
-       } else {
-               fsnotify_open(file);
-               fd_install(ret, file);
+               if (!fixed)
+                       put_unused_fd(ret);
+
+               ret = PTR_ERR(file);
+               /* only retry if RESOLVE_CACHED wasn't already set by application */
+               if (ret == -EAGAIN &&
+                   (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
+                       return -EAGAIN;
+               goto err;
        }
+
+       if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
+               file->f_flags &= ~O_NONBLOCK;
+       fsnotify_open(file);
+
+       if (!fixed)
+               fd_install(ret, file);
+       else
+               ret = io_install_fixed_file(req, file, issue_flags,
+                                           req->open.file_slot - 1);
 err:
        putname(req->open.filename);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < 0)
-               req_set_fail_links(req);
-       io_req_complete(req, ret);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
-static int io_openat(struct io_kiocb *req, bool force_nonblock)
+static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
 {
-       return io_openat2(req, force_nonblock);
+       return io_openat2(req, issue_flags);
 }
 
 static int io_remove_buffers_prep(struct io_kiocb *req,
@@ -3948,6 +4227,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
                kfree(nxt);
                if (++i == nbufs)
                        return i;
+               cond_resched();
        }
        i++;
        kfree(buf);
@@ -3956,13 +4236,13 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
        return i;
 }
 
-static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
-                            struct io_comp_state *cs)
+static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_provide_buf *p = &req->pbuf;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer *head;
        int ret = 0;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        io_ring_submit_lock(ctx, !force_nonblock);
 
@@ -3973,16 +4253,11 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
        if (head)
                ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
 
-       /* need to hold the lock to complete IOPOLL requests */
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
-               __io_req_complete(req, ret, 0, cs);
-               io_ring_submit_unlock(ctx, !force_nonblock);
-       } else {
-               io_ring_submit_unlock(ctx, !force_nonblock);
-               __io_req_complete(req, ret, 0, cs);
-       }
+       /* complete before unlock, IOPOLL may need the lock */
+       __io_req_complete(req, issue_flags, ret, 0);
+       io_ring_submit_unlock(ctx, !force_nonblock);
        return 0;
 }
 
@@ -4049,13 +4324,13 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
        return i ? i : -ENOMEM;
 }
 
-static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
-                             struct io_comp_state *cs)
+static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_provide_buf *p = &req->pbuf;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer *head, *list;
        int ret = 0;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        io_ring_submit_lock(ctx, !force_nonblock);
 
@@ -4065,21 +4340,16 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
 
        ret = io_add_buffers(p, &head);
        if (ret >= 0 && !list) {
-               ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
+               ret = xa_insert(&ctx->io_buffers, p->bgid, head,
+                               GFP_KERNEL_ACCOUNT);
                if (ret < 0)
                        __io_remove_buffers(ctx, head, p->bgid, -1U);
        }
        if (ret < 0)
-               req_set_fail_links(req);
-
-       /* need to hold the lock to complete IOPOLL requests */
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
-               __io_req_complete(req, ret, 0, cs);
-               io_ring_submit_unlock(ctx, !force_nonblock);
-       } else {
-               io_ring_submit_unlock(ctx, !force_nonblock);
-               __io_req_complete(req, ret, 0, cs);
-       }
+               req_set_fail(req);
+       /* complete before unlock, IOPOLL may need the lock */
+       __io_req_complete(req, issue_flags, ret, 0);
+       io_ring_submit_unlock(ctx, !force_nonblock);
        return 0;
 }
 
@@ -4089,7 +4359,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
        if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
        req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4110,20 +4380,20 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #endif
 }
 
-static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
-                       struct io_comp_state *cs)
+static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 {
 #if defined(CONFIG_EPOLL)
        struct io_epoll *ie = &req->epoll;
        int ret;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
        if (force_nonblock && ret == -EAGAIN)
                return -EAGAIN;
 
        if (ret < 0)
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -4147,18 +4417,18 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 #endif
 }
 
-static int io_madvise(struct io_kiocb *req, bool force_nonblock)
+static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
        struct io_madvise *ma = &req->madvise;
        int ret;
 
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
        ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 #else
@@ -4179,12 +4449,12 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
+static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_fadvise *fa = &req->fadvise;
        int ret;
 
-       if (force_nonblock) {
+       if (issue_flags & IO_URING_F_NONBLOCK) {
                switch (fa->advice) {
                case POSIX_FADV_NORMAL:
                case POSIX_FADV_RANDOM:
@@ -4197,14 +4467,14 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
 
        ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
        if (ret < 0)
-               req_set_fail_links(req);
-       io_req_complete(req, ret);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
@@ -4220,89 +4490,96 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static int io_statx(struct io_kiocb *req, bool force_nonblock)
+static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_statx *ctx = &req->statx;
        int ret;
 
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
        ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
                       ctx->buffer);
 
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
 
 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       /*
-        * If we queue this for async, it must not be cancellable. That would
-        * leave the 'file' in an undeterminate state, and here need to modify
-        * io_wq_work.flags, so initialize io_wq_work firstly.
-        */
-       io_req_init_async(req);
-
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+           sqe->rw_flags || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
        req->close.fd = READ_ONCE(sqe->fd);
-       if ((req->file && req->file->f_op == &io_uring_fops))
-               return -EBADF;
+       req->close.file_slot = READ_ONCE(sqe->file_index);
+       if (req->close.file_slot && req->close.fd)
+               return -EINVAL;
 
-       req->close.put_file = NULL;
        return 0;
 }
 
-static int io_close(struct io_kiocb *req, bool force_nonblock,
-                   struct io_comp_state *cs)
+static int io_close(struct io_kiocb *req, unsigned int issue_flags)
 {
+       struct files_struct *files = current->files;
        struct io_close *close = &req->close;
-       int ret;
+       struct fdtable *fdt;
+       struct file *file = NULL;
+       int ret = -EBADF;
 
-       /* might be already done during nonblock submission */
-       if (!close->put_file) {
-               ret = close_fd_get_file(close->fd, &close->put_file);
-               if (ret < 0)
-                       return (ret == -ENOENT) ? -EBADF : ret;
+       if (req->close.file_slot) {
+               ret = io_close_fixed(req, issue_flags);
+               goto err;
+       }
+
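+       /*
+        * Peek at the file under file_lock so io_uring's own files can be
+        * rejected and files with a ->flush() op punted to async before the
+        * fd is actually closed.
+        */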
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       if (close->fd >= fdt->max_fds) {
+               spin_unlock(&files->file_lock);
+               goto err;
+       }
+       file = fdt->fd[close->fd];
+       if (!file || file->f_op == &io_uring_fops) {
+               spin_unlock(&files->file_lock);
+               file = NULL;
+               goto err;
        }
 
        /* if the file has a flush method, be safe and punt to async */
-       if (close->put_file->f_op->flush && force_nonblock) {
-               /* not safe to cancel at this point */
-               req->work.flags |= IO_WQ_WORK_NO_CANCEL;
-               /* was never set, but play safe */
-               req->flags &= ~REQ_F_NOWAIT;
-               /* avoid grabbing files - we don't need the files */
-               req->flags |= REQ_F_NO_FILE_TABLE;
+       if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
+               spin_unlock(&files->file_lock);
                return -EAGAIN;
        }
 
+       ret = __close_fd_get_file(close->fd, &file);
+       spin_unlock(&files->file_lock);
+       if (ret < 0) {
+               if (ret == -ENOENT)
+                       ret = -EBADF;
+               goto err;
+       }
+
        /* No ->flush() or already async, safely close from here */
-       ret = filp_close(close->put_file, req->work.identity->files);
+       ret = filp_close(file, current->files);
+err:
        if (ret < 0)
-               req_set_fail_links(req);
-       fput(close->put_file);
-       close->put_file = NULL;
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       if (file)
+               fput(file);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
-static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!req->file)
-               return -EBADF;
-
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
@@ -4315,18 +4592,18 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
+static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 {
        int ret;
 
        /* sync_file_range always requires a blocking context */
-       if (force_nonblock)
+       if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
        ret = sync_file_range(req->file, req->sync.off, req->sync.len,
                                req->sync.flags);
        if (ret < 0)
-               req_set_fail_links(req);
+               req_set_fail(req);
        io_req_complete(req, ret);
        return 0;
 }
@@ -4340,55 +4617,65 @@ static int io_setup_async_msg(struct io_kiocb *req,
        if (async_msg)
                return -EAGAIN;
        if (io_alloc_async_data(req)) {
-               if (kmsg->iov != kmsg->fast_iov)
-                       kfree(kmsg->iov);
+               kfree(kmsg->free_iov);
                return -ENOMEM;
        }
        async_msg = req->async_data;
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
+       if (async_msg->msg.msg_name)
+               async_msg->msg.msg_name = &async_msg->addr;
+       /* if we're using fast_iov, set it to the new one */
+       if (!async_msg->free_iov)
+               async_msg->msg.msg_iter.iov = async_msg->fast_iov;
+
        return -EAGAIN;
 }
 
 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
 {
-       iomsg->iov = iomsg->fast_iov;
        iomsg->msg.msg_name = &iomsg->addr;
+       iomsg->free_iov = iomsg->fast_iov;
        return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
-                                  req->sr_msg.msg_flags, &iomsg->iov);
+                                  req->sr_msg.msg_flags, &iomsg->free_iov);
 }
 
-static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_sendmsg_prep_async(struct io_kiocb *req)
 {
-       struct io_async_msghdr *async_msg = req->async_data;
-       struct io_sr_msg *sr = &req->sr_msg;
        int ret;
 
+       ret = io_sendmsg_copy_hdr(req, req->async_data);
+       if (!ret)
+               req->flags |= REQ_F_NEED_CLEANUP;
+       return ret;
+}
+
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+       struct io_sr_msg *sr = &req->sr_msg;
+
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio))
+       if (unlikely(sqe->addr2 || sqe->file_index))
+               return -EINVAL;
+       if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
                return -EINVAL;
 
-       sr->msg_flags = READ_ONCE(sqe->msg_flags);
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
+       sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
+       if (sr->msg_flags & MSG_DONTWAIT)
+               req->flags |= REQ_F_NOWAIT;
 
 #ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-
-       if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
-               return 0;
-       ret = io_sendmsg_copy_hdr(req, async_msg);
-       if (!ret)
-               req->flags |= REQ_F_NEED_CLEANUP;
-       return ret;
+       return 0;
 }
 
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
@@ -4400,46 +4687,37 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
        if (unlikely(!sock))
                return ret;
 
-       if (req->async_data) {
-               kmsg = req->async_data;
-               kmsg->msg.msg_name = &kmsg->addr;
-               /* if iov is set, it's allocated already */
-               if (!kmsg->iov)
-                       kmsg->iov = kmsg->fast_iov;
-               kmsg->msg.msg_iter.iov = kmsg->iov;
-       } else {
+       kmsg = req->async_data;
+       if (!kmsg) {
                ret = io_sendmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }
 
-       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
-       if (flags & MSG_DONTWAIT)
-               req->flags |= REQ_F_NOWAIT;
-       else if (force_nonblock)
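+       /*
+        * MSG_NOSIGNAL and REQ_F_NOWAIT are applied at prep time now; only
+        * MSG_DONTWAIT needs adding here for a nonblocking issue.
+        */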
+       flags = req->sr_msg.msg_flags;
+       if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
-
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
-       if (force_nonblock && ret == -EAGAIN)
+       if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
                return io_setup_async_msg(req, kmsg);
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
 
-       if (kmsg->iov != kmsg->fast_iov)
-               kfree(kmsg->iov);
+       /* fast path, check for non-NULL to avoid function call */
+       if (kmsg->free_iov)
+               kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < min_ret)
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
-static int io_send(struct io_kiocb *req, bool force_nonblock,
-                  struct io_comp_state *cs)
+static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = &req->sr_msg;
        struct msghdr msg;
@@ -4462,25 +4740,22 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
 
-       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
-       if (flags & MSG_DONTWAIT)
-               req->flags |= REQ_F_NOWAIT;
-       else if (force_nonblock)
+       flags = req->sr_msg.msg_flags;
+       if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
-
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);
 
        msg.msg_flags = flags;
        ret = sock_sendmsg(sock, &msg);
-       if (force_nonblock && ret == -EAGAIN)
+       if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
                return -EAGAIN;
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
 
        if (ret < min_ret)
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
@@ -4500,15 +4775,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (iov_len > 1)
                        return -EINVAL;
-               if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
+               if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
                        return -EFAULT;
-               sr->len = iomsg->iov[0].iov_len;
-               iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
-                               sr->len);
-               iomsg->iov = NULL;
+               sr->len = iomsg->fast_iov[0].iov_len;
+               iomsg->free_iov = NULL;
        } else {
+               iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
-                                    &iomsg->iov, &iomsg->msg.msg_iter,
+                                    &iomsg->free_iov, &iomsg->msg.msg_iter,
                                     false);
                if (ret > 0)
                        ret = 0;
@@ -4521,16 +4795,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                                        struct io_async_msghdr *iomsg)
 {
-       struct compat_msghdr __user *msg_compat;
        struct io_sr_msg *sr = &req->sr_msg;
        struct compat_iovec __user *uiov;
        compat_uptr_t ptr;
        compat_size_t len;
        int ret;
 
-       msg_compat = (struct compat_msghdr __user *) sr->umsg;
-       ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
-                                       &ptr, &len);
+       ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
+                                 &ptr, &len);
        if (ret)
                return ret;
 
@@ -4547,11 +4819,11 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                if (clen < 0)
                        return -EINVAL;
                sr->len = clen;
-               iomsg->iov[0].iov_len = clen;
-               iomsg->iov = NULL;
+               iomsg->free_iov = NULL;
        } else {
+               iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
-                                  UIO_FASTIOV, &iomsg->iov,
+                                  UIO_FASTIOV, &iomsg->free_iov,
                                   &iomsg->msg.msg_iter, true);
                if (ret < 0)
                        return ret;
@@ -4565,7 +4837,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
 {
        iomsg->msg.msg_name = &iomsg->addr;
-       iomsg->iov = iomsg->fast_iov;
 
 #ifdef CONFIG_COMPAT
        if (req->ctx->compat)
@@ -4595,38 +4866,42 @@ static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
        return io_put_kbuf(req, req->sr_msg.kbuf);
 }
 
-static int io_recvmsg_prep(struct io_kiocb *req,
-                          const struct io_uring_sqe *sqe)
+static int io_recvmsg_prep_async(struct io_kiocb *req)
 {
-       struct io_async_msghdr *async_msg = req->async_data;
-       struct io_sr_msg *sr = &req->sr_msg;
        int ret;
 
+       ret = io_recvmsg_copy_hdr(req, req->async_data);
+       if (!ret)
+               req->flags |= REQ_F_NEED_CLEANUP;
+       return ret;
+}
+
+static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+       struct io_sr_msg *sr = &req->sr_msg;
+
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio))
+       if (unlikely(sqe->addr2 || sqe->file_index))
+               return -EINVAL;
+       if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
                return -EINVAL;
 
-       sr->msg_flags = READ_ONCE(sqe->msg_flags);
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->bgid = READ_ONCE(sqe->buf_group);
+       sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
+       if (sr->msg_flags & MSG_DONTWAIT)
+               req->flags |= REQ_F_NOWAIT;
 
 #ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-
-       if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
-               return 0;
-       ret = io_recvmsg_copy_hdr(req, async_msg);
-       if (!ret)
-               req->flags |= REQ_F_NEED_CLEANUP;
-       return ret;
+       return 0;
 }
 
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
@@ -4634,19 +4909,14 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
        unsigned flags;
        int min_ret = 0;
        int ret, cflags = 0;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        sock = sock_from_file(req->file, &ret);
        if (unlikely(!sock))
                return ret;
 
-       if (req->async_data) {
-               kmsg = req->async_data;
-               kmsg->msg.msg_name = &kmsg->addr;
-               /* if iov is set, it's allocated already */
-               if (!kmsg->iov)
-                       kmsg->iov = kmsg->fast_iov;
-               kmsg->msg.msg_iter.iov = kmsg->iov;
-       } else {
+       kmsg = req->async_data;
+       if (!kmsg) {
                ret = io_recvmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
@@ -4658,16 +4928,14 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
                if (IS_ERR(kbuf))
                        return PTR_ERR(kbuf);
                kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
-               iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
+               kmsg->fast_iov[0].iov_len = req->sr_msg.len;
+               iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
                                1, req->sr_msg.len);
        }
 
-       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
-       if (flags & MSG_DONTWAIT)
-               req->flags |= REQ_F_NOWAIT;
-       else if (force_nonblock)
+       flags = req->sr_msg.msg_flags;
+       if (force_nonblock)
                flags |= MSG_DONTWAIT;
-
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
@@ -4680,17 +4948,17 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_recv_kbuf(req);
-       if (kmsg->iov != kmsg->fast_iov)
-               kfree(kmsg->iov);
+       /* fast path, check for non-NULL to avoid function call */
+       if (kmsg->free_iov)
+               kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, cflags, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
 }
 
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
-                  struct io_comp_state *cs)
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_buffer *kbuf;
        struct io_sr_msg *sr = &req->sr_msg;
@@ -4701,6 +4969,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
        unsigned flags;
        int min_ret = 0;
        int ret, cflags = 0;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        sock = sock_from_file(req->file, &ret);
        if (unlikely(!sock))
@@ -4724,12 +4993,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
        msg.msg_iocb = NULL;
        msg.msg_flags = 0;
 
-       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
-       if (flags & MSG_DONTWAIT)
-               req->flags |= REQ_F_NOWAIT;
-       else if (force_nonblock)
+       flags = req->sr_msg.msg_flags;
+       if (force_nonblock)
                flags |= MSG_DONTWAIT;
-
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);
 
@@ -4742,8 +5008,8 @@ out_free:
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_recv_kbuf(req);
        if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, cflags, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
 }
 
@@ -4751,48 +5017,79 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_accept *accept = &req->accept;
 
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
+       if (sqe->ioprio || sqe->len || sqe->buf_index)
                return -EINVAL;
 
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
        accept->nofile = rlimit(RLIMIT_NOFILE);
+
+       accept->file_slot = READ_ONCE(sqe->file_index);
+       if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
+               return -EINVAL;
+       if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+               return -EINVAL;
+       if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
+               accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
        return 0;
 }
 
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
-                    struct io_comp_state *cs)
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_accept *accept = &req->accept;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
-       int ret;
+       bool fixed = !!accept->file_slot;
+       struct file *file;
+       int ret, fd;
 
        if (req->file->f_flags & O_NONBLOCK)
                req->flags |= REQ_F_NOWAIT;
 
-       ret = __sys_accept4_file(req->file, file_flags, accept->addr,
-                                       accept->addr_len, accept->flags,
-                                       accept->nofile);
-       if (ret == -EAGAIN && force_nonblock)
-               return -EAGAIN;
-       if (ret < 0) {
+       if (!fixed) {
+               fd = __get_unused_fd_flags(accept->flags, accept->nofile);
+               if (unlikely(fd < 0))
+                       return fd;
+       }
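+       /*
+        * do_accept() hands back the new file; the reserved fd (or the fixed
+        * file slot) is only installed if the accept succeeded.
+        */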
+       file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
+                        accept->flags);
+
+       if (IS_ERR(file)) {
+               if (!fixed)
+                       put_unused_fd(fd);
+               ret = PTR_ERR(file);
+               if (ret == -EAGAIN && force_nonblock)
+                       return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
-               req_set_fail_links(req);
+               req_set_fail(req);
+       } else if (!fixed) {
+               fd_install(fd, file);
+               ret = fd;
+       } else {
+               ret = io_install_fixed_file(req, file, issue_flags,
+                                           accept->file_slot - 1);
        }
-       __io_req_complete(req, ret, 0, cs);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
+static int io_connect_prep_async(struct io_kiocb *req)
+{
+       struct io_async_connect *io = req->async_data;
+       struct io_connect *conn = &req->connect;
+
+       return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
+}
+
 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_connect *conn = &req->connect;
-       struct io_async_connect *io = req->async_data;
 
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
            sqe->splice_fd_in)
@@ -4800,20 +5097,15 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        conn->addr_len =  READ_ONCE(sqe->addr2);
-
-       if (!io)
-               return 0;
-
-       return move_addr_to_kernel(conn->addr, conn->addr_len,
-                                       &io->address);
+       return 0;
 }
 
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_connect __io, *io;
        unsigned file_flags;
        int ret;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
        if (req->async_data) {
                io = req->async_data;
@@ -4837,7 +5129,6 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock,
                        ret = -ENOMEM;
                        goto out;
                }
-               io = req->async_data;
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
@@ -4845,248 +5136,352 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock,
                ret = -EINTR;
 out:
        if (ret < 0)
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 #else /* !CONFIG_NET */
-static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+#define IO_NETOP_FN(op)                                                        \
+static int io_##op(struct io_kiocb *req, unsigned int issue_flags)     \
+{                                                                      \
+       return -EOPNOTSUPP;                                             \
+}
+
+#define IO_NETOP_PREP(op)                                              \
+IO_NETOP_FN(op)                                                                \
+static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
+{                                                                      \
+       return -EOPNOTSUPP;                                             \
+}                                                                      \
+
+#define IO_NETOP_PREP_ASYNC(op)                                                \
+IO_NETOP_PREP(op)                                                      \
+static int io_##op##_prep_async(struct io_kiocb *req)                  \
+{                                                                      \
+       return -EOPNOTSUPP;                                             \
+}
+
+IO_NETOP_PREP_ASYNC(sendmsg);
+IO_NETOP_PREP_ASYNC(recvmsg);
+IO_NETOP_PREP_ASYNC(connect);
+IO_NETOP_PREP(accept);
+IO_NETOP_FN(send);
+IO_NETOP_FN(recv);
+#endif /* CONFIG_NET */
+
+struct io_poll_table {
+       struct poll_table_struct pt;
+       struct io_kiocb *req;
+       int nr_entries;
+       int error;
+};
+
+#define IO_POLL_CANCEL_FLAG    BIT(31)
+#define IO_POLL_RETRY_FLAG     BIT(30)
+#define IO_POLL_REF_MASK       GENMASK(29, 0)
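+/* low 30 bits count ownership refs, the top two bits flag retry/cancel */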
+
+/*
+ * We usually have 1-2 refs taken; 128 is more than enough, and we want to
+ * maximise the margin between this amount and the point where the counter
+ * overflows.
+ */
+#define IO_POLL_REF_BIAS       128
+
+static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       int v;
+
+       /*
+        * poll_refs are already elevated and we don't have much hope for
+        * grabbing the ownership. Instead of incrementing, set a retry flag
+        * to notify the loop that there might have been some change.
+        */
+       v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
+       if (v & IO_POLL_REF_MASK)
+               return false;
+       return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 }
 
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+/*
+ * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
+ * free and we can bump the count to acquire ownership. Modifying a request
+ * without owning it is disallowed, which prevents races when enqueueing
+ * task_work and between arming poll and wakeups.
+ */
+static inline bool io_poll_get_ownership(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
+               return io_poll_get_ownership_slowpath(req);
+       return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 }
 
-static int io_send(struct io_kiocb *req, bool force_nonblock,
-                  struct io_comp_state *cs)
+static void io_poll_mark_cancelled(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
 }
 
-static int io_recvmsg_prep(struct io_kiocb *req,
-                          const struct io_uring_sqe *sqe)
+static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return req->async_data;
+       return req->apoll->double_poll;
 }
 
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return &req->poll;
+       return &req->apoll->poll;
 }
 
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
-                  struct io_comp_state *cs)
+static void io_poll_req_insert(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct hlist_head *list;
+
+       list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
+       hlist_add_head(&req->hash_node, list);
 }
 
-static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
+                             wait_queue_func_t wake_func)
 {
-       return -EOPNOTSUPP;
+       poll->head = NULL;
+#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
+       /* mask in events that we always want/need */
+       poll->events = events | IO_POLL_UNMASK;
+       INIT_LIST_HEAD(&poll->wait.entry);
+       init_waitqueue_func_entry(&poll->wait, wake_func);
 }
 
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
-                    struct io_comp_state *cs)
+static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
 {
-       return -EOPNOTSUPP;
+       struct wait_queue_head *head = smp_load_acquire(&poll->head);
+
+       if (head) {
+               spin_lock_irq(&head->lock);
+               list_del_init(&poll->wait.entry);
+               poll->head = NULL;
+               spin_unlock_irq(&head->lock);
+       }
 }
 
-static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_poll_remove_entries(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
+       struct io_poll_iocb *poll = io_poll_get_single(req);
+       struct io_poll_iocb *poll_double = io_poll_get_double(req);
+
+       /*
+        * While we hold the waitqueue lock and the waitqueue is nonempty,
+        * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+        * lock in the first place can race with the waitqueue being freed.
+        *
+        * We solve this as eventpoll does: by taking advantage of the fact that
+        * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+        * we enter rcu_read_lock() and see that the pointer to the queue is
+        * non-NULL, we can then lock it without the memory being freed out from
+        * under us.
+        *
+        * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+        * case the caller deletes the entry from the queue, leaving it empty.
+        * In that case, only RCU prevents the queue memory from being freed.
+        */
+       rcu_read_lock();
+       io_poll_remove_entry(poll);
+       if (poll_double)
+               io_poll_remove_entry(poll_double);
+       rcu_read_unlock();
 }
 
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
-                     struct io_comp_state *cs)
+/*
+ * All poll tw should go through this. Checks for poll events, manages
+ * references, does rewait, etc.
+ *
+ * Returns a negative error on failure. >0 when no action is required, which is
+ * either a spurious wakeup or a multishot CQE was served. 0 when the request is
+ * done, in which case the mask is stored in req->result.
+ */
+static int io_poll_check_events(struct io_kiocb *req)
 {
-       return -EOPNOTSUPP;
-}
-#endif /* CONFIG_NET */
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_poll_iocb *poll = io_poll_get_single(req);
+       int v;
 
-struct io_poll_table {
-       struct poll_table_struct pt;
-       struct io_kiocb *req;
-       int nr_entries;
-       int error;
-};
+       /* req->task == current here, checking PF_EXITING is safe */
+       if (unlikely(req->task->flags & PF_EXITING))
+               io_poll_mark_cancelled(req);
 
-static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
-                          __poll_t mask, task_work_func_t func)
-{
-       bool twa_signal_ok;
-       int ret;
+       do {
+               v = atomic_read(&req->poll_refs);
 
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & poll->events))
-               return 0;
+               /* tw handler should be the owner, and so have some references */
+               if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
+                       return 0;
+               if (v & IO_POLL_CANCEL_FLAG)
+                       return -ECANCELED;
+               /*
+                * cqe.res contains only events of the first wake up
+                * and all others are lost. Redo vfs_poll() to get
+                * up to date state.
+                */
+               if ((v & IO_POLL_REF_MASK) != 1)
+                       req->result = 0;
+               if (v & IO_POLL_RETRY_FLAG) {
+                       req->result = 0;
+                       /*
+                        * We won't find new events that came in between
+                        * vfs_poll and the ref put unless we clear the
+                        * flag in advance.
+                        */
+                       atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+                       v &= ~IO_POLL_RETRY_FLAG;
+               }
 
-       trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
+               if (!req->result) {
+                       struct poll_table_struct pt = { ._key = poll->events };
 
-       list_del_init(&poll->wait.entry);
+                       req->result = vfs_poll(req->file, &pt) & poll->events;
+               }
 
-       req->result = mask;
-       init_task_work(&req->task_work, func);
-       percpu_ref_get(&req->ctx->refs);
+               /* multishot, just fill a CQE and proceed */
+               if (req->result && !(poll->events & EPOLLONESHOT)) {
+                       __poll_t mask = mangle_poll(req->result & poll->events);
+                       bool filled;
 
-       /*
-        * If we using the signalfd wait_queue_head for this wakeup, then
-        * it's not safe to use TWA_SIGNAL as we could be recursing on the
-        * tsk->sighand->siglock on doing the wakeup. Should not be needed
-        * either, as the normal wakeup will suffice.
-        */
-       twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
+                       spin_lock(&ctx->completion_lock);
+                       filled = io_fill_cqe_aux(ctx, req->user_data, mask,
+                                                IORING_CQE_F_MORE);
+                       io_commit_cqring(ctx);
+                       spin_unlock(&ctx->completion_lock);
+                       if (unlikely(!filled))
+                               return -ECANCELED;
+                       io_cqring_ev_posted(ctx);
+               } else if (req->result) {
+                       return 0;
+               }
 
-       /*
-        * If this fails, then the task is exiting. When a task exits, the
-        * work gets canceled, so just cancel this request as well instead
-        * of executing it. We can't safely execute it anyway, as we may not
-        * have the needed state needed for it anyway.
-        */
-       ret = io_req_task_work_add(req, twa_signal_ok);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
+               /* force the next iteration to vfs_poll() */
+               req->result = 0;
+
+               /*
+                * Release all references, retry if someone tried to restart
+                * task_work while we were executing it.
+                */
+       } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
+                                       IO_POLL_REF_MASK);
 
-               WRITE_ONCE(poll->canceled, true);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
        return 1;
 }
 
-static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
-       __acquires(&req->ctx->completion_lock)
+static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       int ret;
 
-       if (!req->result && !READ_ONCE(poll->canceled)) {
-               struct poll_table_struct pt = { ._key = poll->events };
-
-               req->result = vfs_poll(req->file, &pt) & poll->events;
-       }
+       ret = io_poll_check_events(req);
+       if (ret > 0)
+               return;
 
-       spin_lock_irq(&ctx->completion_lock);
-       if (!req->result && !READ_ONCE(poll->canceled)) {
-               add_wait_queue(poll->head, &poll->wait);
-               return true;
+       if (!ret) {
+               req->result = mangle_poll(req->result & req->poll.events);
+       } else {
+               req->result = ret;
+               req_set_fail(req);
        }
 
-       return false;
-}
-
-static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
-{
-       /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
-       if (req->opcode == IORING_OP_POLL_ADD)
-               return req->async_data;
-       return req->apoll->double_poll;
-}
-
-static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
-{
-       if (req->opcode == IORING_OP_POLL_ADD)
-               return &req->poll;
-       return &req->apoll->poll;
+       io_poll_remove_entries(req);
+       spin_lock(&ctx->completion_lock);
+       hash_del(&req->hash_node);
+       spin_unlock(&ctx->completion_lock);
+       io_req_complete_post(req, req->result, 0);
 }
 
-static void io_poll_remove_double(struct io_kiocb *req)
+static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 {
-       struct io_poll_iocb *poll = io_poll_get_double(req);
+       struct io_ring_ctx *ctx = req->ctx;
+       int ret;
 
-       lockdep_assert_held(&req->ctx->completion_lock);
+       ret = io_poll_check_events(req);
+       if (ret > 0)
+               return;
 
-       if (poll && poll->head) {
-               struct wait_queue_head *head = poll->head;
+       io_poll_remove_entries(req);
+       spin_lock(&ctx->completion_lock);
+       hash_del(&req->hash_node);
+       spin_unlock(&ctx->completion_lock);
 
-               spin_lock(&head->lock);
-               list_del_init(&poll->wait.entry);
-               if (poll->wait.private)
-                       refcount_dec(&req->refs);
-               poll->head = NULL;
-               spin_unlock(&head->lock);
-       }
+       if (!ret)
+               io_req_task_submit(req, locked);
+       else
+               io_req_complete_failed(req, ret);
 }
 
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
+static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
-       struct io_ring_ctx *ctx = req->ctx;
+       req->result = mask;
+       if (req->opcode == IORING_OP_POLL_ADD)
+               req->io_task_work.func = io_poll_task_func;
+       else
+               req->io_task_work.func = io_apoll_task_func;
 
-       io_poll_remove_double(req);
-       req->poll.done = true;
-       io_cqring_fill_event(req, error ? error : mangle_poll(mask));
-       io_commit_cqring(ctx);
+       trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
+       io_req_task_work_add(req);
 }
 
-static void io_poll_task_func(struct callback_head *cb)
+static inline void io_poll_execute(struct io_kiocb *req, int res)
 {
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_kiocb *nxt;
-
-       if (io_poll_rewait(req, &req->poll)) {
-               spin_unlock_irq(&ctx->completion_lock);
-       } else {
-               hash_del(&req->hash_node);
-               io_poll_complete(req, req->result, 0);
-               spin_unlock_irq(&ctx->completion_lock);
-
-               nxt = io_put_req_find_next(req);
-               io_cqring_ev_posted(ctx);
-               if (nxt)
-                       __io_req_task_submit(nxt);
-       }
+       if (io_poll_get_ownership(req))
+               __io_poll_execute(req, res);
+}
 
-       percpu_ref_put(&ctx->refs);
+static void io_poll_cancel_req(struct io_kiocb *req)
+{
+       io_poll_mark_cancelled(req);
+       /* kick tw, which should complete the request */
+       io_poll_execute(req, 0);
 }
 
-static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
-                              int sync, void *key)
+static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+                       void *key)
 {
        struct io_kiocb *req = wait->private;
-       struct io_poll_iocb *poll = io_poll_get_single(req);
+       struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
+                                                wait);
        __poll_t mask = key_to_poll(key);
 
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & poll->events))
-               return 0;
+       if (unlikely(mask & POLLFREE)) {
+               io_poll_mark_cancelled(req);
+               /* we have to kick tw in case it's not already queued */
+               io_poll_execute(req, 0);
 
-       list_del_init(&wait->entry);
+               /*
+                * If the waitqueue is being freed early but someone already
+                * holds ownership over it, we have to tear down the request as
+                * best we can. That means immediately removing the request from
+                * its waitqueue and preventing all further accesses to the
+                * waitqueue via the request.
+                */
+               list_del_init(&poll->wait.entry);
 
-       if (poll && poll->head) {
-               bool done;
-
-               spin_lock(&poll->head->lock);
-               done = list_empty(&poll->wait.entry);
-               if (!done)
-                       list_del_init(&poll->wait.entry);
-               /* make sure double remove sees this as being gone */
-               wait->private = NULL;
-               spin_unlock(&poll->head->lock);
-               if (!done) {
-                       /* use wait func handler, so it matches the rq type */
-                       poll->wait.func(&poll->wait, mode, sync, key);
-               }
+               /*
+                * Careful: this *must* be the last step, since as soon
+                * as poll->head is NULL'ed out, the request can be
+                * completed and freed, since the poll completion side
+                * will no longer need to take the waitqueue lock.
+                */
+               smp_store_release(&poll->head, NULL);
+               return 1;
        }
-       refcount_dec(&req->refs);
+
+       /* for instances that support it check for an event match first */
+       if (mask && !(mask & poll->events))
+               return 0;
+
+       if (io_poll_get_ownership(req))
+               __io_poll_execute(req, mask);
        return 1;
 }
 
-static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
-                             wait_queue_func_t wake_func)
-{
-       poll->head = NULL;
-       poll->done = false;
-       poll->canceled = false;
-       poll->events = events;
-       INIT_LIST_HEAD(&poll->wait.entry);
-       init_waitqueue_func_entry(&poll->wait, wake_func);
-}
-
 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll_iocb **poll_ptr)
@@ -5099,29 +5494,31 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
-               struct io_poll_iocb *poll_one = poll;
+               struct io_poll_iocb *first = poll;
 
+               /* double add on the same waitqueue head, ignore */
+               if (first->head == head)
+                       return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
+                       if ((*poll_ptr)->head == head)
+                               return;
                        pt->error = -EINVAL;
                        return;
                }
-               /* double add on the same waitqueue head, ignore */
-               if (poll->head == head)
-                       return;
+
                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }
-               io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
-               refcount_inc(&req->refs);
-               poll->wait.private = req;
+               io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
        }
 
        pt->nr_entries++;
        poll->head = head;
+       poll->wait.private = req;
 
        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
@@ -5129,83 +5526,23 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                add_wait_queue(head, &poll->wait);
 }
 
-static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
 {
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
-       struct async_poll *apoll = pt->req->apoll;
-
-       __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
-}
-
-static void io_async_task_func(struct callback_head *cb)
-{
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-       struct async_poll *apoll = req->apoll;
-       struct io_ring_ctx *ctx = req->ctx;
-
-       trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
-
-       if (io_poll_rewait(req, &apoll->poll)) {
-               spin_unlock_irq(&ctx->completion_lock);
-               percpu_ref_put(&ctx->refs);
-               return;
-       }
-
-       /* If req is still hashed, it cannot have been canceled. Don't check. */
-       if (hash_hashed(&req->hash_node))
-               hash_del(&req->hash_node);
-
-       io_poll_remove_double(req);
-       spin_unlock_irq(&ctx->completion_lock);
-
-       if (!READ_ONCE(apoll->poll.canceled))
-               __io_req_task_submit(req);
-       else
-               __io_req_task_cancel(req, -ECANCELED);
 
-       percpu_ref_put(&ctx->refs);
-       kfree(apoll->double_poll);
-       kfree(apoll);
-}
-
-static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-                       void *key)
-{
-       struct io_kiocb *req = wait->private;
-       struct io_poll_iocb *poll = &req->apoll->poll;
-
-       trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
-                                       key_to_poll(key));
-
-       return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
-}
-
-static void io_poll_req_insert(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-       struct hlist_head *list;
-
-       list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
-       hlist_add_head(&req->hash_node, list);
+       __io_queue_proc(&pt->req->poll, pt, head,
+                       (struct io_poll_iocb **) &pt->req->async_data);
 }
 
-static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
-                                     struct io_poll_iocb *poll,
-                                     struct io_poll_table *ipt, __poll_t mask,
-                                     wait_queue_func_t wake_func)
-       __acquires(&ctx->completion_lock)
+static int __io_arm_poll_handler(struct io_kiocb *req,
+                                struct io_poll_iocb *poll,
+                                struct io_poll_table *ipt, __poll_t mask)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       bool cancel = false;
-
-       if (req->file->f_op->may_pollfree) {
-               spin_lock_irq(&ctx->completion_lock);
-               return -EOPNOTSUPP;
-       }
 
        INIT_HLIST_NODE(&req->hash_node);
-       io_init_poll_iocb(poll, mask, wake_func);
+       io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
        poll->wait.private = req;
 
@@ -5214,169 +5551,138 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
        ipt->error = 0;
        ipt->nr_entries = 0;
 
+       /*
+        * Take the ownership to delay any tw execution up until we're done
+        * Take ownership to delay any tw execution until we're done with
+        * poll arming; see io_poll_get_ownership().
+       atomic_set(&req->poll_refs, 1);
        mask = vfs_poll(req->file, &ipt->pt) & poll->events;
-       if (unlikely(!ipt->nr_entries) && !ipt->error)
-               ipt->error = -EINVAL;
-
-       spin_lock_irq(&ctx->completion_lock);
-       if (ipt->error)
-               io_poll_remove_double(req);
-       if (likely(poll->head)) {
-               spin_lock(&poll->head->lock);
-               if (unlikely(list_empty(&poll->wait.entry))) {
-                       if (ipt->error)
-                               cancel = true;
+
+       if (mask && (poll->events & EPOLLONESHOT)) {
+               io_poll_remove_entries(req);
+               /* no one else has access to the req, forget about the ref */
+               return mask;
+       }
+       if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
+               io_poll_remove_entries(req);
+               if (!ipt->error)
+                       ipt->error = -EINVAL;
+               return 0;
+       }
+
+       spin_lock(&ctx->completion_lock);
+       io_poll_req_insert(req);
+       spin_unlock(&ctx->completion_lock);
+
+       if (mask) {
+               /* can't multishot if failed, just queue the event we've got */
+               if (unlikely(ipt->error || !ipt->nr_entries)) {
+                       poll->events |= EPOLLONESHOT;
                        ipt->error = 0;
-                       mask = 0;
                }
-               if (mask || ipt->error)
-                       list_del_init(&poll->wait.entry);
-               else if (cancel)
-                       WRITE_ONCE(poll->canceled, true);
-               else if (!poll->done) /* actually waiting for an event */
-                       io_poll_req_insert(req);
-               spin_unlock(&poll->head->lock);
+               __io_poll_execute(req, mask);
+               return 0;
        }
 
-       return mask;
+       /*
+        * Try to release ownership. If we see a change of state, e.g.
+        * poll was woken up, queue up a tw; it'll deal with it.
+        */
+       if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
+               __io_poll_execute(req, 0);
+       return 0;
+}
+
+static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+                              struct poll_table_struct *p)
+{
+       struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+       struct async_poll *apoll = pt->req->apoll;
+
+       __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
-static bool io_arm_poll_handler(struct io_kiocb *req)
+enum {
+       IO_APOLL_OK,
+       IO_APOLL_ABORTED,
+       IO_APOLL_READY
+};
+
+static int io_arm_poll_handler(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
        struct async_poll *apoll;
        struct io_poll_table ipt;
-       __poll_t mask, ret;
-       int rw;
+       __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
+       int ret;
 
        if (!req->file || !file_can_poll(req->file))
-               return false;
+               return IO_APOLL_ABORTED;
        if (req->flags & REQ_F_POLLED)
-               return false;
-       if (def->pollin)
-               rw = READ;
-       else if (def->pollout)
-               rw = WRITE;
-       else
-               return false;
-       /* if we can't nonblock try, then no point in arming a poll handler */
-       if (!io_file_supports_async(req->file, rw))
-               return false;
+               return IO_APOLL_ABORTED;
+       if (!def->pollin && !def->pollout)
+               return IO_APOLL_ABORTED;
+
+       if (def->pollin) {
+               mask |= POLLIN | POLLRDNORM;
+
+               /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
+               if ((req->opcode == IORING_OP_RECVMSG) &&
+                   (req->sr_msg.msg_flags & MSG_ERRQUEUE))
+                       mask &= ~POLLIN;
+       } else {
+               mask |= POLLOUT | POLLWRNORM;
+       }
 
        apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
        if (unlikely(!apoll))
-               return false;
+               return IO_APOLL_ABORTED;
        apoll->double_poll = NULL;
-
-       req->flags |= REQ_F_POLLED;
        req->apoll = apoll;
-
-       mask = 0;
-       if (def->pollin)
-               mask |= POLLIN | POLLRDNORM;
-       if (def->pollout)
-               mask |= POLLOUT | POLLWRNORM;
-
-       /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
-       if ((req->opcode == IORING_OP_RECVMSG) &&
-           (req->sr_msg.msg_flags & MSG_ERRQUEUE))
-               mask &= ~POLLIN;
-
-       mask |= POLLERR | POLLPRI;
-
+       req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;
 
-       ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
-                                       io_async_wake);
-       if (ret || ipt.error) {
-               io_poll_remove_double(req);
-               spin_unlock_irq(&ctx->completion_lock);
-               kfree(apoll->double_poll);
-               kfree(apoll);
-               return false;
-       }
-       spin_unlock_irq(&ctx->completion_lock);
-       trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
-                                       apoll->poll.events);
-       return true;
-}
-
-static bool __io_poll_remove_one(struct io_kiocb *req,
-                                struct io_poll_iocb *poll)
-{
-       bool do_complete = false;
-
-       spin_lock(&poll->head->lock);
-       WRITE_ONCE(poll->canceled, true);
-       if (!list_empty(&poll->wait.entry)) {
-               list_del_init(&poll->wait.entry);
-               do_complete = true;
-       }
-       spin_unlock(&poll->head->lock);
-       hash_del(&req->hash_node);
-       return do_complete;
-}
-
-static bool io_poll_remove_one(struct io_kiocb *req)
-{
-       bool do_complete;
-
-       io_poll_remove_double(req);
-
-       if (req->opcode == IORING_OP_POLL_ADD) {
-               do_complete = __io_poll_remove_one(req, &req->poll);
-       } else {
-               struct async_poll *apoll = req->apoll;
-
-               /* non-poll requests have submit ref still */
-               do_complete = __io_poll_remove_one(req, &apoll->poll);
-               if (do_complete) {
-                       io_put_req(req);
-                       kfree(apoll->double_poll);
-                       kfree(apoll);
-               }
-       }
-
-       if (do_complete) {
-               io_cqring_fill_event(req, -ECANCELED);
-               io_commit_cqring(req->ctx);
-               req_set_fail_links(req);
-               io_put_req_deferred(req, 1);
-       }
+       ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
+       if (ret || ipt.error)
+               return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
 
-       return do_complete;
+       trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
+                               mask, apoll->poll.events);
+       return IO_APOLL_OK;
 }
 
 /*
  * Returns true if we found and killed one or more poll requests
  */
 static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                              struct files_struct *files)
+                              bool cancel_all)
 {
        struct hlist_node *tmp;
        struct io_kiocb *req;
-       int posted = 0, i;
+       bool found = false;
+       int i;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;
 
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-                       if (io_match_task(req, tsk, files))
-                               posted += io_poll_remove_one(req);
+                       if (io_match_task_safe(req, tsk, cancel_all)) {
+                               hlist_del_init(&req->hash_node);
+                               io_poll_cancel_req(req);
+                               found = true;
+                       }
                }
        }
-       spin_unlock_irq(&ctx->completion_lock);
-
-       if (posted)
-               io_cqring_ev_posted(ctx);
-
-       return posted != 0;
+       spin_unlock(&ctx->completion_lock);
+       return found;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
+                                    bool poll_only)
+       __must_hold(&ctx->completion_lock)
 {
        struct hlist_head *list;
        struct io_kiocb *req;
@@ -5385,107 +5691,161 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
        hlist_for_each_entry(req, list, hash_node) {
                if (sqe_addr != req->user_data)
                        continue;
-               if (io_poll_remove_one(req))
-                       return 0;
-               return -EALREADY;
+               if (poll_only && req->opcode != IORING_OP_POLL_ADD)
+                       continue;
+               return req;
        }
-
-       return -ENOENT;
+       return NULL;
 }
 
-static int io_poll_remove_prep(struct io_kiocb *req,
-                              const struct io_uring_sqe *sqe)
+static bool io_poll_disarm(struct io_kiocb *req)
+       __must_hold(&ctx->completion_lock)
 {
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
-           sqe->poll_events)
-               return -EINVAL;
-
-       req->poll.addr = READ_ONCE(sqe->addr);
-       return 0;
+       if (!io_poll_get_ownership(req))
+               return false;
+       io_poll_remove_entries(req);
+       hash_del(&req->hash_node);
+       return true;
 }
 
-/*
- * Find a running poll command that matches one specified in sqe->addr,
- * and remove it if found.
- */
-static int io_poll_remove(struct io_kiocb *req)
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
+                         bool poll_only)
+       __must_hold(&ctx->completion_lock)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-       u64 addr;
-       int ret;
+       struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
 
-       addr = req->poll.addr;
-       spin_lock_irq(&ctx->completion_lock);
-       ret = io_poll_cancel(ctx, addr);
-       spin_unlock_irq(&ctx->completion_lock);
-
-       if (ret < 0)
-               req_set_fail_links(req);
-       io_req_complete(req, ret);
+       if (!req)
+               return -ENOENT;
+       io_poll_cancel_req(req);
        return 0;
 }
 
-static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-                       void *key)
+static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
+                                    unsigned int flags)
 {
-       struct io_kiocb *req = wait->private;
-       struct io_poll_iocb *poll = &req->poll;
+       u32 events;
 
-       return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
+       events = READ_ONCE(sqe->poll32_events);
+#ifdef __BIG_ENDIAN
+       events = swahw32(events);
+#endif
+       if (!(flags & IORING_POLL_ADD_MULTI))
+               events |= EPOLLONESHOT;
+       return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
 }
 
-static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
-                              struct poll_table_struct *p)
+static int io_poll_update_prep(struct io_kiocb *req,
+                              const struct io_uring_sqe *sqe)
 {
-       struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+       struct io_poll_update *upd = &req->poll_update;
+       u32 flags;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+               return -EINVAL;
+       flags = READ_ONCE(sqe->len);
+       if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
+                     IORING_POLL_ADD_MULTI))
+               return -EINVAL;
+       /* meaningless without update */
+       if (flags == IORING_POLL_ADD_MULTI)
+               return -EINVAL;
+
+       upd->old_user_data = READ_ONCE(sqe->addr);
+       upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
+       upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
+
+       upd->new_user_data = READ_ONCE(sqe->off);
+       if (!upd->update_user_data && upd->new_user_data)
+               return -EINVAL;
+       if (upd->update_events)
+               upd->events = io_poll_parse_events(sqe, flags);
+       else if (sqe->poll32_events)
+               return -EINVAL;
 
-       __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
+       return 0;
 }
 
 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_poll_iocb *poll = &req->poll;
-       u32 events;
+       u32 flags;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
+               return -EINVAL;
+       flags = READ_ONCE(sqe->len);
+       if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
 
-       events = READ_ONCE(sqe->poll32_events);
-#ifdef __BIG_ENDIAN
-       events = swahw32(events);
-#endif
-       poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
-                      (events & EPOLLEXCLUSIVE);
+       io_req_set_refcount(req);
+       poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
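
io_poll_add_prep() above reads the request flags from sqe->len and the poll mask from sqe->poll32_events, and forces EPOLLONESHOT unless IORING_POLL_ADD_MULTI is set. A hedged liburing sketch of arming a multishot poll from userspace (it assumes headers and a kernel with this support; newer liburing also ships an io_uring_prep_poll_multishot() helper that sets the same flag):

/* Build: gcc poll_mshot.c -o poll_mshot -luring */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_add(sqe, STDIN_FILENO, POLLIN);
        /* io_poll_add_prep() reads these flags from sqe->len; without
         * IORING_POLL_ADD_MULTI the kernel implies EPOLLONESHOT */
        sqe->len = IORING_POLL_ADD_MULTI;
        sqe->user_data = 0x1234;
        io_uring_submit(&ring);

        /* one armed poll, several completions while the flag says "more" */
        for (i = 0; i < 3; i++) {
                if (io_uring_wait_cqe(&ring, &cqe))
                        break;
                printf("poll res %d, still armed: %d\n", cqe->res,
                       !!(cqe->flags & IORING_CQE_F_MORE));
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}
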
 
-static int io_poll_add(struct io_kiocb *req)
+static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_poll_iocb *poll = &req->poll;
-       struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_table ipt;
-       __poll_t mask;
+       int ret;
 
        ipt.pt._qproc = io_poll_queue_proc;
 
-       mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
-                                       io_poll_wake);
+       ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
+       if (!ret && ipt.error)
+               req_set_fail(req);
+       ret = ret ?: ipt.error;
+       if (ret)
+               __io_req_complete(req, issue_flags, ret, 0);
+       return 0;
+}
 
-       if (mask) { /* no async, we'd stolen it */
-               ipt.error = 0;
-               io_poll_complete(req, mask, 0);
+static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_kiocb *preq;
+       int ret2, ret = 0;
+
+       spin_lock(&ctx->completion_lock);
+       preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
+       if (!preq || !io_poll_disarm(preq)) {
+               spin_unlock(&ctx->completion_lock);
+               ret = preq ? -EALREADY : -ENOENT;
+               goto out;
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
 
-       if (mask) {
-               io_cqring_ev_posted(ctx);
-               io_put_req(req);
+       if (req->poll_update.update_events || req->poll_update.update_user_data) {
+               /* only replace the event mask bits, keep the behavior flags */
+               if (req->poll_update.update_events) {
+                       preq->poll.events &= ~0xffff;
+                       preq->poll.events |= req->poll_update.events & 0xffff;
+                       preq->poll.events |= IO_POLL_UNMASK;
+               }
+               if (req->poll_update.update_user_data)
+                       preq->user_data = req->poll_update.new_user_data;
+
+               ret2 = io_poll_add(preq, issue_flags);
+               /* successfully updated, don't complete poll request */
+               if (!ret2)
+                       goto out;
        }
-       return ipt.error;
+       req_set_fail(preq);
+       io_req_complete(preq, -ECANCELED);
+out:
+       if (ret < 0)
+               req_set_fail(req);
+       /* complete update request, we're done with it */
+       io_req_complete(req, ret);
+       return 0;
+}
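
With the prep/update pair above, IORING_OP_POLL_REMOVE doubles as a poll update: the target is looked up by the user_data in sqe->addr, the update flags come from sqe->len, a replacement user_data from sqe->off and replacement events from sqe->poll32_events. A raw-SQE sketch of that layout (liburing's io_uring_prep_poll_update() helper covers the same fields but its signature has varied across releases, so they are set directly here; a big-endian host would also need the swahw32() handling the kernel applies to poll32_events):

#include <liburing.h>
#include <string.h>

/*
 * Sketch of the SQE layout io_poll_update_prep() parses: switch an
 * armed poll request over to new events and a new user_data.
 */
void prep_poll_update_sqe(struct io_uring_sqe *sqe,
                          __u64 old_udata, __u64 new_udata,
                          unsigned int new_events)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_POLL_REMOVE;
        sqe->fd = -1;
        sqe->addr = old_udata;          /* which poll request to update */
        sqe->off = new_udata;           /* its new user_data */
        sqe->poll32_events = new_events;        /* e.g. POLLOUT */
        sqe->len = IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA;
        sqe->user_data = 0xfeed;        /* completion for the update itself */
}

The update request's own CQE then carries 0 on success, -ENOENT if no matching request was found, or -EALREADY if the target was already completing.
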
+
+static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
+{
+       req_set_fail(req);
+       io_req_complete_post(req, -ETIME, 0);
 }
 
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
@@ -5496,88 +5856,182 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock_irqsave(&ctx->timeout_lock, flags);
        list_del_init(&req->timeout.list);
        atomic_set(&req->ctx->cq_timeouts,
                atomic_read(&req->ctx->cq_timeouts) + 1);
+       spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-       io_cqring_fill_event(req, -ETIME);
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
-       io_cqring_ev_posted(ctx);
-       req_set_fail_links(req);
-       io_put_req(req);
+       req->io_task_work.func = io_req_task_timeout;
+       io_req_task_work_add(req);
        return HRTIMER_NORESTART;
 }
 
-static int __io_timeout_cancel(struct io_kiocb *req)
+static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
+                                          __u64 user_data)
+       __must_hold(&ctx->timeout_lock)
 {
-       struct io_timeout_data *io = req->async_data;
-       int ret;
+       struct io_timeout_data *io;
+       struct io_kiocb *req;
+       bool found = false;
 
-       ret = hrtimer_try_to_cancel(&io->timer);
-       if (ret == -1)
-               return -EALREADY;
+       list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
+               found = user_data == req->user_data;
+               if (found)
+                       break;
+       }
+       if (!found)
+               return ERR_PTR(-ENOENT);
+
+       io = req->async_data;
+       if (hrtimer_try_to_cancel(&io->timer) == -1)
+               return ERR_PTR(-EALREADY);
        list_del_init(&req->timeout.list);
+       return req;
+}
+
+static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+       __must_hold(&ctx->completion_lock)
+       __must_hold(&ctx->timeout_lock)
+{
+       struct io_kiocb *req = io_timeout_extract(ctx, user_data);
 
-       req_set_fail_links(req);
-       io_cqring_fill_event(req, -ECANCELED);
-       io_put_req_deferred(req, 1);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req_set_fail(req);
+       io_fill_cqe_req(req, -ECANCELED, 0);
+       io_put_req_deferred(req);
        return 0;
 }
 
-static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
+{
+       switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
+       case IORING_TIMEOUT_BOOTTIME:
+               return CLOCK_BOOTTIME;
+       case IORING_TIMEOUT_REALTIME:
+               return CLOCK_REALTIME;
+       default:
+               /* can't happen, vetted at prep time */
+               WARN_ON_ONCE(1);
+               fallthrough;
+       case 0:
+               return CLOCK_MONOTONIC;
+       }
+}
+
+static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
+                                   struct timespec64 *ts, enum hrtimer_mode mode)
+       __must_hold(&ctx->timeout_lock)
 {
+       struct io_timeout_data *io;
        struct io_kiocb *req;
-       int ret = -ENOENT;
+       bool found = false;
 
-       list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
-               if (user_data == req->user_data) {
-                       ret = 0;
+       list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
+               found = user_data == req->user_data;
+               if (found)
                        break;
-               }
        }
+       if (!found)
+               return -ENOENT;
 
-       if (ret == -ENOENT)
-               return ret;
+       io = req->async_data;
+       if (hrtimer_try_to_cancel(&io->timer) == -1)
+               return -EALREADY;
+       hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
+       io->timer.function = io_link_timeout_fn;
+       hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
+       return 0;
+}
 
-       return __io_timeout_cancel(req);
+static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
+                            struct timespec64 *ts, enum hrtimer_mode mode)
+       __must_hold(&ctx->timeout_lock)
+{
+       struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+       struct io_timeout_data *data;
+
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->timeout.off = 0; /* noseq */
+       data = req->async_data;
+       list_add_tail(&req->timeout.list, &ctx->timeout_list);
+       hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
+       data->timer.function = io_timeout_fn;
+       hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
+       return 0;
 }
 
 static int io_timeout_remove_prep(struct io_kiocb *req,
                                  const struct io_uring_sqe *sqe)
 {
+       struct io_timeout_rem *tr = &req->timeout_rem;
+
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags ||
-           sqe->splice_fd_in)
+       if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
+               return -EINVAL;
+
+       tr->ltimeout = false;
+       tr->addr = READ_ONCE(sqe->addr);
+       tr->flags = READ_ONCE(sqe->timeout_flags);
+       if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
+               if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
+                       return -EINVAL;
+               if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
+                       tr->ltimeout = true;
+               if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
+                       return -EINVAL;
+               if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
+                       return -EFAULT;
+       } else if (tr->flags) {
+               /* timeout removal doesn't support flags */
                return -EINVAL;
+       }
 
-       req->timeout_rem.addr = READ_ONCE(sqe->addr);
        return 0;
 }
 
+static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
+{
+       return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
+                                           : HRTIMER_MODE_REL;
+}
+
 /*
  * Remove or update an existing timeout command
  */
-static int io_timeout_remove(struct io_kiocb *req)
+static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
+       struct io_timeout_rem *tr = &req->timeout_rem;
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       spin_lock_irq(&ctx->completion_lock);
-       ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
+       if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
+               spin_lock(&ctx->completion_lock);
+               spin_lock_irq(&ctx->timeout_lock);
+               ret = io_timeout_cancel(ctx, tr->addr);
+               spin_unlock_irq(&ctx->timeout_lock);
+               spin_unlock(&ctx->completion_lock);
+       } else {
+               enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
+
+               spin_lock_irq(&ctx->timeout_lock);
+               if (tr->ltimeout)
+                       ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
+               else
+                       ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
+               spin_unlock_irq(&ctx->timeout_lock);
+       }
 
-       io_cqring_fill_event(req, ret);
-       io_commit_cqring(ctx);
-       spin_unlock_irq(&ctx->completion_lock);
-       io_cqring_ev_posted(ctx);
        if (ret < 0)
-               req_set_fail_links(req);
-       io_put_req(req);
+               req_set_fail(req);
+       io_req_complete_post(req, ret, 0);
        return 0;
 }
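
io_timeout_remove() now updates as well as removes: with IORING_TIMEOUT_UPDATE (or IORING_LINK_TIMEOUT_UPDATE for a linked timeout) in sqe->timeout_flags, the timeout identified by sqe->addr is re-armed with the timespec pointed to by sqe->addr2. A raw-SQE sketch of the layout io_timeout_remove_prep() expects; prep_timeout_update_sqe() is only an illustrative helper name:

#include <liburing.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch: push back an existing relative timeout, identified by the
 * user_data it was submitted with. The timespec is read when the kernel
 * preps the request, so keep it valid until submission has completed.
 */
void prep_timeout_update_sqe(struct io_uring_sqe *sqe, __u64 timeout_udata,
                             struct __kernel_timespec *ts)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_TIMEOUT_REMOVE;
        sqe->fd = -1;
        sqe->addr = timeout_udata;      /* which timeout to update */
        sqe->addr2 = (uintptr_t)ts;     /* new expiry */
        sqe->timeout_flags = IORING_TIMEOUT_UPDATE;
        /* add IORING_TIMEOUT_ABS for an absolute expiry, or use
         * IORING_LINK_TIMEOUT_UPDATE to target a linked timeout instead */
        sqe->user_data = 0xbeef;
}
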
 
@@ -5596,38 +6050,52 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (off && is_timeout_link)
                return -EINVAL;
        flags = READ_ONCE(sqe->timeout_flags);
-       if (flags & ~IORING_TIMEOUT_ABS)
+       if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
+               return -EINVAL;
+       /* more than one clock specified is invalid, obviously */
+       if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                return -EINVAL;
 
+       INIT_LIST_HEAD(&req->timeout.list);
        req->timeout.off = off;
+       if (unlikely(off && !req->ctx->off_timeout_used))
+               req->ctx->off_timeout_used = true;
 
        if (!req->async_data && io_alloc_async_data(req))
                return -ENOMEM;
 
        data = req->async_data;
        data->req = req;
+       data->flags = flags;
 
        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
-       if (flags & IORING_TIMEOUT_ABS)
-               data->mode = HRTIMER_MODE_ABS;
-       else
-               data->mode = HRTIMER_MODE_REL;
-
        INIT_LIST_HEAD(&req->timeout.list);
-       hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+       data->mode = io_translate_timeout_mode(flags);
+       hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
+
+       if (is_timeout_link) {
+               struct io_submit_link *link = &req->ctx->submit_state.link;
+
+               if (!link->head)
+                       return -EINVAL;
+               if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
+                       return -EINVAL;
+               req->timeout.head = link->last;
+               link->last->flags |= REQ_F_ARM_LTIMEOUT;
+       }
        return 0;
 }
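
io_timeout_prep() also gains clock selection: IORING_TIMEOUT_BOOTTIME or IORING_TIMEOUT_REALTIME in timeout_flags picks the hrtimer clock via io_timeout_get_clock(), with CLOCK_MONOTONIC remaining the default. A short liburing sketch (queue_boottime_timeout() is an illustrative name) that arms a timeout against CLOCK_BOOTTIME so time spent suspended counts toward expiry:

#include <liburing.h>

/* Arm a 5 second timeout measured on CLOCK_BOOTTIME. The timespec is
 * read while the SQE is prepped during submit, so a stack local is fine
 * for a plain (non-SQPOLL) ring. */
int queue_boottime_timeout(struct io_uring *ring)
{
        struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -1;
        io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_BOOTTIME);
        sqe->user_data = 0x7407;
        return io_uring_submit(ring);
}
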
 
-static int io_timeout(struct io_kiocb *req)
+static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data = req->async_data;
        struct list_head *entry;
        u32 tail, off = req->timeout.off;
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
 
        /*
         * sqe->off holds how many events that need to occur for this
@@ -5666,23 +6134,34 @@ add:
        list_add(&req->timeout.list, entry);
        data->timer.function = io_timeout_fn;
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock_irq(&ctx->timeout_lock);
        return 0;
 }
 
+struct io_cancel_data {
+       struct io_ring_ctx *ctx;
+       u64 user_data;
+};
+
 static bool io_cancel_cb(struct io_wq_work *work, void *data)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       struct io_cancel_data *cd = data;
 
-       return req->user_data == (unsigned long) data;
+       return req->ctx == cd->ctx && req->user_data == cd->user_data;
 }
 
-static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
+                              struct io_ring_ctx *ctx)
 {
+       struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
        enum io_wq_cancel cancel_ret;
        int ret = 0;
 
-       cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
+       if (!tctx || !tctx->io_wq)
+               return -ENOENT;
+
+       cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
@@ -5698,35 +6177,27 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
        return ret;
 }
 
-static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
-                                    struct io_kiocb *req, __u64 sqe_addr,
-                                    int success_ret)
+static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 {
-       unsigned long flags;
+       struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
-       if (ret != -ENOENT) {
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               goto done;
-       }
+       WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
-       ret = io_timeout_cancel(ctx, sqe_addr);
+       ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
        if (ret != -ENOENT)
-               goto done;
-       ret = io_poll_cancel(ctx, sqe_addr);
-done:
-       if (!ret)
-               ret = success_ret;
-       io_cqring_fill_event(req, ret);
-       io_commit_cqring(ctx);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       io_cqring_ev_posted(ctx);
+               return ret;
 
-       if (ret < 0)
-               req_set_fail_links(req);
-       io_put_req(req);
+       spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
+       ret = io_timeout_cancel(ctx, sqe_addr);
+       spin_unlock_irq(&ctx->timeout_lock);
+       if (ret != -ENOENT)
+               goto out;
+       ret = io_poll_cancel(ctx, sqe_addr, false);
+out:
+       spin_unlock(&ctx->completion_lock);
+       return ret;
 }
 
 static int io_async_cancel_prep(struct io_kiocb *req,
@@ -5744,52 +6215,72 @@ static int io_async_cancel_prep(struct io_kiocb *req,
        return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req)
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       u64 sqe_addr = req->cancel.addr;
+       struct io_tctx_node *node;
+       int ret;
+
+       ret = io_try_cancel_userdata(req, sqe_addr);
+       if (ret != -ENOENT)
+               goto done;
+
+       /* slow path, try all io-wq's */
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       ret = -ENOENT;
+       list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+               struct io_uring_task *tctx = node->task->io_uring;
 
-       io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
+               ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+               if (ret != -ENOENT)
+                       break;
+       }
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+done:
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete_post(req, ret, 0);
        return 0;
 }
 
-static int io_files_update_prep(struct io_kiocb *req,
+static int io_rsrc_update_prep(struct io_kiocb *req,
                                const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-               return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->rw_flags)
+       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
-       req->files_update.offset = READ_ONCE(sqe->off);
-       req->files_update.nr_args = READ_ONCE(sqe->len);
-       if (!req->files_update.nr_args)
+       req->rsrc_update.offset = READ_ONCE(sqe->off);
+       req->rsrc_update.nr_args = READ_ONCE(sqe->len);
+       if (!req->rsrc_update.nr_args)
                return -EINVAL;
-       req->files_update.arg = READ_ONCE(sqe->addr);
+       req->rsrc_update.arg = READ_ONCE(sqe->addr);
        return 0;
 }
 
-static int io_files_update(struct io_kiocb *req, bool force_nonblock,
-                          struct io_comp_state *cs)
+static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_uring_files_update up;
+       struct io_uring_rsrc_update2 up;
        int ret;
 
-       if (force_nonblock)
-               return -EAGAIN;
-
-       up.offset = req->files_update.offset;
-       up.fds = req->files_update.arg;
+       up.offset = req->rsrc_update.offset;
+       up.data = req->rsrc_update.arg;
+       up.nr = 0;
+       up.tags = 0;
+       up.resv = 0;
+       up.resv2 = 0;
 
-       mutex_lock(&ctx->uring_lock);
-       ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
-       mutex_unlock(&ctx->uring_lock);
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
+                                       &up, req->rsrc_update.nr_args);
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 
        if (ret < 0)
-               req_set_fail_links(req);
-       __io_req_complete(req, ret, 0, cs);
+               req_set_fail(req);
+       __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
 
@@ -5809,11 +6300,11 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_POLL_ADD:
                return io_poll_add_prep(req, sqe);
        case IORING_OP_POLL_REMOVE:
-               return io_poll_remove_prep(req, sqe);
+               return io_poll_update_prep(req, sqe);
        case IORING_OP_FSYNC:
-               return io_prep_fsync(req, sqe);
+               return io_fsync_prep(req, sqe);
        case IORING_OP_SYNC_FILE_RANGE:
-               return io_prep_sfr(req, sqe);
+               return io_sfr_prep(req, sqe);
        case IORING_OP_SENDMSG:
        case IORING_OP_SEND:
                return io_sendmsg_prep(req, sqe);
@@ -5839,7 +6330,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_CLOSE:
                return io_close_prep(req, sqe);
        case IORING_OP_FILES_UPDATE:
-               return io_files_update_prep(req, sqe);
+               return io_rsrc_update_prep(req, sqe);
        case IORING_OP_STATX:
                return io_statx_prep(req, sqe);
        case IORING_OP_FADVISE:
@@ -5858,100 +6349,131 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return io_remove_buffers_prep(req, sqe);
        case IORING_OP_TEE:
                return io_tee_prep(req, sqe);
+       case IORING_OP_SHUTDOWN:
+               return io_shutdown_prep(req, sqe);
+       case IORING_OP_RENAMEAT:
+               return io_renameat_prep(req, sqe);
+       case IORING_OP_UNLINKAT:
+               return io_unlinkat_prep(req, sqe);
        }
 
        printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
                        req->opcode);
-       return-EINVAL;
+       return -EINVAL;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req,
-                            const struct io_uring_sqe *sqe)
+static int io_req_prep_async(struct io_kiocb *req)
 {
-       if (!sqe)
+       if (!io_op_defs[req->opcode].needs_async_setup)
                return 0;
+       if (WARN_ON_ONCE(req->async_data))
+               return -EFAULT;
        if (io_alloc_async_data(req))
                return -EAGAIN;
-       return io_req_prep(req, sqe);
+
+       switch (req->opcode) {
+       case IORING_OP_READV:
+               return io_rw_prep_async(req, READ);
+       case IORING_OP_WRITEV:
+               return io_rw_prep_async(req, WRITE);
+       case IORING_OP_SENDMSG:
+               return io_sendmsg_prep_async(req);
+       case IORING_OP_RECVMSG:
+               return io_recvmsg_prep_async(req);
+       case IORING_OP_CONNECT:
+               return io_connect_prep_async(req);
+       }
+       printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
+                   req->opcode);
+       return -EFAULT;
 }
 
 static u32 io_get_sequence(struct io_kiocb *req)
 {
-       struct io_kiocb *pos;
-       struct io_ring_ctx *ctx = req->ctx;
-       u32 total_submitted, nr_reqs = 1;
+       u32 seq = req->ctx->cached_sq_head;
 
-       if (req->flags & REQ_F_LINK_HEAD)
-               list_for_each_entry(pos, &req->link_list, link_list)
-                       nr_reqs++;
-
-       total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
-       return total_submitted - nr_reqs;
+       /* need original cached_sq_head, but it was increased for each req */
+       io_for_each_link(req, req)
+               seq--;
+       return seq;
 }
 
-static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static bool io_drain_req(struct io_kiocb *req)
 {
+       struct io_kiocb *pos;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_defer_entry *de;
        int ret;
        u32 seq;
 
+       if (req->flags & REQ_F_FAIL) {
+               io_req_complete_fail_submit(req);
+               return true;
+       }
+
+       /*
+        * If we need to drain a request in the middle of a link, drain the
+        * head request and the next request/link after the current link.
+        * Since links are executed sequentially, IOSQE_IO_DRAIN semantics
+        * are maintained for every request of our link.
+        */
+       if (ctx->drain_next) {
+               req->flags |= REQ_F_IO_DRAIN;
+               ctx->drain_next = false;
+       }
+       /* not interested in head, start from the first linked */
+       io_for_each_link(pos, req->link) {
+               if (pos->flags & REQ_F_IO_DRAIN) {
+                       ctx->drain_next = true;
+                       req->flags |= REQ_F_IO_DRAIN;
+                       break;
+               }
+       }
+
        /* Still need defer if there is pending req in defer list. */
+       spin_lock(&ctx->completion_lock);
        if (likely(list_empty_careful(&ctx->defer_list) &&
-               !(req->flags & REQ_F_IO_DRAIN)))
-               return 0;
+               !(req->flags & REQ_F_IO_DRAIN))) {
+               spin_unlock(&ctx->completion_lock);
+               ctx->drain_active = false;
+               return false;
+       }
+       spin_unlock(&ctx->completion_lock);
 
        seq = io_get_sequence(req);
        /* Still a chance to pass the sequence check */
        if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
-               return 0;
+               return false;
 
-       if (!req->async_data) {
-               ret = io_req_defer_prep(req, sqe);
-               if (ret)
-                       return ret;
-       }
+       ret = io_req_prep_async(req);
+       if (ret)
+               goto fail;
        io_prep_async_link(req);
        de = kmalloc(sizeof(*de), GFP_KERNEL);
-       if (!de)
-               return -ENOMEM;
+       if (!de) {
+               ret = -ENOMEM;
+fail:
+               io_req_complete_failed(req, ret);
+               return true;
+       }
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
-               spin_unlock_irq(&ctx->completion_lock);
+               spin_unlock(&ctx->completion_lock);
                kfree(de);
-               io_queue_async_work(req);
-               return -EIOCBQUEUED;
+               io_queue_async_work(req, NULL);
+               return true;
        }
 
        trace_io_uring_defer(ctx, req, req->user_data);
        de->req = req;
        de->seq = seq;
        list_add_tail(&de->list, &ctx->defer_list);
-       spin_unlock_irq(&ctx->completion_lock);
-       return -EIOCBQUEUED;
-}
-
-static void io_req_drop_files(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_uring_task *tctx = req->task->io_uring;
-       unsigned long flags;
-
-       if (req->work.flags & IO_WQ_WORK_FILES) {
-               put_files_struct(req->work.identity->files);
-               put_nsproxy(req->work.identity->nsproxy);
-       }
-       spin_lock_irqsave(&ctx->inflight_lock, flags);
-       list_del(&req->inflight_entry);
-       spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-       req->flags &= ~REQ_F_INFLIGHT;
-       req->work.flags &= ~IO_WQ_WORK_FILES;
-       if (atomic_read(&tctx->in_idle))
-               wake_up(&tctx->wait);
+       spin_unlock(&ctx->completion_lock);
+       return true;
 }
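
io_drain_req() is what backs IOSQE_IO_DRAIN here: a drained request is parked on ctx->defer_list until everything submitted before it has completed. From userspace it is just a submission flag; a small liburing sketch (write_then_drained_fsync() is an illustrative name) that makes an fsync wait for the preceding write:

#include <liburing.h>

/*
 * Queue a write followed by an fsync flagged IOSQE_IO_DRAIN, so
 * io_drain_req() holds the fsync back until every previously submitted
 * request (the write included) has completed.
 */
int write_then_drained_fsync(struct io_uring *ring, int fd)
{
        static const char msg[] = "hello\n";
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, msg, sizeof(msg) - 1, 0);

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);
        io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);

        return io_uring_submit(ring);
}
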
 
-static void __io_clean_op(struct io_kiocb *req)
+static void io_clean_op(struct io_kiocb *req)
 {
        if (req->flags & REQ_F_BUFFER_SELECTED) {
                switch (req->opcode) {
@@ -5965,7 +6487,6 @@ static void __io_clean_op(struct io_kiocb *req)
                        kfree(req->sr_msg.kbuf);
                        break;
                }
-               req->flags &= ~REQ_F_BUFFER_SELECTED;
        }
 
        if (req->flags & REQ_F_NEED_CLEANUP) {
@@ -5977,508 +6498,589 @@ static void __io_clean_op(struct io_kiocb *req)
                case IORING_OP_WRITE_FIXED:
                case IORING_OP_WRITE: {
                        struct io_async_rw *io = req->async_data;
-                       if (io->free_iovec)
-                               kfree(io->free_iovec);
+
+                       kfree(io->free_iovec);
                        break;
                        }
                case IORING_OP_RECVMSG:
                case IORING_OP_SENDMSG: {
                        struct io_async_msghdr *io = req->async_data;
-                       if (io->iov != io->fast_iov)
-                               kfree(io->iov);
+
+                       kfree(io->free_iov);
                        break;
                        }
-               case IORING_OP_SPLICE:
-               case IORING_OP_TEE:
-                       io_put_file(req, req->splice.file_in,
-                                   (req->splice.flags & SPLICE_F_FD_IN_FIXED));
-                       break;
                case IORING_OP_OPENAT:
                case IORING_OP_OPENAT2:
                        if (req->open.filename)
                                putname(req->open.filename);
                        break;
+               case IORING_OP_RENAMEAT:
+                       putname(req->rename.oldpath);
+                       putname(req->rename.newpath);
+                       break;
+               case IORING_OP_UNLINKAT:
+                       putname(req->unlink.filename);
+                       break;
                }
-               req->flags &= ~REQ_F_NEED_CLEANUP;
        }
+       if ((req->flags & REQ_F_POLLED) && req->apoll) {
+               kfree(req->apoll->double_poll);
+               kfree(req->apoll);
+               req->apoll = NULL;
+       }
+       if (req->flags & REQ_F_INFLIGHT) {
+               struct io_uring_task *tctx = req->task->io_uring;
+
+               atomic_dec(&tctx->inflight_tracked);
+       }
+       if (req->flags & REQ_F_CREDS)
+               put_cred(req->creds);
+
+       req->flags &= ~IO_REQ_CLEAN_FLAGS;
 }
 
-static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
-                       struct io_comp_state *cs)
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       const struct cred *creds = NULL;
        int ret;
 
+       if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+               creds = override_creds(req->creds);
+
        switch (req->opcode) {
        case IORING_OP_NOP:
-               ret = io_nop(req, cs);
+               ret = io_nop(req, issue_flags);
                break;
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
        case IORING_OP_READ:
-               ret = io_read(req, force_nonblock, cs);
+               ret = io_read(req, issue_flags);
                break;
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
        case IORING_OP_WRITE:
-               ret = io_write(req, force_nonblock, cs);
+               ret = io_write(req, issue_flags);
                break;
        case IORING_OP_FSYNC:
-               ret = io_fsync(req, force_nonblock);
+               ret = io_fsync(req, issue_flags);
                break;
        case IORING_OP_POLL_ADD:
-               ret = io_poll_add(req);
+               ret = io_poll_add(req, issue_flags);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_remove(req);
+               ret = io_poll_update(req, issue_flags);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
-               ret = io_sync_file_range(req, force_nonblock);
+               ret = io_sync_file_range(req, issue_flags);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg(req, force_nonblock, cs);
+               ret = io_sendmsg(req, issue_flags);
                break;
        case IORING_OP_SEND:
-               ret = io_send(req, force_nonblock, cs);
+               ret = io_send(req, issue_flags);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg(req, force_nonblock, cs);
+               ret = io_recvmsg(req, issue_flags);
                break;
        case IORING_OP_RECV:
-               ret = io_recv(req, force_nonblock, cs);
+               ret = io_recv(req, issue_flags);
                break;
        case IORING_OP_TIMEOUT:
-               ret = io_timeout(req);
+               ret = io_timeout(req, issue_flags);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
-               ret = io_timeout_remove(req);
+               ret = io_timeout_remove(req, issue_flags);
                break;
        case IORING_OP_ACCEPT:
-               ret = io_accept(req, force_nonblock, cs);
+               ret = io_accept(req, issue_flags);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect(req, force_nonblock, cs);
+               ret = io_connect(req, issue_flags);
                break;
        case IORING_OP_ASYNC_CANCEL:
-               ret = io_async_cancel(req);
+               ret = io_async_cancel(req, issue_flags);
                break;
        case IORING_OP_FALLOCATE:
-               ret = io_fallocate(req, force_nonblock);
+               ret = io_fallocate(req, issue_flags);
                break;
        case IORING_OP_OPENAT:
-               ret = io_openat(req, force_nonblock);
+               ret = io_openat(req, issue_flags);
                break;
        case IORING_OP_CLOSE:
-               ret = io_close(req, force_nonblock, cs);
+               ret = io_close(req, issue_flags);
                break;
        case IORING_OP_FILES_UPDATE:
-               ret = io_files_update(req, force_nonblock, cs);
+               ret = io_files_update(req, issue_flags);
                break;
        case IORING_OP_STATX:
-               ret = io_statx(req, force_nonblock);
+               ret = io_statx(req, issue_flags);
                break;
        case IORING_OP_FADVISE:
-               ret = io_fadvise(req, force_nonblock);
+               ret = io_fadvise(req, issue_flags);
                break;
        case IORING_OP_MADVISE:
-               ret = io_madvise(req, force_nonblock);
+               ret = io_madvise(req, issue_flags);
                break;
        case IORING_OP_OPENAT2:
-               ret = io_openat2(req, force_nonblock);
+               ret = io_openat2(req, issue_flags);
                break;
        case IORING_OP_EPOLL_CTL:
-               ret = io_epoll_ctl(req, force_nonblock, cs);
+               ret = io_epoll_ctl(req, issue_flags);
                break;
        case IORING_OP_SPLICE:
-               ret = io_splice(req, force_nonblock);
+               ret = io_splice(req, issue_flags);
                break;
        case IORING_OP_PROVIDE_BUFFERS:
-               ret = io_provide_buffers(req, force_nonblock, cs);
+               ret = io_provide_buffers(req, issue_flags);
                break;
        case IORING_OP_REMOVE_BUFFERS:
-               ret = io_remove_buffers(req, force_nonblock, cs);
+               ret = io_remove_buffers(req, issue_flags);
                break;
        case IORING_OP_TEE:
-               ret = io_tee(req, force_nonblock);
+               ret = io_tee(req, issue_flags);
+               break;
+       case IORING_OP_SHUTDOWN:
+               ret = io_shutdown(req, issue_flags);
+               break;
+       case IORING_OP_RENAMEAT:
+               ret = io_renameat(req, issue_flags);
+               break;
+       case IORING_OP_UNLINKAT:
+               ret = io_unlinkat(req, issue_flags);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 
+       if (creds)
+               revert_creds(creds);
        if (ret)
                return ret;
-
        /* If the op doesn't have a file, we're not polling for it */
-       if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
-               const bool in_async = io_wq_current_is_worker();
-
-               /* workqueue context doesn't hold uring_lock, grab it now */
-               if (in_async)
-                       mutex_lock(&ctx->uring_lock);
-
+       if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
                io_iopoll_req_issued(req);
 
-               if (in_async)
-                       mutex_unlock(&ctx->uring_lock);
-       }
-
        return 0;
 }
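The REQ_F_CREDS/override_creds() handling at the top of io_issue_sqe() services SQEs that carry a registered personality. A hedged userspace sketch, assuming liburing; open_with_personality() is an illustrative name and error handling is abbreviated:

#include <liburing.h>
#include <fcntl.h>

static int open_with_personality(struct io_uring *ring, const char *path)
{
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret, id;

        /* snapshot the current credentials as a personality id */
        id = io_uring_register_personality(ring);
        if (id < 0)
                return id;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
        /* the kernel resolves this id to the saved creds at issue time */
        sqe->personality = id;

        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (!ret) {
                ret = cqe->res;
                io_uring_cqe_seen(ring, cqe);
        }
        io_uring_unregister_personality(ring, id);
        return ret;
}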
 
-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
+{
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+       req = io_put_req_find_next(req);
+       return req ? &req->work : NULL;
+}
+
+static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_kiocb *timeout;
        int ret = 0;
 
+       /* one will be dropped by ->io_free_work() after returning to io-wq */
+       if (!(req->flags & REQ_F_REFCOUNT))
+               __io_req_set_refcount(req, 2);
+       else
+               req_ref_get(req);
+
        timeout = io_prep_linked_timeout(req);
        if (timeout)
                io_queue_linked_timeout(timeout);
 
-       /* if NO_CANCEL is set, we must still run the work */
-       if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
-                               IO_WQ_WORK_CANCEL) {
+       /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+       if (work->flags & IO_WQ_WORK_CANCEL)
                ret = -ECANCELED;
-       }
 
        if (!ret) {
                do {
-                       ret = io_issue_sqe(req, false, NULL);
+                       ret = io_issue_sqe(req, 0);
                        /*
                         * We can get EAGAIN for polled IO even though we're
                         * forcing a sync submission from here, since we can't
                         * wait for request slots on the block side.
                         */
-                       if (ret != -EAGAIN)
+                       if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
                                break;
                        cond_resched();
                } while (1);
        }
 
-       if (ret) {
-               struct io_ring_ctx *lock_ctx = NULL;
-
-               if (req->ctx->flags & IORING_SETUP_IOPOLL)
-                       lock_ctx = req->ctx;
-
-               /*
-                * io_iopoll_complete() does not hold completion_lock to
-                * complete polled io, so here for polled io, we can not call
-                * io_req_complete() directly, otherwise there maybe concurrent
-                * access to cqring, defer_list, etc, which is not safe. Given
-                * that io_iopoll_complete() is always called under uring_lock,
-                * so here for polled io, we also get uring_lock to complete
-                * it.
-                */
-               if (lock_ctx)
-                       mutex_lock(&lock_ctx->uring_lock);
-
-               req_set_fail_links(req);
-               io_req_complete(req, ret);
-
-               if (lock_ctx)
-                       mutex_unlock(&lock_ctx->uring_lock);
-       }
+       /* avoid locking problems by failing it from a clean context */
+       if (ret)
+               io_req_task_queue_fail(req, ret);
+}
 
-       return io_steal_work(req);
+static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
+                                                      unsigned i)
+{
+       return &table->files[i];
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
                                              int index)
 {
-       struct fixed_file_table *table;
+       struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
 
-       table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
-       return table->files[index & IORING_FILE_TABLE_MASK];
+       return (struct file *) (slot->file_ptr & FFS_MASK);
 }
 
-static struct file *io_file_get(struct io_submit_state *state,
-                               struct io_kiocb *req, int fd, bool fixed)
+static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
+{
+       unsigned long file_ptr = (unsigned long) file;
+
+       if (__io_file_supports_nowait(file, READ))
+               file_ptr |= FFS_ASYNC_READ;
+       if (__io_file_supports_nowait(file, WRITE))
+               file_ptr |= FFS_ASYNC_WRITE;
+       if (S_ISREG(file_inode(file)->i_mode))
+               file_ptr |= FFS_ISREG;
+       file_slot->file_ptr = file_ptr;
+}
+
+static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
+                                            struct io_kiocb *req, int fd)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct file *file;
+       unsigned long file_ptr;
 
-       if (fixed) {
-               if (unlikely((unsigned int)fd >= ctx->nr_user_files))
-                       return NULL;
-               fd = array_index_nospec(fd, ctx->nr_user_files);
-               file = io_file_from_index(ctx, fd);
-               if (file) {
-                       req->fixed_file_refs = &ctx->file_data->node->refs;
-                       percpu_ref_get(req->fixed_file_refs);
-               }
-       } else {
-               trace_io_uring_file_get(ctx, fd);
-               file = __io_file_get(state, fd);
-       }
+       if (unlikely((unsigned int)fd >= ctx->nr_user_files))
+               return NULL;
+       fd = array_index_nospec(fd, ctx->nr_user_files);
+       file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+       file = (struct file *) (file_ptr & FFS_MASK);
+       file_ptr &= ~FFS_MASK;
+       /* mask in overlapping REQ_F and FFS bits */
+       req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
+       io_req_set_rsrc_node(req);
+       return file;
+}
 
-       if (file && file->f_op == &io_uring_fops &&
-           !(req->flags & REQ_F_INFLIGHT)) {
-               io_req_init_async(req);
-               req->flags |= REQ_F_INFLIGHT;
+static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req, int fd)
+{
+       struct file *file = fget(fd);
 
-               spin_lock_irq(&ctx->inflight_lock);
-               list_add(&req->inflight_entry, &ctx->inflight_list);
-               spin_unlock_irq(&ctx->inflight_lock);
-       }
+       trace_io_uring_file_get(ctx, fd);
 
+       /* we don't allow fixed io_uring files */
+       if (file && unlikely(file->f_op == &io_uring_fops))
+               io_req_track_inflight(req);
        return file;
 }
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
-                          int fd)
+static inline struct file *io_file_get(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req, int fd, bool fixed)
 {
-       bool fixed;
+       if (fixed)
+               return io_file_get_fixed(ctx, req, fd);
+       else
+               return io_file_get_normal(ctx, req, fd);
+}
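io_file_get_fixed() is the consumer of files registered from userspace; with IOSQE_FIXED_FILE the sqe fd field is an index into that table rather than a process fd. A minimal sketch assuming liburing; read_fixed_slot() is an illustrative helper:

#include <liburing.h>

static int read_fixed_slot(struct io_uring *ring, int real_fd,
                           void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;
        int fds[1] = { real_fd };
        int ret;

        /* populate the kernel's file table; slot 0 now maps to real_fd */
        ret = io_uring_register_files(ring, fds, 1);
        if (ret)
                return ret;

        sqe = io_uring_get_sqe(ring);
        /* "fd" 0 is interpreted as an index into the registered table */
        io_uring_prep_read(sqe, 0, buf, len, 0);
        sqe->flags |= IOSQE_FIXED_FILE;

        return io_uring_submit(ring);
}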
 
-       fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
-       if (unlikely(!fixed && io_async_submit(req->ctx)))
-               return -EBADF;
+static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+{
+       struct io_kiocb *prev = req->timeout.prev;
+       int ret = -ENOENT;
 
-       req->file = io_file_get(state, req, fd, fixed);
-       if (req->file || io_op_defs[req->opcode].needs_file_no_error)
-               return 0;
-       return -EBADF;
+       if (prev) {
+               if (!(req->task->flags & PF_EXITING))
+                       ret = io_try_cancel_userdata(req, prev->user_data);
+               io_req_complete_post(req, ret ?: -ETIME, 0);
+               io_put_req(prev);
+       } else {
+               io_req_complete_post(req, -ETIME, 0);
+       }
 }
 
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 {
        struct io_timeout_data *data = container_of(timer,
                                                struct io_timeout_data, timer);
-       struct io_kiocb *req = data->req;
+       struct io_kiocb *prev, *req = data->req;
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_kiocb *prev = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
+       spin_lock_irqsave(&ctx->timeout_lock, flags);
+       prev = req->timeout.head;
+       req->timeout.head = NULL;
 
        /*
         * We don't expect the list to be empty, that will only happen if we
         * race with the completion of the linked work.
         */
-       if (!list_empty(&req->link_list)) {
-               prev = list_entry(req->link_list.prev, struct io_kiocb,
-                                 link_list);
-               list_del_init(&req->link_list);
-               if (!refcount_inc_not_zero(&prev->refs))
+       if (prev) {
+               io_remove_next_linked(prev);
+               if (!req_ref_inc_not_zero(prev))
                        prev = NULL;
        }
-
        list_del(&req->timeout.list);
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       req->timeout.prev = prev;
+       spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-       if (prev) {
-               io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
-               io_put_req_deferred(prev, 1);
-       } else {
-               io_cqring_add_event(req, -ETIME, 0);
-               io_put_req_deferred(req, 1);
-       }
+       req->io_task_work.func = io_req_task_link_timeout;
+       io_req_task_work_add(req);
        return HRTIMER_NORESTART;
 }
 
-static void __io_queue_linked_timeout(struct io_kiocb *req)
+static void io_queue_linked_timeout(struct io_kiocb *req)
 {
+       struct io_ring_ctx *ctx = req->ctx;
+
+       spin_lock_irq(&ctx->timeout_lock);
        /*
-        * If the list is now empty, then our linked request finished before
-        * we got a chance to setup the timer
+        * If the back reference is NULL, then our linked request finished
+        * before we got a chance to set up the timer.
         */
-       if (!list_empty(&req->link_list)) {
+       if (req->timeout.head) {
                struct io_timeout_data *data = req->async_data;
 
                data->timer.function = io_link_timeout_fn;
                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
                                data->mode);
+               list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
        }
-}
-
-static void io_queue_linked_timeout(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-
-       spin_lock_irq(&ctx->completion_lock);
-       __io_queue_linked_timeout(req);
-       spin_unlock_irq(&ctx->completion_lock);
-
+       spin_unlock_irq(&ctx->timeout_lock);
        /* drop submission reference */
        io_put_req(req);
 }
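io_queue_linked_timeout() arms the timer for an IORING_OP_LINK_TIMEOUT that was linked behind another request. A hedged sketch of that submission pattern, assuming liburing; submit_recv_with_timeout() is illustrative:

#include <liburing.h>

static void submit_recv_with_timeout(struct io_uring *ring, int sock,
                                     void *buf, unsigned len)
{
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_recv(sqe, sock, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;    /* next SQE is linked to this one */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);

        io_uring_submit(ring);          /* two CQEs will be posted */
}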
 
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-       struct io_kiocb *nxt;
-
-       if (!(req->flags & REQ_F_LINK_HEAD))
-               return NULL;
-       if (req->flags & REQ_F_LINK_TIMEOUT)
-               return NULL;
-
-       nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
-                                       link_list);
-       if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
-               return NULL;
-
-       nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
-       req->flags |= REQ_F_LINK_TIMEOUT;
-       return nxt;
-}
-
-static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
+static void __io_queue_sqe(struct io_kiocb *req)
+       __must_hold(&req->ctx->uring_lock)
 {
        struct io_kiocb *linked_timeout;
-       const struct cred *old_creds = NULL;
        int ret;
 
-again:
-       linked_timeout = io_prep_linked_timeout(req);
-
-       if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-           (req->work.flags & IO_WQ_WORK_CREDS) &&
-           req->work.identity->creds != current_cred()) {
-               if (old_creds)
-                       revert_creds(old_creds);
-               if (old_creds == req->work.identity->creds)
-                       old_creds = NULL; /* restored original creds */
-               else
-                       old_creds = override_creds(req->work.identity->creds);
-       }
-
-       ret = io_issue_sqe(req, true, cs);
+issue_sqe:
+       ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
         * doesn't support non-blocking read/write attempts
         */
-       if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
-               if (!io_arm_poll_handler(req)) {
+       if (likely(!ret)) {
+               if (req->flags & REQ_F_COMPLETE_INLINE) {
+                       struct io_ring_ctx *ctx = req->ctx;
+                       struct io_submit_state *state = &ctx->submit_state;
+
+                       state->compl_reqs[state->compl_nr++] = req;
+                       if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
+                               io_submit_flush_completions(ctx);
+                       return;
+               }
+
+               linked_timeout = io_prep_linked_timeout(req);
+               if (linked_timeout)
+                       io_queue_linked_timeout(linked_timeout);
+       } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+               linked_timeout = io_prep_linked_timeout(req);
+
+               switch (io_arm_poll_handler(req)) {
+               case IO_APOLL_READY:
+                       if (linked_timeout)
+                               io_queue_linked_timeout(linked_timeout);
+                       goto issue_sqe;
+               case IO_APOLL_ABORTED:
                        /*
                         * Queued up for async execution, worker will release
                         * submit reference when the iocb is actually submitted.
                         */
-                       io_queue_async_work(req);
+                       io_queue_async_work(req, NULL);
+                       break;
                }
 
                if (linked_timeout)
                        io_queue_linked_timeout(linked_timeout);
-       } else if (likely(!ret)) {
-               /* drop submission reference */
-               req = io_put_req_find_next(req);
-               if (linked_timeout)
-                       io_queue_linked_timeout(linked_timeout);
-
-               if (req) {
-                       if (!(req->flags & REQ_F_FORCE_ASYNC))
-                               goto again;
-                       io_queue_async_work(req);
-               }
        } else {
-               /* un-prep timeout, so it'll be killed as any other linked */
-               req->flags &= ~REQ_F_LINK_TIMEOUT;
-               req_set_fail_links(req);
-               io_put_req(req);
-               io_req_complete(req, ret);
+               io_req_complete_failed(req, ret);
        }
-
-       if (old_creds)
-               revert_creds(old_creds);
 }
 
-static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                        struct io_comp_state *cs)
+static inline void io_queue_sqe(struct io_kiocb *req)
+       __must_hold(&req->ctx->uring_lock)
 {
-       int ret;
+       if (unlikely(req->ctx->drain_active) && io_drain_req(req))
+               return;
 
-       ret = io_req_defer(req, sqe);
-       if (ret) {
-               if (ret != -EIOCBQUEUED) {
-fail_req:
-                       req_set_fail_links(req);
-                       io_put_req(req);
-                       io_req_complete(req, ret);
-               }
-       } else if (req->flags & REQ_F_FORCE_ASYNC) {
-               if (!req->async_data) {
-                       ret = io_req_defer_prep(req, sqe);
-                       if (unlikely(ret))
-                               goto fail_req;
-               }
-               io_queue_async_work(req);
+       if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
+               __io_queue_sqe(req);
+       } else if (req->flags & REQ_F_FAIL) {
+               io_req_complete_fail_submit(req);
        } else {
-               if (sqe) {
-                       ret = io_req_prep(req, sqe);
-                       if (unlikely(ret))
-                               goto fail_req;
-               }
-               __io_queue_sqe(req, cs);
+               int ret = io_req_prep_async(req);
+
+               if (unlikely(ret))
+                       io_req_complete_failed(req, ret);
+               else
+                       io_queue_async_work(req, NULL);
        }
 }
 
-static inline void io_queue_link_head(struct io_kiocb *req,
-                                     struct io_comp_state *cs)
+/*
+ * Check SQE restrictions (opcode and flags).
+ *
+ * Returns 'true' if SQE is allowed, 'false' otherwise.
+ */
+static inline bool io_check_restriction(struct io_ring_ctx *ctx,
+                                       struct io_kiocb *req,
+                                       unsigned int sqe_flags)
 {
-       if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
-               io_put_req(req);
-               io_req_complete(req, -ECANCELED);
-       } else
-               io_queue_sqe(req, NULL, cs);
-}
+       if (likely(!ctx->restricted))
+               return true;
 
-static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                        struct io_kiocb **link, struct io_comp_state *cs)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-       int ret;
+       if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
+               return false;
 
-       /*
-        * If we already have a head request, queue this one for async
-        * submittal once the head completes. If we don't have a head but
-        * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
-        * submitted sync once the chain is complete. If none of those
-        * conditions are true (normal request), then just queue it.
-        */
-       if (*link) {
-               struct io_kiocb *head = *link;
+       if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
+           ctx->restrictions.sqe_flags_required)
+               return false;
 
-               /*
-                * Taking sequential execution of a link, draining both sides
-                * of the link also fullfils IOSQE_IO_DRAIN semantics for all
-                * requests in the link. So, it drains the head and the
-                * next after the link request. The last one is done via
-                * drain_next flag to persist the effect across calls.
-                */
-               if (req->flags & REQ_F_IO_DRAIN) {
-                       head->flags |= REQ_F_IO_DRAIN;
-                       ctx->drain_next = 1;
-               }
-               ret = io_req_defer_prep(req, sqe);
-               if (unlikely(ret)) {
-                       /* fail even hard links since we don't submit */
-                       head->flags |= REQ_F_FAIL_LINK;
-                       return ret;
-               }
-               trace_io_uring_link(ctx, req, head);
-               list_add_tail(&req->link_list, &head->link_list);
+       if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
+                         ctx->restrictions.sqe_flags_required))
+               return false;
 
-               /* last request of a link, enqueue the link */
-               if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
-                       io_queue_link_head(head, cs);
-                       *link = NULL;
+       return true;
+}
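io_check_restriction() only matters for rings created with IORING_SETUP_R_DISABLED and then locked down via IORING_REGISTER_RESTRICTIONS. A sketch of that setup, assuming a liburing build that provides io_uring_register_restrictions() and io_uring_enable_rings(); setup_restricted_ring() is an illustrative name:

#include <liburing.h>

static int setup_restricted_ring(struct io_uring *ring)
{
        struct io_uring_restriction res[2] = {
                { .opcode = IORING_RESTRICTION_SQE_OP,
                  .sqe_op = IORING_OP_READV },
                { .opcode = IORING_RESTRICTION_SQE_OP,
                  .sqe_op = IORING_OP_WRITEV },
        };
        int ret;

        ret = io_uring_queue_init(8, ring, IORING_SETUP_R_DISABLED);
        if (ret)
                return ret;
        ret = io_uring_register_restrictions(ring, res, 2);
        if (ret)
                return ret;
        /* from here on, any other opcode fails with -EACCES */
        return io_uring_enable_rings(ring);
}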
+
+static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                      const struct io_uring_sqe *sqe)
+       __must_hold(&ctx->uring_lock)
+{
+       struct io_submit_state *state;
+       unsigned int sqe_flags;
+       int personality, ret = 0;
+
+       /* req is partially pre-initialised, see io_preinit_req() */
+       req->opcode = READ_ONCE(sqe->opcode);
+       /* same numerical values with corresponding REQ_F_*, safe to copy */
+       req->flags = sqe_flags = READ_ONCE(sqe->flags);
+       req->user_data = READ_ONCE(sqe->user_data);
+       req->file = NULL;
+       req->fixed_rsrc_refs = NULL;
+       req->task = current;
+
+       /* enforce forwards compatibility on users */
+       if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+               return -EINVAL;
+       if (unlikely(req->opcode >= IORING_OP_LAST))
+               return -EINVAL;
+       if (!io_check_restriction(ctx, req, sqe_flags))
+               return -EACCES;
+
+       if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+           !io_op_defs[req->opcode].buffer_select)
+               return -EOPNOTSUPP;
+       if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
+               ctx->drain_active = true;
+
+       personality = READ_ONCE(sqe->personality);
+       if (personality) {
+               req->creds = xa_load(&ctx->personalities, personality);
+               if (!req->creds)
+                       return -EINVAL;
+               get_cred(req->creds);
+               req->flags |= REQ_F_CREDS;
+       }
+       state = &ctx->submit_state;
+
+       /*
+        * Plug now if we have more than 1 IO left after this, and the target
+        * is potentially a read/write to block based storage.
+        */
+       if (!state->plug_started && state->ios_left > 1 &&
+           io_op_defs[req->opcode].plug) {
+               blk_start_plug(&state->plug);
+               state->plug_started = true;
+       }
+
+       if (io_op_defs[req->opcode].needs_file) {
+               req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
+                                       (sqe_flags & IOSQE_FIXED_FILE));
+               if (unlikely(!req->file))
+                       ret = -EBADF;
+       }
+
+       state->ios_left--;
+       return ret;
+}
+
+static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                        const struct io_uring_sqe *sqe)
+       __must_hold(&ctx->uring_lock)
+{
+       struct io_submit_link *link = &ctx->submit_state.link;
+       int ret;
+
+       ret = io_init_req(ctx, req, sqe);
+       if (unlikely(ret)) {
+fail_req:
+               /* fail even hard links since we don't submit */
+               if (link->head) {
+                       /*
+                        * Whether a link req failed or was cancelled is normally
+                        * judged by REQ_F_FAIL, but the head is an exception: it
+                        * may have REQ_F_FAIL set because another req in the chain
+                        * failed. So use req->result to distinguish whether the
+                        * head failed in its own right or on behalf of another
+                        * req, and set the correct ret code for it. Init result
+                        * here to avoid affecting the normal path.
+                        */
+                       if (!(link->head->flags & REQ_F_FAIL))
+                               req_fail_link_node(link->head, -ECANCELED);
+               } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
+                       /*
+                        * The current req is a normal req: return the error
+                        * and thus break the submission loop.
+                        */
+                       io_req_complete_failed(req, ret);
+                       return ret;
                }
+               req_fail_link_node(req, ret);
        } else {
-               if (unlikely(ctx->drain_next)) {
-                       req->flags |= REQ_F_IO_DRAIN;
-                       ctx->drain_next = 0;
+               ret = io_req_prep(req, sqe);
+               if (unlikely(ret))
+                       goto fail_req;
+       }
+
+       /* don't need @sqe from now on */
+       trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
+                                 req->flags, true,
+                                 ctx->flags & IORING_SETUP_SQPOLL);
+
+       /*
+        * If we already have a head request, queue this one for async
+        * submittal once the head completes. If we don't have a head but
+        * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
+        * submitted sync once the chain is complete. If none of those
+        * conditions are true (normal request), then just queue it.
+        */
+       if (link->head) {
+               struct io_kiocb *head = link->head;
+
+               if (!(req->flags & REQ_F_FAIL)) {
+                       ret = io_req_prep_async(req);
+                       if (unlikely(ret)) {
+                               req_fail_link_node(req, ret);
+                               if (!(head->flags & REQ_F_FAIL))
+                                       req_fail_link_node(head, -ECANCELED);
+                       }
                }
-               if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
-                       req->flags |= REQ_F_LINK_HEAD;
-                       INIT_LIST_HEAD(&req->link_list);
+               trace_io_uring_link(ctx, req, head);
+               link->last->link = req;
+               link->last = req;
 
-                       ret = io_req_defer_prep(req, sqe);
-                       if (unlikely(ret))
-                               req->flags |= REQ_F_FAIL_LINK;
-                       *link = req;
+               /* last request of a link, enqueue the link */
+               if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
+                       link->head = NULL;
+                       io_queue_sqe(head);
+               }
+       } else {
+               if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+                       link->head = req;
+                       link->last = req;
                } else {
-                       io_queue_sqe(req, sqe, cs);
+                       io_queue_sqe(req);
                }
        }
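The link->head/link->last bookkeeping above corresponds to chains the submitter builds with IOSQE_IO_LINK. As a simple hedged example (liburing assumed, helper name illustrative), a write whose fsync only runs after the write completes:

#include <liburing.h>

static void submit_write_then_fsync(struct io_uring *ring, int fd,
                                    const void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;     /* head of the chain */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0); /* last request, enqueues the link */

        io_uring_submit(ring);
}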
 
@@ -6488,29 +7090,27 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 /*
  * Batched submission is done, ensure local IO is flushed out.
  */
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_state_end(struct io_submit_state *state,
+                               struct io_ring_ctx *ctx)
 {
-       if (!list_empty(&state->comp.list))
-               io_submit_flush_completions(&state->comp);
-       blk_finish_plug(&state->plug);
-       io_state_file_put(state);
-       if (state->free_reqs)
-               kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+       if (state->link.head)
+               io_queue_sqe(state->link.head);
+       if (state->compl_nr)
+               io_submit_flush_completions(ctx);
+       if (state->plug_started)
+               blk_finish_plug(&state->plug);
 }
 
 /*
  * Start submission side cache.
  */
 static void io_submit_state_start(struct io_submit_state *state,
-                                 struct io_ring_ctx *ctx, unsigned int max_ios)
-{
-       blk_start_plug(&state->plug);
-       state->comp.nr = 0;
-       INIT_LIST_HEAD(&state->comp.list);
-       state->comp.ctx = ctx;
-       state->free_reqs = 0;
-       state->file = NULL;
+                                 unsigned int max_ios)
+{
+       state->plug_started = false;
        state->ios_left = max_ios;
+       /* only set the head; no need to init link.last in advance */
+       state->link.head = NULL;
 }
 
 static void io_commit_sqring(struct io_ring_ctx *ctx)
@@ -6526,7 +7126,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 }
 
 /*
- * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
+ * Fetch an sqe, if one is available. Note this returns a pointer to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
@@ -6535,8 +7135,8 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
  */
 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 {
-       u32 *sq_array = ctx->sq_array;
-       unsigned head;
+       unsigned head, mask = ctx->sq_entries - 1;
+       unsigned sq_idx = ctx->cached_sq_head++ & mask;
 
        /*
         * The cached sq head (or cq tail) serves two purposes:
@@ -6546,423 +7146,256 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
         * 2) allows the kernel side to track the head on its own, even
         *    though the application is the one updating it.
         */
-       head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
+       head = READ_ONCE(ctx->sq_array[sq_idx]);
        if (likely(head < ctx->sq_entries))
                return &ctx->sq_sqes[head];
 
        /* drop invalid entries */
-       ctx->cached_sq_dropped++;
-       WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
+       ctx->cq_extra--;
+       WRITE_ONCE(ctx->rings->sq_dropped,
+                  READ_ONCE(ctx->rings->sq_dropped) + 1);
        return NULL;
 }
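io_get_sqe() is the kernel half of the SQ ring protocol; userspace fills sq_array[tail & mask] with an SQE index and then publishes the new tail. A simplified raw-interface sketch under stated assumptions (the struct sq_ring wrapper and push_sqe() are illustrative, the mmap/setup boilerplate is elided, and GCC/Clang __atomic builtins stand in for the barriers):

#include <linux/io_uring.h>

struct sq_ring {
        unsigned *head;         /* sq.head, advanced by the kernel        */
        unsigned *tail;         /* sq.tail, advanced by userspace         */
        unsigned *ring_mask;    /* sq_entries - 1                         */
        unsigned *array;        /* maps ring slots to SQE indices         */
        struct io_uring_sqe *sqes;
};

static int push_sqe(struct sq_ring *sq, unsigned sqe_index)
{
        unsigned tail = *sq->tail;
        unsigned head = __atomic_load_n(sq->head, __ATOMIC_ACQUIRE);

        if (tail - head > *sq->ring_mask)
                return -1;      /* ring is full */

        sq->array[tail & *sq->ring_mask] = sqe_index;
        /* publish: the kernel reads the entry only after seeing the new tail */
        __atomic_store_n(sq->tail, tail + 1, __ATOMIC_RELEASE);
        return 0;
}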
 
-static inline void io_consume_sqe(struct io_ring_ctx *ctx)
-{
-       ctx->cached_sq_head++;
-}
-
-/*
- * Check SQE restrictions (opcode and flags).
- *
- * Returns 'true' if SQE is allowed, 'false' otherwise.
- */
-static inline bool io_check_restriction(struct io_ring_ctx *ctx,
-                                       struct io_kiocb *req,
-                                       unsigned int sqe_flags)
-{
-       if (!ctx->restricted)
-               return true;
-
-       if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
-               return false;
-
-       if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
-           ctx->restrictions.sqe_flags_required)
-               return false;
-
-       if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
-                         ctx->restrictions.sqe_flags_required))
-               return false;
-
-       return true;
-}
-
-#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
-                               IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
-                               IOSQE_BUFFER_SELECT)
-
-static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                      const struct io_uring_sqe *sqe,
-                      struct io_submit_state *state)
-{
-       unsigned int sqe_flags;
-       int id, ret;
-
-       req->opcode = READ_ONCE(sqe->opcode);
-       req->user_data = READ_ONCE(sqe->user_data);
-       req->async_data = NULL;
-       req->file = NULL;
-       req->ctx = ctx;
-       req->flags = 0;
-       /* one is dropped after submission, the other at completion */
-       refcount_set(&req->refs, 2);
-       req->task = current;
-       req->result = 0;
-
-       if (unlikely(req->opcode >= IORING_OP_LAST))
-               return -EINVAL;
-
-       if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
-               return -EFAULT;
-
-       sqe_flags = READ_ONCE(sqe->flags);
-       /* enforce forwards compatibility on users */
-       if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
-               return -EINVAL;
-
-       if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
-               return -EACCES;
-
-       if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-           !io_op_defs[req->opcode].buffer_select)
-               return -EOPNOTSUPP;
-
-       id = READ_ONCE(sqe->personality);
-       if (id) {
-               struct io_identity *iod;
-
-               iod = xa_load(&ctx->personalities, id);
-               if (unlikely(!iod))
-                       return -EINVAL;
-               refcount_inc(&iod->count);
-
-               __io_req_init_async(req);
-               get_cred(iod->creds);
-               req->work.identity = iod;
-               req->work.flags |= IO_WQ_WORK_CREDS;
-       }
-
-       /* same numerical values with corresponding REQ_F_*, safe to copy */
-       req->flags |= sqe_flags;
-
-       if (!io_op_defs[req->opcode].needs_file)
-               return 0;
-
-       ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
-       state->ios_left--;
-       return ret;
-}
-
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
+       __must_hold(&ctx->uring_lock)
 {
-       struct io_submit_state state;
-       struct io_kiocb *link = NULL;
-       int i, submitted = 0;
-
-       /* if we have a backlog and couldn't flush it all, return BUSY */
-       if (test_bit(0, &ctx->sq_check_overflow)) {
-               if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
-                       return -EBUSY;
-       }
+       int submitted = 0;
 
        /* make sure SQ entry isn't read before tail */
        nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
-
        if (!percpu_ref_tryget_many(&ctx->refs, nr))
                return -EAGAIN;
+       io_get_task_refs(nr);
 
-       percpu_counter_add(&current->io_uring->inflight, nr);
-       refcount_add(nr, &current->usage);
-
-       io_submit_state_start(&state, ctx, nr);
-
-       for (i = 0; i < nr; i++) {
+       io_submit_state_start(&ctx->submit_state, nr);
+       while (submitted < nr) {
                const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
-               int err;
 
-               sqe = io_get_sqe(ctx);
-               if (unlikely(!sqe)) {
-                       io_consume_sqe(ctx);
-                       break;
-               }
-               req = io_alloc_req(ctx, &state);
+               req = io_alloc_req(ctx);
                if (unlikely(!req)) {
                        if (!submitted)
                                submitted = -EAGAIN;
                        break;
                }
-               io_consume_sqe(ctx);
+               sqe = io_get_sqe(ctx);
+               if (unlikely(!sqe)) {
+                       list_add(&req->inflight_entry, &ctx->submit_state.free_list);
+                       break;
+               }
                /* will complete beyond this point, count as submitted */
                submitted++;
-
-               err = io_init_req(ctx, req, sqe, &state);
-               if (unlikely(err)) {
-fail_req:
-                       io_put_req(req);
-                       io_req_complete(req, err);
+               if (io_submit_sqe(ctx, req, sqe))
                        break;
-               }
-
-               trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
-                                               true, io_async_submit(ctx));
-               err = io_submit_sqe(req, sqe, &link, &state.comp);
-               if (err)
-                       goto fail_req;
        }
 
        if (unlikely(submitted != nr)) {
                int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
-               struct io_uring_task *tctx = current->io_uring;
                int unused = nr - ref_used;
 
+               current->io_uring->cached_refs += unused;
                percpu_ref_put_many(&ctx->refs, unused);
-               percpu_counter_sub(&tctx->inflight, unused);
-               put_task_struct_many(current, unused);
        }
-       if (link)
-               io_queue_link_head(link, &state.comp);
-       io_submit_state_end(&state);
 
+       io_submit_state_end(&ctx->submit_state, ctx);
         /* Commit SQ ring head once we've consumed and submitted all SQEs */
        io_commit_sqring(ctx);
 
        return submitted;
 }
 
-static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
+static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
 {
-       /* Tell userspace we may need a wakeup call */
-       spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
-       spin_unlock_irq(&ctx->completion_lock);
+       return READ_ONCE(sqd->state);
 }
 
-static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
+static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
-       spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
-       spin_unlock_irq(&ctx->completion_lock);
+       /* Tell userspace we may need a wakeup call */
+       spin_lock(&ctx->completion_lock);
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
+       spin_unlock(&ctx->completion_lock);
 }
 
-static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
-                              int sync, void *key)
+static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
 {
-       struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
-       int ret;
-
-       ret = autoremove_wake_function(wqe, mode, sync, key);
-       if (ret) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       }
-       return ret;
+       spin_lock(&ctx->completion_lock);
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
+       spin_unlock(&ctx->completion_lock);
 }
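These helpers toggle IORING_SQ_NEED_WAKEUP in the shared ring so a submitter knows when the SQPOLL thread has gone idle. A hedged sketch of the userspace check using the raw syscall; notify_sqpoll() and the sq_flags pointer argument are illustrative:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void notify_sqpoll(int ring_fd, unsigned *sq_flags)
{
        /* sq_flags points at the mmap'ed SQ ring flags word */
        if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_NEED_WAKEUP)
                syscall(__NR_io_uring_enter, ring_fd, 0, 0,
                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
}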
 
-enum sq_ret {
-       SQT_IDLE        = 1,
-       SQT_SPIN        = 2,
-       SQT_DID_WORK    = 4,
-};
-
-static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
-                                 unsigned long start_jiffies, bool cap_entries)
+static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 {
-       unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
-       struct io_sq_data *sqd = ctx->sq_data;
        unsigned int to_submit;
        int ret = 0;
 
-again:
-       if (!list_empty(&ctx->iopoll_list)) {
-               unsigned nr_events = 0;
-
-               mutex_lock(&ctx->uring_lock);
-               if (!list_empty(&ctx->iopoll_list) && !need_resched())
-                       io_do_iopoll(ctx, &nr_events, 0);
-               mutex_unlock(&ctx->uring_lock);
-       }
-
        to_submit = io_sqring_entries(ctx);
+       /* if we're handling multiple rings, cap submit size for fairness */
+       if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
+               to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
 
-       /*
-        * If submit got -EBUSY, flag us as needing the application
-        * to enter the kernel to reap and flush events.
-        */
-       if (!to_submit || ret == -EBUSY || need_resched()) {
-               /*
-                * Drop cur_mm before scheduling, we can't hold it for
-                * long periods (or over schedule()). Do this before
-                * adding ourselves to the waitqueue, as the unuse/drop
-                * may sleep.
-                */
-               io_sq_thread_drop_mm();
+       if (!list_empty(&ctx->iopoll_list) || to_submit) {
+               unsigned nr_events = 0;
+               const struct cred *creds = NULL;
 
-               /*
-                * We're polling. If we're within the defined idle
-                * period, then let us spin without work before going
-                * to sleep. The exception is if we got EBUSY doing
-                * more IO, we should wait for the application to
-                * reap events and wake us up.
-                */
-               if (!list_empty(&ctx->iopoll_list) || need_resched() ||
-                   (!time_after(jiffies, timeout) && ret != -EBUSY &&
-                   !percpu_ref_is_dying(&ctx->refs)))
-                       return SQT_SPIN;
+               if (ctx->sq_creds != current_cred())
+                       creds = override_creds(ctx->sq_creds);
 
-               prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
-                                       TASK_INTERRUPTIBLE);
+               mutex_lock(&ctx->uring_lock);
+               if (!list_empty(&ctx->iopoll_list))
+                       io_do_iopoll(ctx, &nr_events, 0);
 
                /*
-                * While doing polled IO, before going to sleep, we need
-                * to check if there are new reqs added to iopoll_list,
-                * it is because reqs may have been punted to io worker
-                * and will be added to iopoll_list later, hence check
-                * the iopoll_list again.
+                * Don't submit if refs are dying: that is good for
+                * io_uring_register(), and io_ring_exit_work() relies on it too.
                 */
-               if ((ctx->flags & IORING_SETUP_IOPOLL) &&
-                   !list_empty_careful(&ctx->iopoll_list)) {
-                       finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
-                       goto again;
-               }
+               if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
+                   !(ctx->flags & IORING_SETUP_R_DISABLED))
+                       ret = io_submit_sqes(ctx, to_submit);
+               mutex_unlock(&ctx->uring_lock);
 
-               to_submit = io_sqring_entries(ctx);
-               if (!to_submit || ret == -EBUSY)
-                       return SQT_IDLE;
+               if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
+                       wake_up(&ctx->sqo_sq_wait);
+               if (creds)
+                       revert_creds(creds);
        }
 
-       finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
-       io_ring_clear_wakeup_flag(ctx);
-
-       /* if we're handling multiple rings, cap submit size for fairness */
-       if (cap_entries && to_submit > 8)
-               to_submit = 8;
-
-       mutex_lock(&ctx->uring_lock);
-       if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
-               ret = io_submit_sqes(ctx, to_submit);
-       mutex_unlock(&ctx->uring_lock);
+       return ret;
+}
 
-       if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
-               wake_up(&ctx->sqo_sq_wait);
+static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
+{
+       struct io_ring_ctx *ctx;
+       unsigned sq_thread_idle = 0;
 
-       return SQT_DID_WORK;
+       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+               sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
+       sqd->sq_thread_idle = sq_thread_idle;
 }
 
-static void io_sqd_init_new(struct io_sq_data *sqd)
+static bool io_sqd_handle_event(struct io_sq_data *sqd)
 {
-       struct io_ring_ctx *ctx;
+       bool did_sig = false;
+       struct ksignal ksig;
 
-       while (!list_empty(&sqd->ctx_new_list)) {
-               ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
-               init_wait(&ctx->sqo_wait_entry);
-               ctx->sqo_wait_entry.func = io_sq_wake_function;
-               list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
-               complete(&ctx->sq_thread_comp);
+       if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+           signal_pending(current)) {
+               mutex_unlock(&sqd->lock);
+               if (signal_pending(current))
+                       did_sig = get_signal(&ksig);
+               cond_resched();
+               mutex_lock(&sqd->lock);
        }
+       return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
 }
 
 static int io_sq_thread(void *data)
 {
-       struct cgroup_subsys_state *cur_css = NULL;
-       const struct cred *old_cred = NULL;
        struct io_sq_data *sqd = data;
        struct io_ring_ctx *ctx;
-       unsigned long start_jiffies;
+       unsigned long timeout = 0;
+       char buf[TASK_COMM_LEN];
+       DEFINE_WAIT(wait);
 
-       start_jiffies = jiffies;
-       while (!kthread_should_stop()) {
-               enum sq_ret ret = 0;
-               bool cap_entries;
+       snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+       set_task_comm(current, buf);
 
-               /*
-                * Any changes to the sqd lists are synchronized through the
-                * kthread parking. This synchronizes the thread vs users,
-                * the users are synchronized on the sqd->ctx_lock.
-                */
-               if (kthread_should_park()) {
-                       kthread_parkme();
-                       /*
-                        * When sq thread is unparked, in case the previous park operation
-                        * comes from io_put_sq_data(), which means that sq thread is going
-                        * to be stopped, so here needs to have a check.
-                        */
-                       if (kthread_should_stop())
+       if (sqd->sq_cpu != -1)
+               set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+       else
+               set_cpus_allowed_ptr(current, cpu_online_mask);
+       current->flags |= PF_NO_SETAFFINITY;
+
+       mutex_lock(&sqd->lock);
+       while (1) {
+               bool cap_entries, sqt_spin = false;
+
+               if (io_sqd_events_pending(sqd) || signal_pending(current)) {
+                       if (io_sqd_handle_event(sqd))
                                break;
+                       timeout = jiffies + sqd->sq_thread_idle;
                }
 
-               if (unlikely(!list_empty(&sqd->ctx_new_list)))
-                       io_sqd_init_new(sqd);
-
                cap_entries = !list_is_singular(&sqd->ctx_list);
-
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
-                       if (current->cred != ctx->creds) {
-                               if (old_cred)
-                                       revert_creds(old_cred);
-                               old_cred = override_creds(ctx->creds);
-                       }
-                       io_sq_thread_associate_blkcg(ctx, &cur_css);
-#ifdef CONFIG_AUDIT
-                       current->loginuid = ctx->loginuid;
-                       current->sessionid = ctx->sessionid;
-#endif
+                       int ret = __io_sq_thread(ctx, cap_entries);
 
-                       ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
-
-                       io_sq_thread_drop_mm();
+                       if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
+                               sqt_spin = true;
                }
+               if (io_run_task_work())
+                       sqt_spin = true;
 
-               if (ret & SQT_SPIN) {
-                       io_run_task_work();
-                       io_sq_thread_drop_mm();
+               if (sqt_spin || !time_after(jiffies, timeout)) {
                        cond_resched();
-               } else if (ret == SQT_IDLE) {
-                       if (kthread_should_park())
-                               continue;
-                       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+                       if (sqt_spin)
+                               timeout = jiffies + sqd->sq_thread_idle;
+                       continue;
+               }
+
+               prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
+               if (!io_sqd_events_pending(sqd) && !current->task_works) {
+                       bool needs_sched = true;
+
+                       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
                                io_ring_set_wakeup_flag(ctx);
-                       schedule();
-                       start_jiffies = jiffies;
+
+                               if ((ctx->flags & IORING_SETUP_IOPOLL) &&
+                                   !list_empty_careful(&ctx->iopoll_list)) {
+                                       needs_sched = false;
+                                       break;
+                               }
+                               if (io_sqring_entries(ctx)) {
+                                       needs_sched = false;
+                                       break;
+                               }
+                       }
+
+                       if (needs_sched) {
+                               mutex_unlock(&sqd->lock);
+                               schedule();
+                               mutex_lock(&sqd->lock);
+                       }
                        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                io_ring_clear_wakeup_flag(ctx);
                }
+
+               finish_wait(&sqd->wait, &wait);
+               timeout = jiffies + sqd->sq_thread_idle;
        }
 
+       io_uring_cancel_generic(true, sqd);
+       sqd->thread = NULL;
+       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+               io_ring_set_wakeup_flag(ctx);
        io_run_task_work();
-       io_sq_thread_drop_mm();
-
-       if (cur_css)
-               io_sq_thread_unassociate_blkcg();
-       if (old_cred)
-               revert_creds(old_cred);
-
-       kthread_parkme();
+       mutex_unlock(&sqd->lock);
 
-       return 0;
+       complete(&sqd->exited);
+       do_exit(0);
 }
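The sqd->sq_thread_idle and sqd->sq_cpu values that drive io_sq_thread() come from io_uring_params at ring setup. A minimal sketch, assuming liburing; the idle time and CPU number below are arbitrary example values:

#include <liburing.h>
#include <string.h>

static int setup_sqpoll_ring(struct io_uring *ring)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
        p.sq_thread_idle = 2000;        /* ms before the iou-sqp thread sleeps */
        p.sq_thread_cpu = 1;            /* pin the poller to CPU 1 */

        return io_uring_queue_init_params(64, ring, &p);
}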
 
 struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
-       unsigned to_wait;
+       unsigned cq_tail;
        unsigned nr_timeouts;
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
        struct io_ring_ctx *ctx = iowq->ctx;
+       int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
 
        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
-       return io_cqring_events(ctx) >= iowq->to_wait ||
-                       atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+       return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
 }
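The signed dist computation above stays correct when the 32-bit CQ counters wrap, which a plain unsigned comparison would not. A small worked example, assuming the usual two's-complement conversion the kernel relies on:

static void cq_tail_wrap_example(void)
{
        unsigned cq_head = 0xfffffffeu;         /* counter about to wrap      */
        unsigned min_events = 4;
        unsigned target = cq_head + min_events; /* wraps around to 0x00000002 */
        unsigned cached_cq_tail = 0xffffffffu;  /* only one CQE posted so far */
        int dist = cached_cq_tail - (int) target;

        /* dist is -3 on two's-complement targets: still waiting, correct.
         * A plain unsigned compare (0xffffffff >= 0x2) would wrongly report
         * that enough completions are available. */
        (void)dist;
}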
 
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
@@ -6975,7 +7408,7 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
         * Cannot safely flush overflowed CQEs from here, ensure we wake up
         * the task, and the next invocation will do it.
         */
-       if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+       if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
                return autoremove_wake_function(curr, mode, wake_flags, key);
        return -1;
 }
@@ -6986,43 +7419,60 @@ static int io_run_task_work_sig(void)
                return 1;
        if (!signal_pending(current))
                return 0;
-       if (current->jobctl & JOBCTL_TASK_WORK) {
-               spin_lock_irq(&current->sighand->siglock);
-               current->jobctl &= ~JOBCTL_TASK_WORK;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-               return 1;
-       }
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+               return -ERESTARTSYS;
        return -EINTR;
 }
 
+/* if this returns > 0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+                                         struct io_wait_queue *iowq,
+                                         ktime_t timeout)
+{
+       int ret;
+
+       /* make sure we run task_work before checking for signals */
+       ret = io_run_task_work_sig();
+       if (ret || io_should_wake(iowq))
+               return ret;
+       /* let the caller flush overflows, retry */
+       if (test_bit(0, &ctx->check_cq_overflow))
+               return 1;
+
+       if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+               return -ETIME;
+       return 1;
+}
+
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
  */
 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
-                         const sigset_t __user *sig, size_t sigsz)
-{
-       struct io_wait_queue iowq = {
-               .wq = {
-                       .private        = current,
-                       .func           = io_wake_function,
-                       .entry          = LIST_HEAD_INIT(iowq.wq.entry),
-               },
-               .ctx            = ctx,
-               .to_wait        = min_events,
-       };
+                         const sigset_t __user *sig, size_t sigsz,
+                         struct __kernel_timespec __user *uts)
+{
+       struct io_wait_queue iowq;
        struct io_rings *rings = ctx->rings;
-       int ret = 0;
+       ktime_t timeout = KTIME_MAX;
+       int ret;
 
        do {
-               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               io_cqring_overflow_flush(ctx);
                if (io_cqring_events(ctx) >= min_events)
                        return 0;
                if (!io_run_task_work())
                        break;
        } while (1);
 
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+       }
+
        if (sig) {
 #ifdef CONFIG_COMPAT
                if (in_compat_syscall())
@@ -7036,33 +7486,267 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
+       init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+       iowq.wq.private = current;
+       INIT_LIST_HEAD(&iowq.wq.entry);
+       iowq.ctx = ctx;
        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+       iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
-               io_cqring_overflow_flush(ctx, false, NULL, NULL);
-               prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+               /* if we can't even flush overflow, don't wait for more */
+               if (!io_cqring_overflow_flush(ctx)) {
+                       ret = -EBUSY;
+                       break;
+               }
+               prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
-               /* make sure we run task_work before checking for signals */
-               ret = io_run_task_work_sig();
-               if (ret > 0) {
-                       finish_wait(&ctx->wait, &iowq.wq);
-                       continue;
+               ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+               finish_wait(&ctx->cq_wait, &iowq.wq);
+               cond_resched();
+       } while (ret > 0);
+
+       restore_saved_sigmask_unless(ret == -EINTR);
+
+       return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+}
+
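The new uts argument converts a user-supplied relative timespec into an absolute ktime and bounds the schedule_hrtimeout() sleep in io_cqring_wait_schedule() above. From userspace this wait is normally driven through liburing; a minimal sketch, assuming liburing's io_uring_wait_cqe_timeout() helper (which, depending on feature probing, either hands the timespec to the kernel wait or queues an internal timeout request):

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
            int ret;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_nop(sqe);
            io_uring_submit(&ring);

            /* wait up to 1s for a completion; -ETIME if nothing arrives */
            ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
            if (!ret) {
                    printf("cqe res %d\n", cqe->res);
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return 0;
    }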
+static void io_free_page_table(void **table, size_t size)
+{
+       unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
+
+       for (i = 0; i < nr_tables; i++)
+               kfree(table[i]);
+       kfree(table);
+}
+
+static void **io_alloc_page_table(size_t size)
+{
+       unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
+       size_t init_size = size;
+       void **table;
+
+       table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
+       if (!table)
+               return NULL;
+
+       for (i = 0; i < nr_tables; i++) {
+               unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
+
+               table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
+               if (!table[i]) {
+                       io_free_page_table(table, init_size);
+                       return NULL;
                }
-               else if (ret < 0)
+               size -= this_size;
+       }
+       return table;
+}
+
+static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
+{
+       percpu_ref_exit(&ref_node->refs);
+       kfree(ref_node);
+}
+
+static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
+{
+       struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
+       struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+       unsigned long flags;
+       bool first_add = false;
+       unsigned long delay = HZ;
+
+       spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
+       node->done = true;
+
+       /* if we are mid-quiesce then do not delay */
+       if (node->rsrc_data->quiesce)
+               delay = 0;
+
+       while (!list_empty(&ctx->rsrc_ref_list)) {
+               node = list_first_entry(&ctx->rsrc_ref_list,
+                                           struct io_rsrc_node, node);
+               /* recycle ref nodes in order */
+               if (!node->done)
+                       break;
+               list_del(&node->node);
+               first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
+       }
+       spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
+
+       if (first_add)
+               mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
+}
+
+static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
+{
+       struct io_rsrc_node *ref_node;
+
+       ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
+       if (!ref_node)
+               return NULL;
+
+       if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
+                           0, GFP_KERNEL)) {
+               kfree(ref_node);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&ref_node->node);
+       INIT_LIST_HEAD(&ref_node->rsrc_list);
+       ref_node->done = false;
+       return ref_node;
+}
+
+static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
+                               struct io_rsrc_data *data_to_kill)
+{
+       WARN_ON_ONCE(!ctx->rsrc_backup_node);
+       WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
+
+       if (data_to_kill) {
+               struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
+
+               rsrc_node->rsrc_data = data_to_kill;
+               spin_lock_irq(&ctx->rsrc_ref_lock);
+               list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
+               spin_unlock_irq(&ctx->rsrc_ref_lock);
+
+               atomic_inc(&data_to_kill->refs);
+               percpu_ref_kill(&rsrc_node->refs);
+               ctx->rsrc_node = NULL;
+       }
+
+       if (!ctx->rsrc_node) {
+               ctx->rsrc_node = ctx->rsrc_backup_node;
+               ctx->rsrc_backup_node = NULL;
+       }
+}
+
+static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
+{
+       if (ctx->rsrc_backup_node)
+               return 0;
+       ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
+       return ctx->rsrc_backup_node ? 0 : -ENOMEM;
+}
+
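io_rsrc_node_switch() retires the current node by dropping the reference the table holds on it; once every in-flight request that grabbed the node has also dropped its reference, io_rsrc_node_ref_zero() queues the puts that were collected under that generation. A toy userspace model of that idea, with a plain C11 atomic standing in for the percpu_ref (purely illustrative, not the kernel mechanism):

    #include <stdatomic.h>
    #include <stdio.h>

    struct rsrc_node {
            atomic_int refs;        /* starts at 1: the reference the table holds */
            int pending_put;        /* a resource was queued for removal under this node */
    };

    static void node_put(struct rsrc_node *node)
    {
            if (atomic_fetch_sub(&node->refs, 1) == 1 && node->pending_put)
                    printf("last ref dropped: releasing queued resource\n");
    }

    int main(void)
    {
            struct rsrc_node node;

            atomic_init(&node.refs, 1);
            node.pending_put = 0;

            atomic_fetch_add(&node.refs, 1);        /* in-flight request pins the node */
            node.pending_put = 1;                   /* resource unregistered, put queued */
            node_put(&node);                        /* "switch": table drops its ref */
            node_put(&node);                        /* request completes: put runs now */
            return 0;
    }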
+static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
+{
+       int ret;
+
+       /* As we may drop ->uring_lock, other task may have started quiesce */
+       if (data->quiesce)
+               return -ENXIO;
+
+       data->quiesce = true;
+       do {
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
                        break;
-               if (io_should_wake(&iowq))
+               io_rsrc_node_switch(ctx, data);
+
+               /* kill initial ref, already quiesced if zero */
+               if (atomic_dec_and_test(&data->refs))
                        break;
-               if (test_bit(0, &ctx->cq_check_overflow)) {
-                       finish_wait(&ctx->wait, &iowq.wq);
-                       continue;
+               mutex_unlock(&ctx->uring_lock);
+               flush_delayed_work(&ctx->rsrc_put_work);
+               ret = wait_for_completion_interruptible(&data->done);
+               if (!ret) {
+                       mutex_lock(&ctx->uring_lock);
+                       if (atomic_read(&data->refs) > 0) {
+                               /*
+                                * it has been revived by another thread while
+                                * we were unlocked
+                                */
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               break;
+                       }
+               }
+
+               atomic_inc(&data->refs);
+               /* wait for all works potentially completing data->done */
+               flush_delayed_work(&ctx->rsrc_put_work);
+               reinit_completion(&data->done);
+
+               ret = io_run_task_work_sig();
+               mutex_lock(&ctx->uring_lock);
+       } while (ret >= 0);
+       data->quiesce = false;
+
+       return ret;
+}
+
+static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
+{
+       unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
+       unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
+
+       return &data->tags[table_idx][off];
+}
+
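io_alloc_page_table() above spreads a flat array over page-sized chunks so no single allocation exceeds PAGE_SIZE, and io_get_tag_slot() maps an index back to (chunk, offset) using the IO_RSRC_TAG_TABLE_* shift/mask referenced above. A stand-alone sketch of the same two-level layout (illustrative constants, division instead of shift, no kernel APIs):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MODEL_PAGE_SIZE 4096u
    #define TAGS_PER_CHUNK  (MODEL_PAGE_SIZE / sizeof(uint64_t))   /* 512 on 4K pages */

    static uint64_t **alloc_tag_table(size_t nr)
    {
            size_t i, nr_chunks = (nr + TAGS_PER_CHUNK - 1) / TAGS_PER_CHUNK;
            uint64_t **table = calloc(nr_chunks, sizeof(*table));

            if (!table)
                    return NULL;
            for (i = 0; i < nr_chunks; i++) {
                    table[i] = calloc(TAGS_PER_CHUNK, sizeof(uint64_t));
                    if (!table[i])
                            return NULL;    /* error-path cleanup omitted for brevity */
            }
            return table;
    }

    static uint64_t *tag_slot(uint64_t **table, unsigned int idx)
    {
            return &table[idx / TAGS_PER_CHUNK][idx % TAGS_PER_CHUNK];
    }

    int main(void)
    {
            uint64_t **table = alloc_tag_table(1000);

            if (!table)
                    return 1;
            *tag_slot(table, 700) = 42;
            printf("tag[700] = %llu\n", (unsigned long long)*tag_slot(table, 700));
            return 0;
    }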
+static void io_rsrc_data_free(struct io_rsrc_data *data)
+{
+       size_t size = data->nr * sizeof(data->tags[0][0]);
+
+       if (data->tags)
+               io_free_page_table((void **)data->tags, size);
+       kfree(data);
+}
+
+static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
+                             u64 __user *utags, unsigned nr,
+                             struct io_rsrc_data **pdata)
+{
+       struct io_rsrc_data *data;
+       int ret = -ENOMEM;
+       unsigned i;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
+       if (!data->tags) {
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       data->nr = nr;
+       data->ctx = ctx;
+       data->do_put = do_put;
+       if (utags) {
+               ret = -EFAULT;
+               for (i = 0; i < nr; i++) {
+                       u64 *tag_slot = io_get_tag_slot(data, i);
+
+                       if (copy_from_user(tag_slot, &utags[i],
+                                          sizeof(*tag_slot)))
+                               goto fail;
                }
-               schedule();
-       } while (1);
-       finish_wait(&ctx->wait, &iowq.wq);
+       }
 
-       restore_saved_sigmask_unless(ret == -EINTR);
+       atomic_set(&data->refs, 1);
+       init_completion(&data->done);
+       *pdata = data;
+       return 0;
+fail:
+       io_rsrc_data_free(data);
+       return ret;
+}
 
-       return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
+{
+       table->files = kvcalloc(nr_files, sizeof(table->files[0]),
+                               GFP_KERNEL_ACCOUNT);
+       return !!table->files;
+}
+
+static void io_free_file_tables(struct io_file_table *table)
+{
+       kvfree(table->files);
+       table->files = NULL;
 }
 
 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -7086,129 +7770,97 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
                        fput(file);
        }
 #endif
+       io_free_file_tables(&ctx->file_table);
+       io_rsrc_data_free(ctx->file_data);
+       ctx->file_data = NULL;
+       ctx->nr_user_files = 0;
 }
 
-static void io_file_ref_kill(struct percpu_ref *ref)
+static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
-       struct fixed_file_data *data;
+       unsigned nr = ctx->nr_user_files;
+       int ret;
 
-       data = container_of(ref, struct fixed_file_data, refs);
-       complete(&data->done);
-}
+       if (!ctx->file_data)
+               return -ENXIO;
 
-static void io_sqe_files_set_node(struct fixed_file_data *file_data,
-                                 struct fixed_file_ref_node *ref_node)
-{
-       spin_lock_bh(&file_data->lock);
-       file_data->node = ref_node;
-       list_add_tail(&ref_node->node, &file_data->ref_list);
-       spin_unlock_bh(&file_data->lock);
-       percpu_ref_get(&file_data->refs);
+       /*
+        * Quiesce may unlock ->uring_lock, and while it's not held
+        * prevent new requests using the table.
+        */
+       ctx->nr_user_files = 0;
+       ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
+       ctx->nr_user_files = nr;
+       if (!ret)
+               __io_sqe_files_unregister(ctx);
+       return ret;
 }
 
-
-static void io_sqe_files_kill_node(struct fixed_file_data *data)
+static void io_sq_thread_unpark(struct io_sq_data *sqd)
+       __releases(&sqd->lock)
 {
-       struct fixed_file_ref_node *ref_node = NULL;
+       WARN_ON_ONCE(sqd->thread == current);
 
-       spin_lock_bh(&data->lock);
-       ref_node = data->node;
-       spin_unlock_bh(&data->lock);
-       if (ref_node)
-               percpu_ref_kill(&ref_node->refs);
+       /*
+        * Do the dance but not conditional clear_bit() because it'd race with
+        * other threads incrementing park_pending and setting the bit.
+        */
+       clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+       if (atomic_dec_return(&sqd->park_pending))
+               set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+       mutex_unlock(&sqd->lock);
 }
 
-static int io_file_ref_quiesce(struct fixed_file_data *data,
-                              struct io_ring_ctx *ctx)
+static void io_sq_thread_park(struct io_sq_data *sqd)
+       __acquires(&sqd->lock)
 {
-       int ret;
-       struct fixed_file_ref_node *backup_node;
-
-       if (data->quiesce)
-               return -ENXIO;
-
-       data->quiesce = true;
-       do {
-               backup_node = alloc_fixed_file_ref_node(ctx);
-               if (!backup_node)
-                       break;
-
-               io_sqe_files_kill_node(data);
-               percpu_ref_kill(&data->refs);
-               flush_delayed_work(&ctx->file_put_work);
-
-               ret = wait_for_completion_interruptible(&data->done);
-               if (!ret)
-                       break;
-
-               percpu_ref_resurrect(&data->refs);
-               io_sqe_files_set_node(data, backup_node);
-               backup_node = NULL;
-               reinit_completion(&data->done);
-               mutex_unlock(&ctx->uring_lock);
-               ret = io_run_task_work_sig();
-               mutex_lock(&ctx->uring_lock);
-
-               if (ret < 0)
-                       break;
-               backup_node = alloc_fixed_file_ref_node(ctx);
-               ret = -ENOMEM;
-               if (!backup_node)
-                       break;
-       } while (1);
-       data->quiesce = false;
+       WARN_ON_ONCE(sqd->thread == current);
 
-       if (backup_node)
-               destroy_fixed_file_ref_node(backup_node);
-       return ret;
+       atomic_inc(&sqd->park_pending);
+       set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+       mutex_lock(&sqd->lock);
+       if (sqd->thread)
+               wake_up_process(sqd->thread);
 }
 
-static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+static void io_sq_thread_stop(struct io_sq_data *sqd)
 {
-       struct fixed_file_data *data = ctx->file_data;
-       unsigned nr_tables, i;
-       int ret;
+       WARN_ON_ONCE(sqd->thread == current);
+       WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
 
-       /*
-        * percpu_ref_is_dying() is to stop parallel files unregister
-        * Since we possibly drop uring lock later in this function to
-        * run task work.
-        */
-       if (!data || percpu_ref_is_dying(&data->refs))
-               return -ENXIO;
-       ret = io_file_ref_quiesce(data, ctx);
-       if (ret)
-               return ret;
-
-       __io_sqe_files_unregister(ctx);
-       nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
-       for (i = 0; i < nr_tables; i++)
-               kfree(data->table[i].files);
-       kfree(data->table);
-       percpu_ref_exit(&data->refs);
-       kfree(data);
-       ctx->file_data = NULL;
-       ctx->nr_user_files = 0;
-       return 0;
+       set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+       mutex_lock(&sqd->lock);
+       if (sqd->thread)
+               wake_up_process(sqd->thread);
+       mutex_unlock(&sqd->lock);
+       wait_for_completion(&sqd->exited);
 }
 
 static void io_put_sq_data(struct io_sq_data *sqd)
 {
        if (refcount_dec_and_test(&sqd->refs)) {
-               /*
-                * The park is a bit of a work-around, without it we get
-                * warning spews on shutdown with SQPOLL set and affinity
-                * set to a single CPU.
-                */
-               if (sqd->thread) {
-                       kthread_park(sqd->thread);
-                       kthread_stop(sqd->thread);
-               }
+               WARN_ON_ONCE(atomic_read(&sqd->park_pending));
 
+               io_sq_thread_stop(sqd);
                kfree(sqd);
        }
 }
 
+static void io_sq_thread_finish(struct io_ring_ctx *ctx)
+{
+       struct io_sq_data *sqd = ctx->sq_data;
+
+       if (sqd) {
+               io_sq_thread_park(sqd);
+               list_del_init(&ctx->sqd_list);
+               io_sqd_update_thread_idle(sqd);
+               io_sq_thread_unpark(sqd);
+
+               io_put_sq_data(sqd);
+               ctx->sq_data = NULL;
+       }
+}
+
 static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
 {
        struct io_ring_ctx *ctx_attach;
@@ -7229,92 +7881,46 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
                fdput(f);
                return ERR_PTR(-EINVAL);
        }
+       if (sqd->task_tgid != current->tgid) {
+               fdput(f);
+               return ERR_PTR(-EPERM);
+       }
 
        refcount_inc(&sqd->refs);
        fdput(f);
        return sqd;
 }
 
-static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
+static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
+                                        bool *attached)
 {
        struct io_sq_data *sqd;
 
-       if (p->flags & IORING_SETUP_ATTACH_WQ)
-               return io_attach_sq_data(p);
+       *attached = false;
+       if (p->flags & IORING_SETUP_ATTACH_WQ) {
+               sqd = io_attach_sq_data(p);
+               if (!IS_ERR(sqd)) {
+                       *attached = true;
+                       return sqd;
+               }
+               /* fall through for EPERM case, setup new sqd/task */
+               if (PTR_ERR(sqd) != -EPERM)
+                       return sqd;
+       }
 
        sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
        if (!sqd)
                return ERR_PTR(-ENOMEM);
 
+       atomic_set(&sqd->park_pending, 0);
        refcount_set(&sqd->refs, 1);
        INIT_LIST_HEAD(&sqd->ctx_list);
-       INIT_LIST_HEAD(&sqd->ctx_new_list);
-       mutex_init(&sqd->ctx_lock);
        mutex_init(&sqd->lock);
        init_waitqueue_head(&sqd->wait);
+       init_completion(&sqd->exited);
        return sqd;
 }
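io_get_sq_data() lets a second ring share an existing SQPOLL backend when IORING_SETUP_ATTACH_WQ is set, and the task_tgid check above rejects attaching across processes with -EPERM, in which case setup falls through and creates a fresh sqd/task. A minimal sketch of the userspace side, assuming liburing:

    #include <liburing.h>
    #include <string.h>

    int main(void)
    {
            struct io_uring a, b;
            struct io_uring_params pa, pb;

            memset(&pa, 0, sizeof(pa));
            pa.flags = IORING_SETUP_SQPOLL;
            pa.sq_thread_idle = 1000;               /* ms of idle before the thread sleeps */
            if (io_uring_queue_init_params(8, &a, &pa) < 0)
                    return 1;

            memset(&pb, 0, sizeof(pb));
            pb.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
            pb.wq_fd = a.ring_fd;                   /* share ring a's SQPOLL thread */
            if (io_uring_queue_init_params(8, &b, &pb) < 0) {
                    io_uring_queue_exit(&a);
                    return 1;
            }

            io_uring_queue_exit(&b);
            io_uring_queue_exit(&a);
            return 0;
    }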
 
-static void io_sq_thread_unpark(struct io_sq_data *sqd)
-       __releases(&sqd->lock)
-{
-       if (!sqd->thread)
-               return;
-       kthread_unpark(sqd->thread);
-       mutex_unlock(&sqd->lock);
-}
-
-static void io_sq_thread_park(struct io_sq_data *sqd)
-       __acquires(&sqd->lock)
-{
-       if (!sqd->thread)
-               return;
-       mutex_lock(&sqd->lock);
-       kthread_park(sqd->thread);
-}
-
-static void io_sq_thread_stop(struct io_ring_ctx *ctx)
-{
-       struct io_sq_data *sqd = ctx->sq_data;
-
-       if (sqd) {
-               if (sqd->thread) {
-                       /*
-                        * We may arrive here from the error branch in
-                        * io_sq_offload_create() where the kthread is created
-                        * without being waked up, thus wake it up now to make
-                        * sure the wait will complete.
-                        */
-                       wake_up_process(sqd->thread);
-                       wait_for_completion(&ctx->sq_thread_comp);
-
-                       io_sq_thread_park(sqd);
-               }
-
-               mutex_lock(&sqd->ctx_lock);
-               list_del(&ctx->sqd_list);
-               mutex_unlock(&sqd->ctx_lock);
-
-               if (sqd->thread) {
-                       finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
-                       io_sq_thread_unpark(sqd);
-               }
-
-               io_put_sq_data(sqd);
-               ctx->sq_data = NULL;
-       }
-}
-
-static void io_finish_async(struct io_ring_ctx *ctx)
-{
-       io_sq_thread_stop(ctx);
-
-       if (ctx->io_wq) {
-               io_wq_destroy(ctx->io_wq);
-               ctx->io_wq = NULL;
-       }
-}
-
 #if defined(CONFIG_UNIX)
 /*
  * Ensure the UNIX gc is aware of our file set, so we are certain that
@@ -7342,7 +7948,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
        skb->scm_io_uring = 1;
 
        nr_files = 0;
-       fpl->user = get_uid(ctx->user);
+       fpl->user = get_uid(current_user());
        for (i = 0; i < nr; i++) {
                struct file *file = io_file_from_index(ctx, i + offset);
 
@@ -7418,35 +8024,9 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
 }
 #endif
 
-static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
-                                   unsigned nr_tables, unsigned nr_files)
-{
-       int i;
-
-       for (i = 0; i < nr_tables; i++) {
-               struct fixed_file_table *table = &file_data->table[i];
-               unsigned this_files;
-
-               this_files = min(nr_files, IORING_MAX_FILES_TABLE);
-               table->files = kcalloc(this_files, sizeof(struct file *),
-                                       GFP_KERNEL_ACCOUNT);
-               if (!table->files)
-                       break;
-               nr_files -= this_files;
-       }
-
-       if (i == nr_tables)
-               return 0;
-
-       for (i = 0; i < nr_tables; i++) {
-               struct fixed_file_table *table = &file_data->table[i];
-               kfree(table->files);
-       }
-       return 1;
-}
-
-static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
+static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
 {
+       struct file *file = prsrc->file;
 #if defined(CONFIG_UNIX)
        struct sock *sock = ctx->ring_sock->sk;
        struct sk_buff_head list, *head = &sock->sk_receive_queue;
@@ -7507,117 +8087,61 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
 #endif
 }
 
-struct io_file_put {
-       struct list_head list;
-       struct file *file;
-};
-
-static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
+static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 {
-       struct fixed_file_data *file_data = ref_node->file_data;
-       struct io_ring_ctx *ctx = file_data->ctx;
-       struct io_file_put *pfile, *tmp;
+       struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
+       struct io_ring_ctx *ctx = rsrc_data->ctx;
+       struct io_rsrc_put *prsrc, *tmp;
+
+       list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
+               list_del(&prsrc->list);
+
+               if (prsrc->tag) {
+                       bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
 
-       list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
-               list_del(&pfile->list);
-               io_ring_file_put(ctx, pfile->file);
-               kfree(pfile);
+                       io_ring_submit_lock(ctx, lock_ring);
+                       spin_lock(&ctx->completion_lock);
+                       io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
+                       io_commit_cqring(ctx);
+                       spin_unlock(&ctx->completion_lock);
+                       io_cqring_ev_posted(ctx);
+                       io_ring_submit_unlock(ctx, lock_ring);
+               }
+
+               rsrc_data->do_put(ctx, prsrc);
+               kfree(prsrc);
        }
 
-       percpu_ref_exit(&ref_node->refs);
-       kfree(ref_node);
-       percpu_ref_put(&file_data->refs);
+       io_rsrc_node_destroy(ref_node);
+       if (atomic_dec_and_test(&rsrc_data->refs))
+               complete(&rsrc_data->done);
 }
 
-static void io_file_put_work(struct work_struct *work)
+static void io_rsrc_put_work(struct work_struct *work)
 {
        struct io_ring_ctx *ctx;
        struct llist_node *node;
 
-       ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
-       node = llist_del_all(&ctx->file_put_llist);
+       ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
+       node = llist_del_all(&ctx->rsrc_put_llist);
 
        while (node) {
-               struct fixed_file_ref_node *ref_node;
+               struct io_rsrc_node *ref_node;
                struct llist_node *next = node->next;
 
-               ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
-               __io_file_put_work(ref_node);
+               ref_node = llist_entry(node, struct io_rsrc_node, llist);
+               __io_rsrc_put_work(ref_node);
                node = next;
        }
 }
 
-static void io_file_data_ref_zero(struct percpu_ref *ref)
-{
-       struct fixed_file_ref_node *ref_node;
-       struct fixed_file_data *data;
-       struct io_ring_ctx *ctx;
-       bool first_add = false;
-       int delay = HZ;
-
-       ref_node = container_of(ref, struct fixed_file_ref_node, refs);
-       data = ref_node->file_data;
-       ctx = data->ctx;
-
-       spin_lock_bh(&data->lock);
-       ref_node->done = true;
-
-       while (!list_empty(&data->ref_list)) {
-               ref_node = list_first_entry(&data->ref_list,
-                                       struct fixed_file_ref_node, node);
-               /* recycle ref nodes in order */
-               if (!ref_node->done)
-                       break;
-               list_del(&ref_node->node);
-               first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
-       }
-       spin_unlock_bh(&data->lock);
-
-       if (percpu_ref_is_dying(&data->refs))
-               delay = 0;
-
-       if (!delay)
-               mod_delayed_work(system_wq, &ctx->file_put_work, 0);
-       else if (first_add)
-               queue_delayed_work(system_wq, &ctx->file_put_work, delay);
-}
-
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
-                       struct io_ring_ctx *ctx)
-{
-       struct fixed_file_ref_node *ref_node;
-
-       ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
-       if (!ref_node)
-               return NULL;
-
-       if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
-                           0, GFP_KERNEL)) {
-               kfree(ref_node);
-               return NULL;
-       }
-       INIT_LIST_HEAD(&ref_node->node);
-       INIT_LIST_HEAD(&ref_node->file_list);
-       ref_node->file_data = ctx->file_data;
-       ref_node->done = false;
-       return ref_node;
-}
-
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
-{
-       percpu_ref_exit(&ref_node->refs);
-       kfree(ref_node);
-}
-
 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
-                                unsigned nr_args)
+                                unsigned nr_args, u64 __user *tags)
 {
        __s32 __user *fds = (__s32 __user *) arg;
-       unsigned nr_tables, i;
        struct file *file;
-       int fd, ret = -ENOMEM;
-       struct fixed_file_ref_node *ref_node;
-       struct fixed_file_data *file_data;
+       int fd, ret;
+       unsigned i;
 
        if (ctx->file_data)
                return -EBUSY;
@@ -7627,44 +8151,34 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                return -EMFILE;
        if (nr_args > rlimit(RLIMIT_NOFILE))
                return -EMFILE;
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               return ret;
+       ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
+                                &ctx->file_data);
+       if (ret)
+               return ret;
 
-       file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT);
-       if (!file_data)
-               return -ENOMEM;
-       file_data->ctx = ctx;
-       init_completion(&file_data->done);
-       INIT_LIST_HEAD(&file_data->ref_list);
-       spin_lock_init(&file_data->lock);
-
-       nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
-       file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
-                                  GFP_KERNEL_ACCOUNT);
-       if (!file_data->table)
-               goto out_free;
-
-       if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
-                               PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+       ret = -ENOMEM;
+       if (!io_alloc_file_tables(&ctx->file_table, nr_args))
                goto out_free;
 
-       if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
-               goto out_ref;
-       ctx->file_data = file_data;
-
        for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
-               struct fixed_file_table *table;
-               unsigned index;
-
                if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
                        ret = -EFAULT;
                        goto out_fput;
                }
                /* allow sparse sets */
-               if (fd == -1)
+               if (fd == -1) {
+                       ret = -EINVAL;
+                       if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
+                               goto out_fput;
                        continue;
+               }
 
                file = fget(fd);
                ret = -EBADF;
-               if (!file)
+               if (unlikely(!file))
                        goto out_fput;
 
                /*
@@ -7678,24 +8192,16 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                        fput(file);
                        goto out_fput;
                }
-               table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
-               index = i & IORING_FILE_TABLE_MASK;
-               table->files[index] = file;
+               io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
        }
 
        ret = io_sqe_files_scm(ctx);
        if (ret) {
-               io_sqe_files_unregister(ctx);
+               __io_sqe_files_unregister(ctx);
                return ret;
        }
 
-       ref_node = alloc_fixed_file_ref_node(ctx);
-       if (!ref_node) {
-               io_sqe_files_unregister(ctx);
-               return -ENOMEM;
-       }
-
-       io_sqe_files_set_node(file_data, ref_node);
+       io_rsrc_node_switch(ctx, NULL);
        return ret;
 out_fput:
        for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7703,14 +8209,10 @@ out_fput:
                if (file)
                        fput(file);
        }
-       for (i = 0; i < nr_tables; i++)
-               kfree(file_data->table[i].files);
+       io_free_file_tables(&ctx->file_table);
        ctx->nr_user_files = 0;
-out_ref:
-       percpu_ref_exit(&file_data->refs);
 out_free:
-       kfree(file_data->table);
-       kfree(file_data);
+       io_rsrc_data_free(ctx->file_data);
        ctx->file_data = NULL;
        return ret;
 }
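The registration path above is what IORING_REGISTER_FILES (and, through the tags argument, the tagged registration variant) lands in; fds of -1 leave sparse slots that can be filled later by an update. A minimal userspace sketch with liburing, registering one real descriptor and one sparse slot:

    #include <liburing.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int fds[2];
            int ret;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            fds[0] = open("/dev/null", O_RDONLY);
            fds[1] = -1;                            /* sparse slot, filled later via update */
            ret = io_uring_register_files(&ring, fds, 2);
            if (ret) {
                    fprintf(stderr, "register_files: %d\n", ret);
                    return 1;
            }

            /* I/O against slot 0 uses the fixed-file table instead of an fd lookup */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_read(sqe, 0, NULL, 0, 0); /* fd field holds the slot index */
            io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            printf("res %d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);

            io_uring_queue_exit(&ring);
            return 0;
    }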
@@ -7745,76 +8247,172 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
                        skb = NULL;
                }
        }
-       spin_unlock_irq(&head->lock);
+       spin_unlock_irq(&head->lock);
+
+       if (skb) {
+               fput(file);
+               return 0;
+       }
+
+       return __io_sqe_files_scm(ctx, 1, index);
+#else
+       return 0;
+#endif
+}
+
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+                                struct io_rsrc_node *node, void *rsrc)
+{
+       u64 *tag_slot = io_get_tag_slot(data, idx);
+       struct io_rsrc_put *prsrc;
+
+       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+       if (!prsrc)
+               return -ENOMEM;
+
+       prsrc->tag = *tag_slot;
+       *tag_slot = 0;
+       prsrc->rsrc = rsrc;
+       list_add(&prsrc->list, &node->rsrc_list);
+       return 0;
+}
+
+static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
+                                unsigned int issue_flags, u32 slot_index)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       bool needs_switch = false;
+       struct io_fixed_file *file_slot;
+       int ret = -EBADF;
+
+       io_ring_submit_lock(ctx, !force_nonblock);
+       if (file->f_op == &io_uring_fops)
+               goto err;
+       ret = -ENXIO;
+       if (!ctx->file_data)
+               goto err;
+       ret = -EINVAL;
+       if (slot_index >= ctx->nr_user_files)
+               goto err;
+
+       slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
+
+       if (file_slot->file_ptr) {
+               struct file *old_file;
+
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
+                       goto err;
+
+               old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+               ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+                                           ctx->rsrc_node, old_file);
+               if (ret)
+                       goto err;
+               file_slot->file_ptr = 0;
+               needs_switch = true;
+       }
+
+       *io_get_tag_slot(ctx->file_data, slot_index) = 0;
+       io_fixed_file_set(file_slot, file);
+       ret = io_sqe_file_register(ctx, file, slot_index);
+       if (ret) {
+               file_slot->file_ptr = 0;
+               goto err;
+       }
 
-       if (skb) {
+       ret = 0;
+err:
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->file_data);
+       io_ring_submit_unlock(ctx, !force_nonblock);
+       if (ret)
                fput(file);
-               return 0;
-       }
-
-       return __io_sqe_files_scm(ctx, 1, index);
-#else
-       return 0;
-#endif
+       return ret;
 }
 
-static int io_queue_file_removal(struct fixed_file_data *data,
-                                struct file *file)
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct io_file_put *pfile;
-       struct fixed_file_ref_node *ref_node = data->node;
+       unsigned int offset = req->close.file_slot - 1;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_fixed_file *file_slot;
+       struct file *file;
+       int ret;
 
-       pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
-       if (!pfile)
-               return -ENOMEM;
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       ret = -ENXIO;
+       if (unlikely(!ctx->file_data))
+               goto out;
+       ret = -EINVAL;
+       if (offset >= ctx->nr_user_files)
+               goto out;
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               goto out;
+
+       offset = array_index_nospec(offset, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, offset);
+       ret = -EBADF;
+       if (!file_slot->file_ptr)
+               goto out;
 
-       pfile->file = file;
-       list_add(&pfile->list, &ref_node->file_list);
+       file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+       ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+       if (ret)
+               goto out;
 
-       return 0;
+       file_slot->file_ptr = 0;
+       io_rsrc_node_switch(ctx, ctx->file_data);
+       ret = 0;
+out:
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       return ret;
 }
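io_close_fixed() above backs the close opcode when close.file_slot is set: rather than closing a process fd, it clears a slot of the fixed file table and queues the old file through the rsrc-put machinery. A sketch of the userspace side, assuming a liburing new enough to ship the io_uring_prep_close_direct() helper:

    #include <liburing.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int fd, ret;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            fd = open("/dev/null", O_RDONLY);
            ret = io_uring_register_files(&ring, &fd, 1);   /* occupies slot 0 */
            if (ret) {
                    fprintf(stderr, "register_files: %d\n", ret);
                    return 1;
            }

            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_close_direct(sqe, 0);     /* drop registered slot 0, not the fd */
            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            printf("close_direct: %d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);

            io_uring_queue_exit(&ring);
            return 0;
    }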
 
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
-                                struct io_uring_files_update *up,
+                                struct io_uring_rsrc_update2 *up,
                                 unsigned nr_args)
 {
-       struct fixed_file_data *data = ctx->file_data;
-       struct fixed_file_ref_node *ref_node;
+       u64 __user *tags = u64_to_user_ptr(up->tags);
+       __s32 __user *fds = u64_to_user_ptr(up->data);
+       struct io_rsrc_data *data = ctx->file_data;
+       struct io_fixed_file *file_slot;
        struct file *file;
-       __s32 __user *fds;
-       int fd, i, err;
-       __u32 done;
+       int fd, i, err = 0;
+       unsigned int done;
        bool needs_switch = false;
 
-       if (check_add_overflow(up->offset, nr_args, &done))
-               return -EOVERFLOW;
-       if (done > ctx->nr_user_files)
+       if (!ctx->file_data)
+               return -ENXIO;
+       if (up->offset + nr_args > ctx->nr_user_files)
                return -EINVAL;
 
-       ref_node = alloc_fixed_file_ref_node(ctx);
-       if (!ref_node)
-               return -ENOMEM;
-
-       done = 0;
-       fds = u64_to_user_ptr(up->fds);
-       while (nr_args) {
-               struct fixed_file_table *table;
-               unsigned index;
+       for (done = 0; done < nr_args; done++) {
+               u64 tag = 0;
 
-               err = 0;
-               if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+               if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
+                   copy_from_user(&fd, &fds[done], sizeof(fd))) {
                        err = -EFAULT;
                        break;
                }
-               i = array_index_nospec(up->offset, ctx->nr_user_files);
-               table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
-               index = i & IORING_FILE_TABLE_MASK;
-               if (table->files[index]) {
-                       file = table->files[index];
-                       err = io_queue_file_removal(data, file);
+               if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
+                       err = -EINVAL;
+                       break;
+               }
+               if (fd == IORING_REGISTER_FILES_SKIP)
+                       continue;
+
+               i = array_index_nospec(up->offset + done, ctx->nr_user_files);
+               file_slot = io_fixed_file_slot(&ctx->file_table, i);
+
+               if (file_slot->file_ptr) {
+                       file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+                       err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
                        if (err)
                                break;
-                       table->files[index] = NULL;
+                       file_slot->file_ptr = 0;
                        needs_switch = true;
                }
                if (fd != -1) {
@@ -7836,106 +8434,61 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                err = -EBADF;
                                break;
                        }
-                       table->files[index] = file;
+                       *io_get_tag_slot(data, i) = tag;
+                       io_fixed_file_set(file_slot, file);
                        err = io_sqe_file_register(ctx, file, i);
                        if (err) {
-                               table->files[index] = NULL;
+                               file_slot->file_ptr = 0;
                                fput(file);
                                break;
                        }
                }
-               nr_args--;
-               done++;
-               up->offset++;
        }
 
-       if (needs_switch) {
-               percpu_ref_kill(&data->node->refs);
-               io_sqe_files_set_node(data, ref_node);
-       } else
-               destroy_fixed_file_ref_node(ref_node);
-
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, data);
        return done ? done : err;
 }
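__io_sqe_files_update() now also carries per-slot tags and honours IORING_REGISTER_FILES_SKIP to leave individual slots untouched, while -1 clears a slot. Updating a slot from userspace, assuming liburing's io_uring_register_files_update() wrapper:

    #include <liburing.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            int fds[2], newfd, ret;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            fds[0] = open("/dev/null", O_RDONLY);
            fds[1] = -1;                            /* sparse slot */
            if (io_uring_register_files(&ring, fds, 2))
                    return 1;

            /* fill sparse slot 1; -1 would clear it, FILES_SKIP leaves it alone */
            newfd = open("/dev/zero", O_RDONLY);
            ret = io_uring_register_files_update(&ring, 1, &newfd, 1);
            printf("updated %d slot(s)\n", ret);    /* returns the number updated */

            io_uring_queue_exit(&ring);
            return 0;
    }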
 
-static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
-                              unsigned nr_args)
-{
-       struct io_uring_files_update up;
-
-       if (!ctx->file_data)
-               return -ENXIO;
-       if (!nr_args)
-               return -EINVAL;
-       if (copy_from_user(&up, arg, sizeof(up)))
-               return -EFAULT;
-       if (up.resv)
-               return -EINVAL;
-
-       return __io_sqe_files_update(ctx, &up, nr_args);
-}
-
-static void io_free_work(struct io_wq_work *work)
-{
-       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
-       /* Consider that io_steal_work() relies on this ref */
-       io_put_req(req);
-}
-
-static int io_init_wq_offload(struct io_ring_ctx *ctx,
-                             struct io_uring_params *p)
+static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+                                       struct task_struct *task)
 {
+       struct io_wq_hash *hash;
        struct io_wq_data data;
-       struct fd f;
-       struct io_ring_ctx *ctx_attach;
        unsigned int concurrency;
-       int ret = 0;
-
-       data.user = ctx->user;
-       data.free_work = io_free_work;
-       data.do_work = io_wq_submit_work;
 
-       if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
-               /* Do QD, or 4 * CPUS, whatever is smallest */
-               concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-
-               ctx->io_wq = io_wq_create(concurrency, &data);
-               if (IS_ERR(ctx->io_wq)) {
-                       ret = PTR_ERR(ctx->io_wq);
-                       ctx->io_wq = NULL;
+       mutex_lock(&ctx->uring_lock);
+       hash = ctx->hash_map;
+       if (!hash) {
+               hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+               if (!hash) {
+                       mutex_unlock(&ctx->uring_lock);
+                       return ERR_PTR(-ENOMEM);
                }
-               return ret;
+               refcount_set(&hash->refs, 1);
+               init_waitqueue_head(&hash->wait);
+               ctx->hash_map = hash;
        }
+       mutex_unlock(&ctx->uring_lock);
 
-       f = fdget(p->wq_fd);
-       if (!f.file)
-               return -EBADF;
-
-       if (f.file->f_op != &io_uring_fops) {
-               ret = -EINVAL;
-               goto out_fput;
-       }
+       data.hash = hash;
+       data.task = task;
+       data.free_work = io_wq_free_work;
+       data.do_work = io_wq_submit_work;
 
-       ctx_attach = f.file->private_data;
-       /* @io_wq is protected by holding the fd */
-       if (!io_wq_get(ctx_attach->io_wq, &data)) {
-               ret = -EINVAL;
-               goto out_fput;
-       }
+       /* Do QD, or 4 * CPUS, whatever is smallest */
+       concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
 
-       ctx->io_wq = ctx_attach->io_wq;
-out_fput:
-       fdput(f);
-       return ret;
+       return io_wq_create(concurrency, &data);
 }
 
-static int io_uring_alloc_task_context(struct task_struct *task)
+static int io_uring_alloc_task_context(struct task_struct *task,
+                                      struct io_ring_ctx *ctx)
 {
        struct io_uring_task *tctx;
        int ret;
 
-       tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
+       tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
        if (unlikely(!tctx))
                return -ENOMEM;
 
@@ -7945,14 +8498,22 @@ static int io_uring_alloc_task_context(struct task_struct *task)
                return ret;
        }
 
+       tctx->io_wq = io_init_wq_offload(ctx, task);
+       if (IS_ERR(tctx->io_wq)) {
+               ret = PTR_ERR(tctx->io_wq);
+               percpu_counter_destroy(&tctx->inflight);
+               kfree(tctx);
+               return ret;
+       }
+
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
-       tctx->last = NULL;
        atomic_set(&tctx->in_idle, 0);
-       tctx->sqpoll = false;
-       io_init_identity(&tctx->__identity);
-       tctx->identity = &tctx->__identity;
+       atomic_set(&tctx->inflight_tracked, 0);
        task->io_uring = tctx;
+       spin_lock_init(&tctx->task_lock);
+       INIT_WQ_LIST(&tctx->task_list);
+       init_task_work(&tctx->task_work, tctx_task_work);
        return 0;
 }
 
@@ -7961,9 +8522,9 @@ void __io_uring_free(struct task_struct *tsk)
        struct io_uring_task *tctx = tsk->io_uring;
 
        WARN_ON_ONCE(!xa_empty(&tctx->xa));
-       WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
-       if (tctx->identity != &tctx->__identity)
-               kfree(tctx->identity);
+       WARN_ON_ONCE(tctx->io_wq);
+       WARN_ON_ONCE(tctx->cached_refs);
+
        percpu_counter_destroy(&tctx->inflight);
        kfree(tctx);
        tsk->io_uring = NULL;
@@ -7974,54 +8535,71 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 {
        int ret;
 
+       /* Retain compatibility with failing for an invalid attach attempt */
+       if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
+                               IORING_SETUP_ATTACH_WQ) {
+               struct fd f;
+
+               f = fdget(p->wq_fd);
+               if (!f.file)
+                       return -ENXIO;
+               if (f.file->f_op != &io_uring_fops) {
+                       fdput(f);
+                       return -EINVAL;
+               }
+               fdput(f);
+       }
        if (ctx->flags & IORING_SETUP_SQPOLL) {
+               struct task_struct *tsk;
                struct io_sq_data *sqd;
+               bool attached;
 
-               ret = -EPERM;
-               if (!capable(CAP_SYS_ADMIN))
-                       goto err;
-
-               sqd = io_get_sq_data(p);
+               sqd = io_get_sq_data(p, &attached);
                if (IS_ERR(sqd)) {
                        ret = PTR_ERR(sqd);
                        goto err;
                }
 
+               ctx->sq_creds = get_current_cred();
                ctx->sq_data = sqd;
-               io_sq_thread_park(sqd);
-               mutex_lock(&sqd->ctx_lock);
-               list_add(&ctx->sqd_list, &sqd->ctx_new_list);
-               mutex_unlock(&sqd->ctx_lock);
-               io_sq_thread_unpark(sqd);
-
                ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
                if (!ctx->sq_thread_idle)
                        ctx->sq_thread_idle = HZ;
 
-               if (sqd->thread)
-                       goto done;
+               io_sq_thread_park(sqd);
+               list_add(&ctx->sqd_list, &sqd->ctx_list);
+               io_sqd_update_thread_idle(sqd);
+               /* don't attach to a dying SQPOLL thread, would be racy */
+               ret = (attached && !sqd->thread) ? -ENXIO : 0;
+               io_sq_thread_unpark(sqd);
+
+               if (ret < 0)
+                       goto err;
+               if (attached)
+                       return 0;
 
                if (p->flags & IORING_SETUP_SQ_AFF) {
                        int cpu = p->sq_thread_cpu;
 
                        ret = -EINVAL;
-                       if (cpu >= nr_cpu_ids)
-                               goto err;
-                       if (!cpu_online(cpu))
-                               goto err;
-
-                       sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
-                                                       cpu, "io_uring-sq");
+                       if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+                               goto err_sqpoll;
+                       sqd->sq_cpu = cpu;
                } else {
-                       sqd->thread = kthread_create(io_sq_thread, sqd,
-                                                       "io_uring-sq");
+                       sqd->sq_cpu = -1;
                }
-               if (IS_ERR(sqd->thread)) {
-                       ret = PTR_ERR(sqd->thread);
-                       sqd->thread = NULL;
-                       goto err;
+
+               sqd->task_pid = current->pid;
+               sqd->task_tgid = current->tgid;
+               tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
+               if (IS_ERR(tsk)) {
+                       ret = PTR_ERR(tsk);
+                       goto err_sqpoll;
                }
-               ret = io_uring_alloc_task_context(sqd->thread);
+
+               sqd->thread = tsk;
+               ret = io_uring_alloc_task_context(tsk, ctx);
+               wake_up_new_task(tsk);
                if (ret)
                        goto err;
        } else if (p->flags & IORING_SETUP_SQ_AFF) {
@@ -8030,26 +8608,14 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
                goto err;
        }
 
-done:
-       ret = io_init_wq_offload(ctx, p);
-       if (ret)
-               goto err;
-
        return 0;
+err_sqpoll:
+       complete(&ctx->sq_data->exited);
 err:
-       io_finish_async(ctx);
+       io_sq_thread_finish(ctx);
        return ret;
 }
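With this rework the SQPOLL backend is an io thread created via create_io_thread() in the submitter's context rather than a kthread, and the old CAP_SYS_ADMIN gate removed above is gone. Basic SQPOLL setup from userspace, assuming liburing:

    #include <liburing.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_params p;
            int ret;

            memset(&p, 0, sizeof(p));
            p.flags = IORING_SETUP_SQPOLL;
            p.sq_thread_idle = 2000;        /* ms before the SQPOLL thread goes to sleep */

            ret = io_uring_queue_init_params(8, &ring, &p);
            if (ret < 0) {
                    fprintf(stderr, "queue_init_params: %d\n", ret);
                    return 1;
            }
            /* submissions are now picked up by the kernel-side io_sq_thread */
            io_uring_queue_exit(&ring);
            return 0;
    }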
 
-static void io_sq_offload_start(struct io_ring_ctx *ctx)
-{
-       struct io_sq_data *sqd = ctx->sq_data;
-
-       ctx->flags &= ~IORING_SETUP_R_DISABLED;
-       if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread)
-               wake_up_process(sqd->thread);
-}
-
 static inline void __io_unaccount_mem(struct user_struct *user,
                                      unsigned long nr_pages)
 {
@@ -8075,37 +8641,27 @@ static inline int __io_account_mem(struct user_struct *user,
        return 0;
 }
 
-static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
-                            enum io_mem_account acct)
+static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
 {
-       if (ctx->limit_mem)
+       if (ctx->user)
                __io_unaccount_mem(ctx->user, nr_pages);
 
-       if (ctx->mm_account) {
-               if (acct == ACCT_LOCKED)
-                       ctx->mm_account->locked_vm -= nr_pages;
-               else if (acct == ACCT_PINNED)
-                       atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
-       }
+       if (ctx->mm_account)
+               atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
 }
 
-static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
-                         enum io_mem_account acct)
+static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
 {
        int ret;
 
-       if (ctx->limit_mem) {
+       if (ctx->user) {
                ret = __io_account_mem(ctx->user, nr_pages);
                if (ret)
                        return ret;
        }
 
-       if (ctx->mm_account) {
-               if (acct == ACCT_LOCKED)
-                       ctx->mm_account->locked_vm += nr_pages;
-               else if (acct == ACCT_PINNED)
-                       atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
-       }
+       if (ctx->mm_account)
+               atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
 
        return 0;
 }
@@ -8124,10 +8680,9 @@ static void io_mem_free(void *ptr)
 
 static void *io_mem_alloc(size_t size)
 {
-       gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
-                               __GFP_NORETRY;
+       gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
 
-       return (void *) __get_free_pages(gfp_flags, get_order(size));
+       return (void *) __get_free_pages(gfp, get_order(size));
 }
 
 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
@@ -8159,41 +8714,58 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
        return off;
 }
 
-static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
+static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 {
-       size_t pages;
-
-       pages = (size_t)1 << get_order(
-               rings_size(sq_entries, cq_entries, NULL));
-       pages += (size_t)1 << get_order(
-               array_size(sizeof(struct io_uring_sqe), sq_entries));
+       struct io_mapped_ubuf *imu = *slot;
+       unsigned int i;
 
-       return pages;
+       if (imu != ctx->dummy_ubuf) {
+               for (i = 0; i < imu->nr_bvecs; i++)
+                       unpin_user_page(imu->bvec[i].bv_page);
+               if (imu->acct_pages)
+                       io_unaccount_mem(ctx, imu->acct_pages);
+               kvfree(imu);
+       }
+       *slot = NULL;
 }
 
-static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
+static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
 {
-       int i, j;
+       io_buffer_unmap(ctx, &prsrc->buf);
+       prsrc->buf = NULL;
+}
 
-       if (!ctx->user_bufs)
-               return -ENXIO;
+static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+{
+       unsigned int i;
 
-       for (i = 0; i < ctx->nr_user_bufs; i++) {
-               struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+       for (i = 0; i < ctx->nr_user_bufs; i++)
+               io_buffer_unmap(ctx, &ctx->user_bufs[i]);
+       kfree(ctx->user_bufs);
+       io_rsrc_data_free(ctx->buf_data);
+       ctx->user_bufs = NULL;
+       ctx->buf_data = NULL;
+       ctx->nr_user_bufs = 0;
+}
 
-               for (j = 0; j < imu->nr_bvecs; j++)
-                       unpin_user_page(imu->bvec[j].bv_page);
+static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+{
+       unsigned nr = ctx->nr_user_bufs;
+       int ret;
 
-               if (imu->acct_pages)
-                       io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
-               kvfree(imu->bvec);
-               imu->nr_bvecs = 0;
-       }
+       if (!ctx->buf_data)
+               return -ENXIO;
 
-       kfree(ctx->user_bufs);
-       ctx->user_bufs = NULL;
+       /*
+        * Quiesce may unlock ->uring_lock, and while it's not held
+        * prevent new requests using the table.
+        */
        ctx->nr_user_bufs = 0;
-       return 0;
+       ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
+       ctx->nr_user_bufs = nr;
+       if (!ret)
+               __io_sqe_buffers_unregister(ctx);
+       return ret;
 }
 
 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
@@ -8245,7 +8817,7 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 
        /* check previously registered pages */
        for (i = 0; i < ctx->nr_user_bufs; i++) {
-               struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+               struct io_mapped_ubuf *imu = ctx->user_bufs[i];
 
                for (j = 0; j < imu->nr_bvecs; j++) {
                        if (!PageCompound(imu->bvec[j].bv_page))
@@ -8264,6 +8836,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 {
        int i, ret;
 
+       imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
@@ -8283,147 +8856,252 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
        if (!imu->acct_pages)
                return 0;
 
-       ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
+       ret = io_account_mem(ctx, imu->acct_pages);
        if (ret)
                imu->acct_pages = 0;
        return ret;
 }
 
-static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
-                                 unsigned nr_args)
+static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+                                 struct io_mapped_ubuf **pimu,
+                                 struct page **last_hpage)
 {
+       struct io_mapped_ubuf *imu = NULL;
        struct vm_area_struct **vmas = NULL;
        struct page **pages = NULL;
+       unsigned long off, start, end, ubuf;
+       size_t size;
+       int ret, pret, nr_pages, i;
+
+       if (!iov->iov_base) {
+               *pimu = ctx->dummy_ubuf;
+               return 0;
+       }
+
+       ubuf = (unsigned long) iov->iov_base;
+       end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       start = ubuf >> PAGE_SHIFT;
+       nr_pages = end - start;
+
+       *pimu = NULL;
+       ret = -ENOMEM;
+
+       pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+       if (!pages)
+               goto done;
+
+       vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
+                             GFP_KERNEL);
+       if (!vmas)
+               goto done;
+
+       imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
+       if (!imu)
+               goto done;
+
+       ret = 0;
+       mmap_read_lock(current->mm);
+       pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+                             pages, vmas);
+       if (pret == nr_pages) {
+               /* don't support file backed memory */
+               for (i = 0; i < nr_pages; i++) {
+                       struct vm_area_struct *vma = vmas[i];
+
+                       if (vma_is_shmem(vma))
+                               continue;
+                       if (vma->vm_file &&
+                           !is_file_hugepages(vma->vm_file)) {
+                               ret = -EOPNOTSUPP;
+                               break;
+                       }
+               }
+       } else {
+               ret = pret < 0 ? pret : -EFAULT;
+       }
+       mmap_read_unlock(current->mm);
+       if (ret) {
+               /*
+                * if we did partial map, or found file backed vmas,
+                * release any pages we did get
+                */
+               if (pret > 0)
+                       unpin_user_pages(pages, pret);
+               goto done;
+       }
+
+       ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
+       if (ret) {
+               unpin_user_pages(pages, pret);
+               goto done;
+       }
+
+       off = ubuf & ~PAGE_MASK;
+       size = iov->iov_len;
+       for (i = 0; i < nr_pages; i++) {
+               size_t vec_len;
+
+               vec_len = min_t(size_t, size, PAGE_SIZE - off);
+               imu->bvec[i].bv_page = pages[i];
+               imu->bvec[i].bv_len = vec_len;
+               imu->bvec[i].bv_offset = off;
+               off = 0;
+               size -= vec_len;
+       }
+       /* store original address for later verification */
+       imu->ubuf = ubuf;
+       imu->ubuf_end = ubuf + iov->iov_len;
+       imu->nr_bvecs = nr_pages;
+       *pimu = imu;
+       ret = 0;
+done:
+       if (ret)
+               kvfree(imu);
+       kvfree(pages);
+       kvfree(vmas);
+       return ret;
+}
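
The replacement io_sqe_buffer_register() above computes the page span of the user iovec and then carves it into one bio_vec per pinned page, where only the first segment carries the sub-page offset. A minimal userspace sketch of the same arithmetic, using sysconf() instead of PAGE_SHIFT and hypothetical addr/len values:

#include <stdio.h>
#include <unistd.h>

/* Mirror of the kernel's split: nr_pages = end - start, the first segment
 * starts at (addr % page_size), every later segment starts at offset 0. */
int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long addr = 0x1000f00;		/* hypothetical buffer address */
	size_t len = 10000;			/* hypothetical buffer length */
	unsigned long start = addr / page_size;
	unsigned long end = (addr + len + page_size - 1) / page_size;
	unsigned long nr_pages = end - start;
	size_t off = addr % page_size;
	size_t left = len;

	printf("buffer spans %lu page(s)\n", nr_pages);
	for (unsigned long i = 0; i < nr_pages; i++) {
		size_t seg = left < page_size - off ? left : page_size - off;

		printf("bvec[%lu]: len=%zu offset=%zu\n", i, seg, off);
		off = 0;
		left -= seg;
	}
	return 0;
}
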
+
+static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
+{
+       ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
+       return ctx->user_bufs ? 0 : -ENOMEM;
+}
+
+static int io_buffer_validate(struct iovec *iov)
+{
+       unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
+
+       /*
+        * Don't impose further limits on the size and buffer
+        * constraints here, we'll -EINVAL later when IO is
+        * submitted if they are wrong.
+        */
+       if (!iov->iov_base)
+               return iov->iov_len ? -EFAULT : 0;
+       if (!iov->iov_len)
+               return -EFAULT;
+
+       /* arbitrary limit, but we need something */
+       if (iov->iov_len > SZ_1G)
+               return -EFAULT;
+
+       if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
+               return -EOVERFLOW;
+
+       return 0;
+}
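
io_buffer_validate() only enforces registration-time constraints: a NULL base is allowed solely as a sparse slot with zero length, a non-NULL buffer needs a non-zero length of at most 1 GiB, and base plus the page-rounded length must not wrap the address space. A hedged userspace pre-check mirroring those rules (the 4095 below assumes a 4 KiB page size, standing in for PAGE_SIZE - 1):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Returns 0 if an iovec would pass the kernel's registration-time checks. */
int buffer_validate(const void *base, size_t len)
{
	uintptr_t tmp;

	if (!base)
		return len ? -EFAULT : 0;	/* NULL only as a sparse slot */
	if (!len)
		return -EFAULT;
	if (len > (1ULL << 30))
		return -EFAULT;			/* arbitrary 1 GiB cap */
	if (__builtin_add_overflow((uintptr_t)base, len + 4095, &tmp))
		return -EOVERFLOW;		/* base + rounded length wraps */
	return 0;
}
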
+
+static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
+                                  unsigned int nr_args, u64 __user *tags)
+{
        struct page *last_hpage = NULL;
-       int i, j, got_pages = 0;
-       int ret = -EINVAL;
+       struct io_rsrc_data *data;
+       int i, ret;
+       struct iovec iov;
 
        if (ctx->user_bufs)
                return -EBUSY;
-       if (!nr_args || nr_args > UIO_MAXIOV)
+       if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
                return -EINVAL;
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               return ret;
+       ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
+       if (ret)
+               return ret;
+       ret = io_buffers_map_alloc(ctx, nr_args);
+       if (ret) {
+               io_rsrc_data_free(data);
+               return ret;
+       }
 
-       ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
-                                       GFP_KERNEL);
-       if (!ctx->user_bufs)
-               return -ENOMEM;
-
-       for (i = 0; i < nr_args; i++) {
-               struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
-               unsigned long off, start, end, ubuf;
-               int pret, nr_pages;
-               struct iovec iov;
-               size_t size;
-
+       for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
                ret = io_copy_iov(ctx, &iov, arg, i);
                if (ret)
-                       goto err;
+                       break;
+               ret = io_buffer_validate(&iov);
+               if (ret)
+                       break;
+               if (!iov.iov_base && *io_get_tag_slot(data, i)) {
+                       ret = -EINVAL;
+                       break;
+               }
 
-               /*
-                * Don't impose further limits on the size and buffer
-                * constraints here, we'll -EINVAL later when IO is
-                * submitted if they are wrong.
-                */
-               ret = -EFAULT;
-               if (!iov.iov_base || !iov.iov_len)
-                       goto err;
+               ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
+                                            &last_hpage);
+               if (ret)
+                       break;
+       }
 
-               /* arbitrary limit, but we need something */
-               if (iov.iov_len > SZ_1G)
-                       goto err;
+       WARN_ON_ONCE(ctx->buf_data);
 
-               ubuf = (unsigned long) iov.iov_base;
-               end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-               start = ubuf >> PAGE_SHIFT;
-               nr_pages = end - start;
+       ctx->buf_data = data;
+       if (ret)
+               __io_sqe_buffers_unregister(ctx);
+       else
+               io_rsrc_node_switch(ctx, NULL);
+       return ret;
+}
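
Once buffers have gone through this registration path, I/O can reference them by table index instead of pinning pages per request. A sketch of filling an SQE for IORING_OP_READ_FIXED against slot 0; only the SQE fields are shown, ring setup and submission are assumed to exist elsewhere, and fd/buf are placeholders:

#include <string.h>
#include <sys/types.h>
#include <linux/io_uring.h>

/* Prepare one SQE that reads into (part of) the first registered buffer. */
void prep_read_fixed(struct io_uring_sqe *sqe, int fd,
		     void *buf, unsigned int len, off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_FIXED;
	sqe->fd = fd;
	sqe->addr = (unsigned long)buf;	/* must lie inside the registered buffer */
	sqe->len = len;
	sqe->off = offset;
	sqe->buf_index = 0;		/* index into the registered buffer table */
	sqe->user_data = 0x42;		/* arbitrary completion cookie */
}
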
 
-               ret = 0;
-               if (!pages || nr_pages > got_pages) {
-                       kvfree(vmas);
-                       kvfree(pages);
-                       pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-                                               GFP_KERNEL);
-                       vmas = kvmalloc_array(nr_pages,
-                                       sizeof(struct vm_area_struct *),
-                                       GFP_KERNEL);
-                       if (!pages || !vmas) {
-                               ret = -ENOMEM;
-                               goto err;
-                       }
-                       got_pages = nr_pages;
-               }
+static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
+                                  struct io_uring_rsrc_update2 *up,
+                                  unsigned int nr_args)
+{
+       u64 __user *tags = u64_to_user_ptr(up->tags);
+       struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
+       struct page *last_hpage = NULL;
+       bool needs_switch = false;
+       __u32 done;
+       int i, err;
+
+       if (!ctx->buf_data)
+               return -ENXIO;
+       if (up->offset + nr_args > ctx->nr_user_bufs)
+               return -EINVAL;
 
-               imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
-                                               GFP_KERNEL);
-               ret = -ENOMEM;
-               if (!imu->bvec)
-                       goto err;
+       for (done = 0; done < nr_args; done++) {
+               struct io_mapped_ubuf *imu;
+               int offset = up->offset + done;
+               u64 tag = 0;
 
-               ret = 0;
-               mmap_read_lock(current->mm);
-               pret = pin_user_pages(ubuf, nr_pages,
-                                     FOLL_WRITE | FOLL_LONGTERM,
-                                     pages, vmas);
-               if (pret == nr_pages) {
-                       /* don't support file backed memory */
-                       for (j = 0; j < nr_pages; j++) {
-                               struct vm_area_struct *vma = vmas[j];
-
-                               if (vma->vm_file &&
-                                   !is_file_hugepages(vma->vm_file)) {
-                                       ret = -EOPNOTSUPP;
-                                       break;
-                               }
-                       }
-               } else {
-                       ret = pret < 0 ? pret : -EFAULT;
-               }
-               mmap_read_unlock(current->mm);
-               if (ret) {
-                       /*
-                        * if we did partial map, or found file backed vmas,
-                        * release any pages we did get
-                        */
-                       if (pret > 0)
-                               unpin_user_pages(pages, pret);
-                       kvfree(imu->bvec);
-                       goto err;
+               err = io_copy_iov(ctx, &iov, iovs, done);
+               if (err)
+                       break;
+               if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
+                       err = -EFAULT;
+                       break;
                }
-
-               ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
-               if (ret) {
-                       unpin_user_pages(pages, pret);
-                       kvfree(imu->bvec);
-                       goto err;
+               err = io_buffer_validate(&iov);
+               if (err)
+                       break;
+               if (!iov.iov_base && tag) {
+                       err = -EINVAL;
+                       break;
                }
+               err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
+               if (err)
+                       break;
 
-               off = ubuf & ~PAGE_MASK;
-               size = iov.iov_len;
-               for (j = 0; j < nr_pages; j++) {
-                       size_t vec_len;
-
-                       vec_len = min_t(size_t, size, PAGE_SIZE - off);
-                       imu->bvec[j].bv_page = pages[j];
-                       imu->bvec[j].bv_len = vec_len;
-                       imu->bvec[j].bv_offset = off;
-                       off = 0;
-                       size -= vec_len;
+               i = array_index_nospec(offset, ctx->nr_user_bufs);
+               if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
+                       err = io_queue_rsrc_removal(ctx->buf_data, i,
+                                                   ctx->rsrc_node, ctx->user_bufs[i]);
+                       if (unlikely(err)) {
+                               io_buffer_unmap(ctx, &imu);
+                               break;
+                       }
+                       ctx->user_bufs[i] = NULL;
+                       needs_switch = true;
                }
-               /* store original address for later verification */
-               imu->ubuf = ubuf;
-               imu->len = iov.iov_len;
-               imu->nr_bvecs = nr_pages;
 
-               ctx->nr_user_bufs++;
+               ctx->user_bufs[i] = imu;
+               *io_get_tag_slot(ctx->buf_data, offset) = tag;
        }
-       kvfree(pages);
-       kvfree(vmas);
-       return 0;
-err:
-       kvfree(pages);
-       kvfree(vmas);
-       io_sqe_buffer_unregister(ctx);
-       return ret;
+
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->buf_data);
+       return done ? done : err;
 }
 
 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
@@ -8440,6 +9118,7 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
        ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
        if (IS_ERR(ctx->cq_ev_fd)) {
                int ret = PTR_ERR(ctx->cq_ev_fd);
+
                ctx->cq_ev_fd = NULL;
                return ret;
        }
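
io_eventfd_register() backs IORING_REGISTER_EVENTFD: userspace hands the ring an eventfd and the counter is bumped as completions are posted. A minimal sketch over the raw syscall, assuming ring_fd is an already set-up io_uring fd and the libc exposes __NR_io_uring_register:

#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Tie an eventfd to the ring; read()/poll() on it then signals new CQEs. */
int attach_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	/* nr_args is 1: exactly one fd is being registered */
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;
}
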
@@ -8467,26 +9146,68 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
                __io_remove_buffers(ctx, buf, index, -1U);
 }
 
-static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+static void io_req_cache_free(struct list_head *list)
 {
-       io_finish_async(ctx);
-       io_sqe_buffer_unregister(ctx);
+       struct io_kiocb *req, *nxt;
 
-       if (ctx->sqo_task) {
-               put_task_struct(ctx->sqo_task);
-               ctx->sqo_task = NULL;
+       list_for_each_entry_safe(req, nxt, list, inflight_entry) {
+               list_del(&req->inflight_entry);
+               kmem_cache_free(req_cachep, req);
        }
+}
 
-#ifdef CONFIG_BLK_CGROUP
-       if (ctx->sqo_blkcg_css)
-               css_put(ctx->sqo_blkcg_css);
-#endif
+static void io_req_caches_free(struct io_ring_ctx *ctx)
+{
+       struct io_submit_state *state = &ctx->submit_state;
+
+       mutex_lock(&ctx->uring_lock);
+
+       if (state->free_reqs) {
+               kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+               state->free_reqs = 0;
+       }
+
+       io_flush_cached_locked_reqs(ctx, state);
+       io_req_cache_free(&state->free_list);
+       mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
+{
+       if (data && !atomic_dec_and_test(&data->refs))
+               wait_for_completion(&data->done);
+}
+
+static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+{
+       io_sq_thread_finish(ctx);
+
+       /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+       io_wait_rsrc_data(ctx->buf_data);
+       io_wait_rsrc_data(ctx->file_data);
 
        mutex_lock(&ctx->uring_lock);
-       io_sqe_files_unregister(ctx);
+       if (ctx->buf_data)
+               __io_sqe_buffers_unregister(ctx);
+       if (ctx->file_data)
+               __io_sqe_files_unregister(ctx);
+       if (ctx->rings)
+               __io_cqring_overflow_flush(ctx, true);
        mutex_unlock(&ctx->uring_lock);
        io_eventfd_unregister(ctx);
        io_destroy_buffers(ctx);
+       if (ctx->sq_creds)
+               put_cred(ctx->sq_creds);
+
+       /* there are no registered resources left, nobody uses it */
+       if (ctx->rsrc_node)
+               io_rsrc_node_destroy(ctx->rsrc_node);
+       if (ctx->rsrc_backup_node)
+               io_rsrc_node_destroy(ctx->rsrc_backup_node);
+       flush_delayed_work(&ctx->rsrc_put_work);
+
+       WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
+       WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
 
 #if defined(CONFIG_UNIX)
        if (ctx->ring_sock) {
@@ -8494,6 +9215,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                sock_release(ctx->ring_sock);
        }
 #endif
+       WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
 
        if (ctx->mm_account) {
                mmdrop(ctx->mm_account);
@@ -8505,9 +9227,11 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 
        percpu_ref_exit(&ctx->refs);
        free_uid(ctx->user);
-       put_cred(ctx->creds);
+       io_req_caches_free(ctx);
+       if (ctx->hash_map)
+               io_wq_put_hash(ctx->hash_map);
        kfree(ctx->cancel_hash);
-       kmem_cache_free(req_cachep, ctx->fallback_req);
+       kfree(ctx->dummy_ubuf);
        kfree(ctx);
 }
 
@@ -8516,7 +9240,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        struct io_ring_ctx *ctx = file->private_data;
        __poll_t mask = 0;
 
-       poll_wait(file, &ctx->cq_wait, wait);
+       poll_wait(file, &ctx->poll_wait, wait);
        /*
         * synchronizes with barrier from wq_has_sleeper call in
         * io_commit_cqring
@@ -8538,38 +9262,63 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
         * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
         * pushes them to do the flush.
         */
-       if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
+       if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
 }
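
io_uring_poll() is the ->poll handler for the ring fd (note the switch to ctx->poll_wait and check_cq_overflow), so the fd can be dropped into poll/epoll like any other descriptor and turns readable once CQEs or an overflow condition are pending. A small sketch using poll(2), with ring_fd assumed to be set up elsewhere:

#include <poll.h>

/* Block until the ring fd turns readable, i.e. there are completions to reap. */
int wait_for_cqes(int ring_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = ring_fd,
		.events = POLLIN,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return ret;		/* 0 on timeout, -1 on error */
	return pfd.revents & POLLIN;	/* non-zero: CQ ring should be drained */
}
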
 
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
-       struct io_ring_ctx *ctx = file->private_data;
-
-       return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 {
-       struct io_identity *iod;
+       const struct cred *creds;
 
-       iod = xa_erase(&ctx->personalities, id);
-       if (iod) {
-               put_cred(iod->creds);
-               if (refcount_dec_and_test(&iod->count))
-                       kfree(iod);
+       creds = xa_erase(&ctx->personalities, id);
+       if (creds) {
+               put_cred(creds);
                return 0;
        }
 
        return -EINVAL;
 }
 
+struct io_tctx_exit {
+       struct callback_head            task_work;
+       struct completion               completion;
+       struct io_ring_ctx              *ctx;
+};
+
+static void io_tctx_exit_cb(struct callback_head *cb)
+{
+       struct io_uring_task *tctx = current->io_uring;
+       struct io_tctx_exit *work;
+
+       work = container_of(cb, struct io_tctx_exit, task_work);
+       /*
+        * When @in_idle, we're in cancellation and it's racy to remove the
+        * node. It'll be removed by the end of cancellation, just ignore it.
+        * tctx can be NULL if the queueing of this task_work raced with
+        * work cancelation off the exec path.
+        */
+       if (tctx && !atomic_read(&tctx->in_idle))
+               io_uring_del_tctx_node((unsigned long)work->ctx);
+       complete(&work->completion);
+}
+
+static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
+{
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+       return req->ctx == data;
+}
+
 static void io_ring_exit_work(struct work_struct *work)
 {
-       struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
-                                              exit_work);
+       struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
+       unsigned long timeout = jiffies + HZ * 60 * 5;
+       unsigned long interval = HZ / 20;
+       struct io_tctx_exit exit;
+       struct io_tctx_node *node;
+       int ret;
 
        /*
         * If we're doing polled IO and end up having requests being
@@ -8578,53 +9327,100 @@ static void io_ring_exit_work(struct work_struct *work)
         * as nobody else will be looking for them.
         */
        do {
-               io_iopoll_try_reap_events(ctx);
-       } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+               io_uring_try_cancel_requests(ctx, NULL, true);
+               if (ctx->sq_data) {
+                       struct io_sq_data *sqd = ctx->sq_data;
+                       struct task_struct *tsk;
+
+                       io_sq_thread_park(sqd);
+                       tsk = sqd->thread;
+                       if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
+                               io_wq_cancel_cb(tsk->io_uring->io_wq,
+                                               io_cancel_ctx_cb, ctx, true);
+                       io_sq_thread_unpark(sqd);
+               }
+
+               if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
+                       /* there is little hope left, don't run it too often */
+                       interval = HZ * 60;
+               }
+       } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
+
+       init_completion(&exit.completion);
+       init_task_work(&exit.task_work, io_tctx_exit_cb);
+       exit.ctx = ctx;
+       /*
+        * Some may use context even when all refs and requests have been put,
+        * and they are free to do so while still holding uring_lock or
+        * completion_lock, see io_req_task_submit(). Apart from other work,
+        * this lock/unlock section also waits them to finish.
+        */
+       mutex_lock(&ctx->uring_lock);
+       while (!list_empty(&ctx->tctx_list)) {
+               WARN_ON_ONCE(time_after(jiffies, timeout));
+
+               node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
+                                       ctx_node);
+               /* don't spin on a single task if cancellation failed */
+               list_rotate_left(&ctx->tctx_list);
+               ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
+               if (WARN_ON_ONCE(ret))
+                       continue;
+               wake_up_process(node->task);
+
+               mutex_unlock(&ctx->uring_lock);
+               wait_for_completion(&exit.completion);
+               mutex_lock(&ctx->uring_lock);
+       }
+       mutex_unlock(&ctx->uring_lock);
+       spin_lock(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
+
        io_ring_ctx_free(ctx);
 }
 
-static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+                            bool cancel_all)
 {
-       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       struct io_kiocb *req, *tmp;
+       int canceled = 0;
 
-       return req->ctx == data;
+       spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+               if (io_match_task(req, tsk, cancel_all)) {
+                       io_kill_timeout(req, -ECANCELED);
+                       canceled++;
+               }
+       }
+       spin_unlock_irq(&ctx->timeout_lock);
+       if (canceled != 0)
+               io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
+       if (canceled != 0)
+               io_cqring_ev_posted(ctx);
+       return canceled != 0;
 }
 
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
        unsigned long index;
-       struct io_identify *iod;
+       struct creds *creds;
 
        mutex_lock(&ctx->uring_lock);
        percpu_ref_kill(&ctx->refs);
-       /* if force is set, the ring is going away. always drop after that */
-
-       if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
-               ctx->sqo_dead = 1;
-
-       ctx->cq_overflow_flushed = 1;
        if (ctx->rings)
-               __io_cqring_overflow_flush(ctx, true, NULL, NULL);
+               __io_cqring_overflow_flush(ctx, true);
+       xa_for_each(&ctx->personalities, index, creds)
+               io_unregister_personality(ctx, index);
        mutex_unlock(&ctx->uring_lock);
 
-       io_kill_timeouts(ctx, NULL, NULL);
-       io_poll_remove_all(ctx, NULL, NULL);
-
-       if (ctx->io_wq)
-               io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
+       io_kill_timeouts(ctx, NULL, true);
+       io_poll_remove_all(ctx, NULL, true);
 
        /* if we failed setting up the ctx, we might not have any rings */
        io_iopoll_try_reap_events(ctx);
-       xa_for_each(&ctx->personalities, index, iod)
-                io_unregister_personality(ctx, index);
-
-       /*
-        * Do this upfront, so we won't have a grace period where the ring
-        * is closed but resources aren't reaped yet. This can cause
-        * spurious failure in setting up a new ring.
-        */
-       io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
-                        ACCT_LOCKED);
 
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        /*
@@ -8647,352 +9443,290 @@ static int io_uring_release(struct inode *inode, struct file *file)
 
 struct io_task_cancel {
        struct task_struct *task;
-       struct files_struct *files;
+       bool all;
 };
 
 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_task_cancel *cancel = data;
-       bool ret;
-
-       if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
-               unsigned long flags;
-               struct io_ring_ctx *ctx = req->ctx;
 
-               /* protect against races with linked timeouts */
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               ret = io_match_task(req, cancel->task, cancel->files);
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       } else {
-               ret = io_match_task(req, cancel->task, cancel->files);
-       }
-       return ret;
+       return io_match_task_safe(req, cancel->task, cancel->all);
 }
 
-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
-                                 struct task_struct *task,
-                                 struct files_struct *files)
+static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+                                 struct task_struct *task, bool cancel_all)
 {
-       struct io_defer_entry *de = NULL;
+       struct io_defer_entry *de;
        LIST_HEAD(list);
 
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-               if (io_match_task(de->req, task, files)) {
+               if (io_match_task_safe(de->req, task, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
+       if (list_empty(&list))
+               return false;
 
        while (!list_empty(&list)) {
                de = list_first_entry(&list, struct io_defer_entry, list);
                list_del_init(&de->list);
-               req_set_fail_links(de->req);
-               io_put_req(de->req);
-               io_req_complete(de->req, -ECANCELED);
+               io_req_complete_failed(de->req, -ECANCELED);
                kfree(de);
        }
+       return true;
 }
 
-static int io_uring_count_inflight(struct io_ring_ctx *ctx,
-                                  struct task_struct *task,
-                                  struct files_struct *files)
-{
-       struct io_kiocb *req;
-       int cnt = 0;
-
-       spin_lock_irq(&ctx->inflight_lock);
-       list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
-               cnt += io_match_task(req, task, files);
-       spin_unlock_irq(&ctx->inflight_lock);
-       return cnt;
-}
-
-static void io_uring_cancel_files(struct io_ring_ctx *ctx,
-                                 struct task_struct *task,
-                                 struct files_struct *files)
+static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 {
-       while (!list_empty_careful(&ctx->inflight_list)) {
-               struct io_task_cancel cancel = { .task = task, .files = files };
-               DEFINE_WAIT(wait);
-               int inflight;
-
-               inflight = io_uring_count_inflight(ctx, task, files);
-               if (!inflight)
-                       break;
+       struct io_tctx_node *node;
+       enum io_wq_cancel cret;
+       bool ret = false;
 
-               io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-               io_poll_remove_all(ctx, task, files);
-               io_kill_timeouts(ctx, task, files);
-               /* cancellations _may_ trigger task work */
-               io_run_task_work();
+       mutex_lock(&ctx->uring_lock);
+       list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+               struct io_uring_task *tctx = node->task->io_uring;
 
-               prepare_to_wait(&task->io_uring->wait, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               if (inflight == io_uring_count_inflight(ctx, task, files))
-                       schedule();
-               finish_wait(&task->io_uring->wait, &wait);
+               /*
+                * io_wq will stay alive while we hold uring_lock, because it's
+                * killed after ctx nodes, which requires to take the lock.
+                */
+               if (!tctx || !tctx->io_wq)
+                       continue;
+               cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
+               ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
        }
+       mutex_unlock(&ctx->uring_lock);
+
+       return ret;
 }
 
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-                                           struct task_struct *task)
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+                                        struct task_struct *task,
+                                        bool cancel_all)
 {
+       struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
+       struct io_uring_task *tctx = task ? task->io_uring : NULL;
+
        while (1) {
-               struct io_task_cancel cancel = { .task = task, .files = NULL, };
                enum io_wq_cancel cret;
                bool ret = false;
 
-               cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-               if (cret != IO_WQ_CANCEL_NOTFOUND)
-                       ret = true;
+               if (!task) {
+                       ret |= io_uring_try_cancel_iowq(ctx);
+               } else if (tctx && tctx->io_wq) {
+                       /*
+                        * Cancels requests of all rings, not only @ctx, but
+                        * it's fine as the task is in exit/exec.
+                        */
+                       cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
+                                              &cancel, true);
+                       ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+               }
 
                /* SQPOLL thread does its own polling */
-               if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+               if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
+                   (ctx->sq_data && ctx->sq_data->thread == current)) {
                        while (!list_empty_careful(&ctx->iopoll_list)) {
                                io_iopoll_try_reap_events(ctx);
                                ret = true;
                        }
                }
 
-               ret |= io_poll_remove_all(ctx, task, NULL);
-               ret |= io_kill_timeouts(ctx, task, NULL);
+               ret |= io_cancel_defer_files(ctx, task, cancel_all);
+               ret |= io_poll_remove_all(ctx, task, cancel_all);
+               ret |= io_kill_timeouts(ctx, task, cancel_all);
+               if (task)
+                       ret |= io_run_task_work();
                if (!ret)
                        break;
-               io_run_task_work();
                cond_resched();
        }
 }
 
-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
-{
-       mutex_lock(&ctx->uring_lock);
-       ctx->sqo_dead = 1;
-       if (ctx->flags & IORING_SETUP_R_DISABLED)
-               io_sq_offload_start(ctx);
-       mutex_unlock(&ctx->uring_lock);
-
-       /* make sure callers enter the ring to get error */
-       if (ctx->rings)
-               io_ring_set_wakeup_flag(ctx);
-}
-
-/*
- * We need to iteratively cancel requests, in case a request has dependent
- * hard links. These persist even for failure of cancelations, hence keep
- * looping until none are found.
- */
-static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-                                         struct files_struct *files)
-{
-       struct task_struct *task = current;
-
-       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-               io_disable_sqo_submit(ctx);
-               task = ctx->sq_data->thread;
-               atomic_inc(&task->io_uring->in_idle);
-               io_sq_thread_park(ctx->sq_data);
-       }
-
-       io_cancel_defer_files(ctx, task, files);
-       io_cqring_overflow_flush(ctx, true, task, files);
-
-       if (!files)
-               __io_uring_cancel_task_requests(ctx, task);
-       else
-               io_uring_cancel_files(ctx, task, files);
-
-       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-               atomic_dec(&task->io_uring->in_idle);
-               io_sq_thread_unpark(ctx->sq_data);
-       }
-}
-
-/*
- * Note that this task has used io_uring. We use it for cancelation purposes.
- */
-static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
+static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 {
        struct io_uring_task *tctx = current->io_uring;
+       struct io_tctx_node *node;
        int ret;
 
        if (unlikely(!tctx)) {
-               ret = io_uring_alloc_task_context(current);
+               ret = io_uring_alloc_task_context(current, ctx);
                if (unlikely(ret))
                        return ret;
+
                tctx = current->io_uring;
-       }
-       if (tctx->last != file) {
-               void *old = xa_load(&tctx->xa, (unsigned long)file);
+               if (ctx->iowq_limits_set) {
+                       unsigned int limits[2] = { ctx->iowq_limits[0],
+                                                  ctx->iowq_limits[1], };
 
-               if (!old) {
-                       get_file(file);
-                       ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
-                                               file, GFP_KERNEL));
-                       if (ret) {
-                               fput(file);
+                       ret = io_wq_max_workers(tctx->io_wq, limits);
+                       if (ret)
                                return ret;
-                       }
                }
-               tctx->last = file;
        }
+       if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
+               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               if (!node)
+                       return -ENOMEM;
+               node->ctx = ctx;
+               node->task = current;
 
-       /*
-        * This is race safe in that the task itself is doing this, hence it
-        * cannot be going through the exit/cancel paths at the same time.
-        * This cannot be modified while exit/cancel is running.
-        */
-       if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
-               tctx->sqpoll = true;
+               ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+                                       node, GFP_KERNEL));
+               if (ret) {
+                       kfree(node);
+                       return ret;
+               }
 
+               mutex_lock(&ctx->uring_lock);
+               list_add(&node->ctx_node, &ctx->tctx_list);
+               mutex_unlock(&ctx->uring_lock);
+       }
+       tctx->last = ctx;
        return 0;
 }
 
 /*
- * Remove this io_uring_file -> task mapping.
+ * Note that this task has used io_uring. We use it for cancelation purposes.
  */
-static void io_uring_del_task_file(struct file *file)
+static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 {
        struct io_uring_task *tctx = current->io_uring;
 
-       if (tctx->last == file)
-               tctx->last = NULL;
-       file = xa_erase(&tctx->xa, (unsigned long)file);
-       if (file)
-               fput(file);
+       if (likely(tctx && tctx->last == ctx))
+               return 0;
+       return __io_uring_add_tctx_node(ctx);
 }
 
-static void io_uring_remove_task_files(struct io_uring_task *tctx)
+/*
+ * Remove this io_uring_file -> task mapping.
+ */
+static void io_uring_del_tctx_node(unsigned long index)
 {
-       struct file *file;
-       unsigned long index;
+       struct io_uring_task *tctx = current->io_uring;
+       struct io_tctx_node *node;
 
-       xa_for_each(&tctx->xa, index, file)
-               io_uring_del_task_file(file);
-}
+       if (!tctx)
+               return;
+       node = xa_erase(&tctx->xa, index);
+       if (!node)
+               return;
 
-void __io_uring_files_cancel(struct files_struct *files)
-{
-       struct io_uring_task *tctx = current->io_uring;
-       struct file *file;
-       unsigned long index;
+       WARN_ON_ONCE(current != node->task);
+       WARN_ON_ONCE(list_empty(&node->ctx_node));
 
-       /* make sure overflow events are dropped */
-       atomic_inc(&tctx->in_idle);
-       xa_for_each(&tctx->xa, index, file)
-               io_uring_cancel_task_requests(file->private_data, files);
-       atomic_dec(&tctx->in_idle);
+       mutex_lock(&node->ctx->uring_lock);
+       list_del(&node->ctx_node);
+       mutex_unlock(&node->ctx->uring_lock);
 
-       if (files)
-               io_uring_remove_task_files(tctx);
+       if (tctx->last == node->ctx)
+               tctx->last = NULL;
+       kfree(node);
 }
 
-static s64 tctx_inflight(struct io_uring_task *tctx)
+static void io_uring_clean_tctx(struct io_uring_task *tctx)
 {
+       struct io_wq *wq = tctx->io_wq;
+       struct io_tctx_node *node;
        unsigned long index;
-       struct file *file;
-       s64 inflight;
-
-       inflight = percpu_counter_sum(&tctx->inflight);
-       if (!tctx->sqpoll)
-               return inflight;
 
-       /*
-        * If we have SQPOLL rings, then we need to iterate and find them, and
-        * add the pending count for those.
-        */
-       xa_for_each(&tctx->xa, index, file) {
-               struct io_ring_ctx *ctx = file->private_data;
-
-               if (ctx->flags & IORING_SETUP_SQPOLL) {
-                       struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
-
-                       inflight += percpu_counter_sum(&__tctx->inflight);
-               }
+       xa_for_each(&tctx->xa, index, node) {
+               io_uring_del_tctx_node(index);
+               cond_resched();
+       }
+       if (wq) {
+               /*
+                * Must be after io_uring_del_task_file() (removes nodes under
+                * uring_lock) to avoid race with io_uring_try_cancel_iowq().
+                */
+               io_wq_put_and_exit(wq);
+               tctx->io_wq = NULL;
        }
+}
 
-       return inflight;
+static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+{
+       if (tracked)
+               return atomic_read(&tctx->inflight_tracked);
+       return percpu_counter_sum(&tctx->inflight);
 }
 
 /*
- * Find any io_uring fd that this task has registered or done IO on, and cancel
- * requests.
+ * Find any io_uring ctx that this task has registered or done IO on, and cancel
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
-void __io_uring_task_cancel(void)
+static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 {
        struct io_uring_task *tctx = current->io_uring;
-       DEFINE_WAIT(wait);
+       struct io_ring_ctx *ctx;
        s64 inflight;
+       DEFINE_WAIT(wait);
 
-       /* make sure overflow events are dropped */
-       atomic_inc(&tctx->in_idle);
+       WARN_ON_ONCE(sqd && sqd->thread != current);
 
-       /* trigger io_disable_sqo_submit() */
-       if (tctx->sqpoll)
-               __io_uring_files_cancel(NULL);
+       if (!current->io_uring)
+               return;
+       if (tctx->io_wq)
+               io_wq_exit_start(tctx->io_wq);
 
+       atomic_inc(&tctx->in_idle);
        do {
+               io_uring_drop_tctx_refs(current);
                /* read completions before cancelations */
-               inflight = tctx_inflight(tctx);
+               inflight = tctx_inflight(tctx, !cancel_all);
                if (!inflight)
                        break;
-               __io_uring_files_cancel(NULL);
-
-               prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
-
-               /*
-                * If we've seen completions, retry without waiting. This
-                * avoids a race where a completion comes in before we did
-                * prepare_to_wait().
-                */
-               if (inflight == tctx_inflight(tctx))
-                       schedule();
-               finish_wait(&tctx->wait, &wait);
-       } while (1);
-
-       atomic_dec(&tctx->in_idle);
-
-       io_uring_remove_task_files(tctx);
-}
-
-static int io_uring_flush(struct file *file, void *data)
-{
-       struct io_uring_task *tctx = current->io_uring;
-       struct io_ring_ctx *ctx = file->private_data;
-
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
-               io_uring_cancel_task_requests(ctx, NULL);
 
-       if (!tctx)
-               return 0;
+               if (!sqd) {
+                       struct io_tctx_node *node;
+                       unsigned long index;
 
-       /* we should have cancelled and erased it before PF_EXITING */
-       WARN_ON_ONCE((current->flags & PF_EXITING) &&
-                    xa_load(&tctx->xa, (unsigned long)file));
+                       xa_for_each(&tctx->xa, index, node) {
+                               /* sqpoll task will cancel all its requests */
+                               if (node->ctx->sq_data)
+                                       continue;
+                               io_uring_try_cancel_requests(node->ctx, current,
+                                                            cancel_all);
+                       }
+               } else {
+                       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+                               io_uring_try_cancel_requests(ctx, current,
+                                                            cancel_all);
+               }
 
-       /*
-        * fput() is pending, will be 2 if the only other ref is our potential
-        * task file note. If the task is exiting, drop regardless of count.
-        */
-       if (atomic_long_read(&file->f_count) != 2)
-               return 0;
+               prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+               io_run_task_work();
+               io_uring_drop_tctx_refs(current);
 
-       if (ctx->flags & IORING_SETUP_SQPOLL) {
-               /* there is only one file note, which is owned by sqo_task */
-               WARN_ON_ONCE(ctx->sqo_task != current &&
-                            xa_load(&tctx->xa, (unsigned long)file));
-               /* sqo_dead check is for when this happens after cancellation */
-               WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
-                            !xa_load(&tctx->xa, (unsigned long)file));
+               /*
+                * If we've seen completions, retry without waiting. This
+                * avoids a race where a completion comes in before we did
+                * prepare_to_wait().
+                */
+               if (inflight == tctx_inflight(tctx, !cancel_all))
+                       schedule();
+               finish_wait(&tctx->wait, &wait);
+       } while (1);
 
-               io_disable_sqo_submit(ctx);
+       io_uring_clean_tctx(tctx);
+       if (cancel_all) {
+               /*
+                * We shouldn't run task_works after cancel, so just leave
+                * ->in_idle set for normal exit.
+                */
+               atomic_dec(&tctx->in_idle);
+               /* for exec all current's requests should be gone, kill tctx */
+               __io_uring_free(current);
        }
+}
 
-       if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
-               io_uring_del_task_file(file);
-       return 0;
+void __io_uring_cancel(bool cancel_all)
+{
+       io_uring_cancel_generic(cancel_all, NULL);
 }
 
 static void *io_uring_validate_mmap_request(struct file *file,
@@ -9067,60 +9801,84 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
-       int ret = 0;
        DEFINE_WAIT(wait);
 
        do {
                if (!io_sqring_full(ctx))
                        break;
-
                prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
-               if (unlikely(ctx->sqo_dead)) {
-                       ret = -EOWNERDEAD;
-                       break;
-               }
-
                if (!io_sqring_full(ctx))
                        break;
-
                schedule();
        } while (!signal_pending(current));
 
        finish_wait(&ctx->sqo_sq_wait, &wait);
-       return ret;
+       return 0;
+}
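
io_sqpoll_wait_sq() now only waits for SQ ring space; the old -EOWNERDEAD path is gone because a dead SQPOLL thread is reported from io_uring_enter() itself (see the sq_data->thread check further down). Userspace drives this wait with the IORING_ENTER_SQ_WAIT flag. A sketch, assuming ring_fd is an SQPOLL ring:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* With IORING_SETUP_SQPOLL, block until the kernel thread has consumed
 * enough SQEs for at least one submission slot to be free again. */
int wait_for_sq_space(int ring_fd)
{
	return syscall(__NR_io_uring_enter, ring_fd,
		       0 /* to_submit */, 0 /* min_complete */,
		       IORING_ENTER_SQ_WAIT, NULL, 0);
}
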
+
+static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
+                         struct __kernel_timespec __user **ts,
+                         const sigset_t __user **sig)
+{
+       struct io_uring_getevents_arg arg;
+
+       /*
+        * If EXT_ARG isn't set, then we have no timespec and the argp pointer
+        * is just a pointer to the sigset_t.
+        */
+       if (!(flags & IORING_ENTER_EXT_ARG)) {
+               *sig = (const sigset_t __user *) argp;
+               *ts = NULL;
+               return 0;
+       }
+
+       /*
+        * EXT_ARG is set - ensure we agree on the size of it and copy in our
+        * timespec and sigset_t pointers if good.
+        */
+       if (*argsz != sizeof(arg))
+               return -EINVAL;
+       if (copy_from_user(&arg, argp, sizeof(arg)))
+               return -EFAULT;
+       if (arg.pad)
+               return -EINVAL;
+       *sig = u64_to_user_ptr(arg.sigmask);
+       *argsz = arg.sigmask_sz;
+       *ts = u64_to_user_ptr(arg.ts);
+       return 0;
 }
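
io_get_ext_arg() is the parsing side of IORING_ENTER_EXT_ARG: with the flag set, the last two io_uring_enter() arguments become a pointer to struct io_uring_getevents_arg plus its size, so a wait can carry both a signal mask and a timeout. A hedged userspace sketch (uapi headers new enough to define the struct are assumed; callers would normally check IORING_FEAT_EXT_ARG in the setup features first):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Wait for at least one CQE, but give up after the given timeout. */
int wait_cqe_timeout(int ring_fd, long long sec, long long nsec)
{
	struct __kernel_timespec ts = { .tv_sec = sec, .tv_nsec = nsec };
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));	/* arg.pad must be zero or -EINVAL */
	arg.sigmask = 0;		/* no signal mask for this wait */
	arg.sigmask_sz = 0;
	arg.ts = (uintptr_t)&ts;

	/* argsz must be sizeof(arg) exactly, as io_get_ext_arg() checks */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
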
 
 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
-               u32, min_complete, u32, flags, const sigset_t __user *, sig,
-               size_t, sigsz)
+               u32, min_complete, u32, flags, const void __user *, argp,
+               size_t, argsz)
 {
        struct io_ring_ctx *ctx;
-       long ret = -EBADF;
        int submitted = 0;
        struct fd f;
+       long ret;
 
        io_run_task_work();
 
-       if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
-                       IORING_ENTER_SQ_WAIT))
+       if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
+                              IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
                return -EINVAL;
 
        f = fdget(fd);
-       if (!f.file)
+       if (unlikely(!f.file))
                return -EBADF;
 
        ret = -EOPNOTSUPP;
-       if (f.file->f_op != &io_uring_fops)
+       if (unlikely(f.file->f_op != &io_uring_fops))
                goto out_fput;
 
        ret = -ENXIO;
        ctx = f.file->private_data;
-       if (!percpu_ref_tryget(&ctx->refs))
+       if (unlikely(!percpu_ref_tryget(&ctx->refs)))
                goto out_fput;
 
        ret = -EBADFD;
-       if (ctx->flags & IORING_SETUP_R_DISABLED)
+       if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
                goto out;
 
        /*
@@ -9130,9 +9888,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
         */
        ret = 0;
        if (ctx->flags & IORING_SETUP_SQPOLL) {
-               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               io_cqring_overflow_flush(ctx);
 
-               if (unlikely(ctx->sqo_dead)) {
+               if (unlikely(ctx->sq_data->thread == NULL)) {
                        ret = -EOWNERDEAD;
                        goto out;
                }
@@ -9145,7 +9903,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                }
                submitted = to_submit;
        } else if (to_submit) {
-               ret = io_uring_add_task_file(ctx, f.file);
+               ret = io_uring_add_tctx_node(ctx);
                if (unlikely(ret))
                        goto out;
                mutex_lock(&ctx->uring_lock);
@@ -9156,6 +9914,13 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        goto out;
        }
        if (flags & IORING_ENTER_GETEVENTS) {
+               const sigset_t __user *sig;
+               struct __kernel_timespec __user *ts;
+
+               ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
+               if (unlikely(ret))
+                       goto out;
+
                min_complete = min(min_complete, ctx->cq_entries);
 
                /*
@@ -9168,7 +9933,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                    !(ctx->flags & IORING_SETUP_SQPOLL)) {
                        ret = io_iopoll_check(ctx, min_complete);
                } else {
-                       ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
+                       ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
                }
        }
 
@@ -9181,9 +9946,8 @@ out_fput:
 
 #ifdef CONFIG_PROC_FS
 static int io_uring_show_cred(struct seq_file *m, unsigned int id,
-               const struct io_identity *iod)
+               const struct cred *cred)
 {
-       const struct cred *cred = iod->creds;
        struct user_namespace *uns = seq_user_ns(m);
        struct group_info *gi;
        kernel_cap_t cap;
@@ -9227,18 +9991,18 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
         */
        has_lock = mutex_trylock(&ctx->uring_lock);
 
-       if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
+       if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
                sq = ctx->sq_data;
+               if (!sq->thread)
+                       sq = NULL;
+       }
 
        seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
        seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
        seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
        for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
-               struct fixed_file_table *table;
-               struct file *f;
+               struct file *f = io_file_from_index(ctx, i);
 
-               table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
-               f = table->files[i & IORING_FILE_TABLE_MASK];
                if (f)
                        seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
                else
@@ -9246,21 +10010,21 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
        }
        seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
        for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
-               struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+               struct io_mapped_ubuf *buf = ctx->user_bufs[i];
+               unsigned int len = buf->ubuf_end - buf->ubuf;
 
-               seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
-                                               (unsigned int) buf->len);
+               seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
        }
        if (has_lock && !xa_empty(&ctx->personalities)) {
                unsigned long index;
-               const struct io_identity *iod;
+               const struct cred *cred;
 
                seq_printf(m, "Personalities:\n");
-               xa_for_each(&ctx->personalities, index, iod)
-                       io_uring_show_cred(m, index, iod);
+               xa_for_each(&ctx->personalities, index, cred)
+                       io_uring_show_cred(m, index, cred);
        }
        seq_printf(m, "PollList:\n");
-       spin_lock_irq(&ctx->completion_lock);
+       spin_lock(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list = &ctx->cancel_hash[i];
                struct io_kiocb *req;
@@ -9269,7 +10033,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        req->task->task_works != NULL);
        }
-       spin_unlock_irq(&ctx->completion_lock);
+       spin_unlock(&ctx->completion_lock);
        if (has_lock)
                mutex_unlock(&ctx->uring_lock);
 }
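
The fdinfo output assembled above is reachable through procfs (when CONFIG_PROC_FS is set), so the registered file table, the registered buffers (now printed as ubuf_end - ubuf), personalities and the poll list can be inspected without issuing any io_uring calls. A small sketch dumping it for a ring fd owned by the current process:

#include <stdio.h>

/* Print /proc/self/fdinfo/<fd>, which carries the SqThread, UserFiles,
 * UserBufs and Personalities sections emitted above. */
void dump_ring_fdinfo(int ring_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ring_fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
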
@@ -9287,14 +10051,12 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
 
 static const struct file_operations io_uring_fops = {
        .release        = io_uring_release,
-       .flush          = io_uring_flush,
        .mmap           = io_uring_mmap,
 #ifndef CONFIG_MMU
        .get_unmapped_area = io_uring_nommu_get_unmapped_area,
        .mmap_capabilities = io_uring_nommu_mmap_capabilities,
 #endif
        .poll           = io_uring_poll,
-       .fasync         = io_uring_fasync,
 #ifdef CONFIG_PROC_FS
        .show_fdinfo    = io_uring_show_fdinfo,
 #endif
@@ -9324,8 +10086,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
        rings->cq_ring_mask = p->cq_entries - 1;
        rings->sq_ring_entries = p->sq_entries;
        rings->cq_ring_entries = p->cq_entries;
-       ctx->sq_mask = rings->sq_ring_mask;
-       ctx->cq_mask = rings->cq_ring_mask;
 
        size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
        if (size == SIZE_MAX) {
@@ -9352,7 +10112,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
        if (fd < 0)
                return fd;
 
-       ret = io_uring_add_task_file(ctx, file);
+       ret = io_uring_add_tctx_node(ctx);
        if (ret) {
                put_unused_fd(fd);
                return ret;
@@ -9395,10 +10155,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
 static int io_uring_create(unsigned entries, struct io_uring_params *p,
                           struct io_uring_params __user *params)
 {
-       struct user_struct *user = NULL;
        struct io_ring_ctx *ctx;
        struct file *file;
-       bool limit_mem;
        int ret;
 
        if (!entries)
@@ -9438,34 +10196,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                p->cq_entries = 2 * p->sq_entries;
        }
 
-       user = get_uid(current_user());
-       limit_mem = !capable(CAP_IPC_LOCK);
-
-       if (limit_mem) {
-               ret = __io_account_mem(user,
-                               ring_pages(p->sq_entries, p->cq_entries));
-               if (ret) {
-                       free_uid(user);
-                       return ret;
-               }
-       }
-
        ctx = io_ring_ctx_alloc(p);
-       if (!ctx) {
-               if (limit_mem)
-                       __io_unaccount_mem(user, ring_pages(p->sq_entries,
-                                                               p->cq_entries));
-               free_uid(user);
+       if (!ctx)
                return -ENOMEM;
-       }
        ctx->compat = in_compat_syscall();
-       ctx->user = user;
-       ctx->creds = get_current_cred();
-#ifdef CONFIG_AUDIT
-       ctx->loginuid = current->loginuid;
-       ctx->sessionid = current->sessionid;
-#endif
-       ctx->sqo_task = get_task_struct(current);
+       if (!capable(CAP_IPC_LOCK))
+               ctx->user = get_uid(current_user());
 
        /*
         * This is just grabbed for accounting purposes. When a process exits,
@@ -9476,35 +10212,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        mmgrab(current->mm);
        ctx->mm_account = current->mm;
 
-#ifdef CONFIG_BLK_CGROUP
-       /*
-        * The sq thread will belong to the original cgroup it was inited in.
-        * If the cgroup goes offline (e.g. disabling the io controller), then
-        * issued bios will be associated with the closest cgroup later in the
-        * block layer.
-        */
-       rcu_read_lock();
-       ctx->sqo_blkcg_css = blkcg_css();
-       ret = css_tryget_online(ctx->sqo_blkcg_css);
-       rcu_read_unlock();
-       if (!ret) {
-               /* don't init against a dying cgroup, have the user try again */
-               ctx->sqo_blkcg_css = NULL;
-               ret = -ENODEV;
-               goto err;
-       }
-#endif
-
-       /*
-        * Account memory _before_ installing the file descriptor. Once
-        * the descriptor is installed, it can get closed at any time. Also
-        * do this before hitting the general error path, as ring freeing
-        * will un-account as well.
-        */
-       io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
-                      ACCT_LOCKED);
-       ctx->limit_mem = limit_mem;
-
        ret = io_allocate_scq_urings(ctx, p);
        if (ret)
                goto err;
@@ -9512,9 +10219,11 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        ret = io_sq_offload_create(ctx, p);
        if (ret)
                goto err;
-
-       if (!(p->flags & IORING_SETUP_R_DISABLED))
-               io_sq_offload_start(ctx);
+       /* always set a rsrc node */
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               goto err;
+       io_rsrc_node_switch(ctx, NULL);
 
        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
@@ -9537,7 +10246,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
-                       IORING_FEAT_POLL_32BITS;
+                       IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
+                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+                       IORING_FEAT_RSRC_TAGS;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -9556,7 +10267,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
         */
        ret = io_uring_install_fd(ctx, file);
        if (ret < 0) {
-               io_disable_sqo_submit(ctx);
                /* fput will clean it up */
                fput(file);
                return ret;
@@ -9565,7 +10275,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
        return ret;
 err:
-       io_disable_sqo_submit(ctx);
        io_ring_ctx_wait_and_kill(ctx);
        return ret;
 }
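
io_uring_create() fills in the sq_off/cq_off blocks and the feature mask that userspace consumes right after io_uring_setup(). A condensed sketch of that userspace side, relying on the IORING_FEAT_SINGLE_MMAP bit advertised above to map both rings with one mmap() call; error handling and teardown are trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	size_t sq_sz, cq_sz, ring_sz;
	void *ring, *sqes;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 8, &p);	/* 8 SQ entries */
	if (fd < 0 || !(p.features & IORING_FEAT_SINGLE_MMAP))
		return 1;	/* older kernels need a separate CQ ring mmap */

	/* ring sizes follow from the offsets the kernel reported */
	sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
	cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
	ring_sz = sq_sz > cq_sz ? sq_sz : cq_sz;

	ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, IORING_OFF_SQ_RING);
	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQES);
	if (ring == MAP_FAILED || sqes == MAP_FAILED)
		return 1;

	printf("features 0x%x, sq %u entries, cq %u entries\n",
	       p.features, p.sq_entries, p.cq_entries);
	return 0;
}
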
@@ -9643,22 +10352,16 @@ out:
 
 static int io_register_personality(struct io_ring_ctx *ctx)
 {
-       struct io_identity *iod;
+       const struct cred *creds;
        u32 id;
        int ret;
 
-       iod = kmalloc(sizeof(*iod), GFP_KERNEL);
-       if (unlikely(!iod))
-               return -ENOMEM;
-
-       io_init_identity(iod);
-       iod->creds = get_current_cred();
+       creds = get_current_cred();
 
-       ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod,
+       ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
                        XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
        if (ret < 0) {
-               put_cred(iod->creds);
-               kfree(iod);
+               put_cred(creds);
                return ret;
        }
        return id;
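
io_register_personality() now stores a bare struct cred reference in the personalities xarray instead of the old io_identity wrapper; the userspace contract is unchanged. A hedged sketch of how the returned id is consumed (ring_fd and the helper names are illustrative, SQE setup is abbreviated):

/* Illustrative only: snapshot the caller's creds and apply them per request. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_personality(int ring_fd)
{
	/* arg and nr_args must be NULL/0 for this opcode */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

static void run_as_personality(struct io_uring_sqe *sqe, int id)
{
	sqe->personality = id;	/* this request runs with the registered creds */
}
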
@@ -9742,24 +10445,273 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
        if (ctx->restrictions.registered)
                ctx->restricted = 1;
 
-       io_sq_offload_start(ctx);
+       ctx->flags &= ~IORING_SETUP_R_DISABLED;
+       if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
+               wake_up(&ctx->sq_data->wait);
+       return 0;
+}
+
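
With io_sq_offload_start() gone, enabling a ring created with IORING_SETUP_R_DISABLED now just clears the flag and wakes a parked SQPOLL thread. A hedged sketch of the disabled-ring flow from userspace (error handling omitted, names illustrative); restrictions would normally be registered before the ring is enabled:

/* Illustrative only: create a disabled ring, enable it once configured. */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_disabled_ring(void)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;	/* submissions fail until enabled */
	return syscall(__NR_io_uring_setup, 8, &p);
}

static int enable_ring(int ring_fd)
{
	/* arg and nr_args must be NULL/0 for this opcode */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}
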
+static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
+                                    struct io_uring_rsrc_update2 *up,
+                                    unsigned nr_args)
+{
+       __u32 tmp;
+       int err;
+
+       if (check_add_overflow(up->offset, nr_args, &tmp))
+               return -EOVERFLOW;
+       err = io_rsrc_node_switch_start(ctx);
+       if (err)
+               return err;
+
+       switch (type) {
+       case IORING_RSRC_FILE:
+               return __io_sqe_files_update(ctx, up, nr_args);
+       case IORING_RSRC_BUFFER:
+               return __io_sqe_buffers_update(ctx, up, nr_args);
+       }
+       return -EINVAL;
+}
+
+static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
+                                   unsigned nr_args)
+{
+       struct io_uring_rsrc_update2 up;
+
+       if (!nr_args)
+               return -EINVAL;
+       memset(&up, 0, sizeof(up));
+       if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
+               return -EFAULT;
+       if (up.resv || up.resv2)
+               return -EINVAL;
+       return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
+}
+
+static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
+                                  unsigned size, unsigned type)
+{
+       struct io_uring_rsrc_update2 up;
+
+       if (size != sizeof(up))
+               return -EINVAL;
+       if (copy_from_user(&up, arg, sizeof(up)))
+               return -EFAULT;
+       if (!up.nr || up.resv || up.resv2)
+               return -EINVAL;
+       return __io_register_rsrc_update(ctx, type, &up, up.nr);
+}
+
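
IORING_REGISTER_FILES_UPDATE2 passes the size of struct io_uring_rsrc_update2 in nr_args, which is what keeps the structure extendable. A minimal sketch, assuming the 5.15 UAPI layout from the imported header; ring_fd, fds and tags are illustrative caller-owned arrays:

/* Illustrative only: replace registered file slots starting at 'offset'. */
#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int update_registered_files(int ring_fd, unsigned int offset,
				   const int *fds, const __u64 *tags,
				   unsigned int nr)
{
	struct io_uring_rsrc_update2 up;

	memset(&up, 0, sizeof(up));		/* resv/resv2 must stay zero */
	up.offset = offset;
	up.data = (__u64)(uintptr_t)fds;	/* an fd of -1 clears a slot */
	up.tags = (__u64)(uintptr_t)tags;	/* optional, may be left 0 */
	up.nr = nr;
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE2, &up, sizeof(up));
}
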
+static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
+                           unsigned int size, unsigned int type)
+{
+       struct io_uring_rsrc_register rr;
+
+       /* keep it extendible */
+       if (size != sizeof(rr))
+               return -EINVAL;
+
+       memset(&rr, 0, sizeof(rr));
+       if (copy_from_user(&rr, arg, size))
+               return -EFAULT;
+       if (!rr.nr || rr.resv || rr.resv2)
+               return -EINVAL;
+
+       switch (type) {
+       case IORING_RSRC_FILE:
+               return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
+                                            rr.nr, u64_to_user_ptr(rr.tags));
+       case IORING_RSRC_BUFFER:
+               return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
+                                              rr.nr, u64_to_user_ptr(rr.tags));
+       }
+       return -EINVAL;
+}
+
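
IORING_REGISTER_FILES2 and IORING_REGISTER_BUFFERS2 take a struct io_uring_rsrc_register that carries the resource array plus an optional tag array, with the explicit size check above keeping the struct extendable. A hedged registration sketch; ring_fd, fds and tags are illustrative:

/* Illustrative only: register a tagged file table in one call. */
#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_tagged_files(int ring_fd, const int *fds,
				 const __u64 *tags, unsigned int nr)
{
	struct io_uring_rsrc_register rr;

	memset(&rr, 0, sizeof(rr));		/* resv/resv2 must stay zero */
	rr.nr = nr;
	rr.data = (__u64)(uintptr_t)fds;
	rr.tags = (__u64)(uintptr_t)tags;	/* tag 0 means "untagged" */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES2, &rr, sizeof(rr));
}
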
+static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
+                               unsigned len)
+{
+       struct io_uring_task *tctx = current->io_uring;
+       cpumask_var_t new_mask;
+       int ret;
+
+       if (!tctx || !tctx->io_wq)
+               return -EINVAL;
+
+       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_clear(new_mask);
+       if (len > cpumask_size())
+               len = cpumask_size();
+
+#ifdef CONFIG_COMPAT
+       if (in_compat_syscall()) {
+               ret = compat_get_bitmap(cpumask_bits(new_mask),
+                                       (const compat_ulong_t __user *)arg,
+                                       len * 8 /* CHAR_BIT */);
+       } else {
+               ret = copy_from_user(new_mask, arg, len);
+       }
+#else
+       ret = copy_from_user(new_mask, arg, len);
+#endif
+
+       if (ret) {
+               free_cpumask_var(new_mask);
+               return -EFAULT;
+       }
+
+       ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+       free_cpumask_var(new_mask);
+       return ret;
+}
+
+static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
+{
+       struct io_uring_task *tctx = current->io_uring;
+
+       if (!tctx || !tctx->io_wq)
+               return -EINVAL;
+
+       return io_wq_cpu_affinity(tctx->io_wq, NULL);
+}
+
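
io_register_iowq_aff() copies a caller-supplied CPU bitmap (via compat_get_bitmap() for compat tasks on CONFIG_COMPAT kernels) and hands it to io_wq_cpu_affinity(). A hedged sketch pinning the task's io-wq workers; the CPU numbers and ring_fd are illustrative:

/* Illustrative only: restrict this task's io-wq workers to CPUs 0 and 1. */
#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pin_iowq_workers(int ring_fd)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);
	/* nr_args is the bitmap size in bytes; the kernel caps it at cpumask_size() */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}

The affinity is dropped again with IORING_UNREGISTER_IOWQ_AFF and NULL/0 arguments, matching the checks in __io_uring_register() further down.
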
+static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+                                       void __user *arg)
+       __must_hold(&ctx->uring_lock)
+{
+       struct io_tctx_node *node;
+       struct io_uring_task *tctx = NULL;
+       struct io_sq_data *sqd = NULL;
+       __u32 new_count[2];
+       int i, ret;
+
+       if (copy_from_user(new_count, arg, sizeof(new_count)))
+               return -EFAULT;
+       for (i = 0; i < ARRAY_SIZE(new_count); i++)
+               if (new_count[i] > INT_MAX)
+                       return -EINVAL;
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               sqd = ctx->sq_data;
+               if (sqd) {
+                       /*
+                        * Observe the correct sqd->lock -> ctx->uring_lock
+                        * ordering. Fine to drop uring_lock here, we hold
+                        * a ref to the ctx.
+                        */
+                       refcount_inc(&sqd->refs);
+                       mutex_unlock(&ctx->uring_lock);
+                       mutex_lock(&sqd->lock);
+                       mutex_lock(&ctx->uring_lock);
+                       if (sqd->thread)
+                               tctx = sqd->thread->io_uring;
+               }
+       } else {
+               tctx = current->io_uring;
+       }
+
+       BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
+
+       for (i = 0; i < ARRAY_SIZE(new_count); i++)
+               if (new_count[i])
+                       ctx->iowq_limits[i] = new_count[i];
+       ctx->iowq_limits_set = true;
+
+       ret = -EINVAL;
+       if (tctx && tctx->io_wq) {
+               ret = io_wq_max_workers(tctx->io_wq, new_count);
+               if (ret)
+                       goto err;
+       } else {
+               memset(new_count, 0, sizeof(new_count));
+       }
+
+       if (sqd) {
+               mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
+
+       if (copy_to_user(arg, new_count, sizeof(new_count)))
+               return -EFAULT;
+
+       /* that's it for SQPOLL, only the SQPOLL task creates requests */
+       if (sqd)
+               return 0;
+
+       /* now propagate the restriction to all registered users */
+       list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+               struct io_uring_task *tctx = node->task->io_uring;
+
+               if (WARN_ON_ONCE(!tctx->io_wq))
+                       continue;
+
+               for (i = 0; i < ARRAY_SIZE(new_count); i++)
+                       new_count[i] = ctx->iowq_limits[i];
+               /* ignore errors, it always returns zero anyway */
+               (void)io_wq_max_workers(tctx->io_wq, new_count);
+       }
        return 0;
+err:
+       if (sqd) {
+               mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
+       return ret;
 }
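
io_register_iowq_max_workers() takes an array of two counts (bounded, then unbounded workers), stores any non-zero entries as per-context limits, and copies the previous limits back to the caller, so an all-zero array simply queries the current values. A hedged sketch; the limits and ring_fd are illustrative:

/* Illustrative only: cap bounded/unbounded io-wq workers, read back old limits. */
#include <linux/io_uring.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cap_iowq_workers(int ring_fd)
{
	__u32 counts[2] = { 4, 16 };	/* [0] bounded, [1] unbounded; 0 = keep current */
	int ret;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
	if (ret == 0)
		printf("previous limits: bounded %u, unbounded %u\n",
		       counts[0], counts[1]);
	return ret;
}
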
 
 static bool io_register_op_must_quiesce(int op)
 {
        switch (op) {
+       case IORING_REGISTER_BUFFERS:
+       case IORING_UNREGISTER_BUFFERS:
+       case IORING_REGISTER_FILES:
        case IORING_UNREGISTER_FILES:
        case IORING_REGISTER_FILES_UPDATE:
        case IORING_REGISTER_PROBE:
        case IORING_REGISTER_PERSONALITY:
        case IORING_UNREGISTER_PERSONALITY:
+       case IORING_REGISTER_FILES2:
+       case IORING_REGISTER_FILES_UPDATE2:
+       case IORING_REGISTER_BUFFERS2:
+       case IORING_REGISTER_BUFFERS_UPDATE:
+       case IORING_REGISTER_IOWQ_AFF:
+       case IORING_UNREGISTER_IOWQ_AFF:
+       case IORING_REGISTER_IOWQ_MAX_WORKERS:
                return false;
        default:
                return true;
        }
 }
 
+static int io_ctx_quiesce(struct io_ring_ctx *ctx)
+{
+       long ret;
+
+       percpu_ref_kill(&ctx->refs);
+
+       /*
+        * Drop uring mutex before waiting for references to exit. If another
+        * thread is currently inside io_uring_enter() it might need to grab the
+        * uring_lock to make progress. If we hold it here across the drain
+        * wait, then we can deadlock. It's safe to drop the mutex here, since
+        * no new references will come in after we've killed the percpu ref.
+        */
+       mutex_unlock(&ctx->uring_lock);
+       do {
+               ret = wait_for_completion_interruptible(&ctx->ref_comp);
+               if (!ret)
+                       break;
+               ret = io_run_task_work_sig();
+       } while (ret >= 0);
+       mutex_lock(&ctx->uring_lock);
+
+       if (ret)
+               io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+       return ret;
+}
+
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
@@ -9775,58 +10727,32 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        if (percpu_ref_is_dying(&ctx->refs))
                return -ENXIO;
 
-       if (io_register_op_must_quiesce(opcode)) {
-               percpu_ref_kill(&ctx->refs);
-
-               /*
-                * Drop uring mutex before waiting for references to exit. If
-                * another thread is currently inside io_uring_enter() it might
-                * need to grab the uring_lock to make progress. If we hold it
-                * here across the drain wait, then we can deadlock. It's safe
-                * to drop the mutex here, since no new references will come in
-                * after we've killed the percpu ref.
-                */
-               mutex_unlock(&ctx->uring_lock);
-               do {
-                       ret = wait_for_completion_interruptible(&ctx->ref_comp);
-                       if (!ret)
-                               break;
-                       ret = io_run_task_work_sig();
-                       if (ret < 0)
-                               break;
-               } while (1);
-               mutex_lock(&ctx->uring_lock);
-
-               if (ret) {
-                       io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
-                       return ret;
-               }
-       }
-
        if (ctx->restricted) {
-               if (opcode >= IORING_REGISTER_LAST) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+               if (opcode >= IORING_REGISTER_LAST)
+                       return -EINVAL;
+               opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
+               if (!test_bit(opcode, ctx->restrictions.register_op))
+                       return -EACCES;
+       }
 
-               if (!test_bit(opcode, ctx->restrictions.register_op)) {
-                       ret = -EACCES;
-                       goto out;
-               }
+       if (io_register_op_must_quiesce(opcode)) {
+               ret = io_ctx_quiesce(ctx);
+               if (ret)
+                       return ret;
        }
 
        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
-               ret = io_sqe_buffer_register(ctx, arg, nr_args);
+               ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
                break;
        case IORING_UNREGISTER_BUFFERS:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
-               ret = io_sqe_buffer_unregister(ctx);
+               ret = io_sqe_buffers_unregister(ctx);
                break;
        case IORING_REGISTER_FILES:
-               ret = io_sqe_files_register(ctx, arg, nr_args);
+               ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
                break;
        case IORING_UNREGISTER_FILES:
                ret = -EINVAL;
@@ -9835,7 +10761,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                ret = io_sqe_files_unregister(ctx);
                break;
        case IORING_REGISTER_FILES_UPDATE:
-               ret = io_sqe_files_update(ctx, arg, nr_args);
+               ret = io_register_files_update(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_EVENTFD:
        case IORING_REGISTER_EVENTFD_ASYNC:
@@ -9883,12 +10809,43 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
+       case IORING_REGISTER_FILES2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_FILES_UPDATE2:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_BUFFERS2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
+               break;
+       case IORING_REGISTER_BUFFERS_UPDATE:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_BUFFER);
+               break;
+       case IORING_REGISTER_IOWQ_AFF:
+               ret = -EINVAL;
+               if (!arg || !nr_args)
+                       break;
+               ret = io_register_iowq_aff(ctx, arg, nr_args);
+               break;
+       case IORING_UNREGISTER_IOWQ_AFF:
+               ret = -EINVAL;
+               if (arg || nr_args)
+                       break;
+               ret = io_unregister_iowq_aff(ctx);
+               break;
+       case IORING_REGISTER_IOWQ_MAX_WORKERS:
+               ret = -EINVAL;
+               if (!arg || nr_args != 2)
+                       break;
+               ret = io_register_iowq_max_workers(ctx, arg);
+               break;
        default:
                ret = -EINVAL;
                break;
        }
 
-out:
        if (io_register_op_must_quiesce(opcode)) {
                /* bring the ctx back to life */
                percpu_ref_reinit(&ctx->refs);
@@ -9914,6 +10871,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
        ctx = f.file->private_data;
 
+       io_run_task_work();
+
        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
        mutex_unlock(&ctx->uring_lock);
@@ -9960,12 +10919,27 @@ static int __init io_uring_init(void)
        BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
        BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
        BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
+       BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
        BUILD_BUG_SQE_ELEM(42, __u16,  personality);
        BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
+       BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
+
+       BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
+                    sizeof(struct io_uring_rsrc_update));
+       BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
+                    sizeof(struct io_uring_rsrc_update2));
+
+       /* ->buf_index is u16 */
+       BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
+
+       /* should fit into one byte */
+       BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
 
        BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
-       BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
-       req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+       BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
+
+       req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+                               SLAB_ACCOUNT);
        return 0;
 };
 __initcall(io_uring_init);
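
The added BUILD_BUG_* lines pin the UAPI layout at compile time: the overlapping SQE fields must share offsets, the legacy files_update struct must alias io_uring_rsrc_update, and the buffer count and SQE flag ranges must fit their u16/u8 fields. Userspace can assert the same invariants against the imported header with C11 _Static_assert; a hedged sketch (the checked offsets mirror the ones above):

/* Illustrative only: compile-time layout checks mirroring io_uring_init(). */
#include <linux/io_uring.h>
#include <stddef.h>

_Static_assert(offsetof(struct io_uring_sqe, personality) == 42,
	       "personality sits at byte 42 of the SQE");
_Static_assert(sizeof(struct io_uring_files_update) ==
	       sizeof(struct io_uring_rsrc_update),
	       "legacy files_update aliases rsrc_update");
_Static_assert(sizeof(struct io_uring_rsrc_update) <=
	       sizeof(struct io_uring_rsrc_update2),
	       "rsrc_update must stay a prefix of rsrc_update2");
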
index ab900b661867f6ebac0862e80d6f80fa16d168a2..8989e1d1f79b7810539c65f766a019f20d706715 100644 (file)
@@ -763,7 +763,7 @@ void __noreturn do_exit(long code)
                schedule();
        }
 
-       io_uring_files_cancel(tsk->files);
+       io_uring_files_cancel();
        exit_signals(tsk);  /* sets PF_EXITING */
 
        /* sync mm's RSS info before statistics gathering */
index 55c1a880a281ae5091dc932e0a621b0a3e793d07..68efe2a0b4fbc8b015785d950edff6e85556eca5 100644 (file)
@@ -926,6 +926,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->splice_pipe = NULL;
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
+       tsk->pf_io_worker = NULL;
 
        account_kernel_stack(tsk, 1);
 
index da96a309eefed726a522c2895f0b5a1f8e6fe55d..a875bc59804eb7b935d04c13a979ab13a58d5b54 100644 (file)
@@ -21,7 +21,7 @@
 #include <asm/tlb.h>
 
 #include "../workqueue_internal.h"
-#include "../../fs/io-wq.h"
+#include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
 #include "pelt.h"