git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.7-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 13:47:41 +0000 (15:47 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 13:47:41 +0000 (15:47 +0200)
added patches:
io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
io_uring-enable-lookup-of-links-holding-inflight-files.patch
io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch
io_uring-sanitize-double-poll-handling.patch
task_work-only-grab-task-signal-lock-when-needed.patch

queue-5.7/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch [new file with mode: 0644]
queue-5.7/io_uring-enable-lookup-of-links-holding-inflight-files.patch [new file with mode: 0644]
queue-5.7/io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch [new file with mode: 0644]
queue-5.7/io_uring-sanitize-double-poll-handling.patch [new file with mode: 0644]
queue-5.7/series
queue-5.7/task_work-only-grab-task-signal-lock-when-needed.patch [new file with mode: 0644]

diff --git a/queue-5.7/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch b/queue-5.7/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
new file mode 100644 (file)
index 0000000..37fde4f
--- /dev/null
@@ -0,0 +1,76 @@
+From 9b7adba9eaec28e0e4343c96d0dbeb9578802f5f Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 10 Aug 2020 10:54:02 -0600
+Subject: io_uring: add missing REQ_F_COMP_LOCKED for nested requests
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 9b7adba9eaec28e0e4343c96d0dbeb9578802f5f upstream.
+
+When we traverse into failing links or timeouts, we need to propagate
+the REQ_F_COMP_LOCKED flag to correctly signal to the completion side
+that we already hold the completion lock.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1484,12 +1484,9 @@ static void io_req_link_next(struct io_k
+ /*
+  * Called if REQ_F_LINK_HEAD is set, and we fail the head request
+  */
+-static void io_fail_links(struct io_kiocb *req)
++static void __io_fail_links(struct io_kiocb *req)
+ {
+       struct io_ring_ctx *ctx = req->ctx;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ctx->completion_lock, flags);
+       while (!list_empty(&req->link_list)) {
+               struct io_kiocb *link = list_first_entry(&req->link_list,
+@@ -1503,13 +1500,29 @@ static void io_fail_links(struct io_kioc
+                       io_link_cancel_timeout(link);
+               } else {
+                       io_cqring_fill_event(link, -ECANCELED);
++                      link->flags |= REQ_F_COMP_LOCKED;
+                       __io_double_put_req(link);
+               }
+               req->flags &= ~REQ_F_LINK_TIMEOUT;
+       }
+       io_commit_cqring(ctx);
+-      spin_unlock_irqrestore(&ctx->completion_lock, flags);
++}
++
++static void io_fail_links(struct io_kiocb *req)
++{
++      struct io_ring_ctx *ctx = req->ctx;
++
++      if (!(req->flags & REQ_F_COMP_LOCKED)) {
++              unsigned long flags;
++
++              spin_lock_irqsave(&ctx->completion_lock, flags);
++              __io_fail_links(req);
++              spin_unlock_irqrestore(&ctx->completion_lock, flags);
++      } else {
++              __io_fail_links(req);
++      }
++
+       io_cqring_ev_posted(ctx);
+ }
+@@ -4828,6 +4841,7 @@ static int io_timeout_cancel(struct io_r
+               return -EALREADY;
+       req_set_fail_links(req);
++      req->flags |= REQ_F_COMP_LOCKED;
+       io_cqring_fill_event(req, -ECANCELED);
+       io_put_req(req);
+       return 0;
diff --git a/queue-5.7/io_uring-enable-lookup-of-links-holding-inflight-files.patch b/queue-5.7/io_uring-enable-lookup-of-links-holding-inflight-files.patch
new file mode 100644 (file)
index 0000000..30c46c1
--- /dev/null
@@ -0,0 +1,168 @@
+From f254ac04c8744cf7bfed012717eac34eacc65dfb Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 12 Aug 2020 17:33:30 -0600
+Subject: io_uring: enable lookup of links holding inflight files
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit f254ac04c8744cf7bfed012717eac34eacc65dfb upstream.
+
+When a process exits, we cancel whatever requests it has pending that
+are referencing the file table. However, if a link is holding a
+reference, then we cannot find it by simply looking at the inflight
+list.
+
+Enable checking of the poll and timeout lists to find the link, and
+cancel it appropriately.
+
+Cc: stable@vger.kernel.org
+Reported-by: Josef <josef.grieb@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   97 ++++++++++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 87 insertions(+), 10 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4638,6 +4638,7 @@ static bool io_poll_remove_one(struct io
+               io_cqring_fill_event(req, -ECANCELED);
+               io_commit_cqring(req->ctx);
+               req->flags |= REQ_F_COMP_LOCKED;
++              req_set_fail_links(req);
+               io_put_req(req);
+       }
+@@ -4820,6 +4821,23 @@ static enum hrtimer_restart io_timeout_f
+       return HRTIMER_NORESTART;
+ }
++static int __io_timeout_cancel(struct io_kiocb *req)
++{
++      int ret;
++
++      list_del_init(&req->list);
++
++      ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
++      if (ret == -1)
++              return -EALREADY;
++
++      req_set_fail_links(req);
++      req->flags |= REQ_F_COMP_LOCKED;
++      io_cqring_fill_event(req, -ECANCELED);
++      io_put_req(req);
++      return 0;
++}
++
+ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+ {
+       struct io_kiocb *req;
+@@ -4827,7 +4845,6 @@ static int io_timeout_cancel(struct io_r
+       list_for_each_entry(req, &ctx->timeout_list, list) {
+               if (user_data == req->user_data) {
+-                      list_del_init(&req->list);
+                       ret = 0;
+                       break;
+               }
+@@ -4836,15 +4853,7 @@ static int io_timeout_cancel(struct io_r
+       if (ret == -ENOENT)
+               return ret;
+-      ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+-      if (ret == -1)
+-              return -EALREADY;
+-
+-      req_set_fail_links(req);
+-      req->flags |= REQ_F_COMP_LOCKED;
+-      io_cqring_fill_event(req, -ECANCELED);
+-      io_put_req(req);
+-      return 0;
++      return __io_timeout_cancel(req);
+ }
+ static int io_timeout_remove_prep(struct io_kiocb *req,
+@@ -7579,6 +7588,71 @@ static int io_uring_release(struct inode
+       return 0;
+ }
++/*
++ * Returns true if 'preq' is the link parent of 'req'
++ */
++static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
++{
++      struct io_kiocb *link;
++
++      if (!(preq->flags & REQ_F_LINK_HEAD))
++              return false;
++
++      list_for_each_entry(link, &preq->link_list, link_list) {
++              if (link == req)
++                      return true;
++      }
++
++      return false;
++}
++
++/*
++ * We're looking to cancel 'req' because it's holding on to our files, but
++ * 'req' could be a link to another request. See if it is, and cancel that
++ * parent request if so.
++ */
++static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
++{
++      struct hlist_node *tmp;
++      struct io_kiocb *preq;
++      bool found = false;
++      int i;
++
++      spin_lock_irq(&ctx->completion_lock);
++      for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
++              struct hlist_head *list;
++
++              list = &ctx->cancel_hash[i];
++              hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
++                      found = io_match_link(preq, req);
++                      if (found) {
++                              io_poll_remove_one(preq);
++                              break;
++                      }
++              }
++      }
++      spin_unlock_irq(&ctx->completion_lock);
++      return found;
++}
++
++static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
++                                 struct io_kiocb *req)
++{
++      struct io_kiocb *preq;
++      bool found = false;
++
++      spin_lock_irq(&ctx->completion_lock);
++      list_for_each_entry(preq, &ctx->timeout_list, list) {
++              found = io_match_link(preq, req);
++              if (found) {
++                      __io_timeout_cancel(preq);
++                      break;
++              }
++      }
++      spin_unlock_irq(&ctx->completion_lock);
++      return found;
++}
++
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+                                 struct files_struct *files)
+ {
+@@ -7629,6 +7703,9 @@ static void io_uring_cancel_files(struct
+                       }
+               } else {
+                       io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
++                      /* could be a link, check and remove if it is */
++                      if (!io_poll_remove_link(ctx, cancel_req))
++                              io_timeout_remove_link(ctx, cancel_req);
+                       io_put_req(cancel_req);
+               }
diff --git a/queue-5.7/io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch b/queue-5.7/io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch
new file mode 100644 (file)
index 0000000..54a0d5c
--- /dev/null
@@ -0,0 +1,106 @@
+From 2dd2111d0d383df104b144e0d1f6b5a00cb7cd88 Mon Sep 17 00:00:00 2001
+From: Guoyu Huang <hgy5945@gmail.com>
+Date: Wed, 5 Aug 2020 03:53:50 -0700
+Subject: io_uring: Fix NULL pointer dereference in loop_rw_iter()
+
+From: Guoyu Huang <hgy5945@gmail.com>
+
+commit 2dd2111d0d383df104b144e0d1f6b5a00cb7cd88 upstream.
+
+loop_rw_iter() does not check whether the file has a read or
+write function. This can lead to a NULL pointer dereference
+when the user passes in a file descriptor that does not have a
+read or write function.
+
+The crash log looks like this:
+
+[   99.834071] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[   99.835364] #PF: supervisor instruction fetch in kernel mode
+[   99.836522] #PF: error_code(0x0010) - not-present page
+[   99.837771] PGD 8000000079d62067 P4D 8000000079d62067 PUD 79d8c067 PMD 0
+[   99.839649] Oops: 0010 [#2] SMP PTI
+[   99.840591] CPU: 1 PID: 333 Comm: io_wqe_worker-0 Tainted: G      D           5.8.0 #2
+[   99.842622] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1 04/01/2014
+[   99.845140] RIP: 0010:0x0
+[   99.845840] Code: Bad RIP value.
+[   99.846672] RSP: 0018:ffffa1c7c01ebc08 EFLAGS: 00010202
+[   99.848018] RAX: 0000000000000000 RBX: ffff92363bd67300 RCX: ffff92363d461208
+[   99.849854] RDX: 0000000000000010 RSI: 00007ffdbf696bb0 RDI: ffff92363bd67300
+[   99.851743] RBP: ffffa1c7c01ebc40 R08: 0000000000000000 R09: 0000000000000000
+[   99.853394] R10: ffffffff9ec692a0 R11: 0000000000000000 R12: 0000000000000010
+[   99.855148] R13: 0000000000000000 R14: ffff92363d461208 R15: ffffa1c7c01ebc68
+[   99.856914] FS:  0000000000000000(0000) GS:ffff92363dd00000(0000) knlGS:0000000000000000
+[   99.858651] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[   99.860032] CR2: ffffffffffffffd6 CR3: 000000007ac66000 CR4: 00000000000006e0
+[   99.861979] Call Trace:
+[   99.862617]  loop_rw_iter.part.0+0xad/0x110
+[   99.863838]  io_write+0x2ae/0x380
+[   99.864644]  ? kvm_sched_clock_read+0x11/0x20
+[   99.865595]  ? sched_clock+0x9/0x10
+[   99.866453]  ? sched_clock_cpu+0x11/0xb0
+[   99.867326]  ? newidle_balance+0x1d4/0x3c0
+[   99.868283]  io_issue_sqe+0xd8f/0x1340
+[   99.869216]  ? __switch_to+0x7f/0x450
+[   99.870280]  ? __switch_to_asm+0x42/0x70
+[   99.871254]  ? __switch_to_asm+0x36/0x70
+[   99.872133]  ? lock_timer_base+0x72/0xa0
+[   99.873155]  ? switch_mm_irqs_off+0x1bf/0x420
+[   99.874152]  io_wq_submit_work+0x64/0x180
+[   99.875192]  ? kthread_use_mm+0x71/0x100
+[   99.876132]  io_worker_handle_work+0x267/0x440
+[   99.877233]  io_wqe_worker+0x297/0x350
+[   99.878145]  kthread+0x112/0x150
+[   99.878849]  ? __io_worker_unuse+0x100/0x100
+[   99.879935]  ? kthread_park+0x90/0x90
+[   99.880874]  ret_from_fork+0x22/0x30
+[   99.881679] Modules linked in:
+[   99.882493] CR2: 0000000000000000
+[   99.883324] ---[ end trace 4453745f4673190b ]---
+[   99.884289] RIP: 0010:0x0
+[   99.884837] Code: Bad RIP value.
+[   99.885492] RSP: 0018:ffffa1c7c01ebc08 EFLAGS: 00010202
+[   99.886851] RAX: 0000000000000000 RBX: ffff92363acd7f00 RCX: ffff92363d461608
+[   99.888561] RDX: 0000000000000010 RSI: 00007ffe040d9e10 RDI: ffff92363acd7f00
+[   99.890203] RBP: ffffa1c7c01ebc40 R08: 0000000000000000 R09: 0000000000000000
+[   99.891907] R10: ffffffff9ec692a0 R11: 0000000000000000 R12: 0000000000000010
+[   99.894106] R13: 0000000000000000 R14: ffff92363d461608 R15: ffffa1c7c01ebc68
+[   99.896079] FS:  0000000000000000(0000) GS:ffff92363dd00000(0000) knlGS:0000000000000000
+[   99.898017] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[   99.899197] CR2: ffffffffffffffd6 CR3: 000000007ac66000 CR4: 00000000000006e0
+
+Fixes: 32960613b7c3 ("io_uring: correctly handle non ->{read,write}_iter() file_operations")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guoyu Huang <hgy5945@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2614,8 +2614,10 @@ static int io_read(struct io_kiocb *req,
+               if (req->file->f_op->read_iter)
+                       ret2 = call_read_iter(req->file, kiocb, &iter);
+-              else
++              else if (req->file->f_op->read)
+                       ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
++              else
++                      ret2 = -EINVAL;
+               /* Catch -EAGAIN return for forced non-blocking submission */
+               if (!force_nonblock || ret2 != -EAGAIN) {
+@@ -2729,8 +2731,10 @@ static int io_write(struct io_kiocb *req
+               if (req->file->f_op->write_iter)
+                       ret2 = call_write_iter(req->file, kiocb, &iter);
+-              else
++              else if (req->file->f_op->write)
+                       ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
++              else
++                      ret2 = -EINVAL;
+               if (!force_nonblock)
+                       current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
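The fix is the classic guard for optional function pointers in an ops table: prefer the modern hook, fall back to the legacy one, and fail cleanly with -EINVAL if neither exists rather than calling through NULL. A generic sketch of the same shape, using illustrative types rather than the kernel's struct file_operations:

    #include <errno.h>
    #include <stddef.h>
    #include <sys/types.h>

    struct io_ops {
        ssize_t (*read_iter)(void *file, char *buf, size_t len);
        ssize_t (*read)(void *file, char *buf, size_t len);
    };

    static ssize_t do_read(void *file, const struct io_ops *ops,
                           char *buf, size_t len)
    {
        if (ops->read_iter)                        /* preferred path */
            return ops->read_iter(file, buf, len);
        else if (ops->read)                        /* legacy fallback */
            return ops->read(file, buf, len);
        return -EINVAL;         /* neither hook set: fail, don't crash */
    }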
diff --git a/queue-5.7/io_uring-sanitize-double-poll-handling.patch b/queue-5.7/io_uring-sanitize-double-poll-handling.patch
new file mode 100644 (file)
index 0000000..75f34ba
--- /dev/null
@@ -0,0 +1,113 @@
+From d4e7cd36a90e38e0276d6ce0c20f5ccef17ec38c Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sat, 15 Aug 2020 11:44:50 -0700
+Subject: io_uring: sanitize double poll handling
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit d4e7cd36a90e38e0276d6ce0c20f5ccef17ec38c upstream.
+
+There's a bit of confusion over the matching pairs of poll vs double poll,
+depending on whether the request is a pure poll (IORING_OP_POLL_ADD) or a
+poll-driven retry.
+
+Add io_poll_get_double() that returns the double poll waitqueue, if any,
+and io_poll_get_single() that returns the original poll waitqueue. With
+that, remove the argument to io_poll_remove_double().
+
+Finally ensure that wait->private is cleared once the double poll handler
+has run, so that remove knows it's already been seen.
+
+Cc: stable@vger.kernel.org # v5.8
+Reported-by: syzbot+7f617d4a9369028b8a2c@syzkaller.appspotmail.com
+Fixes: 18bceab101ad ("io_uring: allow POLL_ADD with double poll_wait() users")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4233,9 +4233,24 @@ static bool io_poll_rewait(struct io_kio
+       return false;
+ }
+-static void io_poll_remove_double(struct io_kiocb *req, void *data)
++static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
+ {
+-      struct io_poll_iocb *poll = data;
++      /* pure poll stashes this in ->io, poll driven retry elsewhere */
++      if (req->opcode == IORING_OP_POLL_ADD)
++              return (struct io_poll_iocb *) req->io;
++      return req->apoll->double_poll;
++}
++
++static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
++{
++      if (req->opcode == IORING_OP_POLL_ADD)
++              return &req->poll;
++      return &req->apoll->poll;
++}
++
++static void io_poll_remove_double(struct io_kiocb *req)
++{
++      struct io_poll_iocb *poll = io_poll_get_double(req);
+       lockdep_assert_held(&req->ctx->completion_lock);
+@@ -4255,7 +4270,7 @@ static void io_poll_complete(struct io_k
+ {
+       struct io_ring_ctx *ctx = req->ctx;
+-      io_poll_remove_double(req, req->io);
++      io_poll_remove_double(req);
+       req->poll.done = true;
+       io_cqring_fill_event(req, error ? error : mangle_poll(mask));
+       io_commit_cqring(ctx);
+@@ -4297,7 +4312,7 @@ static int io_poll_double_wake(struct wa
+                              int sync, void *key)
+ {
+       struct io_kiocb *req = wait->private;
+-      struct io_poll_iocb *poll = req->apoll->double_poll;
++      struct io_poll_iocb *poll = io_poll_get_single(req);
+       __poll_t mask = key_to_poll(key);
+       /* for instances that support it check for an event match first: */
+@@ -4311,6 +4326,8 @@ static int io_poll_double_wake(struct wa
+               done = list_empty(&poll->wait.entry);
+               if (!done)
+                       list_del_init(&poll->wait.entry);
++              /* make sure double remove sees this as being gone */
++              wait->private = NULL;
+               spin_unlock(&poll->head->lock);
+               if (!done)
+                       __io_async_wake(req, poll, mask, io_poll_task_func);
+@@ -4545,7 +4562,7 @@ static bool io_arm_poll_handler(struct i
+       ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
+                                       io_async_wake);
+       if (ret || ipt.error) {
+-              io_poll_remove_double(req, apoll->double_poll);
++              io_poll_remove_double(req);
+               spin_unlock_irq(&ctx->completion_lock);
+               memcpy(&req->work, &apoll->work, sizeof(req->work));
+               kfree(apoll->double_poll);
+@@ -4578,14 +4595,13 @@ static bool io_poll_remove_one(struct io
+ {
+       bool do_complete;
++      io_poll_remove_double(req);
++
+       if (req->opcode == IORING_OP_POLL_ADD) {
+-              io_poll_remove_double(req, req->io);
+               do_complete = __io_poll_remove_one(req, &req->poll);
+       } else {
+               struct async_poll *apoll = req->apoll;
+-              io_poll_remove_double(req, apoll->double_poll);
+-
+               /* non-poll requests have submit ref still */
+               do_complete = __io_poll_remove_one(req, &apoll->poll);
+               if (do_complete) {
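The core of this cleanup is replacing open-coded "which waitqueue does this request use" logic with a pair of accessors keyed on the opcode, so every caller stops duplicating the decision. A stripped-down sketch of that shape, with illustrative types that only mimic the kernel's layout:

    enum opcode { OP_POLL_ADD, OP_OTHER };

    struct poll_entry { int events; };

    struct async_poll {
        struct poll_entry poll;
        struct poll_entry *double_poll;
    };

    struct request {
        enum opcode opcode;
        struct poll_entry poll;     /* pure-poll waitqueue */
        struct poll_entry *stashed; /* pure poll stashes the 2nd entry here */
        struct async_poll *apoll;   /* poll-driven retry keeps both here */
    };

    static struct poll_entry *get_double(struct request *req)
    {
        if (req->opcode == OP_POLL_ADD)
            return req->stashed;
        return req->apoll->double_poll;
    }

    static struct poll_entry *get_single(struct request *req)
    {
        if (req->opcode == OP_POLL_ADD)
            return &req->poll;
        return &req->apoll->poll;
    }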
diff --git a/queue-5.7/series b/queue-5.7/series
index cd8a764918609ea70ed18876ebe86980a42481fb..c6109754d22cc5fdd7c115965d6751e5d54ef41a 100644 (file)
--- a/queue-5.7/series
@@ -385,3 +385,8 @@ drm-xen-front-fix-misused-is_err_or_null-checks.patch
 s390-dasd-fix-inability-to-use-dasd-with-diag-driver.patch
 s390-numa-set-node-distance-to-local_distance.patch
 s390-gmap-improve-thp-splitting.patch
+io_uring-sanitize-double-poll-handling.patch
+io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch
+task_work-only-grab-task-signal-lock-when-needed.patch
+io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
+io_uring-enable-lookup-of-links-holding-inflight-files.patch
diff --git a/queue-5.7/task_work-only-grab-task-signal-lock-when-needed.patch b/queue-5.7/task_work-only-grab-task-signal-lock-when-needed.patch
new file mode 100644 (file)
index 0000000..a09d2a5
--- /dev/null
@@ -0,0 +1,78 @@
+From ebf0d100df0731901c16632f78d78d35f4123bc4 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 13 Aug 2020 09:01:38 -0600
+Subject: task_work: only grab task signal lock when needed
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit ebf0d100df0731901c16632f78d78d35f4123bc4 upstream.
+
+If JOBCTL_TASK_WORK is already set on the targeted task, then we need
+not go through {lock,unlock}_task_sighand() to set it again and queue
+a signal wakeup. This is safe as we're checking it _after_ adding the
+new task_work with cmpxchg().
+
+The ordering is as follows:
+
+task_work_add()                                get_signal()
+--------------------------------------------------------------
+STORE(task->task_works, new_work);     STORE(task->jobctl);
+mb();                                  mb();
+LOAD(task->jobctl);                    LOAD(task->task_works);
+
+This speeds up TWA_SIGNAL handling quite a bit, which is important now
+that io_uring is relying on it for all task_work deliveries.
+
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Jann Horn <jannh@google.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/signal.c    |   16 +++++++++++++++-
+ kernel/task_work.c |    8 +++++++-
+ 2 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2541,7 +2541,21 @@ bool get_signal(struct ksignal *ksig)
+ relock:
+       spin_lock_irq(&sighand->siglock);
+-      current->jobctl &= ~JOBCTL_TASK_WORK;
++      /*
++       * Make sure we can safely read ->jobctl() in task_work add. As Oleg
++       * states:
++       *
++       * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
++       * roughly have
++       *
++       *      task_work_add:                          get_signal:
++       *      STORE(task->task_works, new_work);      STORE(task->jobctl);
++       *      mb();                                   mb();
++       *      LOAD(task->jobctl);                     LOAD(task->task_works);
++       *
++       * and we can rely on STORE-MB-LOAD [ in task_work_add].
++       */
++      smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
+       if (unlikely(current->task_works)) {
+               spin_unlock_irq(&sighand->siglock);
+               task_work_run();
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -42,7 +42,13 @@ task_work_add(struct task_struct *task,
+               set_notify_resume(task);
+               break;
+       case TWA_SIGNAL:
+-              if (lock_task_sighand(task, &flags)) {
++              /*
++               * Only grab the sighand lock if we don't already have some
++               * task_work pending. This pairs with the smp_store_mb()
++               * in get_signal(), see comment there.
++               */
++              if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
++                  lock_task_sighand(task, &flags)) {
+                       task->jobctl |= JOBCTL_TASK_WORK;
+                       signal_wake_up(task, 0);
+                       unlock_task_sighand(task, &flags);
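The STORE/mb/LOAD diagram in the commit message is a Dekker-style handshake: each side publishes its store with a full barrier before loading the other side's variable, so at least one side is guaranteed to observe the other's update. A rough user-space analogue, with C11 seq_cst atomics (which imply the needed barriers) standing in for the kernel's smp_store_mb()/cmpxchg():

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TASK_WORK_FLAG 1u

    static _Atomic(void *) task_works;  /* pending-work list head */
    static atomic_uint jobctl;          /* JOBCTL_TASK_WORK-style flags */

    /* task_work_add() side: publish the work, then check the flag */
    static bool need_signal_wakeup(void *work)
    {
        atomic_store(&task_works, work);            /* STORE + full barrier */
        if (atomic_load(&jobctl) & TASK_WORK_FLAG)  /* LOAD(jobctl) */
            return false;   /* flag already set: skip the sighand lock */
        return true;        /* slow path: lock, set flag, wake the task */
    }

    /* get_signal() side: clear the flag, then check for pending work */
    static void *check_task_works(void)
    {
        atomic_fetch_and(&jobctl, ~TASK_WORK_FLAG); /* STORE + full barrier */
        return atomic_load(&task_works);            /* LOAD(task_works) */
    }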