git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.8-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 13:16:41 +0000 (15:16 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 13:16:41 +0000 (15:16 +0200)
added patches:
io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
io_uring-enable-lookup-of-links-holding-inflight-files.patch
io_uring-hold-ctx-reference-around-task_work-queue-execute.patch

queue-5.8/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch [new file with mode: 0644]
queue-5.8/io_uring-enable-lookup-of-links-holding-inflight-files.patch [new file with mode: 0644]
queue-5.8/io_uring-hold-ctx-reference-around-task_work-queue-execute.patch [new file with mode: 0644]
queue-5.8/series

diff --git a/queue-5.8/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch b/queue-5.8/io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
new file mode 100644 (file)
index 0000000..92786cf
--- /dev/null
@@ -0,0 +1,76 @@
+From 9b7adba9eaec28e0e4343c96d0dbeb9578802f5f Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 10 Aug 2020 10:54:02 -0600
+Subject: io_uring: add missing REQ_F_COMP_LOCKED for nested requests
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 9b7adba9eaec28e0e4343c96d0dbeb9578802f5f upstream.
+
+When we traverse into failing links or timeouts, we need to propagate
+the REQ_F_COMP_LOCKED flag to correctly signal to the completion side
+that we already hold the completion lock.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1549,12 +1549,9 @@ static void io_req_link_next(struct io_k
+ /*
+  * Called if REQ_F_LINK_HEAD is set, and we fail the head request
+  */
+-static void io_fail_links(struct io_kiocb *req)
++static void __io_fail_links(struct io_kiocb *req)
+ {
+       struct io_ring_ctx *ctx = req->ctx;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ctx->completion_lock, flags);
+       while (!list_empty(&req->link_list)) {
+               struct io_kiocb *link = list_first_entry(&req->link_list,
+@@ -1568,13 +1565,29 @@ static void io_fail_links(struct io_kioc
+                       io_link_cancel_timeout(link);
+               } else {
+                       io_cqring_fill_event(link, -ECANCELED);
++                      link->flags |= REQ_F_COMP_LOCKED;
+                       __io_double_put_req(link);
+               }
+               req->flags &= ~REQ_F_LINK_TIMEOUT;
+       }
+       io_commit_cqring(ctx);
+-      spin_unlock_irqrestore(&ctx->completion_lock, flags);
++}
++
++static void io_fail_links(struct io_kiocb *req)
++{
++      struct io_ring_ctx *ctx = req->ctx;
++
++      if (!(req->flags & REQ_F_COMP_LOCKED)) {
++              unsigned long flags;
++
++              spin_lock_irqsave(&ctx->completion_lock, flags);
++              __io_fail_links(req);
++              spin_unlock_irqrestore(&ctx->completion_lock, flags);
++      } else {
++              __io_fail_links(req);
++      }
++
+       io_cqring_ev_posted(ctx);
+ }
+@@ -4767,6 +4780,7 @@ static int io_timeout_cancel(struct io_r
+               return -EALREADY;
+       req_set_fail_links(req);
++      req->flags |= REQ_F_COMP_LOCKED;
+       io_cqring_fill_event(req, -ECANCELED);
+       io_put_req(req);
+       return 0;
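
For readers following the change above: the fix splits io_fail_links() into a lock-free inner helper and an outer wrapper that only takes the completion lock when REQ_F_COMP_LOCKED is not already set on the request. Below is a minimal userspace C sketch of that pattern; the context, request, flag, and helper names are simplified stand-ins (a pthread mutex in place of the ctx completion spinlock), not the actual fs/io_uring.c code.

/*
 * Simplified userspace analogue of the "already locked" pattern used in
 * the patch above: a request carries a COMP_LOCKED-style flag, the outer
 * function only takes the completion lock when that flag is not set, and
 * the inner helper assumes the lock is held by whoever called it.
 */
#include <pthread.h>
#include <stdio.h>

#define REQ_F_COMP_LOCKED	(1u << 0)	/* caller already holds the lock */

struct ring_ctx {
	pthread_mutex_t completion_lock;
	int cqes_posted;
};

struct request {
	struct ring_ctx *ctx;
	unsigned int flags;
};

/* Must be called with ctx->completion_lock held. */
static void __fail_links(struct request *req)
{
	req->ctx->cqes_posted++;	/* stand-in for filling CQEs for the links */
}

static void fail_links(struct request *req)
{
	struct ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_COMP_LOCKED)) {
		pthread_mutex_lock(&ctx->completion_lock);
		__fail_links(req);
		pthread_mutex_unlock(&ctx->completion_lock);
	} else {
		/* lock already held further up the call chain */
		__fail_links(req);
	}
}

int main(void)
{
	struct ring_ctx ctx = { .completion_lock = PTHREAD_MUTEX_INITIALIZER };
	struct request nested = { .ctx = &ctx, .flags = 0 };

	/* A cancel path that already holds the lock marks the request first. */
	pthread_mutex_lock(&ctx.completion_lock);
	nested.flags |= REQ_F_COMP_LOCKED;
	fail_links(&nested);
	pthread_mutex_unlock(&ctx.completion_lock);

	printf("posted %d completion(s)\n", ctx.cqes_posted);
	return 0;
}
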
diff --git a/queue-5.8/io_uring-enable-lookup-of-links-holding-inflight-files.patch b/queue-5.8/io_uring-enable-lookup-of-links-holding-inflight-files.patch
new file mode 100644 (file)
index 0000000..ee24f87
--- /dev/null
@@ -0,0 +1,168 @@
+From f254ac04c8744cf7bfed012717eac34eacc65dfb Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 12 Aug 2020 17:33:30 -0600
+Subject: io_uring: enable lookup of links holding inflight files
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit f254ac04c8744cf7bfed012717eac34eacc65dfb upstream.
+
+When a process exits, we cancel whatever requests it has pending that
+are referencing the file table. However, if a link is holding a
+reference, then we cannot find it by simply looking at the inflight
+list.
+
+Enable checking of the poll and timeout list to find the link, and
+cancel it appropriately.
+
+Cc: stable@vger.kernel.org
+Reported-by: Josef <josef.grieb@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   97 ++++++++++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 87 insertions(+), 10 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4586,6 +4586,7 @@ static bool io_poll_remove_one(struct io
+               io_cqring_fill_event(req, -ECANCELED);
+               io_commit_cqring(req->ctx);
+               req->flags |= REQ_F_COMP_LOCKED;
++              req_set_fail_links(req);
+               io_put_req(req);
+       }
+@@ -4759,6 +4760,23 @@ static enum hrtimer_restart io_timeout_f
+       return HRTIMER_NORESTART;
+ }
++static int __io_timeout_cancel(struct io_kiocb *req)
++{
++      int ret;
++
++      list_del_init(&req->list);
++
++      ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
++      if (ret == -1)
++              return -EALREADY;
++
++      req_set_fail_links(req);
++      req->flags |= REQ_F_COMP_LOCKED;
++      io_cqring_fill_event(req, -ECANCELED);
++      io_put_req(req);
++      return 0;
++}
++
+ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+ {
+       struct io_kiocb *req;
+@@ -4766,7 +4784,6 @@ static int io_timeout_cancel(struct io_r
+       list_for_each_entry(req, &ctx->timeout_list, list) {
+               if (user_data == req->user_data) {
+-                      list_del_init(&req->list);
+                       ret = 0;
+                       break;
+               }
+@@ -4775,15 +4792,7 @@ static int io_timeout_cancel(struct io_r
+       if (ret == -ENOENT)
+               return ret;
+-      ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+-      if (ret == -1)
+-              return -EALREADY;
+-
+-      req_set_fail_links(req);
+-      req->flags |= REQ_F_COMP_LOCKED;
+-      io_cqring_fill_event(req, -ECANCELED);
+-      io_put_req(req);
+-      return 0;
++      return __io_timeout_cancel(req);
+ }
+ static int io_timeout_remove_prep(struct io_kiocb *req,
+@@ -7535,6 +7544,71 @@ static bool io_wq_files_match(struct io_
+       return work->files == files;
+ }
++/*
++ * Returns true if 'preq' is the link parent of 'req'
++ */
++static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
++{
++      struct io_kiocb *link;
++
++      if (!(preq->flags & REQ_F_LINK_HEAD))
++              return false;
++
++      list_for_each_entry(link, &preq->link_list, link_list) {
++              if (link == req)
++                      return true;
++      }
++
++      return false;
++}
++
++/*
++ * We're looking to cancel 'req' because it's holding on to our files, but
++ * 'req' could be a link to another request. See if it is, and cancel that
++ * parent request if so.
++ */
++static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
++{
++      struct hlist_node *tmp;
++      struct io_kiocb *preq;
++      bool found = false;
++      int i;
++
++      spin_lock_irq(&ctx->completion_lock);
++      for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
++              struct hlist_head *list;
++
++              list = &ctx->cancel_hash[i];
++              hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
++                      found = io_match_link(preq, req);
++                      if (found) {
++                              io_poll_remove_one(preq);
++                              break;
++                      }
++              }
++      }
++      spin_unlock_irq(&ctx->completion_lock);
++      return found;
++}
++
++static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
++                                 struct io_kiocb *req)
++{
++      struct io_kiocb *preq;
++      bool found = false;
++
++      spin_lock_irq(&ctx->completion_lock);
++      list_for_each_entry(preq, &ctx->timeout_list, list) {
++              found = io_match_link(preq, req);
++              if (found) {
++                      __io_timeout_cancel(preq);
++                      break;
++              }
++      }
++      spin_unlock_irq(&ctx->completion_lock);
++      return found;
++}
++
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+                                 struct files_struct *files)
+ {
+@@ -7592,6 +7666,9 @@ static void io_uring_cancel_files(struct
+                       }
+               } else {
+                       io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
++                      /* could be a link, check and remove if it is */
++                      if (!io_poll_remove_link(ctx, cancel_req))
++                              io_timeout_remove_link(ctx, cancel_req);
+                       io_put_req(cancel_req);
+               }
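
The lookup added above works by checking whether a candidate parent request has REQ_F_LINK_HEAD set and whether the target request appears on its link_list, then cancelling that parent via the poll or timeout path. Below is a minimal userspace C sketch of that matching logic; the types and names are simplified stand-ins for io_match_link()/io_poll_remove_link(), not the kernel implementation.

/*
 * Userspace sketch of the "find the link parent" lookup from the patch
 * above: a request we want to cancel may only be reachable through the
 * link list of some parent request, so scan the parents and cancel the
 * one whose list contains it.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request {
	int id;
	bool link_head;			/* analogue of REQ_F_LINK_HEAD */
	struct request *links[4];	/* linked (child) requests */
	size_t nr_links;
	bool cancelled;
};

/* Returns true if 'preq' is the link parent of 'req'. */
static bool match_link(const struct request *preq, const struct request *req)
{
	if (!preq->link_head)
		return false;
	for (size_t i = 0; i < preq->nr_links; i++)
		if (preq->links[i] == req)
			return true;
	return false;
}

/* Scan pending parents; cancel the one holding 'req' as a link, if any. */
static bool remove_link(struct request **pending, size_t n, struct request *req)
{
	for (size_t i = 0; i < n; i++) {
		if (match_link(pending[i], req)) {
			pending[i]->cancelled = true;	/* stand-in for poll/timeout cancel */
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct request child = { .id = 2 };
	struct request parent = { .id = 1, .link_head = true,
				  .links = { &child }, .nr_links = 1 };
	struct request *pending[] = { &parent };

	if (remove_link(pending, 1, &child))
		printf("cancelled parent %d holding request %d\n",
		       parent.id, child.id);
	return 0;
}
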
diff --git a/queue-5.8/io_uring-hold-ctx-reference-around-task_work-queue-execute.patch b/queue-5.8/io_uring-hold-ctx-reference-around-task_work-queue-execute.patch
new file mode 100644 (file)
index 0000000..a499f84
--- /dev/null
@@ -0,0 +1,68 @@
+From 6d816e088c359866f9867057e04f244c608c42fe Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 11 Aug 2020 08:04:14 -0600
+Subject: io_uring: hold 'ctx' reference around task_work queue + execute
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 6d816e088c359866f9867057e04f244c608c42fe upstream.
+
+We're holding the request reference, but we need to go one higher
+to ensure that the ctx remains valid after the request has finished.
+If the ring is closed with pending task_work inflight, and the
+given io_kiocb finishes sync during issue, then we need a reference
+to the ring itself around the task_work execution cycle.
+
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: syzbot+9b260fc33297966f5a8e@syzkaller.appspotmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4140,6 +4140,8 @@ static int __io_async_wake(struct io_kio
+       tsk = req->task;
+       req->result = mask;
+       init_task_work(&req->task_work, func);
++      percpu_ref_get(&req->ctx->refs);
++
+       /*
+        * If this fails, then the task is exiting. When a task exits, the
+        * work gets canceled, so just cancel this request as well instead
+@@ -4239,6 +4241,7 @@ static void io_poll_task_handler(struct
+ static void io_poll_task_func(struct callback_head *cb)
+ {
+       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
++      struct io_ring_ctx *ctx = req->ctx;
+       struct io_kiocb *nxt = NULL;
+       io_poll_task_handler(req, &nxt);
+@@ -4249,6 +4252,7 @@ static void io_poll_task_func(struct cal
+               __io_queue_sqe(nxt, NULL);
+               mutex_unlock(&ctx->uring_lock);
+       }
++      percpu_ref_put(&ctx->refs);
+ }
+ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
+@@ -4365,6 +4369,7 @@ static void io_async_task_func(struct ca
+       if (io_poll_rewait(req, &apoll->poll)) {
+               spin_unlock_irq(&ctx->completion_lock);
++              percpu_ref_put(&ctx->refs);
+               return;
+       }
+@@ -4402,6 +4407,7 @@ end_req:
+               req_set_fail_links(req);
+               io_double_put_req(req);
+       }
++      percpu_ref_put(&ctx->refs);
+ }
+ static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
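
The fix above pins the ring context with percpu_ref_get() before the task_work is queued and drops that reference on every exit path of the callbacks, so the ctx cannot be freed while the deferred work is still pending or running. Below is a minimal userspace C sketch of that reference discipline; the plain counter, context, and callback names are simplified stand-ins for percpu_ref and the io_uring task_work handlers.

/*
 * Userspace sketch of the reference discipline from the patch above:
 * grab a context reference before queuing deferred work, and release it
 * on every exit path of the callback, so the context outlives the work.
 */
#include <assert.h>
#include <stdio.h>

struct ring_ctx {
	int refs;		/* stand-in for percpu_ref */
};

static void ctx_get(struct ring_ctx *ctx) { ctx->refs++; }
static void ctx_put(struct ring_ctx *ctx) { ctx->refs--; }

struct request {
	struct ring_ctx *ctx;
	int result;
};

/* Deferred callback: must drop the ctx reference on every return path. */
static void task_work_func(struct request *req)
{
	struct ring_ctx *ctx = req->ctx;

	if (req->result < 0) {
		/* early-exit path still balances the reference */
		ctx_put(ctx);
		return;
	}

	printf("completed request, result %d\n", req->result);
	ctx_put(ctx);
}

/* Queue side: pin the context before the work is allowed to run later. */
static void queue_task_work(struct request *req)
{
	ctx_get(req->ctx);
	/* ... the work would be handed off to the task here ... */
}

int main(void)
{
	struct ring_ctx ctx = { .refs = 1 };
	struct request req = { .ctx = &ctx, .result = 42 };

	queue_task_work(&req);
	task_work_func(&req);		/* runs later in the real code */

	assert(ctx.refs == 1);		/* reference balanced */
	return 0;
}
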
diff --git a/queue-5.8/series b/queue-5.8/series
index 40d2c42bbdf0363e2cd868d0579ebff731862042..971c9fc56040d27611dcc2644c063e9372d7d97a 100644 (file)
@@ -458,3 +458,6 @@ s390-dasd-fix-inability-to-use-dasd-with-diag-driver.patch
 s390-numa-set-node-distance-to-local_distance.patch
 s390-gmap-improve-thp-splitting.patch
 io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch
+io_uring-hold-ctx-reference-around-task_work-queue-execute.patch
+io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
+io_uring-enable-lookup-of-links-holding-inflight-files.patch