git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:41:32 +0000 (13:41 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:41:32 +0000 (13:41 +0100)
added patches:
io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch

queue-5.4/io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch b/queue-5.4/io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch
new file mode 100644 (file)
index 0000000..e30ab20
--- /dev/null
@@ -0,0 +1,65 @@
+From 7143b5ac5750f404ff3a594b34fdf3fc2f99f828 Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Fri, 21 Feb 2020 16:42:16 +0100
+Subject: io_uring: prevent sq_thread from spinning when it should stop
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+commit 7143b5ac5750f404ff3a594b34fdf3fc2f99f828 upstream.
+
+This patch drops 'cur_mm' before calling cond_resched(), to prevent
+the sq_thread from spinning even after the user process has finished.
+
+Before this patch, if the user process exited without closing the
+io_uring fd, the sq_thread would continue to spin until the
+'sq_thread_idle' timeout expired.
+
+In the worst case where the 'sq_thread_idle' parameter is bigger than
+INT_MAX, the sq_thread will spin forever.
+
+Fixes: 6c271ce2f1d5 ("io_uring: add submission polling")
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/io_uring.c |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2732,16 +2732,6 @@ static int io_sq_thread(void *data)
+               to_submit = io_sqring_entries(ctx);
+               if (!to_submit) {
+                       /*
+-                       * We're polling. If we're within the defined idle
+-                       * period, then let us spin without work before going
+-                       * to sleep.
+-                       */
+-                      if (inflight || !time_after(jiffies, timeout)) {
+-                              cond_resched();
+-                              continue;
+-                      }
+-
+-                      /*
+                        * Drop cur_mm before scheduling, we can't hold it for
+                        * long periods (or over schedule()). Do this before
+                        * adding ourselves to the waitqueue, as the unuse/drop
+@@ -2753,6 +2743,16 @@ static int io_sq_thread(void *data)
+                               cur_mm = NULL;
+                       }
+
++                      /*
++                       * We're polling. If we're within the defined idle
++                       * period, then let us spin without work before going
++                       * to sleep.
++                       */
++                      if (inflight || !time_after(jiffies, timeout)) {
++                              cond_resched();
++                              continue;
++                      }
++
+                       prepare_to_wait(&ctx->sqo_wait, &wait,
+                                               TASK_INTERRUPTIBLE);
diff --git a/queue-5.4/series b/queue-5.4/series
index cc93be49b7f693e3fad9d95071bc51330dd8049a..9fac07fce3982507646df88daba91c3518874d95 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -122,6 +122,6 @@ io_uring-fix-__io_iopoll_check-deadlock-in-io_sq_thread.patch
 alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
 alsa-seq-avoid-concurrent-access-to-queue-flags.patch
 alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
-netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
 netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
 rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch
+io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch