git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 8 Sep 2024 14:21:35 +0000 (16:21 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 8 Sep 2024 14:21:35 +0000 (16:21 +0200)
added patches:
io_uring-io-wq-stop-setting-pf_no_setaffinity-on-io-wq-workers.patch
io_uring-sqpoll-do-not-set-pf_no_setaffinity-on-sqpoll-threads.patch

queue-6.1/io_uring-io-wq-stop-setting-pf_no_setaffinity-on-io-wq-workers.patch [new file with mode: 0644]
queue-6.1/io_uring-sqpoll-do-not-set-pf_no_setaffinity-on-sqpoll-threads.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/io_uring-io-wq-stop-setting-pf_no_setaffinity-on-io-wq-workers.patch b/queue-6.1/io_uring-io-wq-stop-setting-pf_no_setaffinity-on-io-wq-workers.patch
new file mode 100644 (file)
index 0000000..5e0dfa2
--- /dev/null
@@ -0,0 +1,77 @@
+From 01e68ce08a30db3d842ce7a55f7f6e0474a55f9a Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 8 Mar 2023 07:18:51 -0700
+Subject: io_uring/io-wq: stop setting PF_NO_SETAFFINITY on io-wq workers
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 01e68ce08a30db3d842ce7a55f7f6e0474a55f9a upstream.
+
+Every now and then reports come in that are puzzled on why changing
+affinity on the io-wq workers fails with EINVAL. This happens because they
+set PF_NO_SETAFFINITY as part of their creation, as io-wq organizes
+workers into groups based on what CPU they are running on.
+
+However, this is purely an optimization and not a functional requirement.
+We can allow setting affinity, and just lazily update our worker to wqe
+mappings. If a given io-wq thread times out, it normally exits if there's
+no more work to do. The exception is if it's the last worker available.
+For the timeout case, check the affinity of the worker against group mask
+and exit even if it's the last worker. New workers should be created with
+the right mask and in the right location.
+
+Reported-by: Daniel Dao <dqminh@cloudflare.com>
+Link: https://lore.kernel.org/io-uring/CA+wXwBQwgxB3_UphSny-yAP5b26meeOu1W4TwYVcD_+5gOhvPw@mail.gmail.com/
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c |   16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -628,7 +628,7 @@ static int io_wqe_worker(void *data)
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+       struct io_wqe *wqe = worker->wqe;
+       struct io_wq *wq = wqe->wq;
+-      bool last_timeout = false;
++      bool exit_mask = false, last_timeout = false;
+       char buf[TASK_COMM_LEN];
+       worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+@@ -644,8 +644,11 @@ static int io_wqe_worker(void *data)
+                       io_worker_handle_work(worker);
+               raw_spin_lock(&wqe->lock);
+-              /* timed out, exit unless we're the last worker */
+-              if (last_timeout && acct->nr_workers > 1) {
++              /*
++               * Last sleep timed out. Exit if we're not the last worker,
++               * or if someone modified our affinity.
++               */
++              if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
+                       acct->nr_workers--;
+                       raw_spin_unlock(&wqe->lock);
+                       __set_current_state(TASK_RUNNING);
+@@ -664,7 +667,11 @@ static int io_wqe_worker(void *data)
+                               continue;
+                       break;
+               }
+-              last_timeout = !ret;
++              if (!ret) {
++                      last_timeout = true;
++                      exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
++                                                      wqe->cpu_mask);
++              }
+       }
+       if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+@@ -716,7 +723,6 @@ static void io_init_new_worker(struct io
+       tsk->worker_private = worker;
+       worker->task = tsk;
+       set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
+-      tsk->flags |= PF_NO_SETAFFINITY;
+       raw_spin_lock(&wqe->lock);
+       hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
diff --git a/queue-6.1/io_uring-sqpoll-do-not-set-pf_no_setaffinity-on-sqpoll-threads.patch b/queue-6.1/io_uring-sqpoll-do-not-set-pf_no_setaffinity-on-sqpoll-threads.patch
new file mode 100644 (file)
index 0000000..c2a6039
--- /dev/null
@@ -0,0 +1,39 @@
+From a5fc1441af7719e93dc7a638a960befb694ade89 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?= <mkoutny@suse.com>
+Date: Tue, 14 Mar 2023 19:33:32 +0100
+Subject: io_uring/sqpoll: Do not set PF_NO_SETAFFINITY on sqpoll threads
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michal Koutný <mkoutny@suse.com>
+
+commit a5fc1441af7719e93dc7a638a960befb694ade89 upstream.
+
+Users may specify a CPU where the sqpoll thread would run. This may
+conflict with cpuset operations because of strict PF_NO_SETAFFINITY
+requirement. That flag is unnecessary for polling "kernel" threads, see
+the reasoning in commit 01e68ce08a30 ("io_uring/io-wq: stop setting
+PF_NO_SETAFFINITY on io-wq workers"). Drop the flag on poll threads too.
+
+Fixes: 01e68ce08a30 ("io_uring/io-wq: stop setting PF_NO_SETAFFINITY on io-wq workers")
+Link: https://lore.kernel.org/all/20230314162559.pnyxdllzgw7jozgx@blackpad/
+Signed-off-by: Michal Koutný <mkoutny@suse.com>
+Link: https://lore.kernel.org/r/20230314183332.25834-1-mkoutny@suse.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/sqpoll.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -233,7 +233,6 @@ static int io_sq_thread(void *data)
+               set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+       else
+               set_cpus_allowed_ptr(current, cpu_online_mask);
+-      current->flags |= PF_NO_SETAFFINITY;
+       /*
+        * Force audit context to get setup, in case we do prep side async
index 3fb5bac3427514a06198c3febf10124fd40b978b..9fbe091e11e4ec88ef5ff50fd09a0f839dd80cb3 100644 (file)
@@ -132,3 +132,5 @@ nfsv4-add-missing-rescheduling-points-in-nfs_client_.patch
 selftests-mptcp-fix-backport-issues.patch
 selftests-mptcp-join-validate-event-numbers.patch
 selftests-mptcp-join-check-re-re-adding-id-0-signal.patch
+io_uring-io-wq-stop-setting-pf_no_setaffinity-on-io-wq-workers.patch
+io_uring-sqpoll-do-not-set-pf_no_setaffinity-on-sqpoll-threads.patch