--- /dev/null
+From 01e68ce08a30db3d842ce7a55f7f6e0474a55f9a Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 8 Mar 2023 07:18:51 -0700
+Subject: io_uring/io-wq: stop setting PF_NO_SETAFFINITY on io-wq workers
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 01e68ce08a30db3d842ce7a55f7f6e0474a55f9a upstream.
+
+Every now and then reports come in that are puzzled on why changing
+affinity on the io-wq workers fails with EINVAL. This happens because they
+set PF_NO_SETAFFINITY as part of their creation, as io-wq organizes
+workers into groups based on what CPU they are running on.
+
+However, this is purely an optimization and not a functional requirement.
+We can allow setting affinity, and just lazily update our worker to wqe
+mappings. If a given io-wq thread times out, it normally exits if there's
+no more work to do. The exception is if it's the last worker available.
+For the timeout case, check the affinity of the worker against group mask
+and exit even if it's the last worker. New workers should be created with
+the right mask and in the right location.
+
+Reported-by: Daniel Dao <dqminh@cloudflare.com>
+Link: https://lore.kernel.org/io-uring/CA+wXwBQwgxB3_UphSny-yAP5b26meeOu1W4TwYVcD_+5gOhvPw@mail.gmail.com/
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -628,7 +628,7 @@ static int io_wqe_worker(void *data)
+ struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+- bool last_timeout = false;
++ bool exit_mask = false, last_timeout = false;
+ char buf[TASK_COMM_LEN];
+
+ worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+@@ -644,8 +644,11 @@ static int io_wqe_worker(void *data)
+ io_worker_handle_work(worker);
+
+ raw_spin_lock(&wqe->lock);
+- /* timed out, exit unless we're the last worker */
+- if (last_timeout && acct->nr_workers > 1) {
++ /*
++ * Last sleep timed out. Exit if we're not the last worker,
++ * or if someone modified our affinity.
++ */
++ if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
+ acct->nr_workers--;
+ raw_spin_unlock(&wqe->lock);
+ __set_current_state(TASK_RUNNING);
+@@ -664,7 +667,11 @@ static int io_wqe_worker(void *data)
+ continue;
+ break;
+ }
+- last_timeout = !ret;
++ if (!ret) {
++ last_timeout = true;
++ exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
++ wqe->cpu_mask);
++ }
+ }
+
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+@@ -716,7 +723,6 @@ static void io_init_new_worker(struct io
+ tsk->worker_private = worker;
+ worker->task = tsk;
+ set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
+- tsk->flags |= PF_NO_SETAFFINITY;
+
+ raw_spin_lock(&wqe->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
--- /dev/null
+From a5fc1441af7719e93dc7a638a960befb694ade89 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?= <mkoutny@suse.com>
+Date: Tue, 14 Mar 2023 19:33:32 +0100
+Subject: io_uring/sqpoll: Do not set PF_NO_SETAFFINITY on sqpoll threads
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michal Koutný <mkoutny@suse.com>
+
+commit a5fc1441af7719e93dc7a638a960befb694ade89 upstream.
+
+Users may specify a CPU where the sqpoll thread would run. This may
+conflict with cpuset operations because of strict PF_NO_SETAFFINITY
+requirement. That flag is unnecessary for polling "kernel" threads, see
+the reasoning in commit 01e68ce08a30 ("io_uring/io-wq: stop setting
+PF_NO_SETAFFINITY on io-wq workers"). Drop the flag on poll threads too.
+
+Fixes: 01e68ce08a30 ("io_uring/io-wq: stop setting PF_NO_SETAFFINITY on io-wq workers")
+Link: https://lore.kernel.org/all/20230314162559.pnyxdllzgw7jozgx@blackpad/
+Signed-off-by: Michal Koutný <mkoutny@suse.com>
+Link: https://lore.kernel.org/r/20230314183332.25834-1-mkoutny@suse.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/sqpoll.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -233,7 +233,6 @@ static int io_sq_thread(void *data)
+ set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+ else
+ set_cpus_allowed_ptr(current, cpu_online_mask);
+- current->flags |= PF_NO_SETAFFINITY;
+
+ /*
+ * Force audit context to get setup, in case we do prep side async