+++ /dev/null
-From 0c217ad3315f37b32b0d98e6f4c1260d0c61c708 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 28 Mar 2024 23:47:37 +0100
-Subject: ASoC: tas2781: mark dvc_tlv with __maybe_unused
-
-From: Gergo Koteles <soyer@irl.hu>
-
-[ Upstream commit 831ec5e3538e989c7995137b5c5c661991a09504 ]
-
-Since we put dvc_tlv static variable to a header file it's copied to
-each module that includes the header. But not all of them are actually
-used it.
-
-Fix this W=1 build warning:
-
-include/sound/tas2781-tlv.h:18:35: warning: 'dvc_tlv' defined but not
-used [-Wunused-const-variable=]
-
-Reported-by: kernel test robot <lkp@intel.com>
-Closes: https://lore.kernel.org/oe-kbuild-all/202403290354.v0StnRpc-lkp@intel.com/
-Fixes: ae065d0ce9e3 ("ALSA: hda/tas2781: remove digital gain kcontrol")
-Signed-off-by: Gergo Koteles <soyer@irl.hu>
-Message-ID: <0e461545a2a6e9b6152985143e50526322e5f76b.1711665731.git.soyer@irl.hu>
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/sound/tas2781-tlv.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
-index 4038dd421150a..1dc59005d241f 100644
---- a/include/sound/tas2781-tlv.h
-+++ b/include/sound/tas2781-tlv.h
-@@ -15,7 +15,7 @@
- #ifndef __TAS2781_TLV_H__
- #define __TAS2781_TLV_H__
-
--static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
-+static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
- static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
-
- #endif
---
-2.43.0
-
--- /dev/null
+From 3b80cff5a4d117c53d38ce805823084eaeffbde6 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 14 Mar 2024 10:46:40 -0600
+Subject: io_uring/kbuf: get rid of bl->is_ready
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 3b80cff5a4d117c53d38ce805823084eaeffbde6 upstream.
+
+Now that the xarray is used exclusively for the buffer_list lookup,
+this check is no longer needed. Get rid of it and the is_ready member.
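+
+As a rough userspace sketch of the publication pattern relied on here
+(hypothetical names; C11 atomics standing in for the xarray's internal
+ordering guarantees, not the io_uring code itself): once an object is
+only reachable through a pointer that is stored after the object is
+fully initialized, a lookup either sees the complete object or nothing,
+so no separate "ready" flag is required.
+
+  #include <stdatomic.h>
+  #include <stdlib.h>
+
+  struct buf_list {
+          int bgid;                       /* set up before publication */
+  };
+
+  /* stands in for the xarray slot a buffer group id maps to */
+  static _Atomic(struct buf_list *) slot;
+
+  static int publish(int bgid)
+  {
+          struct buf_list *bl = malloc(sizeof(*bl));
+
+          if (!bl)
+                  return -1;
+          bl->bgid = bgid;
+          /* release store: the init above is visible before the pointer */
+          atomic_store_explicit(&slot, bl, memory_order_release);
+          return 0;
+  }
+
+  static struct buf_list *lookup(void)
+  {
+          /* acquire load: a non-NULL result is always fully initialized */
+          return atomic_load_explicit(&slot, memory_order_acquire);
+  }
+
+  int main(void)
+  {
+          if (publish(42))
+                  return 1;
+          free(lookup());
+          return 0;
+  }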
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/kbuf.c | 8 --------
+ io_uring/kbuf.h | 2 --
+ 2 files changed, 10 deletions(-)
+
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -61,7 +61,6 @@ static int io_buffer_add_list(struct io_
+ * always under the ->uring_lock, but the RCU lookup from mmap does.
+ */
+ bl->bgid = bgid;
+- smp_store_release(&bl->is_ready, 1);
+ return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+
+@@ -730,13 +729,6 @@ void *io_pbuf_get_address(struct io_ring
+
+ if (!bl || !bl->is_mmap)
+ return NULL;
+- /*
+- * Ensure the list is fully setup. Only strictly needed for RCU lookup
+- * via mmap, and in that case only for the array indexed groups. For
+- * the xarray lookups, it's either visible and ready, or not at all.
+- */
+- if (!smp_load_acquire(&bl->is_ready))
+- return NULL;
+
+ return bl->buf_ring;
+ }
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -29,8 +29,6 @@ struct io_buffer_list {
+ __u8 is_mapped;
+ /* ring mapped provided buffers, but mmap'ed by application */
+ __u8 is_mmap;
+- /* bl is visible from an RCU point of view for lookup */
+- __u8 is_ready;
+ };
+
+ struct io_buffer {
--- /dev/null
+From 09ab7eff38202159271534d2f5ad45526168f2a5 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 14 Mar 2024 10:45:07 -0600
+Subject: io_uring/kbuf: get rid of lower BGID lists
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 09ab7eff38202159271534d2f5ad45526168f2a5 upstream.
+
+Just rely on the xarray for any kind of bgid. This simplifies things, and
+the special-cased array for lower bgids really doesn't bring us much, if
+anything.
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/io_uring_types.h | 1
+ io_uring/io_uring.c | 2 -
+ io_uring/kbuf.c | 70 ++++-------------------------------------
+ 3 files changed, 8 insertions(+), 65 deletions(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -281,7 +281,6 @@ struct io_ring_ctx {
+
+ struct io_submit_state submit_state;
+
+- struct io_buffer_list *io_bl;
+ struct xarray io_bl_xa;
+
+ struct io_hash_table cancel_table_locked;
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -353,7 +353,6 @@ static __cold struct io_ring_ctx *io_rin
+ err:
+ kfree(ctx->cancel_table.hbs);
+ kfree(ctx->cancel_table_locked.hbs);
+- kfree(ctx->io_bl);
+ xa_destroy(&ctx->io_bl_xa);
+ kfree(ctx);
+ return NULL;
+@@ -2906,7 +2905,6 @@ static __cold void io_ring_ctx_free(stru
+ io_wq_put_hash(ctx->hash_map);
+ kfree(ctx->cancel_table.hbs);
+ kfree(ctx->cancel_table_locked.hbs);
+- kfree(ctx->io_bl);
+ xa_destroy(&ctx->io_bl_xa);
+ kfree(ctx);
+ }
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -17,8 +17,6 @@
+
+ #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
+
+-#define BGID_ARRAY 64
+-
+ /* BIDs are addressed by a 16-bit field in a CQE */
+ #define MAX_BIDS_PER_BGID (1 << 16)
+
+@@ -40,13 +38,9 @@ struct io_buf_free {
+ int inuse;
+ };
+
+-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+- struct io_buffer_list *bl,
+- unsigned int bgid)
++static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
++ unsigned int bgid)
+ {
+- if (bl && bgid < BGID_ARRAY)
+- return &bl[bgid];
+-
+ return xa_load(&ctx->io_bl_xa, bgid);
+ }
+
+@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_
+ {
+ lockdep_assert_held(&ctx->uring_lock);
+
+- return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
++ return __io_buffer_get_list(ctx, bgid);
+ }
+
+ static int io_buffer_add_list(struct io_ring_ctx *ctx,
+@@ -68,10 +62,6 @@ static int io_buffer_add_list(struct io_
+ */
+ bl->bgid = bgid;
+ smp_store_release(&bl->is_ready, 1);
+-
+- if (bgid < BGID_ARRAY)
+- return 0;
+-
+ return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+
+@@ -217,24 +207,6 @@ void __user *io_buffer_select(struct io_
+ return ret;
+ }
+
+-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
+-{
+- struct io_buffer_list *bl;
+- int i;
+-
+- bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
+- if (!bl)
+- return -ENOMEM;
+-
+- for (i = 0; i < BGID_ARRAY; i++) {
+- INIT_LIST_HEAD(&bl[i].buf_list);
+- bl[i].bgid = i;
+- }
+-
+- smp_store_release(&ctx->io_bl, bl);
+- return 0;
+-}
+-
+ /*
+ * Mark the given mapped range as free for reuse
+ */
+@@ -309,13 +281,6 @@ void io_destroy_buffers(struct io_ring_c
+ struct list_head *item, *tmp;
+ struct io_buffer *buf;
+ unsigned long index;
+- int i;
+-
+- for (i = 0; i < BGID_ARRAY; i++) {
+- if (!ctx->io_bl)
+- break;
+- __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
+- }
+
+ xa_for_each(&ctx->io_bl_xa, index, bl) {
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+@@ -498,12 +463,6 @@ int io_provide_buffers(struct io_kiocb *
+
+ io_ring_submit_lock(ctx, issue_flags);
+
+- if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
+- ret = io_init_bl_list(ctx);
+- if (ret)
+- goto err;
+- }
+-
+ bl = io_buffer_get_list(ctx, p->bgid);
+ if (unlikely(!bl)) {
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+@@ -516,14 +475,9 @@ int io_provide_buffers(struct io_kiocb *
+ if (ret) {
+ /*
+ * Doesn't need rcu free as it was never visible, but
+- * let's keep it consistent throughout. Also can't
+- * be a lower indexed array group, as adding one
+- * where lookup failed cannot happen.
++ * let's keep it consistent throughout.
+ */
+- if (p->bgid >= BGID_ARRAY)
+- kfree_rcu(bl, rcu);
+- else
+- WARN_ON_ONCE(1);
++ kfree_rcu(bl, rcu);
+ goto err;
+ }
+ }
+@@ -688,12 +642,6 @@ int io_register_pbuf_ring(struct io_ring
+ if (reg.ring_entries >= 65536)
+ return -EINVAL;
+
+- if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
+- int ret = io_init_bl_list(ctx);
+- if (ret)
+- return ret;
+- }
+-
+ bl = io_buffer_get_list(ctx, reg.bgid);
+ if (bl) {
+ /* if mapped buffer ring OR classic exists, don't allow */
+@@ -743,10 +691,8 @@ int io_unregister_pbuf_ring(struct io_ri
+ return -EINVAL;
+
+ __io_remove_buffers(ctx, bl, -1U);
+- if (bl->bgid >= BGID_ARRAY) {
+- xa_erase(&ctx->io_bl_xa, bl->bgid);
+- kfree_rcu(bl, rcu);
+- }
++ xa_erase(&ctx->io_bl_xa, bl->bgid);
++ kfree_rcu(bl, rcu);
+ return 0;
+ }
+
+@@ -780,7 +726,7 @@ void *io_pbuf_get_address(struct io_ring
+ {
+ struct io_buffer_list *bl;
+
+- bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
++ bl = __io_buffer_get_list(ctx, bgid);
+
+ if (!bl || !bl->is_mmap)
+ return NULL;
--- /dev/null
+From 561e4f9451d65fc2f7eef564e0064373e3019793 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 2 Apr 2024 16:16:03 -0600
+Subject: io_uring/kbuf: hold io_buffer_list reference over mmap
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 561e4f9451d65fc2f7eef564e0064373e3019793 upstream.
+
+If we look up the kbuf, ensure that it doesn't get unregistered until
+after we're done with it. Since we're inside mmap, we cannot safely use
+the io_uring lock. Rely on the fact that we can now look up the buffer
+list under RCU and grab a reference to it, preventing it from being
+unregistered until we're done with it. The lookup returns the
+io_buffer_list directly, with a reference already held.
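+
+A minimal userspace analogue of the "take a reference only while it is
+still live" step (hypothetical type and helper names; C11 atomics in
+place of the kernel's atomic_inc_not_zero(), so a sketch rather than the
+actual io_uring code):
+
+  #include <stdatomic.h>
+  #include <stdbool.h>
+
+  struct obj {
+          atomic_int refs;                /* the last put frees the object */
+  };
+
+  /*
+   * Try to take a reference, but only if the count has not already hit
+   * zero (i.e. teardown has not begun). Returns false when the lookup
+   * must fail because the object is on its way out.
+   */
+  static bool get_ref_if_live(struct obj *o)
+  {
+          int old = atomic_load(&o->refs);
+
+          while (old != 0) {
+                  if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
+                          return true;
+          }
+          return false;
+  }
+
+  int main(void)
+  {
+          struct obj o;
+
+          atomic_init(&o.refs, 1);        /* one reference held by the owner */
+          return get_ref_if_live(&o) ? 0 : 1;
+  }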
+
+Cc: stable@vger.kernel.org # v6.4+
+Fixes: 5cf4f52e6d8a ("io_uring: free io_buffer_list entries via RCU")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 11 ++++++-----
+ io_uring/kbuf.c | 31 +++++++++++++++++++++++++------
+ io_uring/kbuf.h | 4 +++-
+ 3 files changed, 34 insertions(+), 12 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3422,14 +3422,15 @@ static void *io_uring_validate_mmap_requ
+ ptr = ctx->sq_sqes;
+ break;
+ case IORING_OFF_PBUF_RING: {
++ struct io_buffer_list *bl;
+ unsigned int bgid;
+
+ bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
+- rcu_read_lock();
+- ptr = io_pbuf_get_address(ctx, bgid);
+- rcu_read_unlock();
+- if (!ptr)
+- return ERR_PTR(-EINVAL);
++ bl = io_pbuf_get_bl(ctx, bgid);
++ if (IS_ERR(bl))
++ return bl;
++ ptr = bl->buf_ring;
++ io_put_bl(ctx, bl);
+ break;
+ }
+ default:
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -275,7 +275,7 @@ static int __io_remove_buffers(struct io
+ return i;
+ }
+
+-static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+ {
+ if (atomic_dec_and_test(&bl->refs)) {
+ __io_remove_buffers(ctx, bl, -1U);
+@@ -728,16 +728,35 @@ int io_register_pbuf_status(struct io_ri
+ return 0;
+ }
+
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++ unsigned long bgid)
+ {
+ struct io_buffer_list *bl;
++ bool ret;
+
+- bl = __io_buffer_get_list(ctx, bgid);
++ /*
++ * We have to be a bit careful here - we're inside mmap and cannot grab
++ * the uring_lock. This means the buffer_list could be simultaneously
++ * going away, if someone is trying to be sneaky. Look it up under rcu
++ * so we know it's not going away, and attempt to grab a reference to
++ * it. If the ref is already zero, then fail the mapping. If successful,
++	 * the caller will call io_put_bl() to drop the reference at the
++	 * end. This may then safely free the buffer_list (and drop the pages)
++	 * at that point; vm_insert_pages() would've already grabbed the
++ * necessary vma references.
++ */
++ rcu_read_lock();
++ bl = xa_load(&ctx->io_bl_xa, bgid);
++ /* must be a mmap'able buffer ring and have pages */
++ ret = false;
++ if (bl && bl->is_mmap)
++ ret = atomic_inc_not_zero(&bl->refs);
++ rcu_read_unlock();
+
+- if (!bl || !bl->is_mmap)
+- return NULL;
++ if (ret)
++ return bl;
+
+- return bl->buf_ring;
++ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -61,7 +61,9 @@ unsigned int __io_put_kbuf(struct io_kio
+
+ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++ unsigned long bgid);
+
+ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
+ {
--- /dev/null
+From 6b69c4ab4f685327d9e10caf0d84217ba23a8c4b Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 15 Mar 2024 16:12:51 -0600
+Subject: io_uring/kbuf: protect io_buffer_list teardown with a reference
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 6b69c4ab4f685327d9e10caf0d84217ba23a8c4b upstream.
+
+No functional changes in this patch, just in preparation for being able
+to keep the buffer list alive outside of the ctx->uring_lock.
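+
+As a rough userspace analogue of the teardown scheme this introduces
+(hypothetical names; C11 atomics instead of the kernel's
+atomic_dec_and_test(), so a sketch rather than the actual io_uring
+code): both unregister and ring teardown simply drop a reference, and
+whoever drops the last one does the freeing.
+
+  #include <stdatomic.h>
+  #include <stdlib.h>
+
+  struct obj {
+          atomic_int refs;
+  };
+
+  static struct obj *obj_new(void)
+  {
+          struct obj *o = malloc(sizeof(*o));
+
+          if (o)
+                  atomic_init(&o->refs, 1);       /* creation reference */
+          return o;
+  }
+
+  /* Whoever drops the final reference performs the actual teardown. */
+  static void obj_put(struct obj *o)
+  {
+          if (atomic_fetch_sub(&o->refs, 1) == 1)
+                  free(o);
+  }
+
+  int main(void)
+  {
+          struct obj *o = obj_new();
+
+          if (!o)
+                  return 1;
+          obj_put(o);                             /* last put frees */
+          return 0;
+  }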
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/kbuf.c | 15 +++++++++++----
+ io_uring/kbuf.h | 2 ++
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -61,6 +61,7 @@ static int io_buffer_add_list(struct io_
+ * always under the ->uring_lock, but the RCU lookup from mmap does.
+ */
+ bl->bgid = bgid;
++ atomic_set(&bl->refs, 1);
+ return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+
+@@ -274,6 +275,14 @@ static int __io_remove_buffers(struct io
+ return i;
+ }
+
++static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++ if (atomic_dec_and_test(&bl->refs)) {
++ __io_remove_buffers(ctx, bl, -1U);
++ kfree_rcu(bl, rcu);
++ }
++}
++
+ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ {
+ struct io_buffer_list *bl;
+@@ -283,8 +292,7 @@ void io_destroy_buffers(struct io_ring_c
+
+ xa_for_each(&ctx->io_bl_xa, index, bl) {
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+- __io_remove_buffers(ctx, bl, -1U);
+- kfree_rcu(bl, rcu);
++ io_put_bl(ctx, bl);
+ }
+
+ /*
+@@ -689,9 +697,8 @@ int io_unregister_pbuf_ring(struct io_ri
+ if (!bl->is_mapped)
+ return -EINVAL;
+
+- __io_remove_buffers(ctx, bl, -1U);
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+- kfree_rcu(bl, rcu);
++ io_put_bl(ctx, bl);
+ return 0;
+ }
+
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -25,6 +25,8 @@ struct io_buffer_list {
+ __u16 head;
+ __u16 mask;
+
++ atomic_t refs;
++
+ /* ring mapped provided buffers */
+ __u8 is_mapped;
+ /* ring mapped provided buffers, but mmap'ed by application */
--- /dev/null
+From 2a975d426c82ff05ec1f0b773798d909fe4a3105 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 1 Apr 2024 11:27:33 -0600
+Subject: io_uring/rw: don't allow multishot reads without NOWAIT support
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 2a975d426c82ff05ec1f0b773798d909fe4a3105 upstream.
+
+Supporting multishot reads requires support for NOWAIT, as the
+alternative would be to always have io-wq execute the work item whenever
+poll readiness triggers. Any fast file type will have NOWAIT
+support (e.g. it understands both O_NONBLOCK and IOCB_NOWAIT). If the
+given file type does not, then simply resort to single-shot execution.
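+
+A condensed sketch of the resulting policy (hypothetical flag and helper
+names, not the io_uring internals), mirroring the two hunks below: clear
+multishot when the file cannot do nonblocking I/O, and only rearm while
+the request is still in multishot mode.
+
+  #include <stdbool.h>
+
+  #define REQ_MULTISHOT   (1u << 0)       /* hypothetical flag bit */
+
+  static bool keep_multishot(unsigned *flags, bool supports_nowait,
+                             long last_ret)
+  {
+          if (!supports_nowait)
+                  *flags &= ~REQ_MULTISHOT;       /* fall back to single shot */
+          /* rearm only on a successful read while still in multishot mode */
+          return last_ret > 0 && (*flags & REQ_MULTISHOT);
+  }
+
+  int main(void)
+  {
+          unsigned flags = REQ_MULTISHOT;
+
+          /* a file without NOWAIT support degrades to single shot */
+          return keep_multishot(&flags, false, 1) ? 1 : 0;
+  }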
+
+Cc: stable@vger.kernel.org
+Fixes: fc68fcda04910 ("io_uring/rw: add support for IORING_OP_READ_MULTISHOT")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/rw.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -938,6 +938,13 @@ int io_read_mshot(struct io_kiocb *req,
+ ret = __io_read(req, issue_flags);
+
+ /*
++ * If the file doesn't support proper NOWAIT, then disable multishot
++ * and stay in single shot mode.
++ */
++ if (!io_file_supports_nowait(req))
++ req->flags &= ~REQ_F_APOLL_MULTISHOT;
++
++ /*
+ * If we get -EAGAIN, recycle our buffer and just let normal poll
+ * handling arm it.
+ */
+@@ -956,7 +963,7 @@ int io_read_mshot(struct io_kiocb *req,
+ /*
+ * Any successful return value will keep the multishot read armed.
+ */
+- if (ret > 0) {
++ if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
+ /*
+ * Put our buffer and post a CQE. If we fail to post a CQE, then
+ * jump to the termination path. This request is then done.
--- /dev/null
+From 73eaa2b583493b680c6f426531d6736c39643bfb Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 1 Apr 2024 15:16:19 -0600
+Subject: io_uring: use private workqueue for exit work
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 73eaa2b583493b680c6f426531d6736c39643bfb upstream.
+
+Rather than use the system unbound event workqueue, use an io_uring
+specific one. This avoids a dependency on the tty layer, which also uses
+system_unbound_wq and issues flushes of that workqueue from inside its
+poll handling.
+
+Cc: stable@vger.kernel.org
+Reported-by: Rasmus Karlsson <rasmus.karlsson@pajlada.com>
+Tested-by: Rasmus Karlsson <rasmus.karlsson@pajlada.com>
+Tested-by: Iskren Chernev <me@iskren.info>
+Link: https://github.com/axboe/liburing/issues/1113
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -152,6 +152,7 @@ static bool io_uring_try_cancel_requests
+ static void io_queue_sqe(struct io_kiocb *req);
+
+ struct kmem_cache *req_cachep;
++static struct workqueue_struct *iou_wq __ro_after_init;
+
+ static int __read_mostly sysctl_io_uring_disabled;
+ static int __read_mostly sysctl_io_uring_group = -1;
+@@ -3139,7 +3140,7 @@ static __cold void io_ring_ctx_wait_and_
+ * noise and overhead, there's no discernable change in runtime
+ * over using system_wq.
+ */
+- queue_work(system_unbound_wq, &ctx->exit_work);
++ queue_work(iou_wq, &ctx->exit_work);
+ }
+
+ static int io_uring_release(struct inode *inode, struct file *file)
+@@ -4164,6 +4165,8 @@ static int __init io_uring_init(void)
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
+ NULL);
+
++ iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
++
+ #ifdef CONFIG_SYSCTL
+ register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+ #endif
asoc-rt712-sdca-sdw-fix-locking-sequence.patch
asoc-rt722-sdca-sdw-fix-locking-sequence.patch
asoc-ops-fix-wraparound-for-mask-in-snd_soc_get_vols.patch
-asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch
spi-s3c64xx-extract-fifo-depth-calculation-to-a-dedi.patch
spi-s3c64xx-sort-headers-alphabetically.patch
spi-s3c64xx-explicitly-include-linux-bits.h.patch
alsa-hda-realtek-add-sound-quirks-for-lenovo-legion-slim-7-16arha7-models.patch
alsa-hda-realtek-cs35l41-support-asus-rog-g634jyr.patch
alsa-hda-realtek-update-panasonic-cf-sz6-quirk-to-support-headset-with-microphone.patch
+io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
+io_uring-kbuf-get-rid-of-bl-is_ready.patch
+io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
+io_uring-rw-don-t-allow-multishot-reads-without-nowait-support.patch
+io_uring-use-private-workqueue-for-exit-work.patch
+io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch