From da9b67a02c5ae18355f8aaac548fa7b407d1a79a Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 8 Apr 2024 11:12:46 +0200
Subject: [PATCH] 6.6-stable patches

added patches:
	io_uring-kbuf-get-rid-of-bl-is_ready.patch
	io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
	io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch
	io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
	io_uring-use-private-workqueue-for-exit-work.patch

---
 ...781-mark-dvc_tlv-with-__maybe_unused.patch |  45 ----
 ...io_uring-kbuf-get-rid-of-bl-is_ready.patch |  55 +++++
 ...ing-kbuf-get-rid-of-lower-bgid-lists.patch | 200 ++++++++++++++++++
 ...d-io_buffer_list-reference-over-mmap.patch | 114 ++++++++++
 ...uffer_list-teardown-with-a-reference.patch |  77 +++++++
 ...-use-private-workqueue-for-exit-work.patch |  53 +++++
 queue-6.6/series                              |   6 +-
 7 files changed, 504 insertions(+), 46 deletions(-)
 delete mode 100644 queue-6.6/asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch
 create mode 100644 queue-6.6/io_uring-kbuf-get-rid-of-bl-is_ready.patch
 create mode 100644 queue-6.6/io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
 create mode 100644 queue-6.6/io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch
 create mode 100644 queue-6.6/io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
 create mode 100644 queue-6.6/io_uring-use-private-workqueue-for-exit-work.patch

diff --git a/queue-6.6/asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch b/queue-6.6/asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch
deleted file mode 100644
index 1662b2b4acb..00000000000
--- a/queue-6.6/asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From 0221e56c87e6e6da9a7362f5758e60e39831401b Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Thu, 28 Mar 2024 23:47:37 +0100
-Subject: ASoC: tas2781: mark dvc_tlv with __maybe_unused
-
-From: Gergo Koteles
-
-[ Upstream commit 831ec5e3538e989c7995137b5c5c661991a09504 ]
-
-Since we put dvc_tlv static variable to a header file it's copied to
-each module that includes the header. But not all of them are actually
-used it.
-
-Fix this W=1 build warning:
-
-include/sound/tas2781-tlv.h:18:35: warning: 'dvc_tlv' defined but not
-used [-Wunused-const-variable=]
-
-Reported-by: kernel test robot
-Closes: https://lore.kernel.org/oe-kbuild-all/202403290354.v0StnRpc-lkp@intel.com/
-Fixes: ae065d0ce9e3 ("ALSA: hda/tas2781: remove digital gain kcontrol")
-Signed-off-by: Gergo Koteles
-Message-ID: <0e461545a2a6e9b6152985143e50526322e5f76b.1711665731.git.soyer@irl.hu>
-Signed-off-by: Takashi Iwai
-Signed-off-by: Sasha Levin
----
- include/sound/tas2781-tlv.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
-index 4038dd421150a..1dc59005d241f 100644
---- a/include/sound/tas2781-tlv.h
-+++ b/include/sound/tas2781-tlv.h
-@@ -15,7 +15,7 @@
- #ifndef __TAS2781_TLV_H__
- #define __TAS2781_TLV_H__
-
--static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
-+static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
- static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
-
- #endif
---
-2.43.0
-
diff --git a/queue-6.6/io_uring-kbuf-get-rid-of-bl-is_ready.patch b/queue-6.6/io_uring-kbuf-get-rid-of-bl-is_ready.patch
new file mode 100644
index 00000000000..891b262b2b9
--- /dev/null
+++ b/queue-6.6/io_uring-kbuf-get-rid-of-bl-is_ready.patch
@@ -0,0 +1,55 @@
+From 3b80cff5a4d117c53d38ce805823084eaeffbde6 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Thu, 14 Mar 2024 10:46:40 -0600
+Subject: io_uring/kbuf: get rid of bl->is_ready
+
+From: Jens Axboe
+
+commit 3b80cff5a4d117c53d38ce805823084eaeffbde6 upstream.
+
+Now that xarray is being exclusively used for the buffer_list lookup,
+this check is no longer needed. Get rid of it and the is_ready member.
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/kbuf.c |    8 --------
+ io_uring/kbuf.h |    2 --
+ 2 files changed, 10 deletions(-)
+
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -59,7 +59,6 @@ static int io_buffer_add_list(struct io_
+ 	 * always under the ->uring_lock, but the RCU lookup from mmap does.
+ 	 */
+ 	bl->bgid = bgid;
+-	smp_store_release(&bl->is_ready, 1);
+ 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+ 
+@@ -691,13 +690,6 @@ void *io_pbuf_get_address(struct io_ring
+ 
+ 	if (!bl || !bl->is_mmap)
+ 		return NULL;
+-	/*
+-	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
+-	 * via mmap, and in that case only for the array indexed groups. For
+-	 * the xarray lookups, it's either visible and ready, or not at all.
+-	 */
+-	if (!smp_load_acquire(&bl->is_ready))
+-		return NULL;
+ 
+ 	return bl->buf_ring;
+ }
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -29,8 +29,6 @@ struct io_buffer_list {
+ 	__u8 is_mapped;
+ 	/* ring mapped provided buffers, but mmap'ed by application */
+ 	__u8 is_mmap;
+-	/* bl is visible from an RCU point of view for lookup */
+-	__u8 is_ready;
+ };
+ 
+ struct io_buffer {
diff --git a/queue-6.6/io_uring-kbuf-get-rid-of-lower-bgid-lists.patch b/queue-6.6/io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
new file mode 100644
index 00000000000..fea6abbf272
--- /dev/null
+++ b/queue-6.6/io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
@@ -0,0 +1,200 @@
+From 09ab7eff38202159271534d2f5ad45526168f2a5 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Thu, 14 Mar 2024 10:45:07 -0600
+Subject: io_uring/kbuf: get rid of lower BGID lists
+
+From: Jens Axboe
+
+commit 09ab7eff38202159271534d2f5ad45526168f2a5 upstream.
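[Annotation for readers of this queue, not part of the quoted commit: the two
kbuf patches above and below converge on the plain xarray publish/lookup
pattern, where xa_store() makes an entry visible and xa_load() sees either the
fully stored entry or nothing, so no separate is_ready flag or low-BGID array
is needed. A minimal sketch of that pattern follows; struct buf_list,
publish_bl() and lookup_bl() are illustrative names, not the io_uring code.]

/* Hedged sketch of xarray publish/lookup; not the actual io_uring code. */
#include <linux/slab.h>
#include <linux/xarray.h>

struct buf_list {
	unsigned int bgid;	/* buffer group ID, used as the xarray index */
};

static DEFINE_XARRAY(bl_xa);

/* One publish path for every bgid - no special-casing of low IDs. */
static int publish_bl(unsigned int bgid)
{
	struct buf_list *bl = kzalloc(sizeof(*bl), GFP_KERNEL);

	if (!bl)
		return -ENOMEM;
	bl->bgid = bgid;
	return xa_err(xa_store(&bl_xa, bgid, bl, GFP_KERNEL));
}

/* xa_load() returns the fully stored entry or NULL, never a partial one. */
static struct buf_list *lookup_bl(unsigned int bgid)
{
	return xa_load(&bl_xa, bgid);
}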
+
+Just rely on the xarray for any kind of bgid. This simplifies things, and
+it really doesn't bring us much, if anything.
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/io_uring_types.h |    1 
+ io_uring/io_uring.c            |    2 -
+ io_uring/kbuf.c                |   70 ++++-------------------------------------
+ 3 files changed, 8 insertions(+), 65 deletions(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -250,7 +250,6 @@ struct io_ring_ctx {
+ 
+ 	struct io_submit_state	submit_state;
+ 
+-	struct io_buffer_list	*io_bl;
+ 	struct xarray		io_bl_xa;
+ 
+ 	struct io_hash_table	cancel_table_locked;
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -343,7 +343,6 @@ static __cold struct io_ring_ctx *io_rin
+ err:
+ 	kfree(ctx->cancel_table.hbs);
+ 	kfree(ctx->cancel_table_locked.hbs);
+-	kfree(ctx->io_bl);
+ 	xa_destroy(&ctx->io_bl_xa);
+ 	kfree(ctx);
+ 	return NULL;
+@@ -2934,7 +2933,6 @@ static __cold void io_ring_ctx_free(stru
+ 	io_wq_put_hash(ctx->hash_map);
+ 	kfree(ctx->cancel_table.hbs);
+ 	kfree(ctx->cancel_table_locked.hbs);
+-	kfree(ctx->io_bl);
+ 	xa_destroy(&ctx->io_bl_xa);
+ 	kfree(ctx);
+ }
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -17,8 +17,6 @@
+ 
+ #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
+ 
+-#define BGID_ARRAY	64
+-
+ /* BIDs are addressed by a 16-bit field in a CQE */
+ #define MAX_BIDS_PER_BGID (1 << 16)
+ 
+@@ -31,13 +29,9 @@ struct io_provide_buf {
+ 	__u16 bid;
+ };
+ 
+-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+-						   struct io_buffer_list *bl,
+-						   unsigned int bgid)
++static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
++							  unsigned int bgid)
+ {
+-	if (bl && bgid < BGID_ARRAY)
+-		return &bl[bgid];
+-
+ 	return xa_load(&ctx->io_bl_xa, bgid);
+ }
+ 
+@@ -53,7 +47,7 @@ static inline struct io_buffer_list *io_
+ {
+ 	lockdep_assert_held(&ctx->uring_lock);
+ 
+-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
++	return __io_buffer_get_list(ctx, bgid);
+ }
+ 
+ static int io_buffer_add_list(struct io_ring_ctx *ctx,
+@@ -66,10 +60,6 @@ static int io_buffer_add_list(struct io_
+ 	 */
+ 	bl->bgid = bgid;
+ 	smp_store_release(&bl->is_ready, 1);
+-
+-	if (bgid < BGID_ARRAY)
+-		return 0;
+-
+ 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+ 
+@@ -215,24 +205,6 @@ void __user *io_buffer_select(struct io_
+ 	return ret;
+ }
+ 
+-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
+-{
+-	struct io_buffer_list *bl;
+-	int i;
+-
+-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
+-	if (!bl)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < BGID_ARRAY; i++) {
+-		INIT_LIST_HEAD(&bl[i].buf_list);
+-		bl[i].bgid = i;
+-	}
+-
+-	smp_store_release(&ctx->io_bl, bl);
+-	return 0;
+-}
+-
+ /*
+  * Mark the given mapped range as free for reuse
+  */
+@@ -305,13 +277,6 @@ void io_destroy_buffers(struct io_ring_c
+ {
+ 	struct io_buffer_list *bl;
+ 	unsigned long index;
+-	int i;
+-
+-	for (i = 0; i < BGID_ARRAY; i++) {
+-		if (!ctx->io_bl)
+-			break;
+-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
+-	}
+ 
+ 	xa_for_each(&ctx->io_bl_xa, index, bl) {
+ 		xa_erase(&ctx->io_bl_xa, bl->bgid);
+@@ -485,12 +450,6 @@ int io_provide_buffers(struct io_kiocb *
+ 
+ 	io_ring_submit_lock(ctx, issue_flags);
+ 
+-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
+-		ret = io_init_bl_list(ctx);
+-		if (ret)
+-			goto err;
+-	}
+-
+ 	bl = io_buffer_get_list(ctx, p->bgid);
+ 	if (unlikely(!bl)) {
+ 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+@@ -503,14 +462,9 @@ int io_provide_buffers(struct io_kiocb *
+ 		if (ret) {
+ 			/*
+ 			 * Doesn't need rcu free as it was never visible, but
+-			 * let's keep it consistent throughout. Also can't
+-			 * be a lower indexed array group, as adding one
+-			 * where lookup failed cannot happen.
++			 * let's keep it consistent throughout.
+ 			 */
+-			if (p->bgid >= BGID_ARRAY)
+-				kfree_rcu(bl, rcu);
+-			else
+-				WARN_ON_ONCE(1);
++			kfree_rcu(bl, rcu);
+ 			goto err;
+ 		}
+ 	}
+@@ -675,12 +629,6 @@ int io_register_pbuf_ring(struct io_ring
+ 	if (reg.ring_entries >= 65536)
+ 		return -EINVAL;
+ 
+-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
+-		int ret = io_init_bl_list(ctx);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	bl = io_buffer_get_list(ctx, reg.bgid);
+ 	if (bl) {
+ 		/* if mapped buffer ring OR classic exists, don't allow */
+@@ -730,10 +678,8 @@ int io_unregister_pbuf_ring(struct io_ri
+ 		return -EINVAL;
+ 
+ 	__io_remove_buffers(ctx, bl, -1U);
+-	if (bl->bgid >= BGID_ARRAY) {
+-		xa_erase(&ctx->io_bl_xa, bl->bgid);
+-		kfree_rcu(bl, rcu);
+-	}
++	xa_erase(&ctx->io_bl_xa, bl->bgid);
++	kfree_rcu(bl, rcu);
+ 	return 0;
+ }
+ 
+@@ -741,7 +687,7 @@ void *io_pbuf_get_address(struct io_ring
+ {
+ 	struct io_buffer_list *bl;
+ 
+-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
++	bl = __io_buffer_get_list(ctx, bgid);
+ 
+ 	if (!bl || !bl->is_mmap)
+ 		return NULL;
diff --git a/queue-6.6/io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch b/queue-6.6/io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch
new file mode 100644
index 00000000000..a3162a02d7e
--- /dev/null
+++ b/queue-6.6/io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch
@@ -0,0 +1,114 @@
+From 561e4f9451d65fc2f7eef564e0064373e3019793 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Tue, 2 Apr 2024 16:16:03 -0600
+Subject: io_uring/kbuf: hold io_buffer_list reference over mmap
+
+From: Jens Axboe
+
+commit 561e4f9451d65fc2f7eef564e0064373e3019793 upstream.
+
+If we look up the kbuf, ensure that it doesn't get unregistered until
+after we're done with it. Since we're inside mmap, we cannot safely use
+the io_uring lock. Rely on the fact that we can look up the buffer list
+under RCU now and grab a reference to it, preventing it from being
+unregistered until we're done with it. The lookup returns the
+io_buffer_list directly with it referenced.
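[Annotation, not part of the quoted commit: the mechanism this patch relies on
is the standard RCU lookup-then-pin idiom - look the object up under
rcu_read_lock(), take a reference with atomic_inc_not_zero(), and fail if the
refcount already hit zero. A hedged, self-contained sketch of that idiom
follows; struct pinned_obj, lookup_and_pin() and unpin() are generic names,
not the io_uring code.]

/* Sketch of RCU lookup + refcount pin; illustrative only. */
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct pinned_obj {
	atomic_t refs;
	struct rcu_head rcu;
};

/* Look up under RCU and pin; returns NULL if teardown already started. */
static struct pinned_obj *lookup_and_pin(struct xarray *xa, unsigned long id)
{
	struct pinned_obj *obj;

	rcu_read_lock();
	obj = xa_load(xa, id);
	/* If refs already dropped to zero, the object is on its way out. */
	if (obj && !atomic_inc_not_zero(&obj->refs))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}

/* Drop the pin; the final put frees, but only after an RCU grace period. */
static void unpin(struct pinned_obj *obj)
{
	if (atomic_dec_and_test(&obj->refs))
		kfree_rcu(obj, rcu);
}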
+
+Cc: stable@vger.kernel.org # v6.4+
+Fixes: 5cf4f52e6d8a ("io_uring: free io_buffer_list entries via RCU")
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |   11 ++++++-----
+ io_uring/kbuf.c     |   31 +++++++++++++++++++++++++------
+ io_uring/kbuf.h     |    4 +++-
+ 3 files changed, 34 insertions(+), 12 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3429,14 +3429,15 @@ static void *io_uring_validate_mmap_requ
+ 		ptr = ctx->sq_sqes;
+ 		break;
+ 	case IORING_OFF_PBUF_RING: {
++		struct io_buffer_list *bl;
+ 		unsigned int bgid;
+ 
+ 		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
+-		rcu_read_lock();
+-		ptr = io_pbuf_get_address(ctx, bgid);
+-		rcu_read_unlock();
+-		if (!ptr)
+-			return ERR_PTR(-EINVAL);
++		bl = io_pbuf_get_bl(ctx, bgid);
++		if (IS_ERR(bl))
++			return bl;
++		ptr = bl->buf_ring;
++		io_put_bl(ctx, bl);
+ 		break;
+ 	}
+ 	default:
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -273,7 +273,7 @@ static int __io_remove_buffers(struct io
+ 	return i;
+ }
+ 
+-static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+ {
+ 	if (atomic_dec_and_test(&bl->refs)) {
+ 		__io_remove_buffers(ctx, bl, -1U);
+@@ -689,16 +689,35 @@ int io_unregister_pbuf_ring(struct io_ri
+ 	return 0;
+ }
+ 
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++				      unsigned long bgid)
+ {
+ 	struct io_buffer_list *bl;
++	bool ret;
+ 
+-	bl = __io_buffer_get_list(ctx, bgid);
++	/*
++	 * We have to be a bit careful here - we're inside mmap and cannot grab
++	 * the uring_lock. This means the buffer_list could be simultaneously
++	 * going away, if someone is trying to be sneaky. Look it up under rcu
++	 * so we know it's not going away, and attempt to grab a reference to
++	 * it. If the ref is already zero, then fail the mapping. If successful,
++	 * the caller will call io_put_bl() to drop the reference at the
++	 * end. This may then safely free the buffer_list (and drop the pages)
++	 * at that point, vm_insert_pages() would've already grabbed the
++	 * necessary vma references.
++	 */
++	rcu_read_lock();
++	bl = xa_load(&ctx->io_bl_xa, bgid);
++	/* must be a mmap'able buffer ring and have pages */
++	ret = false;
++	if (bl && bl->is_mmap)
++		ret = atomic_inc_not_zero(&bl->refs);
++	rcu_read_unlock();
+ 
+-	if (!bl || !bl->is_mmap)
+-		return NULL;
++	if (ret)
++		return bl;
+ 
+-	return bl->buf_ring;
++	return ERR_PTR(-EINVAL);
+ }
+ 
+ /*
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -60,7 +60,9 @@ unsigned int __io_put_kbuf(struct io_kio
+ 
+ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+ 
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++				      unsigned long bgid);
+ 
+ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+ {
diff --git a/queue-6.6/io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch b/queue-6.6/io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
new file mode 100644
index 00000000000..4cc281a47f4
--- /dev/null
+++ b/queue-6.6/io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
@@ -0,0 +1,77 @@
+From 6b69c4ab4f685327d9e10caf0d84217ba23a8c4b Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Fri, 15 Mar 2024 16:12:51 -0600
+Subject: io_uring/kbuf: protect io_buffer_list teardown with a reference
+
+From: Jens Axboe
+
+commit 6b69c4ab4f685327d9e10caf0d84217ba23a8c4b upstream.
+
+No functional changes in this patch, just in preparation for being able
+to keep the buffer list alive outside of the ctx->uring_lock.
+
+Cc: stable@vger.kernel.org # v6.4+
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/kbuf.c |   15 +++++++++++----
+ io_uring/kbuf.h |    2 ++
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -59,6 +59,7 @@ static int io_buffer_add_list(struct io_
+ 	 * always under the ->uring_lock, but the RCU lookup from mmap does.
+ 	 */
+ 	bl->bgid = bgid;
++	atomic_set(&bl->refs, 1);
+ 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+ 
+@@ -272,6 +273,14 @@ static int __io_remove_buffers(struct io
+ 	return i;
+ }
+ 
++static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++	if (atomic_dec_and_test(&bl->refs)) {
++		__io_remove_buffers(ctx, bl, -1U);
++		kfree_rcu(bl, rcu);
++	}
++}
++
+ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ {
+ 	struct io_buffer_list *bl;
+@@ -279,8 +288,7 @@ void io_destroy_buffers(struct io_ring_c
+ 
+ 	xa_for_each(&ctx->io_bl_xa, index, bl) {
+ 		xa_erase(&ctx->io_bl_xa, bl->bgid);
+-		__io_remove_buffers(ctx, bl, -1U);
+-		kfree_rcu(bl, rcu);
++		io_put_bl(ctx, bl);
+ 	}
+ 
+ 	while (!list_empty(&ctx->io_buffers_pages)) {
+@@ -676,9 +684,8 @@ int io_unregister_pbuf_ring(struct io_ri
+ 	if (!bl->is_mapped)
+ 		return -EINVAL;
+ 
+-	__io_remove_buffers(ctx, bl, -1U);
+ 	xa_erase(&ctx->io_bl_xa, bl->bgid);
+-	kfree_rcu(bl, rcu);
++	io_put_bl(ctx, bl);
+ 	return 0;
+ }
+ 
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -25,6 +25,8 @@ struct io_buffer_list {
+ 	__u16 head;
+ 	__u16 mask;
+ 
++	atomic_t refs;
++
+ 	/* ring mapped provided buffers */
+ 	__u8 is_mapped;
+ 	/* ring mapped provided buffers, but mmap'ed by application */
diff --git a/queue-6.6/io_uring-use-private-workqueue-for-exit-work.patch b/queue-6.6/io_uring-use-private-workqueue-for-exit-work.patch
new file mode 100644
index 00000000000..03278dcb15a
--- /dev/null
+++ b/queue-6.6/io_uring-use-private-workqueue-for-exit-work.patch
@@ -0,0 +1,53 @@
+From 73eaa2b583493b680c6f426531d6736c39643bfb Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 1 Apr 2024 15:16:19 -0600
+Subject: io_uring: use private workqueue for exit work
+
+From: Jens Axboe
+
+commit 73eaa2b583493b680c6f426531d6736c39643bfb upstream.
+
+Rather than use the system unbound event workqueue, use an io_uring
+specific one. This avoids dependencies with the tty, which also uses
+the system_unbound_wq, and issues flushes of said workqueue from inside
+its poll handling.
+
+Cc: stable@vger.kernel.org
+Reported-by: Rasmus Karlsson
+Tested-by: Rasmus Karlsson
+Tested-by: Iskren Chernev
+Link: https://github.com/axboe/liburing/issues/1113
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -148,6 +148,7 @@ static bool io_uring_try_cancel_requests
+ static void io_queue_sqe(struct io_kiocb *req);
+ 
+ struct kmem_cache *req_cachep;
++static struct workqueue_struct *iou_wq __ro_after_init;
+ 
+ static int __read_mostly sysctl_io_uring_disabled;
+ static int __read_mostly sysctl_io_uring_group = -1;
+@@ -3180,7 +3181,7 @@ static __cold void io_ring_ctx_wait_and_
+ 	 * noise and overhead, there's no discernable change in runtime
+ 	 * over using system_wq.
+ 	 */
+-	queue_work(system_unbound_wq, &ctx->exit_work);
++	queue_work(iou_wq, &ctx->exit_work);
+ }
+ 
+ static int io_uring_release(struct inode *inode, struct file *file)
+@@ -4664,6 +4665,8 @@ static int __init io_uring_init(void)
+ 				offsetof(struct io_kiocb, cmd.data),
+ 				sizeof_field(struct io_kiocb, cmd.data), NULL);
+ 
++	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
++
+ #ifdef CONFIG_SYSCTL
+ 	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+ #endif
diff --git a/queue-6.6/series b/queue-6.6/series
index 94bf5bce9b3..dcc715a04b1 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -171,7 +171,6 @@ asoc-rt711-sdw-fix-locking-sequence.patch
 asoc-rt712-sdca-sdw-fix-locking-sequence.patch
 asoc-rt722-sdca-sdw-fix-locking-sequence.patch
 asoc-ops-fix-wraparound-for-mask-in-snd_soc_get_vols.patch
-asoc-tas2781-mark-dvc_tlv-with-__maybe_unused.patch
 spi-s3c64xx-extract-fifo-depth-calculation-to-a-dedi.patch
 spi-s3c64xx-sort-headers-alphabetically.patch
 spi-s3c64xx-explicitly-include-linux-bits.h.patch
@@ -207,3 +206,8 @@ ksmbd-validate-payload-size-in-ipc-response.patch
 ksmbd-do-not-set-smb2_global_cap_encryption-for-smb-3.1.1.patch
 alsa-hda-realtek-fix-inactive-headset-mic-jack.patch
 alsa-hda-realtek-update-panasonic-cf-sz6-quirk-to-support-headset-with-microphone.patch
+io_uring-kbuf-get-rid-of-lower-bgid-lists.patch
+io_uring-kbuf-get-rid-of-bl-is_ready.patch
+io_uring-kbuf-protect-io_buffer_list-teardown-with-a-reference.patch
+io_uring-use-private-workqueue-for-exit-work.patch
+io_uring-kbuf-hold-io_buffer_list-reference-over-mmap.patch
-- 
2.47.3
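
[Annotation following the queue, not part of the mail: the exit-work patch
above is an instance of a general pattern - give a subsystem its own unbound
workqueue so that no outside flush of system_unbound_wq can deadlock against
its long-running work. A hedged sketch of that pattern follows; the my_*
names are illustrative, not kernel identifiers.]

/* Sketch: private unbound workqueue for long-running teardown work. */
#include <linux/workqueue.h>

static struct workqueue_struct *my_exit_wq;

static int __init my_subsys_init(void)
{
	/* WQ_UNBOUND with max_active 64, mirroring the patch's parameters */
	my_exit_wq = alloc_workqueue("my_exit", WQ_UNBOUND, 64);
	if (!my_exit_wq)
		return -ENOMEM;
	return 0;
}

static void my_queue_exit(struct work_struct *exit_work)
{
	/* queue on the private wq instead of system_unbound_wq */
	queue_work(my_exit_wq, exit_work);
}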