git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 3 Dec 2023 17:24:25 +0000 (18:24 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 3 Dec 2023 17:24:25 +0000 (18:24 +0100)
added patches:
io_uring-enable-io_mem_alloc-free-to-be-used-in-other-parts.patch
io_uring-kbuf-defer-release-of-mapped-buffer-rings.patch

queue-6.6/io_uring-enable-io_mem_alloc-free-to-be-used-in-other-parts.patch [new file with mode: 0644]
queue-6.6/io_uring-kbuf-defer-release-of-mapped-buffer-rings.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/io_uring-enable-io_mem_alloc-free-to-be-used-in-other-parts.patch b/queue-6.6/io_uring-enable-io_mem_alloc-free-to-be-used-in-other-parts.patch
new file mode 100644 (file)
index 0000000..fc18edb
--- /dev/null
@@ -0,0 +1,51 @@
+From edecf1689768452ba1a64b7aaf3a47a817da651a Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 27 Nov 2023 20:53:52 -0700
+Subject: io_uring: enable io_mem_alloc/free to be used in other parts
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit edecf1689768452ba1a64b7aaf3a47a817da651a upstream.
+
+In preparation for using these helpers, make them non-static and add
+them to our internal header.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    4 ++--
+ io_uring/io_uring.h |    3 +++
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2659,7 +2659,7 @@ static int io_cqring_wait(struct io_ring
+       return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+ }
+
+-static void io_mem_free(void *ptr)
++void io_mem_free(void *ptr)
+ {
+       if (!ptr)
+               return;
+@@ -2771,7 +2771,7 @@ static void io_rings_free(struct io_ring
+       }
+ }
+
+-static void *io_mem_alloc(size_t size)
++void *io_mem_alloc(size_t size)
+ {
+       gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+       void *ret;
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -86,6 +86,9 @@ bool __io_alloc_req_refill(struct io_rin
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+                       bool cancel_all);
+
++void *io_mem_alloc(size_t size);
++void io_mem_free(void *ptr);
++
+ #if defined(CONFIG_PROVE_LOCKING)
+ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+ {
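
For context, the two helpers being exported wrap the compound-page allocation io_uring uses for its ring memory. A rough sketch of their 6.6-era bodies follows; the gfp mask is visible in the hunk above, while the folio-based free path is inferred from the surrounding kernel source rather than shown in this patch:

/* Sketch: allocate zeroed, accounted compound pages for ring memory. */
void *io_mem_alloc(size_t size)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;

	/* __GFP_COMP yields one compound page, freeable with one folio_put() */
	return (void *) __get_free_pages(gfp, get_order(size));
}

/* Sketch: free ring memory obtained from io_mem_alloc(), if any. */
void io_mem_free(void *ptr)
{
	if (!ptr)
		return;

	folio_put(virt_to_folio(ptr));	/* inferred from 6.6 io_uring.c */
}

Making these non-static lets kbuf.c allocate buffer-ring memory through the same accounted path, which the next patch relies on.
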
diff --git a/queue-6.6/io_uring-kbuf-defer-release-of-mapped-buffer-rings.patch b/queue-6.6/io_uring-kbuf-defer-release-of-mapped-buffer-rings.patch
new file mode 100644 (file)
index 0000000..89bee95
--- /dev/null
@@ -0,0 +1,158 @@
+From c392cbecd8eca4c53f2bf508731257d9d0a21c2d Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 27 Nov 2023 16:47:04 -0700
+Subject: io_uring/kbuf: defer release of mapped buffer rings
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit c392cbecd8eca4c53f2bf508731257d9d0a21c2d upstream.
+
+If a provided buffer ring is set up with IOU_PBUF_RING_MMAP, then the
+kernel allocates the memory for it and the application is expected to
+mmap(2) this memory. However, io_uring uses remap_pfn_range() for this
+operation, so we cannot rely on a normal munmap/release to free that
+memory for us.
+
+Stash an io_buf_free entry away for each of these, if any, and provide
+a helper to free them post ->release().
+
+Cc: stable@vger.kernel.org
+Fixes: c56e022c0a27 ("io_uring: add support for user mapped provided buffer ring")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/io_uring_types.h |    3 ++
+ io_uring/io_uring.c            |    2 +
+ io_uring/kbuf.c                |   44 ++++++++++++++++++++++++++++++++++++-----
+ io_uring/kbuf.h                |    2 +
+ 4 files changed, 46 insertions(+), 5 deletions(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -327,6 +327,9 @@ struct io_ring_ctx {
+
+       struct list_head        io_buffers_cache;
+
++      /* deferred free list, protected by ->uring_lock */
++      struct hlist_head       io_buf_list;
++
+       /* Keep this last, we don't need it for the fast path */
+       struct wait_queue_head          poll_wq;
+       struct io_restriction           restrictions;
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -323,6 +323,7 @@ static __cold struct io_ring_ctx *io_rin
+       INIT_LIST_HEAD(&ctx->sqd_list);
+       INIT_LIST_HEAD(&ctx->cq_overflow_list);
+       INIT_LIST_HEAD(&ctx->io_buffers_cache);
++      INIT_HLIST_HEAD(&ctx->io_buf_list);
+       io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
+                           sizeof(struct io_rsrc_node));
+       io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
+@@ -2942,6 +2943,7 @@ static __cold void io_ring_ctx_free(stru
+               ctx->mm_account = NULL;
+       }
+       io_rings_free(ctx);
++      io_kbuf_mmap_list_free(ctx);
+
+       percpu_ref_exit(&ctx->refs);
+       free_uid(ctx->user);
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -41,6 +41,11 @@ static struct io_buffer_list *__io_buffe
+       return xa_load(&ctx->io_bl_xa, bgid);
+ }
+
++struct io_buf_free {
++      struct hlist_node               list;
++      void                            *mem;
++};
++
+ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
+                                                       unsigned int bgid)
+ {
+@@ -238,7 +243,10 @@ static int __io_remove_buffers(struct io
+       if (bl->is_mapped) {
+               i = bl->buf_ring->tail - bl->head;
+               if (bl->is_mmap) {
+-                      folio_put(virt_to_folio(bl->buf_ring));
++                      /*
++                       * io_kbuf_list_free() will free the page(s) at
++                       * ->release() time.
++                       */
+                       bl->buf_ring = NULL;
+                       bl->is_mmap = 0;
+               } else if (bl->buf_nr_pages) {
+@@ -552,18 +560,28 @@ error_unpin:
+       return -EINVAL;
+ }
+
+-static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
++static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
++                            struct io_uring_buf_reg *reg,
+                             struct io_buffer_list *bl)
+ {
+-      gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
++      struct io_buf_free *ibf;
+       size_t ring_size;
+       void *ptr;
+
+       ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
+-      ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
++      ptr = io_mem_alloc(ring_size);
+       if (!ptr)
+               return -ENOMEM;
+
++      /* Allocate and store deferred free entry */
++      ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
++      if (!ibf) {
++              io_mem_free(ptr);
++              return -ENOMEM;
++      }
++      ibf->mem = ptr;
++      hlist_add_head(&ibf->list, &ctx->io_buf_list);
++
+       bl->buf_ring = ptr;
+       bl->is_mapped = 1;
+       bl->is_mmap = 1;
+@@ -622,7 +640,7 @@ int io_register_pbuf_ring(struct io_ring
+       if (!(reg.flags & IOU_PBUF_RING_MMAP))
+               ret = io_pin_pbuf_ring(&reg, bl);
+       else
+-              ret = io_alloc_pbuf_ring(&reg, bl);
++              ret = io_alloc_pbuf_ring(ctx, &reg, bl);
+
+       if (!ret) {
+               bl->nr_entries = reg.ring_entries;
+@@ -682,3 +700,19 @@ void *io_pbuf_get_address(struct io_ring
+
+       return bl->buf_ring;
+ }
++
++/*
++ * Called at or after ->release(), free the mmap'ed buffers that we used
++ * for memory mapped provided buffer rings.
++ */
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
++{
++      struct io_buf_free *ibf;
++      struct hlist_node *tmp;
++
++      hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
++              hlist_del(&ibf->list);
++              io_mem_free(ibf->mem);
++              kfree(ibf);
++      }
++}
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -54,6 +54,8 @@ int io_provide_buffers(struct io_kiocb *
+ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
++
+ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
+ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
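
To see the lifecycle this fixes from the application side, here is a hypothetical userspace sketch, not part of the patch; map_pbuf_ring, ring_fd, bgid and entries are illustrative names, while the register opcode, flag and offset encoding come from the io_uring uapi header:

#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Hypothetical sketch: ask the kernel to allocate a provided buffer
 * ring (IOU_PBUF_RING_MMAP), then mmap(2) it. ring_fd is an existing
 * io_uring fd; bgid and entries are caller-chosen.
 */
static struct io_uring_buf_ring *map_pbuf_ring(int ring_fd, unsigned bgid,
					       unsigned entries)
{
	struct io_uring_buf_reg reg;
	void *ptr;

	memset(&reg, 0, sizeof(reg));
	reg.ring_entries = entries;
	reg.bgid = bgid;
	reg.flags = IOU_PBUF_RING_MMAP;	/* kernel allocates the memory */

	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_PBUF_RING, &reg, 1) < 0)
		return NULL;

	/* The mmap offset encodes which buffer group to map. */
	ptr = mmap(NULL, entries * sizeof(struct io_uring_buf),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, ring_fd,
		   IORING_OFF_PBUF_RING |
		   ((unsigned long long)bgid << IORING_OFF_PBUF_SHIFT));
	return ptr == MAP_FAILED ? NULL : ptr;
}

Because the kernel maps these pages to userspace with remap_pfn_range(), munmap() on them drops the PTEs without taking page references, so the backing memory cannot be freed from the unmap path; the io_buf_list entries added above keep it alive until io_kbuf_mmap_list_free() runs after ->release().
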
diff --git a/queue-6.6/series b/queue-6.6/series
index 48b4f19220560085458653b413c2d345cc52d45a..39368f327acb5b4c38b5a85a36180c0accff9711 100644 (file)
--- a/queue-6.6/series
@@ -68,3 +68,5 @@ btrfs-send-ensure-send_fd-is-writable.patch
 btrfs-make-error-messages-more-clear-when-getting-a-chunk-map.patch
 btrfs-free-the-allocated-memory-if-btrfs_alloc_page_array-fails.patch
 btrfs-fix-64bit-compat-send-ioctl-arguments-not-initializing-version-member.patch
+io_uring-enable-io_mem_alloc-free-to-be-used-in-other-parts.patch
+io_uring-kbuf-defer-release-of-mapped-buffer-rings.patch