From 09ab7eff38202159271534d2f5ad45526168f2a5 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Thu, 14 Mar 2024 10:45:07 -0600
Subject: io_uring/kbuf: get rid of lower BGID lists

From: Jens Axboe <axboe@kernel.dk>

commit 09ab7eff38202159271534d2f5ad45526168f2a5 upstream.

Just rely on the xarray for any kind of bgid. This simplifies things, and
the special-casing of lower BGIDs really doesn't bring us much, if anything.
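
Not part of the patch, but for readers unfamiliar with the xarray: below is a
minimal sketch of the pattern the commit converges on, where every buffer
group ID (bgid) is resolved through the xarray rather than through a fixed
array reserved for IDs below 64. The demo_* names are hypothetical stand-ins
for io_ring_ctx and io_buffer_list, not code from the patch.

/*
 * Illustrative sketch only. "demo_ctx" and "demo_buffer_list" are
 * hypothetical stand-ins; the real structures carry more state.
 */
#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_buffer_list {
	unsigned int bgid;
	struct rcu_head rcu;
};

struct demo_ctx {
	struct xarray io_bl_xa;	/* keyed by bgid */
};

static struct demo_buffer_list *demo_get_list(struct demo_ctx *ctx,
					      unsigned int bgid)
{
	/* One lookup path for every bgid, low or high. */
	return xa_load(&ctx->io_bl_xa, bgid);
}

static int demo_add_list(struct demo_ctx *ctx, struct demo_buffer_list *bl,
			 unsigned int bgid)
{
	bl->bgid = bgid;
	/* xa_err() turns an error entry returned by xa_store() into an errno. */
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

static void demo_remove_list(struct demo_ctx *ctx, struct demo_buffer_list *bl)
{
	xa_erase(&ctx->io_bl_xa, bl->bgid);
	kfree_rcu(bl, rcu);	/* mirrors the unconditional free in unregister */
}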

Cc: stable@vger.kernel.org # v6.4+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/io_uring_types.h |  1
 io_uring/io_uring.c            |  2 -
 io_uring/kbuf.c                | 70 ++++-------------------------------
 3 files changed, 8 insertions(+), 65 deletions(-)

--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -250,7 +250,6 @@ struct io_ring_ctx {

struct io_submit_state submit_state;

- struct io_buffer_list *io_bl;
struct xarray io_bl_xa;

struct io_hash_table cancel_table_locked;
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -343,7 +343,6 @@ static __cold struct io_ring_ctx *io_rin
err:
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->io_bl);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
return NULL;
@@ -2934,7 +2933,6 @@ static __cold void io_ring_ctx_free(stru
io_wq_put_hash(ctx->hash_map);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->io_bl);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
}
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -17,8 +17,6 @@

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

-#define BGID_ARRAY 64
-
/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

@@ -31,13 +29,9 @@ struct io_provide_buf {
__u16 bid;
};

-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
- struct io_buffer_list *bl,
- unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+ unsigned int bgid)
{
- if (bl && bgid < BGID_ARRAY)
- return &bl[bgid];
-
return xa_load(&ctx->io_bl_xa, bgid);
}

@@ -53,7 +47,7 @@ static inline struct io_buffer_list *io_
{
lockdep_assert_held(&ctx->uring_lock);

- return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+ return __io_buffer_get_list(ctx, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -66,10 +60,6 @@ static int io_buffer_add_list(struct io_
*/
bl->bgid = bgid;
smp_store_release(&bl->is_ready, 1);
-
- if (bgid < BGID_ARRAY)
- return 0;
-
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

@@ -215,24 +205,6 @@ void __user *io_buffer_select(struct io_
return ret;
}

-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
- struct io_buffer_list *bl;
- int i;
-
- bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
- if (!bl)
- return -ENOMEM;
-
- for (i = 0; i < BGID_ARRAY; i++) {
- INIT_LIST_HEAD(&bl[i].buf_list);
- bl[i].bgid = i;
- }
-
- smp_store_release(&ctx->io_bl, bl);
- return 0;
-}
-
/*
* Mark the given mapped range as free for reuse
*/
@@ -305,13 +277,6 @@ void io_destroy_buffers(struct io_ring_c
{
struct io_buffer_list *bl;
unsigned long index;
- int i;
-
- for (i = 0; i < BGID_ARRAY; i++) {
- if (!ctx->io_bl)
- break;
- __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
- }

xa_for_each(&ctx->io_bl_xa, index, bl) {
xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -485,12 +450,6 @@ int io_provide_buffers(struct io_kiocb *

io_ring_submit_lock(ctx, issue_flags);

- if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
- ret = io_init_bl_list(ctx);
- if (ret)
- goto err;
- }
-
bl = io_buffer_get_list(ctx, p->bgid);
if (unlikely(!bl)) {
bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -503,14 +462,9 @@ int io_provide_buffers(struct io_kiocb *
if (ret) {
/*
* Doesn't need rcu free as it was never visible, but
- * let's keep it consistent throughout. Also can't
- * be a lower indexed array group, as adding one
- * where lookup failed cannot happen.
+ * let's keep it consistent throughout.
*/
- if (p->bgid >= BGID_ARRAY)
- kfree_rcu(bl, rcu);
- else
- WARN_ON_ONCE(1);
+ kfree_rcu(bl, rcu);
goto err;
}
}
@@ -675,12 +629,6 @@ int io_register_pbuf_ring(struct io_ring
if (reg.ring_entries >= 65536)
return -EINVAL;

- if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
- int ret = io_init_bl_list(ctx);
- if (ret)
- return ret;
- }
-
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
@@ -730,10 +678,8 @@ int io_unregister_pbuf_ring(struct io_ri
return -EINVAL;

__io_remove_buffers(ctx, bl, -1U);
- if (bl->bgid >= BGID_ARRAY) {
- xa_erase(&ctx->io_bl_xa, bl->bgid);
- kfree_rcu(bl, rcu);
- }
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+ kfree_rcu(bl, rcu);
return 0;
}

@@ -741,7 +687,7 @@ void *io_pbuf_get_address(struct io_ring
{
struct io_buffer_list *bl;

- bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+ bl = __io_buffer_get_list(ctx, bgid);

if (!bl || !bl->is_mmap)
return NULL;