// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts);

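/*
 * iw->refs both counts references and carries ownership state: bit 31 is
 * set once cancelation has been requested, while bits 30..0 count the
 * holders. Ownership of completion is claimed by bumping the count from
 * zero; a non-zero masked value means the issue path, a wakeup, or a
 * cancelation already owns the request.
 */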
#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

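/*
 * Per-request waitid state. which/upid/options mirror the waitid(2)
 * arguments, head is the waitqueue this request is (or was) armed on,
 * and info receives the reaped child's exit details, copied out to the
 * user's infop on completion.
 */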
struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

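/*
 * Drop the pid reference taken by kernel_waitid_prepare() at issue time
 * and release the async wait state.
 */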
static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;
}

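/*
 * Both siginfo copy helpers below use the user_write_access_begin() /
 * unsafe_put_user() pattern: any faulting store branches to the Efault
 * label, and user_write_access_end() runs on every exit path.
 */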
#ifdef CONFIG_COMPAT
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}
#endif

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_waitid_compat_copy_si(iw, signo);
#endif

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

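/*
 * A positive return from __do_wait() means a child was reaped and its exit
 * info is in iw->info; per waitid(2) semantics that becomes a result of 0
 * with SIGCHLD in the copied-out siginfo.
 */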
static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

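/*
 * Complete the request on behalf of whichever side claimed ownership:
 * unhash it from the cancelation list and post the CQE.
 */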
static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_tw_state ts = { .locked = true };

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	/*
	 * Did cancel find it meanwhile?
	 */
	if (hlist_unhashed(&req->hash_node))
		return;

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	io_req_task_complete(req, &ts);
}

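/*
 * Attempt to cancel one request. The cancel flag is set first so a spurious
 * wakeup cannot re-arm the wait; ownership is then claimed via the refcount.
 * If someone else already owns the request, they will observe the flag and
 * cancel it on our behalf, so return false to report nothing was done here.
 */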
static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);
	io_waitid_complete(req, -ECANCELED);
	return true;
}

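/*
 * Handle IORING_OP_ASYNC_CANCEL against pending waitid requests: walk the
 * per-ring waitid list under the ring lock, matching on user_data unless
 * IORING_ASYNC_CANCEL_ANY is set, and stopping after the first match unless
 * IORING_ASYNC_CANCEL_ALL is set. FD-based match criteria don't apply here.
 */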
int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int nr = 0;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED))
		return -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (req->cqe.user_data != cd->data &&
		    !(cd->flags & IORING_ASYNC_CANCEL_ANY))
			continue;
		if (__io_waitid_cancel(ctx, req))
			nr++;
		if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
			break;
	}
	io_ring_submit_unlock(ctx, issue_flags);

	if (nr)
		return nr;

	return -ENOENT;
}

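/*
 * Called at task exit or ring teardown to flush pending waitid requests
 * matching the given task (or all of them, if cancel_all is set).
 */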
bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
			  bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (!io_match_task_safe(req, task, cancel_all))
			continue;
		__io_waitid_cancel(ctx, req);
		found = true;
	}

	return found;
}

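/*
 * Drop the initial reference taken when the request was armed. Returns
 * false if that was the last reference, in which case the request stays
 * armed and will complete via the waitqueue callback or cancelation.
 * Returns true if a wakeup raced with us; completion is then queued up
 * as task_work from here.
 */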
static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * A wakeup triggered while we held the reference and was prevented
	 * from completing the request because of it. Queue up task_work to
	 * do so now.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}

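/*
 * Task_work callback that actually reaps the child. Runs with the ring
 * lock held via io_tw_lock(). On -ERESTARTSYS the wait is re-armed and
 * retried once, unless cancelation has been flagged in the meantime.
 */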
static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, ts);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
}

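/*
 * Wakeup callback, invoked from __wake_up_parent() off the parent's
 * wait_chldexit queue with the waitqueue lock held and the exiting task
 * as the key. Claims ownership through the refcount before queueing
 * task_work; if someone else already holds a reference, the bumped count
 * tells them to run the completion when they drop theirs.
 */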
static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* issue or cancel already owns the request, they'll complete it */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

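/*
 * waitid is expressed through otherwise unused SQE fields: len carries the
 * idtype (P_PID, P_PGID, ...), fd carries the id, file_index carries the
 * wait options (WEXITED etc.), and addr2 points at the user's siginfo.
 * liburing provides a matching io_uring_prep_waitid() helper for this
 * layout.
 */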
int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}

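/*
 * Issue path: set up the wait_opts, take the initial ownership reference,
 * add the request to the per-ring cancel list, and arm it on the parent's
 * wait_chldexit queue. A first non-blocking __do_wait() either completes
 * inline or leaves the request armed for the wakeup callback.
 */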
int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_waitid_async *iwa;
	int ret;

	if (io_alloc_async_data(req))
		return -ENOMEM;

	iwa = req->async_data;
	iwa->req = req;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so while we remain on the list it
	 * cannot find us until the lock is dropped. We only need to worry
	 * about racing with the wakeup callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * If nobody else grabbed a reference, it'll complete when we
		 * get a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * A wakeup triggered while we held the initial reference and
		 * was prevented from completing; task_work has been queued
		 * to finish the request.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}