#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK = 0,
	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that the multishot request should be removed and
	 * that the result has been set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT = -ECANCELED,
};

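/*
 * Illustrative sketch, not part of the kernel source: how an opcode handler
 * might map a result onto the return codes above. The helper
 * io_example_multishot_result() is hypothetical and exists only for
 * illustration; a terminal error on a multishot request is reported in
 * req->cqe.res and the poll runner is asked to drop the request by
 * returning IOU_STOP_MULTISHOT.
 */
static inline int io_example_multishot_result(struct io_kiocb *req, int err)
{
	if (!err)
		return IOU_OK;
	/* report the error and stop re-arming the multishot request */
	req->cqe.res = err;
	return IOU_STOP_MULTISHOT;
}
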
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

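/*
 * Illustrative sketch, not part of the kernel source: walking a request's
 * link chain with io_for_each_link(). The helper io_example_link_len() is
 * hypothetical and exists only to show the iteration pattern.
 */
static inline unsigned int io_example_link_len(struct io_kiocb *head)
{
	struct io_kiocb *pos;
	unsigned int len = 0;

	io_for_each_link(pos, head)
		len++;
	return len;
}
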
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
							bool overflow)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

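/*
 * Illustrative sketch, not part of the kernel source: how the helpers above
 * combine into a locked completion post. io_example_post_cqe() is
 * hypothetical; req->cqe is assumed to already carry the result (see
 * io_req_set_res() below).
 */
static inline void io_example_post_cqe(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_cq_lock(ctx);
	/* __io_fill_cqe_req() falls back to the overflow path if the CQ is full */
	__io_fill_cqe_req(ctx, req);
	io_cq_unlock_post(ctx);
}
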
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when the request has been detached
	 * and is issued from an async worker thread; grab the lock for that
	 * case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

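/*
 * Illustrative sketch, not part of the kernel source: the usual pattern for
 * code that may run either inline (uring_lock already held) or from an
 * unlocked async worker. io_example_with_submit_lock() is hypothetical.
 */
static inline void io_example_with_submit_lock(struct io_ring_ctx *ctx,
					       unsigned issue_flags)
{
	io_ring_submit_lock(ctx, issue_flags);
	/* ctx->uring_lock is held here regardless of how we were invoked */
	io_ring_submit_unlock(ctx, issue_flags);
}
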
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

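/*
 * Illustrative sketch, not part of the kernel source: after CQEs have been
 * filled, the new tail is published and waiters are woken.
 * io_example_commit_and_wake() is hypothetical; io_cqring_wake() supplies
 * the smp_mb() that __io_cqring_wake() documents as required.
 */
static inline void io_example_commit_and_wake(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	io_cqring_wake(ctx);
}
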
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline int io_run_task_work(void)
{
	if (task_work_pending(current)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
	       !wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* run this afterwards in case more task_work is added */
	ret2 = io_run_task_work();

	/*
	 * Prefer reporting an error over reporting that tasks were run, but
	 * still make sure the task_work is run if requested.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

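/*
 * Illustrative sketch, not part of the kernel source: an issue path that
 * batches the completion when IO_URING_F_COMPLETE_DEFER is set and posts it
 * directly otherwise. io_example_complete() is hypothetical.
 */
static inline int io_example_complete(struct io_kiocb *req, s32 res,
				      unsigned issue_flags)
{
	io_req_set_res(req, res, 0);
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		io_req_complete_defer(req);
	else
		io_req_complete_post(req);
	return IOU_OK;
}
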
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

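/*
 * Illustrative sketch, not part of the kernel source: the allocation fast
 * path pairs io_alloc_req_refill() with io_alloc_req(), so the free list is
 * only dipped into once the cache is known to be non-empty.
 * io_example_get_req() is hypothetical.
 */
static inline struct io_kiocb *io_example_get_req(struct io_ring_ctx *ctx)
{
	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	return io_alloc_req(ctx);
}
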
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif