/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removal
     * of nodes and edges from block graph while some
     * other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
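
/*
 * A minimal creation sketch (illustrative only; the error handling shown
 * assumes the usual Error reporting helpers from the QEMU error API):
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *     }
 */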

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
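
/*
 * A sketch of the ownership pattern described above, for a thread that
 * runs the event loop of a shared AioContext ("done" is a hypothetical
 * termination flag):
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 */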

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
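
/*
 * Usage sketch; my_cb and my_state are hypothetical names:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...   // runs once in ctx's event loop, then the BH is freed
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */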

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.  Note that
 * multiple aio_bh_poll() calls must not run concurrently on the same
 * AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only freed once the event
 * loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
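
/*
 * A typical bottom half lifecycle, as a sketch (my_cb and s are
 * hypothetical names):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);    // my_cb(s) runs from ctx's event loop
 *     ...
 *     qemu_bh_delete(bh);      // cancels if still pending, then frees
 */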

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress was made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
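
/*
 * One common pattern is to loop over aio_poll() until an asynchronous
 * operation signals completion (a sketch; s->done is a hypothetical flag
 * set by a completion callback):
 *
 *     while (!s->done) {
 *         aio_poll(qemu_get_current_aio_context(), true);
 *     }
 */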

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
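
/*
 * Registration sketch for a readable file descriptor; the handler name is
 * hypothetical:
 *
 *     static void my_read_handler(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...   // called from aio_poll() when s->fd is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, s->fd, true, my_read_handler, NULL,
 *                        NULL, NULL, s);
 *
 * Passing NULL for all callbacks unregisters the file descriptor again:
 *
 *     aio_set_fd_handler(ctx, s->fd, true, NULL, NULL, NULL, NULL, NULL);
 */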

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
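
/*
 * Registration sketch, mirroring aio_set_fd_handler(); my_notifier_cb is
 * a hypothetical EventNotifierHandler:
 *
 *     aio_set_event_notifier(ctx, &s->notifier, false,
 *                            my_notifier_cb, NULL, NULL);
 */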

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
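
/*
 * Sketch: arm a timer that fires 10 ms from now on @ctx (my_timer_cb is a
 * hypothetical QEMUTimerCB):
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, s);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 10);
 */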

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
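
/*
 * These calls nest, so external processing resumes only once every
 * aio_disable_external() has been paired with aio_enable_external().
 * A typical guard sketch:
 *
 *     aio_disable_external(ctx);
 *     ...   // external clients (e.g. guest-driven fds) are not polled here
 *     aio_enable_external(ctx);
 */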

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);
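
/*
 * Sketch: create a coroutine and hand it to another context's event loop
 * (my_co_fn is a hypothetical coroutine_fn; qemu_coroutine_create comes
 * from qemu/coroutine.h):
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, s);
 *     aio_co_schedule(iothread_ctx, co);
 */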

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif