/*
 * AioContext tests
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"

static AioContext *ctx;

typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;

/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}

/* Simple callbacks for testing. */

typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;

static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}

static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}

static void dummy_io_handler_read(EventNotifier *e)
{
}

static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}

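/* Handler shared by the event notifier tests: count invocations in ->n,
 * decrement ->active, and (when ->auto_set) re-arm the notifier so that it
 * keeps firing until ->active reaches zero.
 */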
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}

/* Tests using aio_*. */

typedef struct {
    QemuMutex start_lock;
    EventNotifier notifier;
    bool thread_acquired;
} AcquireTestData;

static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* event_notifier_set might be called either before or after
     * the main thread's call to poll(). The test case's outcome
     * should be the same in either case.
     */
    event_notifier_set(&data->notifier);
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}

static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(ctx, notifier, false, handler, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}

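/* While this thread holds the AioContext and blocks in aio_poll(), a second
 * thread's aio_context_acquire() must not succeed; it can only complete once
 * we release the context.
 */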
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}

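/* A scheduled bottom half runs exactly once: the first poll makes progress,
 * a second non-blocking poll finds nothing left to do.
 */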
static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

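/* bh_test_cb reschedules the bottom half from its own callback until ->max
 * is reached, so each aio_poll() call runs it exactly once.
 */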
static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

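/* qemu_bh_cancel() unschedules a pending bottom half without freeing it, so
 * a later poll must not run the callback.
 */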
static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

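/* qemu_bh_delete() on a bottom half that is still scheduled must prevent it
 * from ever running.
 */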
static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}

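/* A bottom half may delete itself from within its own callback; bh_delete_cb
 * clears data->bh once ->max invocations have happened.
 */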
static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}

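/* Same as above, but with several self-deleting bottom halves of different
 * lifetimes pending at the same time.
 */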
static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

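/* Adding and then removing an event notifier handler must not invoke the
 * callback if the notifier is never set.
 */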
static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

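/* A set notifier fires its handler exactly once; after the handler clears it,
 * further polls make no progress.
 */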
static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

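/* With ->auto_set the handler re-arms the notifier after each invocation, so
 * polling keeps making progress until ->active drops to zero.
 */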
static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}

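/* Handlers registered as external (is_external == true) are not dispatched
 * while aio_disable_external() is in effect; they fire again once every
 * disable call has been paired with aio_enable_external().
 */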
static void test_aio_external_client(void)
{
    int i, j;

    for (i = 1; i < 3; i++) {
        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
        event_notifier_init(&data.e, false);
        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL);
        event_notifier_set(&data.e);
        for (j = 0; j < i; j++) {
            aio_disable_external(ctx);
        }
        for (j = 0; j < i; j++) {
            assert(!aio_poll(ctx, false));
            assert(event_notifier_test_and_clear(&data.e));
            event_notifier_set(&data.e);
            aio_enable_external(ctx);
        }
        assert(aio_poll(ctx, false));
        set_event_notifier(ctx, &data.e, NULL);
        event_notifier_cleanup(&data.e);
    }
}

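/* data's notifier has no "active" work of its own, so aio_poll() is free to
 * skip its handler until the dummy notifier provides an active descriptor
 * that keeps event notifiers in the dispatch set.
 */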
static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

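/* Arm a 750 ms timer on the AioContext: polls before the deadline must not
 * fire it, while polls after the deadline (and a blocking poll for the second
 * expiry) run timer_test_cb once per expiry up to ->max.
 */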
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/* Now the same tests, using the context as a GSource. They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll. However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true))" seems to work,
 *   but it is not documented _why_ it works. For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false))"
 *   works well, and that's what I am using.
 */

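/* aio_notify() must wake the glib main loop: at least one iteration makes
 * progress, and afterwards the source is idle again.
 */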
static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    assert(g_main_context_iteration(NULL, false));
    assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

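/* GSource counterpart of test_timer_schedule: the timer is driven by
 * g_main_context_iteration(), and the second expiry must not happen before
 * the expected deadline.
 */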
static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion. In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    unsigned i;
    unsigned max;
} ChainData;

static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}

static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}

/* End of tests. */

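/* Most tests are registered twice: under /aio/ for the native aio_poll()
 * path and under /aio-gsource/ for the GSource integration.
 */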
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/acquire",                 test_acquire);
    g_test_add_func("/aio/bh/schedule",             test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10",           test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel",               test_bh_cancel);
    g_test_add_func("/aio/bh/delete",               test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one",  test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush",                test_bh_flush);
    g_test_add_func("/aio/event/add-remove",        test_set_event_notifier);
    g_test_add_func("/aio/event/wait",              test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb",  test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush",             test_flush_event_notifier);
    g_test_add_func("/aio/external-client",         test_aio_external_client);
    g_test_add_func("/aio/timer/schedule",          test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);

    g_test_add_func("/aio-gsource/flush",                   test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule",             test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10",           test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel",               test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete",               test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one",  test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush",                test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove",        test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait",              test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb",  test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush",             test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule",          test_source_timer_schedule);
    return g_test_run();
}