1 | /* |
2 | * Block tests for iothreads | |
3 | * | |
4 | * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com> | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | ||
25 | #include "qemu/osdep.h" | |
26 | #include "block/block.h" | |
27 | #include "block/blockjob_int.h" | |
28 | #include "sysemu/block-backend.h" | |
29 | #include "qapi/error.h" | |
7e2f096a | 30 | #include "qapi/qmp/qdict.h" |
db725815 | 31 | #include "qemu/main-loop.h" |
4720cbee KW |
32 | #include "iothread.h" |
33 | ||
/*
 * Minimal test block driver: all I/O callbacks succeed immediately without
 * touching any real storage, so tests exercise only the block layer plumbing.
 */

/* Shared read/write stub: reports success without transferring any data. */
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}

/* Discard stub: always succeeds. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}

/* Truncate stub: always succeeds (image size is faked via total_sectors). */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset,
                      PreallocMode prealloc, Error **errp)
{
    return 0;
}

/* Block-status stub: reports the whole queried range in one go. */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_prwv,
    .bdrv_co_pwritev        = bdrv_test_co_prwv,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
74 | ||
/* Synchronous bdrv_pread(): success path and early -EIO on negative offset. */
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
88 | ||
/* Synchronous bdrv_pwrite(): success path and early -EIO on negative offset. */
static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
102 | ||
/* Same as test_sync_op_pread(), but through the BlockBackend API. */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
116 | ||
/* Same as test_sync_op_pwrite(), but through the BlockBackend API. */
static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}
130 | ||
/* bdrv_load_vmstate() on a driver without snapshot support fails -ENOTSUP. */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
140 | ||
/* bdrv_save_vmstate() on a driver without snapshot support fails -ENOTSUP. */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
150 | ||
/*
 * Synchronous bdrv_pdiscard(): covers the normal path (BDRV_O_UNMAP set),
 * the early-success path when UNMAP is disabled, and the negative-offset
 * error path.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
169 | ||
/* blk_pdiscard(): early success (UNMAP off) and negative-offset error. */
static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
182 | ||
/*
 * Synchronous bdrv_truncate(): success, -EINVAL on negative size, and
 * -EACCES on a read-only node.  Read-only state is restored afterwards so
 * later operations on the same node are unaffected.
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}
205 | ||
/*
 * Synchronous bdrv_is_allocated(): normal path plus the early-success cases
 * (no driver callback, zero bytes, offset beyond EOF).
 *
 * NOTE(review): this clears bdrv_test.bdrv_co_block_status in the shared
 * global driver struct and never restores it — presumably intentional since
 * each test case opens fresh nodes, but confirm ordering assumptions before
 * adding tests that rely on the callback being present.
 */
static void test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: No driver support */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size*/
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}
228 | ||
/*
 * Synchronous bdrv_flush(): succeeds normally and also (early return) on a
 * read-only node.  Read-only state is restored before returning.
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}
247 | ||
/* Same as test_sync_op_flush(), but through the BlockBackend API. */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->read_only = true;
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->read_only = false;
    bs->open_flags |= BDRV_O_RDWR;
}
267 | ||
/* bdrv_check() on a driver without a check callback fails with -ENOTSUP. */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
277 | ||
/* bdrv_invalidate_cache() returns early when the image is already active. */
static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}
283 | ||
284 | ||
/* One synchronous-operation test case, registered under 'name' by main(). */
typedef struct SyncOpTest {
    const char *name;                   /* g_test path, e.g. "/sync-op/pread" */
    void (*fn)(BdrvChild *c);           /* node-level variant (required) */
    void (*blkfn)(BlockBackend *blk);   /* BlockBackend variant (optional) */
} SyncOpTest;
290 | ||
291 | const SyncOpTest sync_op_tests[] = { | |
292 | { | |
293 | .name = "/sync-op/pread", | |
294 | .fn = test_sync_op_pread, | |
295 | .blkfn = test_sync_op_blk_pread, | |
296 | }, { | |
297 | .name = "/sync-op/pwrite", | |
298 | .fn = test_sync_op_pwrite, | |
299 | .blkfn = test_sync_op_blk_pwrite, | |
300 | }, { | |
301 | .name = "/sync-op/load_vmstate", | |
302 | .fn = test_sync_op_load_vmstate, | |
303 | }, { | |
304 | .name = "/sync-op/save_vmstate", | |
305 | .fn = test_sync_op_save_vmstate, | |
306 | }, { | |
307 | .name = "/sync-op/pdiscard", | |
308 | .fn = test_sync_op_pdiscard, | |
309 | .blkfn = test_sync_op_blk_pdiscard, | |
310 | }, { | |
311 | .name = "/sync-op/truncate", | |
312 | .fn = test_sync_op_truncate, | |
313 | }, { | |
314 | .name = "/sync-op/block_status", | |
315 | .fn = test_sync_op_block_status, | |
316 | }, { | |
317 | .name = "/sync-op/flush", | |
318 | .fn = test_sync_op_flush, | |
319 | .blkfn = test_sync_op_blk_flush, | |
320 | }, { | |
321 | .name = "/sync-op/check", | |
322 | .fn = test_sync_op_check, | |
323 | }, { | |
324 | .name = "/sync-op/invalidate_cache", | |
325 | .fn = test_sync_op_invalidate_cache, | |
326 | }, | |
327 | }; | |
328 | ||
/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    /* The BdrvChild created by blk_insert_bs() above */
    c = QLIST_FIRST(&bs->parents);

    /* Move the node into the iothread, run the case under its AioContext
     * lock, then move it back before tearing everything down. */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
358 | ||
93c60f38 KW |
/* Minimal block job used to test AioContext switching with a running job. */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;   /* set by test_job_complete() to stop the loop */
    int n;                  /* iteration counter, polled by the test */
} TestBlockJob;

/* .prepare runs in the main context regardless of the job's AioContext. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}

/* Job main loop: busy until completion is requested, asserting that it
 * always runs in the job's current AioContext. */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}

/* .complete: flag the run loop to finish on its next iteration. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
398 | ||
399 | BlockJobDriver test_job_driver = { | |
400 | .job_driver = { | |
401 | .instance_size = sizeof(TestBlockJob), | |
402 | .free = block_job_free, | |
403 | .user_resume = block_job_user_resume, | |
404 | .drain = block_job_drain, | |
405 | .run = test_job_run, | |
406 | .complete = test_job_complete, | |
407 | .prepare = test_job_prepare, | |
408 | }, | |
409 | }; | |
410 | ||
/*
 * Start a block job on a node in the main context, then bounce the node
 * between an iothread and the main context while the job keeps running.
 * After each switch we wait for tjob->n to advance, proving the job is
 * still making progress in its (new) AioContext.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Let the job run at least one iteration in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move node (and job) into the iothread; job must keep running there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move back to the main context (old context must be held for this) */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And into the iothread once more before completing the job there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
463 | ||
7e2f096a KW |
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]     bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /* Create bs_a and its BlockBackend */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
523 | ||
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c[raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bc_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
597 | ||
012056f4 KW |
/*
 * Test AioContext propagation through a mirror job: switching src or target
 * drags the whole job (source, target, filter node) along, and a
 * BlockBackend that doesn't allow context changes blocks the switch until
 * blk_set_allow_aio_context_change() permits it.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target*/
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    g_assert(local_err);
    error_free(local_err);

    /* Nothing may have moved */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Move everything back to the main context before cleanup */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
672 | ||
48946d7d KW |
/*
 * Attach a second node (a raw filter) to a tree whose BlockBackend already
 * lives in an iothread: the new node must inherit that AioContext, and a
 * later switch back to the main context must move both nodes.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk is created directly in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
706 | ||
2e9cdab3 KW |
/*
 * A BlockBackend keeps its iothread AioContext across node removal: the
 * detached node falls back to the main context, while re-inserting it pulls
 * the node into the BlockBackend's context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}
741 | ||
4720cbee KW |
/* Initialize the block layer and main loop, register all tests, and run. */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    /* One data-driven test per entry in the sync_op_tests table */
    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}