/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/blockjob_int.h"
28 #include "sysemu/block-backend.h"
29 #include "qapi/error.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qemu/main-loop.h"
34 static int coroutine_fn
bdrv_test_co_preadv(BlockDriverState
*bs
,
35 int64_t offset
, int64_t bytes
,
37 BdrvRequestFlags flags
)
42 static int coroutine_fn
bdrv_test_co_pwritev(BlockDriverState
*bs
,
43 int64_t offset
, int64_t bytes
,
45 BdrvRequestFlags flags
)
50 static int coroutine_fn
bdrv_test_co_pdiscard(BlockDriverState
*bs
,
51 int64_t offset
, int64_t bytes
)
56 static int coroutine_fn
57 bdrv_test_co_truncate(BlockDriverState
*bs
, int64_t offset
, bool exact
,
58 PreallocMode prealloc
, BdrvRequestFlags flags
,
64 static int coroutine_fn
bdrv_test_co_block_status(BlockDriverState
*bs
,
66 int64_t offset
, int64_t count
,
67 int64_t *pnum
, int64_t *map
,
68 BlockDriverState
**file
)
74 static BlockDriver bdrv_test
= {
75 .format_name
= "test",
78 .bdrv_co_preadv
= bdrv_test_co_preadv
,
79 .bdrv_co_pwritev
= bdrv_test_co_pwritev
,
80 .bdrv_co_pdiscard
= bdrv_test_co_pdiscard
,
81 .bdrv_co_truncate
= bdrv_test_co_truncate
,
82 .bdrv_co_block_status
= bdrv_test_co_block_status
,
85 static void test_sync_op_pread(BdrvChild
*c
)
91 ret
= bdrv_pread(c
, 0, buf
, sizeof(buf
));
92 g_assert_cmpint(ret
, ==, 512);
94 /* Early error: Negative offset */
95 ret
= bdrv_pread(c
, -2, buf
, sizeof(buf
));
96 g_assert_cmpint(ret
, ==, -EIO
);
99 static void test_sync_op_pwrite(BdrvChild
*c
)
101 uint8_t buf
[512] = { 0 };
105 ret
= bdrv_pwrite(c
, 0, buf
, sizeof(buf
));
106 g_assert_cmpint(ret
, ==, 512);
108 /* Early error: Negative offset */
109 ret
= bdrv_pwrite(c
, -2, buf
, sizeof(buf
));
110 g_assert_cmpint(ret
, ==, -EIO
);
113 static void test_sync_op_blk_pread(BlockBackend
*blk
)
119 ret
= blk_pread(blk
, 0, buf
, sizeof(buf
));
120 g_assert_cmpint(ret
, ==, 512);
122 /* Early error: Negative offset */
123 ret
= blk_pread(blk
, -2, buf
, sizeof(buf
));
124 g_assert_cmpint(ret
, ==, -EIO
);
127 static void test_sync_op_blk_pwrite(BlockBackend
*blk
)
129 uint8_t buf
[512] = { 0 };
133 ret
= blk_pwrite(blk
, 0, buf
, sizeof(buf
), 0);
134 g_assert_cmpint(ret
, ==, 512);
136 /* Early error: Negative offset */
137 ret
= blk_pwrite(blk
, -2, buf
, sizeof(buf
), 0);
138 g_assert_cmpint(ret
, ==, -EIO
);
141 static void test_sync_op_load_vmstate(BdrvChild
*c
)
146 /* Error: Driver does not support snapshots */
147 ret
= bdrv_load_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
148 g_assert_cmpint(ret
, ==, -ENOTSUP
);
151 static void test_sync_op_save_vmstate(BdrvChild
*c
)
153 uint8_t buf
[512] = { 0 };
156 /* Error: Driver does not support snapshots */
157 ret
= bdrv_save_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
158 g_assert_cmpint(ret
, ==, -ENOTSUP
);
161 static void test_sync_op_pdiscard(BdrvChild
*c
)
165 /* Normal success path */
166 c
->bs
->open_flags
|= BDRV_O_UNMAP
;
167 ret
= bdrv_pdiscard(c
, 0, 512);
168 g_assert_cmpint(ret
, ==, 0);
170 /* Early success: UNMAP not supported */
171 c
->bs
->open_flags
&= ~BDRV_O_UNMAP
;
172 ret
= bdrv_pdiscard(c
, 0, 512);
173 g_assert_cmpint(ret
, ==, 0);
175 /* Early error: Negative offset */
176 ret
= bdrv_pdiscard(c
, -2, 512);
177 g_assert_cmpint(ret
, ==, -EIO
);
180 static void test_sync_op_blk_pdiscard(BlockBackend
*blk
)
184 /* Early success: UNMAP not supported */
185 ret
= blk_pdiscard(blk
, 0, 512);
186 g_assert_cmpint(ret
, ==, 0);
188 /* Early error: Negative offset */
189 ret
= blk_pdiscard(blk
, -2, 512);
190 g_assert_cmpint(ret
, ==, -EIO
);
193 static void test_sync_op_truncate(BdrvChild
*c
)
197 /* Normal success path */
198 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
199 g_assert_cmpint(ret
, ==, 0);
201 /* Early error: Negative offset */
202 ret
= bdrv_truncate(c
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
203 g_assert_cmpint(ret
, ==, -EINVAL
);
205 /* Error: Read-only image */
206 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
208 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
209 g_assert_cmpint(ret
, ==, -EACCES
);
211 c
->bs
->open_flags
|= BDRV_O_RDWR
;
214 static void test_sync_op_block_status(BdrvChild
*c
)
219 /* Normal success path */
220 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
221 g_assert_cmpint(ret
, ==, 0);
223 /* Early success: No driver support */
224 bdrv_test
.bdrv_co_block_status
= NULL
;
225 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
226 g_assert_cmpint(ret
, ==, 1);
228 /* Early success: bytes = 0 */
229 ret
= bdrv_is_allocated(c
->bs
, 0, 0, &n
);
230 g_assert_cmpint(ret
, ==, 0);
232 /* Early success: Offset > image size*/
233 ret
= bdrv_is_allocated(c
->bs
, 0x1000000, 0x1000000, &n
);
234 g_assert_cmpint(ret
, ==, 0);
237 static void test_sync_op_flush(BdrvChild
*c
)
241 /* Normal success path */
242 ret
= bdrv_flush(c
->bs
);
243 g_assert_cmpint(ret
, ==, 0);
245 /* Early success: Read-only image */
246 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
248 ret
= bdrv_flush(c
->bs
);
249 g_assert_cmpint(ret
, ==, 0);
251 c
->bs
->open_flags
|= BDRV_O_RDWR
;
254 static void test_sync_op_blk_flush(BlockBackend
*blk
)
256 BlockDriverState
*bs
= blk_bs(blk
);
259 /* Normal success path */
260 ret
= blk_flush(blk
);
261 g_assert_cmpint(ret
, ==, 0);
263 /* Early success: Read-only image */
264 bs
->open_flags
&= ~BDRV_O_RDWR
;
266 ret
= blk_flush(blk
);
267 g_assert_cmpint(ret
, ==, 0);
269 bs
->open_flags
|= BDRV_O_RDWR
;
272 static void test_sync_op_check(BdrvChild
*c
)
274 BdrvCheckResult result
;
277 /* Error: Driver does not implement check */
278 ret
= bdrv_check(c
->bs
, &result
, 0);
279 g_assert_cmpint(ret
, ==, -ENOTSUP
);
282 static void test_sync_op_invalidate_cache(BdrvChild
*c
)
284 /* Early success: Image is not inactive */
285 bdrv_invalidate_cache(c
->bs
, NULL
);
289 typedef struct SyncOpTest
{
291 void (*fn
)(BdrvChild
*c
);
292 void (*blkfn
)(BlockBackend
*blk
);
295 const SyncOpTest sync_op_tests
[] = {
297 .name
= "/sync-op/pread",
298 .fn
= test_sync_op_pread
,
299 .blkfn
= test_sync_op_blk_pread
,
301 .name
= "/sync-op/pwrite",
302 .fn
= test_sync_op_pwrite
,
303 .blkfn
= test_sync_op_blk_pwrite
,
305 .name
= "/sync-op/load_vmstate",
306 .fn
= test_sync_op_load_vmstate
,
308 .name
= "/sync-op/save_vmstate",
309 .fn
= test_sync_op_save_vmstate
,
311 .name
= "/sync-op/pdiscard",
312 .fn
= test_sync_op_pdiscard
,
313 .blkfn
= test_sync_op_blk_pdiscard
,
315 .name
= "/sync-op/truncate",
316 .fn
= test_sync_op_truncate
,
318 .name
= "/sync-op/block_status",
319 .fn
= test_sync_op_block_status
,
321 .name
= "/sync-op/flush",
322 .fn
= test_sync_op_flush
,
323 .blkfn
= test_sync_op_blk_flush
,
325 .name
= "/sync-op/check",
326 .fn
= test_sync_op_check
,
328 .name
= "/sync-op/invalidate_cache",
329 .fn
= test_sync_op_invalidate_cache
,
333 /* Test synchronous operations that run in a different iothread, so we have to
334 * poll for the coroutine there to return. */
335 static void test_sync_op(const void *opaque
)
337 const SyncOpTest
*t
= opaque
;
338 IOThread
*iothread
= iothread_new();
339 AioContext
*ctx
= iothread_get_aio_context(iothread
);
341 BlockDriverState
*bs
;
344 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
345 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
346 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
347 blk_insert_bs(blk
, bs
, &error_abort
);
348 c
= QLIST_FIRST(&bs
->parents
);
350 blk_set_aio_context(blk
, ctx
, &error_abort
);
351 aio_context_acquire(ctx
);
356 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
357 aio_context_release(ctx
);
363 typedef struct TestBlockJob
{
365 bool should_complete
;
369 static int test_job_prepare(Job
*job
)
371 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
375 static int coroutine_fn
test_job_run(Job
*job
, Error
**errp
)
377 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
379 job_transition_to_ready(&s
->common
.job
);
380 while (!s
->should_complete
) {
382 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
384 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
385 * emulate some actual activity (probably some I/O) here so that the
386 * drain involved in AioContext switches has to wait for this activity
388 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME
, 1000000);
390 job_pause_point(&s
->common
.job
);
393 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
397 static void test_job_complete(Job
*job
, Error
**errp
)
399 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
400 s
->should_complete
= true;
403 BlockJobDriver test_job_driver
= {
405 .instance_size
= sizeof(TestBlockJob
),
406 .free
= block_job_free
,
407 .user_resume
= block_job_user_resume
,
409 .complete
= test_job_complete
,
410 .prepare
= test_job_prepare
,
414 static void test_attach_blockjob(void)
416 IOThread
*iothread
= iothread_new();
417 AioContext
*ctx
= iothread_get_aio_context(iothread
);
419 BlockDriverState
*bs
;
422 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
423 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
424 blk_insert_bs(blk
, bs
, &error_abort
);
426 tjob
= block_job_create("job0", &test_job_driver
, NULL
, bs
,
428 0, 0, NULL
, NULL
, &error_abort
);
429 job_start(&tjob
->common
.job
);
431 while (tjob
->n
== 0) {
432 aio_poll(qemu_get_aio_context(), false);
435 blk_set_aio_context(blk
, ctx
, &error_abort
);
438 while (tjob
->n
== 0) {
439 aio_poll(qemu_get_aio_context(), false);
442 aio_context_acquire(ctx
);
443 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
444 aio_context_release(ctx
);
447 while (tjob
->n
== 0) {
448 aio_poll(qemu_get_aio_context(), false);
451 blk_set_aio_context(blk
, ctx
, &error_abort
);
454 while (tjob
->n
== 0) {
455 aio_poll(qemu_get_aio_context(), false);
458 aio_context_acquire(ctx
);
459 job_complete_sync(&tjob
->common
.job
, &error_abort
);
460 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
461 aio_context_release(ctx
);
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]    bs_b [bdrv_test]
 */
479 static void test_propagate_basic(void)
481 IOThread
*iothread
= iothread_new();
482 AioContext
*ctx
= iothread_get_aio_context(iothread
);
483 AioContext
*main_ctx
;
485 BlockDriverState
*bs_a
, *bs_b
, *bs_verify
;
489 * Create bs_a and its BlockBackend. We cannot take the RESIZE
490 * permission because blkverify will not share it on the test
493 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
495 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
496 blk_insert_bs(blk
, bs_a
, &error_abort
);
499 bs_b
= bdrv_new_open_driver(&bdrv_test
, "bs_b", BDRV_O_RDWR
, &error_abort
);
501 /* Create blkverify filter that references both bs_a and bs_b */
502 options
= qdict_new();
503 qdict_put_str(options
, "driver", "blkverify");
504 qdict_put_str(options
, "test", "bs_a");
505 qdict_put_str(options
, "raw", "bs_b");
507 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
509 /* Switch the AioContext */
510 blk_set_aio_context(blk
, ctx
, &error_abort
);
511 g_assert(blk_get_aio_context(blk
) == ctx
);
512 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
513 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
514 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
516 /* Switch the AioContext back */
517 main_ctx
= qemu_get_aio_context();
518 aio_context_acquire(ctx
);
519 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
520 aio_context_release(ctx
);
521 g_assert(blk_get_aio_context(blk
) == main_ctx
);
522 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
523 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
524 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
526 bdrv_unref(bs_verify
);
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *  bs_b [raw]         bs_c[raw]
 *       \              /
 *        bs_a [bdrv_test]
 */
545 static void test_propagate_diamond(void)
547 IOThread
*iothread
= iothread_new();
548 AioContext
*ctx
= iothread_get_aio_context(iothread
);
549 AioContext
*main_ctx
;
551 BlockDriverState
*bs_a
, *bs_b
, *bs_c
, *bs_verify
;
555 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
557 /* Create bs_b and bc_c */
558 options
= qdict_new();
559 qdict_put_str(options
, "driver", "raw");
560 qdict_put_str(options
, "file", "bs_a");
561 qdict_put_str(options
, "node-name", "bs_b");
562 bs_b
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
564 options
= qdict_new();
565 qdict_put_str(options
, "driver", "raw");
566 qdict_put_str(options
, "file", "bs_a");
567 qdict_put_str(options
, "node-name", "bs_c");
568 bs_c
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
570 /* Create blkverify filter that references both bs_b and bs_c */
571 options
= qdict_new();
572 qdict_put_str(options
, "driver", "blkverify");
573 qdict_put_str(options
, "test", "bs_b");
574 qdict_put_str(options
, "raw", "bs_c");
576 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
578 * Do not take the RESIZE permission: This would require the same
579 * from bs_c and thus from bs_a; however, blkverify will not share
580 * it on bs_b, and thus it will not be available for bs_a.
582 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
584 blk_insert_bs(blk
, bs_verify
, &error_abort
);
586 /* Switch the AioContext */
587 blk_set_aio_context(blk
, ctx
, &error_abort
);
588 g_assert(blk_get_aio_context(blk
) == ctx
);
589 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
590 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
591 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
592 g_assert(bdrv_get_aio_context(bs_c
) == ctx
);
594 /* Switch the AioContext back */
595 main_ctx
= qemu_get_aio_context();
596 aio_context_acquire(ctx
);
597 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
598 aio_context_release(ctx
);
599 g_assert(blk_get_aio_context(blk
) == main_ctx
);
600 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
601 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
602 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
603 g_assert(bdrv_get_aio_context(bs_c
) == main_ctx
);
606 bdrv_unref(bs_verify
);
612 static void test_propagate_mirror(void)
614 IOThread
*iothread
= iothread_new();
615 AioContext
*ctx
= iothread_get_aio_context(iothread
);
616 AioContext
*main_ctx
= qemu_get_aio_context();
617 BlockDriverState
*src
, *target
, *filter
;
620 Error
*local_err
= NULL
;
622 /* Create src and target*/
623 src
= bdrv_new_open_driver(&bdrv_test
, "src", BDRV_O_RDWR
, &error_abort
);
624 target
= bdrv_new_open_driver(&bdrv_test
, "target", BDRV_O_RDWR
,
627 /* Start a mirror job */
628 mirror_start("job0", src
, target
, NULL
, JOB_DEFAULT
, 0, 0, 0,
629 MIRROR_SYNC_MODE_NONE
, MIRROR_OPEN_BACKING_CHAIN
, false,
630 BLOCKDEV_ON_ERROR_REPORT
, BLOCKDEV_ON_ERROR_REPORT
,
631 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND
,
633 job
= job_get("job0");
634 filter
= bdrv_find_node("filter_node");
636 /* Change the AioContext of src */
637 bdrv_try_set_aio_context(src
, ctx
, &error_abort
);
638 g_assert(bdrv_get_aio_context(src
) == ctx
);
639 g_assert(bdrv_get_aio_context(target
) == ctx
);
640 g_assert(bdrv_get_aio_context(filter
) == ctx
);
641 g_assert(job
->aio_context
== ctx
);
643 /* Change the AioContext of target */
644 aio_context_acquire(ctx
);
645 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
646 aio_context_release(ctx
);
647 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
648 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
649 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
651 /* With a BlockBackend on src, changing target must fail */
652 blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
653 blk_insert_bs(blk
, src
, &error_abort
);
655 bdrv_try_set_aio_context(target
, ctx
, &local_err
);
656 error_free_or_abort(&local_err
);
658 g_assert(blk_get_aio_context(blk
) == main_ctx
);
659 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
660 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
661 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
663 /* ...unless we explicitly allow it */
664 aio_context_acquire(ctx
);
665 blk_set_allow_aio_context_change(blk
, true);
666 bdrv_try_set_aio_context(target
, ctx
, &error_abort
);
667 aio_context_release(ctx
);
669 g_assert(blk_get_aio_context(blk
) == ctx
);
670 g_assert(bdrv_get_aio_context(src
) == ctx
);
671 g_assert(bdrv_get_aio_context(target
) == ctx
);
672 g_assert(bdrv_get_aio_context(filter
) == ctx
);
674 job_cancel_sync_all();
676 aio_context_acquire(ctx
);
677 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
678 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
679 aio_context_release(ctx
);
686 static void test_attach_second_node(void)
688 IOThread
*iothread
= iothread_new();
689 AioContext
*ctx
= iothread_get_aio_context(iothread
);
690 AioContext
*main_ctx
= qemu_get_aio_context();
692 BlockDriverState
*bs
, *filter
;
695 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
696 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
697 blk_insert_bs(blk
, bs
, &error_abort
);
699 options
= qdict_new();
700 qdict_put_str(options
, "driver", "raw");
701 qdict_put_str(options
, "file", "base");
703 filter
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
704 g_assert(blk_get_aio_context(blk
) == ctx
);
705 g_assert(bdrv_get_aio_context(bs
) == ctx
);
706 g_assert(bdrv_get_aio_context(filter
) == ctx
);
708 aio_context_acquire(ctx
);
709 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
710 aio_context_release(ctx
);
711 g_assert(blk_get_aio_context(blk
) == main_ctx
);
712 g_assert(bdrv_get_aio_context(bs
) == main_ctx
);
713 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
720 static void test_attach_preserve_blk_ctx(void)
722 IOThread
*iothread
= iothread_new();
723 AioContext
*ctx
= iothread_get_aio_context(iothread
);
725 BlockDriverState
*bs
;
727 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
728 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
729 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
731 /* Add node to BlockBackend that has an iothread context assigned */
732 blk_insert_bs(blk
, bs
, &error_abort
);
733 g_assert(blk_get_aio_context(blk
) == ctx
);
734 g_assert(bdrv_get_aio_context(bs
) == ctx
);
736 /* Remove the node again */
737 aio_context_acquire(ctx
);
739 aio_context_release(ctx
);
740 g_assert(blk_get_aio_context(blk
) == ctx
);
741 g_assert(bdrv_get_aio_context(bs
) == qemu_get_aio_context());
743 /* Re-attach the node */
744 blk_insert_bs(blk
, bs
, &error_abort
);
745 g_assert(blk_get_aio_context(blk
) == ctx
);
746 g_assert(bdrv_get_aio_context(bs
) == ctx
);
748 aio_context_acquire(ctx
);
749 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
750 aio_context_release(ctx
);
755 int main(int argc
, char **argv
)
760 qemu_init_main_loop(&error_abort
);
762 g_test_init(&argc
, &argv
, NULL
);
764 for (i
= 0; i
< ARRAY_SIZE(sync_op_tests
); i
++) {
765 const SyncOpTest
*t
= &sync_op_tests
[i
];
766 g_test_add_data_func(t
->name
, t
, test_sync_op
);
769 g_test_add_func("/attach/blockjob", test_attach_blockjob
);
770 g_test_add_func("/attach/second_node", test_attach_second_node
);
771 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx
);
772 g_test_add_func("/propagate/basic", test_propagate_basic
);
773 g_test_add_func("/propagate/diamond", test_propagate_diamond
);
774 g_test_add_func("/propagate/mirror", test_propagate_mirror
);