# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
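# Each pattern is a (byte, offset, length) tuple; e.g. ('0x41', 0, 512)
# expands to the qemu-io command "write -P0x41 0 512".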


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
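

# As a worked example (with an illustrative target path),
#   transaction_drive_backup('drive0', '/tmp/tgt.qcow2', sync='full')
# builds the following transaction element; note that underscores in the
# keyword arguments are rewritten as hyphens for QMP:
#   {'type': 'drive-backup',
#    'data': {'job-id': 'drive0', 'device': 'drive0',
#             'target': '/tmp/tgt.qcow2', 'sync': 'full'}}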


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
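    # For example, the first target/reference pair for a bitmap named
    # 'bitmap0' on 'drive0' (qcow2 format) lands at
    # <test_dir>/drive0.bitmap0.inc0.qcow2 and
    # <test_dir>/drive0.bitmap0.ref0.qcow2 respectively.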

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir,
                                    'err.%s' % iotests.imgfmt)

    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))

    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]

    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)

    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)

    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False
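
    # Success is signalled by a BLOCK_JOB_COMPLETED event that carries no
    # 'error' member and whose offset equals len; a failed job instead
    # reports the expected error string in 'error'.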

    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']

    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)

    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap

    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target

    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res

    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()

    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)

    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file work.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        result = self.vm.qmp('query-block')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/persistent', False)
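        # Why 458752: the three writes above dirty seven 64KiB granules in
        # total (1 for the 512-byte write at 0, 4 for the 256KiB write at
        # 16M, and 2 for the 64KiB write straddling 32M), and
        # 7 * 65536 == 458752.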

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''
        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
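        # With this blkdebug configuration, the first flush_to_disk event
        # advances the node from state 1 to state 2, and in state 2 the next
        # read_aio fails exactly once with EIO (errno 5): the first backup
        # job to read from drive1 will hit a single read error.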

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})
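        # Under completion-mode 'grouped', the failure of any job in the
        # transaction cancels all of its sibling jobs, which is why drive0's
        # otherwise healthy backup is expected to be cancelled below.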

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the exact same transaction with fresh targets:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to
        each drive. Use blkdebug to interfere with the backup on just one
        drive and attempt to create a coherent incremental backup across both
        drives.

        Verify a failure in one but not both, then delete the failed stubs
        and re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the
        entire transaction job group.
        '''
        self.do_transaction_failure_test(race=True)

    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''
        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution the VM would be
        # resumed; assume that it begins issuing IO requests here.
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        '''
        Test an incremental backup that errors into a pause and is resumed.
        '''
        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read, read" pattern.
        # The flush occurs in hmp_io_writes, the first read in device_add, and
        # the last read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }, {
                                     'event': 'read_aio',
                                     'state': 2,
                                     'new_state': 3
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 3,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # For the purposes of query-block visibility of bitmaps, add a drive
        # frontend after we've written data; otherwise we can't use hmp-io
        result = self.vm.qmp("device_add",
                             id="device0",
                             drive=drive0['id'],
                             driver="virtio-blk")
        self.assert_qmp(result, 'return', {})

        # Bitmap Status Check
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 458752)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'active')
        self.assert_qmp(ret, 'busy', False)
        self.assert_qmp(ret, 'recording', True)

        # Start the incremental backup; it will stop (pause) on the source
        # read error injected above.
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error to pause the job.
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device":
                                                   bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check: the job is paused, the bitmap is in use.
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 458752)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'frozen')
        self.assert_qmp(ret, 'busy', True)
        self.assert_qmp(ret, 'recording', True)
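        # While the job owns the bitmap it is reported as busy and 'frozen':
        # its contents are pinned for the duration of the job and new guest
        # writes are tracked separately (in an anonymous successor bitmap),
        # so the count above is unchanged despite the paused job.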

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check: the completed job has reset the bitmap.
        query = self.vm.qmp('query-block')
        ret = [bmap for bmap in query['return'][0]['dirty-bitmaps']
               if bmap.get('name') == bitmap.name][0]
        self.assert_qmp(ret, 'count', 0)
        self.assert_qmp(ret, 'granularity', 65536)
        self.assert_qmp(ret, 'status', 'active')
        self.assert_qmp(ret, 'busy', False)
        self.assert_qmp(ret, 'recording', True)

        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])