3 # Tests for IO throttling
5 # Copyright (C) 2015 Red Hat, Inc.
6 # Copyright (C) 2015-2016 Igalia, S.L.
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
18 # You should have received a copy of the GNU General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# Nanoseconds per second: converts test durations given in seconds into the
# nanosecond counts expected by qtest's "clock_step" command.
nsec_per_sec = 1000 * 1000 * 1000
26 class ThrottleTestCase(iotests
.QMPTestCase
):
27 test_img
= "null-aio://"
30 def blockstats(self
, device
):
31 result
= self
.vm
.qmp("query-blockstats")
32 for r
in result
['return']:
33 if r
['device'] == device
:
35 return stat
['rd_bytes'], stat
['rd_operations'], stat
['wr_bytes'], stat
['wr_operations']
36 raise Exception("Device not found for blockstats: %s" % device
)
39 self
.vm
= iotests
.VM()
40 for i
in range(0, self
.max_drives
):
41 self
.vm
.add_drive(self
.test_img
, "file.read-zeroes=on")
47 def configure_throttle(self
, ndrives
, params
):
48 params
['group'] = 'test'
50 # Set the I/O throttling parameters to all drives
51 for i
in range(0, ndrives
):
52 params
['device'] = 'drive%d' % i
53 result
= self
.vm
.qmp("block_set_io_throttle", conv_keys
=False, **params
)
54 self
.assert_qmp(result
, 'return', {})
56 def do_test_throttle(self
, ndrives
, seconds
, params
, first_drive
= 0):
57 def check_limit(limit
, num
):
58 # IO throttling algorithm is discrete, allow 10% error so the test
60 return limit
== 0 or \
61 (num
< seconds
* limit
* 1.1 / ndrives
62 and num
> seconds
* limit
* 0.9 / ndrives
)
64 # Set vm clock to a known value
65 ns
= seconds
* nsec_per_sec
66 self
.vm
.qtest("clock_step %d" % ns
)
68 # Submit enough requests so the throttling mechanism kicks
69 # in. The throttled requests won't be executed until we
70 # advance the virtual clock.
72 rd_nr
= max(params
['bps'] // rq_size
// 2,
73 params
['bps_rd'] // rq_size
,
78 wr_nr
= max(params
['bps'] // rq_size
// 2,
79 params
['bps_wr'] // rq_size
,
85 # Send I/O requests to all drives
86 for i
in range(rd_nr
):
87 for drive
in range(0, ndrives
):
88 idx
= first_drive
+ drive
89 self
.vm
.hmp_qemu_io("drive%d" % idx
, "aio_read %d %d" %
90 (i
* rq_size
, rq_size
))
92 for i
in range(wr_nr
):
93 for drive
in range(0, ndrives
):
94 idx
= first_drive
+ drive
95 self
.vm
.hmp_qemu_io("drive%d" % idx
, "aio_write %d %d" %
96 (i
* rq_size
, rq_size
))
98 # We'll store the I/O stats for each drive in these arrays
99 start_rd_bytes
= [0] * ndrives
100 start_rd_iops
= [0] * ndrives
101 start_wr_bytes
= [0] * ndrives
102 start_wr_iops
= [0] * ndrives
103 end_rd_bytes
= [0] * ndrives
104 end_rd_iops
= [0] * ndrives
105 end_wr_bytes
= [0] * ndrives
106 end_wr_iops
= [0] * ndrives
108 # Read the stats before advancing the clock
109 for i
in range(0, ndrives
):
110 idx
= first_drive
+ i
111 start_rd_bytes
[i
], start_rd_iops
[i
], start_wr_bytes
[i
], \
112 start_wr_iops
[i
] = self
.blockstats('drive%d' % idx
)
114 self
.vm
.qtest("clock_step %d" % ns
)
116 # Read the stats after advancing the clock
117 for i
in range(0, ndrives
):
118 idx
= first_drive
+ i
119 end_rd_bytes
[i
], end_rd_iops
[i
], end_wr_bytes
[i
], \
120 end_wr_iops
[i
] = self
.blockstats('drive%d' % idx
)
122 # Check that the I/O is within the limits and evenly distributed
123 for i
in range(0, ndrives
):
124 rd_bytes
= end_rd_bytes
[i
] - start_rd_bytes
[i
]
125 rd_iops
= end_rd_iops
[i
] - start_rd_iops
[i
]
126 wr_bytes
= end_wr_bytes
[i
] - start_wr_bytes
[i
]
127 wr_iops
= end_wr_iops
[i
] - start_wr_iops
[i
]
129 self
.assertTrue(check_limit(params
['bps'], rd_bytes
+ wr_bytes
))
130 self
.assertTrue(check_limit(params
['bps_rd'], rd_bytes
))
131 self
.assertTrue(check_limit(params
['bps_wr'], wr_bytes
))
132 self
.assertTrue(check_limit(params
['iops'], rd_iops
+ wr_iops
))
133 self
.assertTrue(check_limit(params
['iops_rd'], rd_iops
))
134 self
.assertTrue(check_limit(params
['iops_wr'], wr_iops
))
136 # Allow remaining requests to finish. We submitted twice as many to
137 # ensure the throttle limit is reached.
138 self
.vm
.qtest("clock_step %d" % ns
)
140 # Connect N drives to a VM and test I/O in all of them
142 params
= {"bps": 4096,
149 # Repeat the test with different numbers of drives
150 for ndrives
in range(1, self
.max_drives
+ 1):
151 # Pick each out of all possible params and test
153 limits
= dict([(k
, 0) for k
in params
])
154 limits
[tk
] = params
[tk
] * ndrives
155 self
.configure_throttle(ndrives
, limits
)
156 self
.do_test_throttle(ndrives
, 5, limits
)
158 # Connect N drives to a VM and test I/O in just one of them a time
160 params
= {"bps": 4096,
167 # Repeat the test for each one of the drives
168 for drive
in range(0, self
.max_drives
):
169 # Pick each out of all possible params and test
171 limits
= dict([(k
, 0) for k
in params
])
172 limits
[tk
] = params
[tk
] * self
.max_drives
173 self
.configure_throttle(self
.max_drives
, limits
)
174 self
.do_test_throttle(1, 5, limits
, drive
)
176 def test_burst(self
):
177 params
= {"bps": 4096,
185 # Pick each out of all possible params and test
187 rate
= params
[tk
] * ndrives
188 burst_rate
= rate
* 7
191 # Configure the throttling settings
192 settings
= dict([(k
, 0) for k
in params
])
194 settings
['%s_max' % tk
] = burst_rate
195 settings
['%s_max_length' % tk
] = burst_length
196 self
.configure_throttle(ndrives
, settings
)
198 # Wait for the bucket to empty so we can do bursts
199 wait_ns
= nsec_per_sec
* burst_length
* burst_rate
// rate
200 self
.vm
.qtest("clock_step %d" % wait_ns
)
202 # Test I/O at the max burst rate
203 limits
= dict([(k
, 0) for k
in params
])
204 limits
[tk
] = burst_rate
205 self
.do_test_throttle(ndrives
, burst_length
, limits
)
207 # Now test I/O at the normal rate
209 self
.do_test_throttle(ndrives
, 5, limits
)
211 # Test that removing a drive from a throttle group should not
212 # affect the remaining members of the group.
213 # https://bugzilla.redhat.com/show_bug.cgi?id=1535914
214 def test_remove_group_member(self
):
215 # Create a throttle group with two drives
216 # and set a 4 KB/s read limit.
223 self
.configure_throttle(2, params
)
225 # Read 4KB from drive0. This is performed immediately.
226 self
.vm
.hmp_qemu_io("drive0", "aio_read 0 4096")
228 # Read 2KB. The I/O limit has been exceeded so this
229 # request is throttled and a timer is set to wake it up.
230 self
.vm
.hmp_qemu_io("drive0", "aio_read 0 2048")
232 # Read 2KB again. We're still over the I/O limit so this is
233 # request is also throttled, but no new timer is set since
234 # there's already one.
235 self
.vm
.hmp_qemu_io("drive0", "aio_read 0 2048")
237 # Read from drive1. This request is also throttled, and no
238 # timer is set in drive1 because there's already one in
240 self
.vm
.hmp_qemu_io("drive1", "aio_read 0 4096")
242 # At this point only the first 4KB have been read from drive0.
243 # The other requests are throttled.
244 self
.assertEqual(self
.blockstats('drive0')[0], 4096)
245 self
.assertEqual(self
.blockstats('drive1')[0], 0)
247 # Remove drive0 from the throttle group and disable its I/O limits.
248 # drive1 remains in the group with a throttled request.
250 params
['device'] = 'drive0'
251 result
= self
.vm
.qmp("block_set_io_throttle", conv_keys
=False, **params
)
252 self
.assert_qmp(result
, 'return', {})
254 # Removing the I/O limits from drive0 drains its two pending requests.
255 # The read request in drive1 is still throttled.
256 self
.assertEqual(self
.blockstats('drive0')[0], 8192)
257 self
.assertEqual(self
.blockstats('drive1')[0], 0)
259 # Advance the clock 5 seconds. This completes the request in drive1
260 self
.vm
.qtest("clock_step %d" % (5 * nsec_per_sec
))
262 # Now all requests have been processed.
263 self
.assertEqual(self
.blockstats('drive0')[0], 8192)
264 self
.assertEqual(self
.blockstats('drive1')[0], 4096)
class ThrottleTestCoroutine(ThrottleTestCase):
    # Re-run every ThrottleTestCase test against the coroutine-based null
    # block driver instead of the AIO-based one.
    test_img = "null-co://"
269 class ThrottleTestGroupNames(iotests
.QMPTestCase
):
270 test_img
= "null-aio://"
274 self
.vm
= iotests
.VM()
275 for i
in range(0, self
.max_drives
):
276 self
.vm
.add_drive(self
.test_img
,
277 "throttling.iops-total=100,file.read-zeroes=on")
283 def set_io_throttle(self
, device
, params
):
284 params
["device"] = device
285 result
= self
.vm
.qmp("block_set_io_throttle", conv_keys
=False, **params
)
286 self
.assert_qmp(result
, 'return', {})
288 def verify_name(self
, device
, name
):
289 result
= self
.vm
.qmp("query-block")
290 for r
in result
["return"]:
291 if r
["device"] == device
:
294 self
.assertEqual(info
["group"], name
)
296 self
.assertFalse('group' in info
)
299 raise Exception("No group information found for '%s'" % device
)
301 def test_group_naming(self
):
309 # Check the drives added using the command line.
310 # The default throttling group name is the device name.
311 for i
in range(self
.max_drives
):
312 devname
= "drive%d" % i
313 self
.verify_name(devname
, devname
)
315 # Clear throttling settings => the group name is gone.
316 for i
in range(self
.max_drives
):
317 devname
= "drive%d" % i
318 self
.set_io_throttle(devname
, params
)
319 self
.verify_name(devname
, None)
321 # Set throttling settings using block_set_io_throttle and
322 # check the default group names.
324 for i
in range(self
.max_drives
):
325 devname
= "drive%d" % i
326 self
.set_io_throttle(devname
, params
)
327 self
.verify_name(devname
, devname
)
329 # Set a custom group name for each device
331 devname
= "drive%d" % i
332 groupname
= "group%d" % i
333 params
['group'] = groupname
334 self
.set_io_throttle(devname
, params
)
335 self
.verify_name(devname
, groupname
)
337 # Put drive0 in group1 and check that all other devices remain
339 params
['group'] = 'group1'
340 self
.set_io_throttle('drive0', params
)
341 self
.verify_name('drive0', 'group1')
342 for i
in range(1, self
.max_drives
):
343 devname
= "drive%d" % i
344 groupname
= "group%d" % i
345 self
.verify_name(devname
, groupname
)
347 # Put drive0 in group2 and check that all other devices remain
349 params
['group'] = 'group2'
350 self
.set_io_throttle('drive0', params
)
351 self
.verify_name('drive0', 'group2')
352 for i
in range(1, self
.max_drives
):
353 devname
= "drive%d" % i
354 groupname
= "group%d" % i
355 self
.verify_name(devname
, groupname
)
357 # Clear throttling settings from drive0 check that all other
358 # devices remain unchanged
360 self
.set_io_throttle('drive0', params
)
361 self
.verify_name('drive0', None)
362 for i
in range(1, self
.max_drives
):
363 devname
= "drive%d" % i
364 groupname
= "group%d" % i
365 self
.verify_name(devname
, groupname
)
367 class ThrottleTestRemovableMedia(iotests
.QMPTestCase
):
369 self
.vm
= iotests
.VM()
370 self
.vm
.add_device("{},id=virtio-scsi".format(
371 iotests
.get_virtio_scsi_device()))
377 def test_removable_media(self
):
378 # Add a couple of dummy nodes named cd0 and cd1
379 result
= self
.vm
.qmp("blockdev-add", driver
="null-aio",
380 read_zeroes
=True, node_name
="cd0")
381 self
.assert_qmp(result
, 'return', {})
382 result
= self
.vm
.qmp("blockdev-add", driver
="null-aio",
383 read_zeroes
=True, node_name
="cd1")
384 self
.assert_qmp(result
, 'return', {})
386 # Attach a CD drive with cd0 inserted
387 result
= self
.vm
.qmp("device_add", driver
="scsi-cd",
388 id="dev0", drive
="cd0")
389 self
.assert_qmp(result
, 'return', {})
392 args
= { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
393 "bps": 50, "bps_rd": 0, "bps_wr": 0 }
394 result
= self
.vm
.qmp("block_set_io_throttle", conv_keys
=False, **args
)
395 self
.assert_qmp(result
, 'return', {})
397 # Check that the I/O limits have been set
398 result
= self
.vm
.qmp("query-block")
399 self
.assert_qmp(result
, 'return[0]/inserted/iops', 100)
400 self
.assert_qmp(result
, 'return[0]/inserted/bps', 50)
402 # Now eject cd0 and insert cd1
403 result
= self
.vm
.qmp("blockdev-open-tray", id='dev0')
404 self
.assert_qmp(result
, 'return', {})
405 result
= self
.vm
.qmp("blockdev-remove-medium", id='dev0')
406 self
.assert_qmp(result
, 'return', {})
407 result
= self
.vm
.qmp("blockdev-insert-medium", id='dev0', node_name
='cd1')
408 self
.assert_qmp(result
, 'return', {})
410 # Check that the I/O limits are still the same
411 result
= self
.vm
.qmp("query-block")
412 self
.assert_qmp(result
, 'return[0]/inserted/iops', 100)
413 self
.assert_qmp(result
, 'return[0]/inserted/bps', 50)
416 result
= self
.vm
.qmp("blockdev-remove-medium", id='dev0')
417 self
.assert_qmp(result
, 'return', {})
419 # Check that we can't set limits if the device has no medium
420 result
= self
.vm
.qmp("block_set_io_throttle", conv_keys
=False, **args
)
421 self
.assert_qmp(result
, 'error/class', 'GenericError')
423 # Remove the CD drive
424 result
= self
.vm
.qmp("device_del", id='dev0')
425 self
.assert_qmp(result
, 'return', {})
if __name__ == '__main__':
    # Run under the qemu-iotests harness; only the raw format is supported
    # since the drives used here are null block devices.
    iotests.main(supported_fmts=["raw"])