build/android/pylib/perf/test_runner.py

# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
5 """Runs perf tests.
7 Our buildbot infrastructure requires each slave to run steps serially.
8 This is sub-optimal for android, where these steps can run independently on
9 multiple connected devices.
11 The buildbots will run this script multiple times per cycle:
12 - First: all steps listed in --steps in will be executed in parallel using all
13 connected devices. Step results will be pickled to disk. Each step has a unique
14 name. The result code will be ignored if the step name is listed in
15 --flaky-steps.
16 The buildbot will treat this step as a regular step, and will not process any
17 graph data.
19 - Then, with -print-step STEP_NAME: at this stage, we'll simply print the file
20 with the step results previously saved. The buildbot will then process the graph
21 data accordingly.
23 The JSON steps file contains a dictionary in the format:
24 { "version": int,
25 "steps": {
26 "foo": {
27 "device_affinity": int,
28 "cmd": "script_to_execute foo"
30 "bar": {
31 "device_affinity": int,
32 "cmd": "script_to_execute bar"
37 The JSON flaky steps file contains a list with step names which results should
38 be ignored:
40 "step_name_foo",
41 "step_name_bar"
44 Note that script_to_execute necessarily have to take at least the following
45 option:
46 --device: the serial number to be passed to all adb commands.
47 """

import collections
import json
import logging
import os
import pickle
import shutil
import sys
import tempfile
import threading
import time

from devil.android import battery_utils
from devil.android import device_errors
from devil.utils import cmd_helper
from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import base_test_runner


def GetPersistedResult(test_name):
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return None

  with open(file_name, 'r') as f:
    return pickle.loads(f.read())


def OutputJsonList(json_input, json_output):
  with open(json_input, 'r') as i:
    all_steps = json.load(i)

  step_values = []
  for k, v in all_steps['steps'].iteritems():
    data = {'test': k, 'device_affinity': v['device_affinity']}

    persisted_result = GetPersistedResult(k)
    if persisted_result:
      data['start_time'] = persisted_result['start_time']
      data['end_time'] = persisted_result['end_time']
      data['total_time'] = persisted_result['total_time']
    step_values.append(data)

  with open(json_output, 'w') as o:
    o.write(json.dumps(step_values))
  return 0


def PrintTestOutput(test_name, json_file_name=None):
  """Helper method to print the output of previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.
    json_file_name: name of the file to output chartjson data to.

  Returns:
    exit code generated by the test step.
  """
  persisted_result = GetPersistedResult(test_name)
  if not persisted_result:
    return 1
  logging.info('*' * 80)
  logging.info('Output from:')
  logging.info(persisted_result['cmd'])
  logging.info('*' * 80)

  output_formatted = ''
  persisted_outputs = persisted_result['output']
  for i in xrange(len(persisted_outputs)):
    output_formatted += '\n\nOutput from run #%d:\n\n%s' % (
        i, persisted_outputs[i])
  print output_formatted

  if json_file_name:
    with open(json_file_name, 'w') as f:
      f.write(persisted_result['chartjson'])

  return persisted_result['exit_code']


def PrintSummary(test_names):
  logging.info('*' * 80)
  logging.info('Sharding summary')
  device_total_time = collections.defaultdict(int)
  for test_name in test_names:
    file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
    if not os.path.exists(file_name):
      logging.info('%s : No status file found', test_name)
      continue
    with open(file_name, 'r') as f:
      result = pickle.loads(f.read())
    logging.info('%s : exit_code=%d in %d secs at %s',
                 result['name'], result['exit_code'], result['total_time'],
                 result['device'])
    device_total_time[result['device']] += result['total_time']
  for device, device_time in device_total_time.iteritems():
    logging.info('Total for device %s : %d secs', device, device_time)
  logging.info('Total steps time: %d secs', sum(device_total_time.values()))
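

# A note on the class below: its only file-protocol methods are write() and
# flush(), which is the duck-typed subset the logfile argument of
# cmd_helper.GetCmdStatusAndOutputWithTimeout is expected to exercise in
# _LaunchPerfTest, so it can stand in for sys.stdout while a long-running
# step produces no output of its own.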
class _HeartBeatLogger(object):
  # How often to print the heartbeat on flush().
  _PRINT_INTERVAL = 30.0

  def __init__(self):
    """A file-like class for keeping the buildbot alive."""
    self._len = 0
    self._tick = time.time()
    self._stopped = threading.Event()
    self._timer = threading.Thread(target=self._runner)
    self._timer.start()

  def _runner(self):
    while not self._stopped.is_set():
      self.flush()
      self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)

  def write(self, data):
    self._len += len(data)

  def flush(self):
    now = time.time()
    if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
      self._tick = now
      print '--single-step output length %d' % self._len
      sys.stdout.flush()

  def stop(self):
    self._stopped.set()


class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, shard_index, max_shard, tests,
               flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Device to run the tests.
      shard_index: the index of this device.
      max_shard: the total number of shards.
      tests: a dict mapping test_name to command.
      flaky_tests: a list of flaky test names.
    """
    super(TestRunner, self).__init__(device, None)
    self._options = test_options
    self._shard_index = shard_index
    self._max_shard = max_shard
    self._tests = tests
    self._flaky_tests = flaky_tests
    self._output_dir = None
    self._device_battery = battery_utils.BatteryUtils(self.device)

  @staticmethod
  def _SaveResult(result):
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
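    # If the step has run before (e.g. on a retry), append the new output to
    # the previously persisted outputs so PrintTestOutput can report every
    # attempt.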
    if os.path.exists(pickled):
      with open(pickled, 'r') as f:
        previous = pickle.loads(f.read())
        result['output'] = previous['output'] + result['output']

    with open(pickled, 'w') as f:
      f.write(pickle.dumps(result))

  def _CheckDeviceAffinity(self, test_name):
    """Returns True if test_name has affinity for this shard."""
    affinity = (self._tests['steps'][test_name]['device_affinity'] %
                self._max_shard)
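    # For example, with max_shard=4 a step whose device_affinity is 6 is run
    # only by the shard with index 2 (6 % 4).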
    if self._shard_index == affinity:
      return True
    logging.info('Skipping %s on %s (affinity is %s, shard is %s)',
                 test_name, self.device_serial, affinity, self._shard_index)
    return False

  def _CleanupOutputDirectory(self):
    if self._output_dir:
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None

  def _ReadChartjsonOutput(self):
    if not self._output_dir:
      return ''

    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
    try:
      with open(json_output_path) as f:
        return f.read()
    except IOError:
      logging.exception('Exception when reading chartjson.')
      logging.error('This usually means that telemetry did not run, so it could'
                    ' not generate the file. Please check the device running'
                    ' the test.')
      return ''

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: the name of the test to be executed.

    Returns:
      A tuple containing (output, base_test_result.ResultType).
    """
    if not self._CheckDeviceAffinity(test_name):
      return '', base_test_result.ResultType.PASS

    try:
      logging.warning('Unmapping device ports')
      forwarder.Forwarder.UnmapAllDevicePorts(self.device)
      self.device.RestartAdbd()
    except Exception as e:  # pylint: disable=broad-except
      logging.error('Exception when tearing down device %s', e)

    cmd = ('%s --device %s' %
           (self._tests['steps'][test_name]['cmd'],
            self.device_serial))

    if self._options.collect_chartjson_data:
      self._output_dir = tempfile.mkdtemp()
      cmd = cmd + ' --output-dir=%s' % self._output_dir
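
    # Battery temperature is reported in tenths of a degree Celsius, hence
    # the (0.1 C) unit in the log line below.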
    logging.info(
        'temperature: %s (0.1 C)',
        str(self._device_battery.GetBatteryInfo().get('temperature')))
    if self._options.max_battery_temp:
      self._device_battery.LetBatteryCoolToTemperature(
          self._options.max_battery_temp)

    logging.info('Charge level: %s%%',
                 str(self._device_battery.GetBatteryInfo().get('level')))
    if self._options.min_battery_level:
      self._device_battery.ChargeDeviceToLevel(
          self._options.min_battery_level)

    logging.info('%s : %s', test_name, cmd)
    start_time = time.time()

    timeout = self._tests['steps'][test_name].get('timeout', 3600)
    if self._options.no_timeout:
      timeout = None
    logging.info('Timeout for %s test: %s', test_name, timeout)
    full_cmd = cmd
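    # In dry-run mode, prefix the command with echo so that the step is
    # logged but never actually executed.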
    if self._options.dry_run:
      full_cmd = 'echo %s' % cmd

    logfile = sys.stdout
    if self._options.single_step:
      # Just print a heart-beat so that the outer buildbot scripts won't
      # time out waiting for output.
      logfile = _HeartBeatLogger()
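    # Commands given relative to src/ must run from the parent of the source
    # root for their paths to resolve.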
    cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
    if full_cmd.startswith('src/'):
      cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
    try:
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
      json_output = self._ReadChartjsonOutput()
    except cmd_helper.TimeoutError as e:
      exit_code = -1
      output = e.output
      json_output = ''
    finally:
      self._CleanupOutputDirectory()
      if self._options.single_step:
        logfile.stop()
      end_time = time.time()
      if exit_code is None:
        exit_code = -1
      logging.info('%s : exit_code=%d in %d secs at %s',
                   test_name, exit_code, end_time - start_time,
                   self.device_serial)

    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL
      # Since perf tests use device affinity, give the device a chance to
      # recover if it is offline after a failure. Otherwise, the master
      # sharder will remove it from the pool and future tests on this device
      # will fail.
      try:
        self.device.WaitUntilFullyBooted(timeout=120)
      except device_errors.CommandTimeoutError as e:
        logging.error('Device failed to return after %s: %s', test_name, e)

    actual_exit_code = exit_code
    if test_name in self._flaky_tests:
      # The exit_code is used at the second stage, when printing the test
      # output. If the test is flaky, force it to 0 to keep that step green
      # while still gathering data for the perf dashboards.
      # The result_type is used by the test_dispatcher to retry the test.
      exit_code = 0

    persisted_result = {
        'name': test_name,
        'output': [output],
        'chartjson': json_output,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'device': self.device_serial,
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Run a perf test on the device.

    Args:
      test_name: String to use for logging the test result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
    _, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry