1 # Copyright 2013 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
7 Our buildbot infrastructure requires each slave to run steps serially.
8 This is sub-optimal for android, where these steps can run independently on
9 multiple connected devices.
11 The buildbots will run this script multiple times per cycle:
12 - First: all steps listed in --steps will be executed in parallel using all
13 connected devices. Step results will be pickled to disk. Each step has a unique
14 name. The result code will be ignored if the step name is listed in
16 The buildbot will treat this step as a regular step, and will not process any
19 - Then, with --print-step STEP_NAME: at this stage, we'll simply print the file
20 with the step results previously saved. The buildbot will then process the graph
23 The JSON steps file contains a dictionary in the format:
27 "device_affinity": int,
28 "cmd": "script_to_execute foo"
31 "device_affinity": int,
32 "cmd": "script_to_execute bar"
37 The JSON flaky steps file contains a list with step names which results should
44 Note that script_to_execute necessarily has to take at least the following
46 --device: the serial number to be passed to all adb commands.
60 from devil
.android
import battery_utils
61 from devil
.android
import device_errors
62 from devil
.utils
import cmd_helper
63 from pylib
import constants
64 from pylib
import forwarder
65 from pylib
.base
import base_test_result
66 from pylib
.base
import base_test_runner
def GetPersistedResult(test_name):
  """Loads the pickled result previously saved for a step.

  Args:
    test_name: name of the step whose result file to read.

  Returns:
    The unpickled result dict, or None if no result file exists.
  """
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    # Bail out: falling through would try to open the missing file and raise.
    return None

  with open(file_name, 'r') as f:
    # pickle on a trusted local file written by this script's _SaveResult.
    return pickle.loads(f.read())
def OutputJsonList(json_input, json_output):
  """Writes a JSON summary list of all steps and their timings.

  Args:
    json_input: path to the JSON steps file ({'steps': {name: {...}}}).
    json_output: path the summary list is written to.

  Returns:
    0, so the caller can use the value as the step exit code.
  """
  with open(json_input, 'r') as i:
    all_steps = json.load(i)

  step_values = []
  for k, v in all_steps['steps'].items():
    data = {'test': k, 'device_affinity': v['device_affinity']}

    persisted_result = GetPersistedResult(k)
    # GetPersistedResult returns None when the step has not run yet; only
    # attach timing data when a result was actually persisted.
    if persisted_result:
      data['start_time'] = persisted_result['start_time']
      data['end_time'] = persisted_result['end_time']
      data['total_time'] = persisted_result['total_time']
    step_values.append(data)

  with open(json_output, 'w') as o:
    o.write(json.dumps(step_values))
  return 0
def PrintTestOutput(test_name, json_file_name=None):
  """Helper method to print the output of previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.
    json_file_name: name of the file to output chartjson data to.

  Returns:
    exit code generated by the test step.
  """
  persisted_result = GetPersistedResult(test_name)
  if not persisted_result:
    # No result was persisted for this step: there is nothing to print and
    # no recorded exit code, so fail the step explicitly.
    logging.error('No results for %s', test_name)
    return 1
  logging.info('*' * 80)
  logging.info('Output from:')
  logging.info(persisted_result['cmd'])
  logging.info('*' * 80)

  output_formatted = ''
  persisted_outputs = persisted_result['output']
  for i, run_output in enumerate(persisted_outputs):
    output_formatted += '\n\nOutput from run #%d:\n\n%s' % (i, run_output)
  print(output_formatted)

  # Only write chartjson data when a destination was requested.
  if json_file_name:
    with open(json_file_name, 'w') as f:
      f.write(persisted_result['chartjson'])

  return persisted_result['exit_code']
def PrintSummary(test_names):
  """Logs per-step results and per-device total run times.

  Args:
    test_names: iterable of step names whose pickled results to summarize.
  """
  logging.info('*' * 80)
  logging.info('Sharding summary')
  device_total_time = collections.defaultdict(int)
  for test_name in test_names:
    file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
    if not os.path.exists(file_name):
      logging.info('%s : No status file found', test_name)
      # Skip this step: unpickling a missing file would raise IOError.
      continue
    with open(file_name, 'r') as f:
      result = pickle.loads(f.read())
    # Fourth format arg: the device serial the step ran on (persisted results
    # carry a 'device' key) -- presumably what the '%s' at the end expects.
    logging.info('%s : exit_code=%d in %d secs at %s',
                 result['name'], result['exit_code'], result['total_time'],
                 result['device'])
    device_total_time[result['device']] += result['total_time']
  for device, device_time in device_total_time.items():
    logging.info('Total for device %s : %d secs', device, device_time)
  logging.info('Total steps time: %d secs', sum(device_total_time.values()))
151 class _HeartBeatLogger(object):
152 # How often to print the heartbeat on flush().
153 _PRINT_INTERVAL
= 30.0
156 """A file-like class for keeping the buildbot alive."""
158 self
._tick
= time
.time()
159 self
._stopped
= threading
.Event()
160 self
._timer
= threading
.Thread(target
=self
._runner
)
164 while not self
._stopped
.is_set():
166 self
._stopped
.wait(_HeartBeatLogger
._PRINT
_INTERVAL
)
168 def write(self
, data
):
169 self
._len
+= len(data
)
173 if now
- self
._tick
>= _HeartBeatLogger
._PRINT
_INTERVAL
:
175 print '--single-step output length %d' % self
._len
class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, shard_index, max_shard, tests,
               flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Device to run the tests.
      shard_index: the index of this device.
      max_shard: the maximum shard index.
      tests: a dict mapping test_name to command.
      flaky_tests: a list of flaky test_name.
    """
    super(TestRunner, self).__init__(device, None)
    self._options = test_options
    self._shard_index = shard_index
    self._max_shard = max_shard
    # Stored so _CheckDeviceAffinity/_LaunchPerfTest can look up step configs.
    self._tests = tests
    self._flaky_tests = flaky_tests
    # Temp dir for chartjson output; set per-run in _LaunchPerfTest.
    self._output_dir = None
    self._device_battery = battery_utils.BatteryUtils(self.device)
205 def _SaveResult(result
):
206 pickled
= os
.path
.join(constants
.PERF_OUTPUT_DIR
, result
['name'])
207 if os
.path
.exists(pickled
):
208 with
file(pickled
, 'r') as f
:
209 previous
= pickle
.loads(f
.read())
210 result
['output'] = previous
['output'] + result
['output']
212 with
file(pickled
, 'w') as f
:
213 f
.write(pickle
.dumps(result
))
215 def _CheckDeviceAffinity(self
, test_name
):
216 """Returns True if test_name has affinity for this shard."""
217 affinity
= (self
._tests
['steps'][test_name
]['device_affinity'] %
219 if self
._shard
_index
== affinity
:
221 logging
.info('Skipping %s on %s (affinity is %s, device is %s)',
222 test_name
, self
.device_serial
, affinity
, self
._shard
_index
)
225 def _CleanupOutputDirectory(self
):
227 shutil
.rmtree(self
._output
_dir
, ignore_errors
=True)
228 self
._output
_dir
= None
230 def _ReadChartjsonOutput(self
):
231 if not self
._output
_dir
:
234 json_output_path
= os
.path
.join(self
._output
_dir
, 'results-chart.json')
236 with
open(json_output_path
) as f
:
239 logging
.exception('Exception when reading chartjson.')
240 logging
.error('This usually means that telemetry did not run, so it could'
241 ' not generate the file. Please check the device running'
# NOTE(review): this method is an extraction-mangled fragment -- several
# original lines are missing (the docstring delimiters, both `try:` headers,
# the else/assignment branches for timeout/full_cmd/logfile, the TimeoutError
# handler body, and the head of the persisted_result dict). Code lines below
# are reproduced byte-for-byte; only comments are added.
245 def _LaunchPerfTest(self
, test_name
):
# Fragments of the method docstring:
249 test_name: the name of the test to be executed.
252 A tuple containing (Output, base_test_result.ResultType)
# Steps with affinity for another shard are skipped and reported as PASS.
254 if not self
._CheckDeviceAffinity
(test_name
):
255 return '', base_test_result
.ResultType
.PASS
# Best-effort device teardown before the run; errors are logged, not raised.
# (The `try:` header for this block is missing from the visible text.)
258 logging
.warning('Unmapping device ports')
259 forwarder
.Forwarder
.UnmapAllDevicePorts(self
.device
)
260 self
.device
.RestartAdbd()
261 except Exception as e
: # pylint: disable=broad-except
262 logging
.error('Exception when tearing down device %s', e
)
# Build the step command line; every step script accepts --device (per the
# module docstring).
264 cmd
= ('%s --device %s' %
265 (self
._tests
['steps'][test_name
]['cmd'],
# When chartjson collection is enabled, route output to a fresh temp dir;
# read back by _ReadChartjsonOutput, removed by _CleanupOutputDirectory.
268 if self
._options
.collect_chartjson_data
:
269 self
._output
_dir
= tempfile
.mkdtemp()
270 cmd
= cmd
+ ' --output-dir=%s' % self
._output
_dir
# Battery preconditions: optionally cool down and/or charge before running.
273 'temperature: %s (0.1 C)',
274 str(self
._device
_battery
.GetBatteryInfo().get('temperature')))
275 if self
._options
.max_battery_temp
:
276 self
._device
_battery
.LetBatteryCoolToTemperature(
277 self
._options
.max_battery_temp
)
279 logging
.info('Charge level: %s%%',
280 str(self
._device
_battery
.GetBatteryInfo().get('level')))
281 if self
._options
.min_battery_level
:
282 self
._device
_battery
.ChargeDeviceToLevel(
283 self
._options
.min_battery_level
)
285 logging
.info('%s : %s', test_name
, cmd
)
286 start_time
= time
.time()
# Per-step timeout, defaulting to one hour; presumably disabled (set to
# None) when --no-timeout is given -- the assignment line is missing, TODO
# confirm against the original file.
288 timeout
= self
._tests
['steps'][test_name
].get('timeout', 3600)
289 if self
._options
.no_timeout
:
291 logging
.info('Timeout for %s test: %s', test_name
, timeout
)
# Dry runs only echo the command instead of executing it.
293 if self
._options
.dry_run
:
294 full_cmd
= 'echo %s' % cmd
# In --single-step mode output is streamed through _HeartBeatLogger so the
# outer buildbot does not time out on a silent step.
297 if self
._options
.single_step
:
298 # Just print a heart-beat so that the outer buildbot scripts won't timeout
300 logfile
= _HeartBeatLogger()
# Commands starting with 'src/' run from the parent of DIR_SOURCE_ROOT.
301 cwd
= os
.path
.abspath(constants
.DIR_SOURCE_ROOT
)
302 if full_cmd
.startswith('src/'):
303 cwd
= os
.path
.abspath(os
.path
.join(constants
.DIR_SOURCE_ROOT
, os
.pardir
))
# Run the step (a `try:` header is missing); a TimeoutError handler follows
# but its body is not visible.
305 exit_code
, output
= cmd_helper
.GetCmdStatusAndOutputWithTimeout(
306 full_cmd
, timeout
, cwd
=cwd
, shell
=True, logfile
=logfile
)
307 json_output
= self
._ReadChartjsonOutput
()
308 except cmd_helper
.TimeoutError
as e
:
# Cleanup that appears to run regardless of outcome (likely a `finally:`).
313 self
._CleanupOutputDirectory
()
314 if self
._options
.single_step
:
316 end_time
= time
.time()
317 if exit_code
is None:
319 logging
.info('%s : exit_code=%d in %d secs at %s',
320 test_name
, exit_code
, end_time
- start_time
,
# Map the exit code to a ResultType; the `if`/`else` lines around these two
# assignments are not visible.
324 result_type
= base_test_result
.ResultType
.PASS
326 result_type
= base_test_result
.ResultType
.FAIL
327 # Since perf tests use device affinity, give the device a chance to
328 # recover if it is offline after a failure. Otherwise, the master sharder
329 # will remove it from the pool and future tests on this device will fail.
331 self
.device
.WaitUntilFullyBooted(timeout
=120)
332 except device_errors
.CommandTimeoutError
as e
:
333 logging
.error('Device failed to return after %s: %s', test_name
, e
)
# Flaky steps keep the real code in 'actual_exit_code' but report success.
335 actual_exit_code
= exit_code
336 if test_name
in self
._flaky
_tests
:
337 # The exit_code is used at the second stage when printing the
338 # test output. If the test is flaky, force to "0" to get that step green
339 # whilst still gathering data to the perf dashboards.
340 # The result_type is used by the test_dispatcher to retry the test.
# Tail of the persisted_result dict that _SaveResult pickles to disk (the
# head of the dict literal is missing from the visible text).
346 'chartjson': json_output
,
347 'exit_code': exit_code
,
348 'actual_exit_code': actual_exit_code
,
349 'result_type': result_type
,
350 'start_time': start_time
,
351 'end_time': end_time
,
352 'total_time': end_time
- start_time
,
353 'device': self
.device_serial
,
356 self
._SaveResult
(persisted_result
)
358 return (output
, result_type
)
360 def RunTest(self
, test_name
):
361 """Run a perf test on the device.
364 test_name: String to use for logging the test result.
367 A tuple of (TestRunResults, retry).
369 _
, result_type
= self
._LaunchPerfTest
(test_name
)
370 results
= base_test_result
.TestRunResults()
371 results
.AddResult(base_test_result
.BaseTestResult(test_name
, result_type
))
373 if not results
.DidRunPass():
375 return results
, retry