1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
import functools
import logging

from devil.android import device_errors
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import test_collection
from pylib.base import test_run
14 def handle_shard_failures(f
):
15 """A decorator that handles device failures for per-device functions.
18 f: the function being decorated. The function must take at least one
19 argument, and that argument must be the device.
21 def wrapper(dev
, *args
, **kwargs
):
23 return f(dev
, *args
, **kwargs
)
24 except device_errors
.CommandFailedError
:
25 logging
.exception('Shard failed: %s(%s)', f
.__name
__, str(dev
))
26 except device_errors
.CommandTimeoutError
:
27 logging
.exception('Shard timed out: %s(%s)', f
.__name
__, str(dev
))
28 except device_errors
.DeviceUnreachableError
:
29 logging
.exception('Shard died: %s(%s)', f
.__name
__, str(dev
))
35 class LocalDeviceTestRun(test_run
.TestRun
):
37 def __init__(self
, env
, test_instance
):
38 super(LocalDeviceTestRun
, self
).__init
__(env
, test_instance
)
43 tests
= self
._GetTests
()
45 @handle_shard_failures
46 def run_tests_on_device(dev
, tests
, results
):
49 result
= self
._RunTest
(dev
, test
)
50 if isinstance(result
, base_test_result
.BaseTestResult
):
51 results
.AddResult(result
)
52 elif isinstance(result
, list):
53 results
.AddResults(result
)
56 'Unexpected result type: %s' % type(result
).__name
__)
58 if isinstance(tests
, test_collection
.TestCollection
):
62 if isinstance(tests
, test_collection
.TestCollection
):
63 tests
.test_completed()
64 logging
.info('Finished running tests on this device.')
67 results
= base_test_result
.TestRunResults()
69 while tries
< self
._env
.max_tries
and tests
:
70 logging
.info('STARTING TRY #%d/%d', tries
+ 1, self
._env
.max_tries
)
71 logging
.info('Will run %d tests on %d devices: %s',
72 len(tests
), len(self
._env
.devices
),
73 ', '.join(str(d
) for d
in self
._env
.devices
))
75 logging
.debug(' %s', t
)
77 try_results
= base_test_result
.TestRunResults()
78 if self
._ShouldShard
():
79 tc
= test_collection
.TestCollection(self
._CreateShards
(tests
))
80 self
._env
.parallel_devices
.pMap(
81 run_tests_on_device
, tc
, try_results
).pGet(None)
83 self
._env
.parallel_devices
.pMap(
84 run_tests_on_device
, tests
, try_results
).pGet(None)
86 for result
in try_results
.GetAll():
87 if result
.GetType() in (base_test_result
.ResultType
.PASS
,
88 base_test_result
.ResultType
.SKIP
):
89 results
.AddResult(result
)
91 all_fail_results
[result
.GetName()] = result
93 results_names
= set(r
.GetName() for r
in results
.GetAll())
94 tests
= [t
for t
in tests
if self
._GetTestName
(t
) not in results_names
]
96 logging
.info('FINISHED TRY #%d/%d', tries
, self
._env
.max_tries
)
98 logging
.info('%d failed tests remain.', len(tests
))
100 logging
.info('All tests completed.')
102 all_unknown_test_names
= set(self
._GetTestName
(t
) for t
in tests
)
103 all_failed_test_names
= set(all_fail_results
.iterkeys())
105 unknown_tests
= all_unknown_test_names
.difference(all_failed_test_names
)
106 failed_tests
= all_failed_test_names
.intersection(all_unknown_test_names
)
110 base_test_result
.BaseTestResult(
111 u
, base_test_result
.ResultType
.UNKNOWN
)
112 for u
in unknown_tests
)
114 results
.AddResults(all_fail_results
[f
] for f
in failed_tests
)
118 def GetTool(self
, device
):
119 if not str(device
) in self
._tools
:
120 self
._tools
[str(device
)] = valgrind_tools
.CreateTool(
121 self
._env
.tool
, device
)
122 return self
._tools
[str(device
)]
124 def _CreateShards(self
, tests
):
125 raise NotImplementedError
127 # pylint: disable=no-self-use
128 def _GetTestName(self
, test
):
132 raise NotImplementedError
134 def _RunTest(self
, device
, test
):
135 raise NotImplementedError
137 def _ShouldShard(self
):
138 raise NotImplementedError