# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Utility script to launch browser-tests on the Chromoting bot."""

import argparse

from chromoting_test_utilities import InitialiseTestMachineForLinux
from chromoting_test_utilities import PrintHostLogContents
from chromoting_test_utilities import PROD_DIR_ID
from chromoting_test_utilities import RunCommandInSubProcess
from chromoting_test_utilities import TestCaseSetup
from chromoting_test_utilities import TestMachineCleanup

SUCCESS_INDICATOR = 'SUCCESS: all tests passed.'
BROWSER_NOT_STARTED_ERROR = (
    'Still waiting for the following processes to finish')
TIME_OUT_INDICATOR = '(TIMED OUT)'
# Retry limit for tests whose browser instance failed to start. The original
# value is not present in this snippet; 1 retry is assumed here.
MAX_RETRIES = 1

# Module-level state updated by LaunchBTCommand().
TEST_FAILURE = False
FAILING_TESTS = ''


def LaunchBTCommand(args, command):
  """Launches the specified browser-test command.

  If the execution failed because a browser-instance was not launched, retry.

  Args:
    args: Command line args, used for test-case startup tasks.
    command: Browser-test command line.
  """
  global TEST_FAILURE, FAILING_TESTS

  retries = 0
  while retries <= MAX_RETRIES:
    # Per-attempt test-case setup (call site inferred from the import and the
    # docstring above).
    TestCaseSetup(args)
    results = RunCommandInSubProcess(command)

    if SUCCESS_INDICATOR in results:
      break

    # Sometimes, during execution of browser-tests, a browser instance is
    # not started and the test times out. See http://crbug/480025.
    # To work around it, check if this execution failed owing to that
    # problem and retry.
    # There are 2 things to look for in the results:
    # A line saying "Still waiting for the following processes to finish",
    # and, because sometimes that line gets logged even if the test
    # eventually passes, we'll also look for "(TIMED OUT)", before retrying.
    if not (BROWSER_NOT_STARTED_ERROR in results and
            TIME_OUT_INDICATOR in results):
      # Test failed for some other reason. Let's not retry.
      break
    retries += 1

  # Check that the test passed.
  if SUCCESS_INDICATOR not in results:
    TEST_FAILURE = True
    # Add this command-line to list of tests that failed.
    FAILING_TESTS += command


def main(args):
  InitialiseTestMachineForLinux(args.cfg_file)

  with open(args.commands_file) as f:
    for line in f:
      # Replace the PROD_DIR value in the command-line with
      # the passed in value.
      line = line.replace(PROD_DIR_ID, args.prod_dir)
      # Launch specified command line for test.
      LaunchBTCommand(args, line)

  # All tests completed. Include host-logs in the test results.
  PrintHostLogContents()

  if TEST_FAILURE:
    print '++++++++++AT LEAST 1 TEST FAILED++++++++++'
    print FAILING_TESTS.rstrip('\n')
    print '++++++++++++++++++++++++++++++++++++++++++'
    raise Exception('At least one test failed.')

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('-f', '--commands_file',
                      help='path to file listing commands to be launched.')
  parser.add_argument('-p', '--prod_dir',
                      help='path to folder having product and test binaries.')
  parser.add_argument('-c', '--cfg_file',
                      help='path to test host config file.')
  parser.add_argument('--me2me_manifest_file',
                      help='path to me2me host manifest file.')
  parser.add_argument('--it2me_manifest_file',
                      help='path to it2me host manifest file.')
  parser.add_argument(
      '-u', '--user_profile_dir',
      help='path to user-profile-dir, used by connect-to-host tests.')
  command_line_args = parser.parse_args()
  try:
    main(command_line_args)
  finally:
    # Run cleanup even if main() raised because a test failed.
    # Stop host and cleanup user-profile-dir.
    TestMachineCleanup(command_line_args.user_profile_dir)
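
# Example invocation (a sketch, not part of the original script): the flag
# names come from the argparse definitions above, but the script name and all
# paths below are hypothetical placeholders.
#
#   python browser_tests_launcher.py \
#       -f /path/to/browser_test_commands.txt \
#       -p /path/to/out/Release \
#       -c /path/to/host_config.json \
#       --me2me_manifest_file /path/to/me2me_manifest.json \
#       --it2me_manifest_file /path/to/it2me_manifest.json \
#       -u /tmp/chromoting_test_profile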