components/test/data/password_manager/automated_tests/run_tests.py
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
6 """Encapsulates running tests defined in tests.py.
8 Running this script requires passing --config-path with a path to a config file
9 of the following structure:
11 [data_files]
12 passwords_path=<path to a file with passwords>
13 [binaries]
14 chrome-path=<chrome binary path>
15 chromedriver-path=<chrome driver path>
16 [run_options]
17 # |tests_in_parallel| is optional, the default value is 1.
18 tests_in_parallel=<number of parallel tests>
19 # |tests_to_runs| field is optional, if it is absent all tests will be run.
20 tests_to_run=<test names to run, comma delimited>
21 # |test_cases_to_run| field is optional, if it is absent all test cases
22 # will be run.
23 test_cases_to_run=<test names to run, comma delimited>
24 [logging]
25 # |save-only-failures| is optional, the default is false.
26 save-only-failures=<Boolean parameter which enforces saving results of only
27 failed tests>
28 |permanent-log-file| is a file name of file, where test run results will be
29 saved in irder to compare them with a next run. Should be specified if it's
30 expected that mail will be send.
32 The script uses the Python's logging library to report the test results,
33 as well as debugging information. It emits three levels of logs (in
34 descending order of severity):
35 logging.INFO: Summary of the tests.
36 logging.DEBUG: Details about tests failures.
37 SCRIPT_DEBUG (see below): Debug info of this script.
38 You have to set up appropriate logging handlers to have the logs appear.
39 """

import ConfigParser
import Queue
import argparse
import logging
import multiprocessing
import os
import shutil
import stopit
import simplejson as json
import tempfile
import time

from threading import Thread

import tests


# Just below logging.DEBUG, use for this script's debug messages instead
# of logging.DEBUG, which is already used for detailed test debug messages.
SCRIPT_DEBUG = 9
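# A consumer of this script may optionally register a readable name for this
# custom level, e.g. logging.addLevelName(SCRIPT_DEBUG, "SCRIPT_DEBUG"), so
# that records emitted at this level are labelled in the output.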

class Config:
  """Test run configuration, read from the config INI file."""
  test_cases_to_run = tests.TEST_CASES
  save_only_fails = False
  tests_to_run = tests.all_tests.keys()
  max_tests_in_parallel = 1
  permanent_log_file = ""

  def __init__(self, config_path):
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    if config.has_option("run_options", "tests_in_parallel"):
      self.max_tests_in_parallel = config.getint(
          "run_options", "tests_in_parallel")

    self.chrome_path = config.get("binaries", "chrome-path")
    self.chromedriver_path = config.get("binaries", "chromedriver-path")
    self.passwords_path = config.get("data_files", "passwords_path")

    if config.has_option("run_options", "tests_to_run"):
      self.tests_to_run = config.get("run_options", "tests_to_run").split(",")

    if config.has_option("run_options", "test_cases_to_run"):
      self.test_cases_to_run = config.get(
          "run_options", "test_cases_to_run").split(",")
    if config.has_option("logging", "save-only-fails"):
      self.save_only_fails = config.getboolean("logging", "save-only-fails")

    if config.has_option("logging", "permanent-log-file"):
      self.permanent_log_file = config.get("logging", "permanent-log-file")

def LogResultsOfTestRun(config, results):
  """Logs |results| of a test run."""
  logger = logging.getLogger("run_tests")
  failed_tests = []
  failed_tests_num = 0
  for result in results:
    website, test_case, success, reason = result
    if not (config.save_only_fails and success):
      if not success:
        logger.debug("%s.%s failed with reason: %s",
                     website, test_case, reason)

    if not success:
      failed_tests.append("%s.%s" % (website, test_case))
      failed_tests_num += 1

  failed_tests = sorted(failed_tests)
  logger.info("%d failed test cases out of %d, failing test cases: %s",
              failed_tests_num, len(results), failed_tests)
  if not config.permanent_log_file:
    return
  diff_failed = []
  with open(config.permanent_log_file, "r+") as log:
    try:
      runs_data = json.load(log)
    except ValueError:
      runs_data = {}
    prev_run_failed = runs_data.get("previous_run", [])
    diff_failed = list(set(failed_tests) - set(prev_run_failed))
    log.seek(0)
    log.truncate()
    runs_data["previous_run"] = failed_tests
    runs_data["new_failures"] = diff_failed
    json.dump(runs_data, log)
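
# After a run, the permanent log file holds plain JSON with the two keys
# written above. An illustrative (made-up) example of its contents:
#
#   {"previous_run": ["example.com.PromptTest", "example.org.LoginTest"],
#    "new_failures": ["example.org.LoginTest"]}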

def RunTestCaseOnWebsite((website, test_case, config)):
  """Runs a |test_case| on a |website|. If the |test_case| fails, it is
  retried. If a run takes too long, it is stopped.
  """

  profile_path = tempfile.mkdtemp()
  # The tests can be flaky. This is why we try to rerun up to 3 times.
  attempts = 3
  result = ("", "", False, "")
  logger = logging.getLogger("run_tests")
  for _ in xrange(attempts):
    shutil.rmtree(path=profile_path, ignore_errors=True)
    logger.log(SCRIPT_DEBUG, "Run of test case %s of website %s started",
               test_case, website)
    try:
      with stopit.ThreadingTimeout(seconds=100) as timeout:
        logger.log(SCRIPT_DEBUG,
                   "Run test with parameters: %s %s %s %s %s %s",
                   config.chrome_path, config.chromedriver_path,
                   profile_path, config.passwords_path,
                   website, test_case)
        result = tests.RunTest(config.chrome_path, config.chromedriver_path,
                               profile_path, config.passwords_path,
                               website, test_case)[0]
      if timeout.state != timeout.EXECUTED:
        result = (website, test_case, False,
                  "Got %d as timeout state (see stopit.ThreadingTimeout for"
                  " the meaning of the number)" % timeout.state)
      _, _, success, _ = result
      if success:
        return result
    except Exception as e:
      result = (website, test_case, False, e)
  return result


def RunTests(config_path):
  """Runs automated tests.

  Runs the tests and returns the results through logging:
  On logging.INFO logging level, it returns the summary of how many tests
  passed and failed.
  On logging.DEBUG logging level, it returns the failure logs, if any.
  (On SCRIPT_DEBUG it returns diagnostics for this script.)

  Args:
    config_path: The path to the config INI file. See the top of the file
        for format description.
  """
  config = Config(config_path)
  logger = logging.getLogger("run_tests")
  logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(config.tests_to_run),
             config.tests_to_run)
  logger.log(SCRIPT_DEBUG, "%d test cases to run: %s",
             len(config.test_cases_to_run),
             config.test_cases_to_run)
  data = [(website, test_case, config)
          for website in config.tests_to_run
          for test_case in config.test_cases_to_run]
  number_of_processes = min([config.max_tests_in_parallel,
                             len(config.test_cases_to_run) *
                             len(config.tests_to_run)])
  p = multiprocessing.Pool(number_of_processes)
  results = p.map(RunTestCaseOnWebsite, data)
  p.close()
  p.join()
  LogResultsOfTestRun(config, results)
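
# A minimal sketch of driving RunTests() from another script, assuming a
# config file at a made-up path. As the module docstring notes, logging
# handlers must be attached for any output to appear:
#
#   logger = logging.getLogger("run_tests")
#   logger.setLevel(SCRIPT_DEBUG)
#   logger.addHandler(logging.StreamHandler())
#   RunTests("/tmp/run_tests.ini")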


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("config_path", metavar="CONFIG_PATH",
                      help="Path to the config.ini file.")
  args = parser.parse_args()
  RunTests(args.config_path)


if __name__ == "__main__":
  main()